-rwxr-xr-x  bootstrap-hc2vpp-perf.sh                                             | 117
-rw-r--r--  resources/libraries/python/honeycomb/HoneycombSetup.py               | 176
-rw-r--r--  resources/libraries/python/honeycomb/HoneycombUtil.py                |  15
-rw-r--r--  resources/libraries/python/honeycomb/Performance.py                  | 129
-rw-r--r--  resources/libraries/python/ssh.py                                    |  28
-rw-r--r--  resources/libraries/robot/honeycomb/honeycomb.robot                  |  86
-rw-r--r--  resources/libraries/robot/honeycomb/performance.robot                | 120
-rwxr-xr-x  resources/tools/scripts/download_hc_pkgs.sh                          |   6
-rwxr-xr-x  resources/tools/scripts/topo_installation.py                         |  78
-rw-r--r--  resources/tools/testbed-setup/playbooks/01-host-setup.yaml           |   2
-rwxr-xr-x  resources/traffic_scripts/honeycomb/read_vpp_version.py              | 360
-rw-r--r--  tests/vpp/perf/honeycomb/__init__.robot                              |  61
-rw-r--r--  tests/vpp/perf/honeycomb/localhostp1-tcp-netconf16t-crud-read.robot  |  87
-rw-r--r--  tests/vpp/perf/honeycomb/localhostp1-tcp-netconf1t-crud-read.robot   |  87
-rw-r--r--  tests/vpp/perf/honeycomb/localhostp1-tcp-netconf8t-crud-read.robot   |  97
-rw-r--r--  topologies/available/lf_testbed1.yaml                                |  10
-rw-r--r--  topologies/available/lf_testbed2.yaml                                |  10
-rw-r--r--  topologies/available/lf_testbed3.yaml                                |  10
18 files changed, 1430 insertions(+), 49 deletions(-)
diff --git a/bootstrap-hc2vpp-perf.sh b/bootstrap-hc2vpp-perf.sh
new file mode 100755
index 0000000000..81c80fd375
--- /dev/null
+++ b/bootstrap-hc2vpp-perf.sh
@@ -0,0 +1,117 @@
+#!/bin/bash
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -x
+
+STREAM=$1
+OS=$2
+ODL=$3
+
+# Space separated list of available testbeds, described by topology files
+TOPOLOGIES="topologies/available/lf_testbed1.yaml \
+ topologies/available/lf_testbed2.yaml \
+ topologies/available/lf_testbed3.yaml"
+
+SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+# Reservation dir
+RESERVATION_DIR="/tmp/reservation_dir"
+INSTALLATION_DIR="/tmp/install_dir"
+
+PYBOT_ARGS="-W 150 -L TRACE"
+
+ARCHIVE_ARTIFACTS=(log.html output.xml report.html honeycomb.log)
+
+WORKING_TOPOLOGY=""
+export PYTHONPATH=${SCRIPT_DIR}
+
+sudo apt-get -y update
+sudo apt-get -y install libpython2.7-dev python-virtualenv
+
+virtualenv --system-site-packages env
+. env/bin/activate
+
+echo pip install
+pip install -r requirements.txt
+
+# We iterate over available topologies and wait until we reserve topology
+while :; do
+ for TOPOLOGY in ${TOPOLOGIES};
+ do
+ python ${SCRIPT_DIR}/resources/tools/topo_reservation.py -t ${TOPOLOGY}
+ if [ $? -eq 0 ]; then
+ WORKING_TOPOLOGY=${TOPOLOGY}
+ echo "Reserved: ${WORKING_TOPOLOGY}"
+ break
+ fi
+ done
+
+ if [ ! -z "${WORKING_TOPOLOGY}" ]; then
+ # Exit the infinite while loop if we made a reservation
+ break
+ fi
+
+ # Wait ~3 minutes before the next try
+ SLEEP_TIME=$[ ( $RANDOM % 20 ) + 180 ]s
+ echo "Sleeping ${SLEEP_TIME}"
+ sleep ${SLEEP_TIME}
+done
+
+function cancel_all {
+ python ${SCRIPT_DIR}/resources/tools/topo_installation.py -c -d ${INSTALLATION_DIR} -t $1 -hc True
+ python ${SCRIPT_DIR}/resources/tools/topo_reservation.py -c -t $1
+}
+
+# On script exit we cancel the reservation and installation and delete all vpp
+# packages
+trap "cancel_all ${WORKING_TOPOLOGY}" EXIT
+
+# Download VPP and HC packages from the current branch
+echo Downloading packages...
+bash ${SCRIPT_DIR}/resources/tools/download_hc_pkgs.sh ${STREAM} 'ubuntu1604'
+
+if [ "${OS}" == "centos7" ]; then
+ VPP_PKGS=(*.rpm)
+else
+ VPP_PKGS=(*.deb)
+fi
+echo ${VPP_PKGS[@]}
+
+# Install packages
+python ${SCRIPT_DIR}/resources/tools/topo_installation.py -t ${WORKING_TOPOLOGY} \
+ -d ${INSTALLATION_DIR} \
+ -p ${VPP_PKGS[@]} \
+ -hc True
+if [ $? -eq 0 ]; then
+ echo "VPP Installed on hosts from: ${WORKING_TOPOLOGY}"
+else
+ echo "Failed to copy vpp deb files to DUTs"
+ exit 1
+fi
+
+# Run the full performance test suite and record its return status
+ pybot ${PYBOT_ARGS} \
+ -v TOPOLOGY_PATH:${WORKING_TOPOLOGY} \
+ -s "tests.perf.honeycomb" \
+ --variable install_dir:${INSTALLATION_DIR} \
+ tests/
+ RETURN_STATUS=$(echo $?)
+
+# Archive artifacts
+mkdir archive
+for i in ${ARCHIVE_ARTIFACTS[@]}; do
+ cp $( readlink -f ${i} | tr '\n' ' ' ) archive/
+done
+
+exit ${RETURN_STATUS}
diff --git a/resources/libraries/python/honeycomb/HoneycombSetup.py b/resources/libraries/python/honeycomb/HoneycombSetup.py
index 53130f405b..13b8b971b6 100644
--- a/resources/libraries/python/honeycomb/HoneycombSetup.py
+++ b/resources/libraries/python/honeycomb/HoneycombSetup.py
@@ -25,7 +25,6 @@ from resources.libraries.python.honeycomb.HoneycombUtil \
import HoneycombUtil as HcUtil
from resources.libraries.python.ssh import SSH
from resources.libraries.python.topology import NodeType
-from resources.libraries.python.DUTSetup import DUTSetup
class HoneycombSetup(object):
@@ -107,6 +106,34 @@ class HoneycombSetup(object):
format(errors))
@staticmethod
+ def restart_honeycomb_on_dut(node):
+ """Restart Honeycomb on specified DUT nodes.
+
+ This keyword restarts the Honeycomb service on specified DUTs. Use the
+ keyword "Check Honeycomb Startup State" to check if the Honeycomb is up
+ and running.
+
+ :param node: Node to restart Honeycomb on.
+ :type node: dict
+ :raises HoneycombError: If Honeycomb fails to start.
+ """
+
+ logger.console("\n(re)Starting Honeycomb service ...")
+
+ cmd = "sudo service honeycomb restart"
+
+ ssh = SSH()
+ ssh.connect(node)
+ (ret_code, _, _) = ssh.exec_command_sudo(cmd)
+ if int(ret_code) != 0:
+ raise HoneycombError('Node {0} failed to restart Honeycomb.'.
+ format(node['host']))
+ else:
+ logger.info(
+ "Honeycomb service restart is in progress on node {0}".format(
+ node['host']))
+
+ @staticmethod
def check_honeycomb_startup_state(*nodes):
"""Check state of Honeycomb service during startup on specified nodes.
@@ -130,8 +157,14 @@ class HoneycombSetup(object):
for node in nodes:
if node['type'] == NodeType.DUT:
HoneycombSetup.print_ports(node)
- status_code, _ = HTTPRequest.get(node, path,
- enable_logging=False)
+ try:
+ status_code, _ = HTTPRequest.get(node, path,
+ enable_logging=False)
+ except HTTPRequestError:
+ ssh = SSH()
+ ssh.connect(node)
+ ssh.exec_command("tail -n 100 /var/log/syslog")
+ raise
if status_code == HTTPCodes.OK:
logger.info("Honeycomb on node {0} is up and running".
format(node['host']))
@@ -479,7 +512,7 @@ class HoneycombSetup(object):
for feature in features:
cmd += " {0}".format(feature)
- ret_code, _, stderr = ssh.exec_command_sudo(cmd, timeout=120)
+ ret_code, _, _ = ssh.exec_command_sudo(cmd, timeout=120)
if int(ret_code) != 0:
raise HoneycombError("Feature install did not succeed.")
@@ -590,7 +623,140 @@ class HoneycombSetup(object):
ssh = SSH()
ssh.connect(node)
cmd = "service vpp stop"
- ret_code, _, _ = ssh.exec_command_sudo(cmd)
+ ret_code, _, _ = ssh.exec_command_sudo(cmd, timeout=80)
if int(ret_code) != 0:
raise RuntimeError("Could not stop VPP service on node {0}".format(
node['host']))
+
+
+class HoneycombStartupConfig(object):
+ """Generator for Honeycomb startup configuration.
+ """
+ def __init__(self):
+ """Initializer."""
+
+ self.template = """
+ #!/bin/sh -
+ STATUS=100
+
+ while [ $STATUS -eq 100 ]
+ do
+ {java_call} -jar $(dirname $0)/{jar_filename}
+ STATUS=$?
+ echo "Honeycomb exited with status: $STATUS"
+ if [ $STATUS -eq 100 ]
+ then
+ echo "Restarting..."
+ fi
+ done
+ """
+
+ self.java_call = "{scheduler} {affinity} java {jit_mode} {params}"
+
+ self.scheduler = ""
+ self.core_affinity = ""
+ self.jit_mode = ""
+ self.params = ""
+ self.numa = ""
+
+ self.config = ""
+ self.ssh = SSH()
+
+ def apply_config(self, node):
+ """Generate configuration file /opt/honeycomb/honeycomb on the specified
+ node.
+
+ :param node: Honeycomb node.
+ :type node: dict
+ """
+
+ self.ssh.connect(node)
+ _, filename, _ = self.ssh.exec_command("ls /opt/honeycomb | grep .jar")
+
+ java_call = self.java_call.format(scheduler=self.scheduler,
+ affinity=self.core_affinity,
+ jit_mode=self.jit_mode,
+ params=self.params)
+ self.config = self.template.format(java_call=java_call,
+ jar_filename=filename)
+
+ self.ssh.connect(node)
+ cmd = "echo '{config}' > /tmp/honeycomb " \
+ "&& chmod +x /tmp/honeycomb " \
+ "&& sudo mv -f /tmp/honeycomb /opt/honeycomb".format(
+ config=self.config)
+ self.ssh.exec_command(cmd)
+
+ def set_cpu_scheduler(self, scheduler="FIFO"):
+ """Use alternate CPU scheduler.
+
+ Note: OTHER scheduler doesn't load-balance over isolcpus.
+
+ :param scheduler: CPU scheduler to use.
+ :type scheduler: str
+ """
+
+ schedulers = {"FIFO": "-f 99", # First In, First Out
+ "RR": "-r 99", # Round Robin
+ "OTHER": "-o", # Ubuntu default
+ }
+ self.scheduler = "chrt {0}".format(schedulers[scheduler])
+
+ def set_cpu_core_affinity(self, low, high=None):
+ """Set core affinity for the honeycomb process and subprocesses.
+
+ :param low: Lowest core ID number.
+ :param high: Highest core ID number. Leave empty to use a single core.
+ :type low: int
+ :type high: int
+ """
+
+ self.core_affinity = "taskset -c {low}-{high}".format(
+ low=low, high=high if high else low)
+
+ def set_jit_compiler_mode(self, jit_mode):
+ """Set running mode for Java's JIT compiler.
+
+ :param jit_mode: Desired JIT mode.
+ :type jit_mode: str
+ """
+
+ modes = {"client": "-client", # Default
+ "server": "-server", # Higher performance but longer warmup
+ "classic": "-classic" # Disables JIT compiler
+ }
+
+ self.jit_mode = modes[jit_mode]
+
+ def set_memory_size(self, mem_min, mem_max=None):
+ """Set minimum and maximum memory use for the JVM.
+
+ :param mem_min: Minimum amount of memory (MB).
+ :param mem_max: Maximum amount of memory (MB). Default is 4 times
+ minimum value.
+ :type mem_min: int
+ :type mem_max: int
+ """
+
+ self.params += " -Xms{min}m -Xmx{max}m".format(
+ min=mem_min, max=mem_max if mem_max else mem_min*4)
+
+ def set_metaspace_size(self, mem_min, mem_max=None):
+ """Set minimum and maximum memory used for class metadata in the JVM.
+
+ :param mem_min: Minimum metaspace size (MB).
+ :param mem_max: Maximum metaspace size (MB). Default is 4 times
+ minimum value.
+ :type mem_min: int
+ :type mem_max: int
+ """
+
+ self.params += " -XX:MetaspaceSize={min}m " \
+ "-XX:MaxMetaspaceSize={max}m".format(
+ min=mem_min, max=mem_max if mem_max else mem_min*4)
+
+ def set_numa_optimization(self):
+ """Use optimization of memory use and garbage collection for NUMA
+ architectures."""
+
+ self.params += " -XX:+UseNUMA -XX:+UseParallelGC"
diff --git a/resources/libraries/python/honeycomb/HoneycombUtil.py b/resources/libraries/python/honeycomb/HoneycombUtil.py
index a718a242f2..24f81af7b3 100644
--- a/resources/libraries/python/honeycomb/HoneycombUtil.py
+++ b/resources/libraries/python/honeycomb/HoneycombUtil.py
@@ -399,16 +399,23 @@ class HoneycombUtil(object):
return HTTPRequest.delete(node, path)
@staticmethod
- def archive_honeycomb_log(node):
+ def archive_honeycomb_log(node, perf=False):
"""Copy honeycomb log file from DUT node to VIRL for archiving.
:param node: Honeycomb node.
+ :param perf: Alternate handling, for use with performance test topology.
:type node: dict
+ :type perf: bool
"""
ssh = SSH()
ssh.connect(node)
- cmd = "cp /var/log/honeycomb/honeycomb.log /scratch/"
-
- ssh.exec_command_sudo(cmd)
+ if not perf:
+ cmd = "cp /var/log/honeycomb/honeycomb.log /scratch/"
+ ssh.exec_command_sudo(cmd)
+ else:
+ ssh.scp(
+ ".",
+ "/var/log/honeycomb/honeycomb.log",
+ get=True)
diff --git a/resources/libraries/python/honeycomb/Performance.py b/resources/libraries/python/honeycomb/Performance.py
new file mode 100644
index 0000000000..1c6b0bc522
--- /dev/null
+++ b/resources/libraries/python/honeycomb/Performance.py
@@ -0,0 +1,129 @@
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Implementation of keywords for testing Honeycomb performance."""
+
+from resources.libraries.python.ssh import SSH
+from resources.libraries.python.constants import Constants as Const
+from resources.libraries.python.honeycomb.HoneycombUtil import HoneycombError
+
+
+class Performance(object):
+ """Keywords used in Honeycomb performance testing."""
+
+ def __init__(self):
+ """Initializer."""
+ pass
+
+ @staticmethod
+ def configure_netconf_threads(node, threads):
+ """Set Honeycomb's Netconf thread count in configuration.
+
+ :param node: Honeycomb node.
+ :param threads: Number of threads.
+ :type node: dict
+ :type threads: int
+ :raises HoneycombError: If the operation fails.
+ """
+
+ find = "netconf-netty-threads"
+ replace = '\\"netconf-netty-threads\\": {0},'.format(threads)
+
+ argument = '"/{0}/c\\ {1}"'.format(find, replace)
+ path = "{0}/config/honeycomb.json".format(Const.REMOTE_HC_DIR)
+ command = "sed -i {0} {1}".format(argument, path)
+
+ ssh = SSH()
+ ssh.connect(node)
+ (ret_code, _, stderr) = ssh.exec_command_sudo(command)
+ if ret_code != 0:
+ raise HoneycombError("Failed to modify configuration on "
+ "node {0}, {1}".format(node, stderr))
+
+ @staticmethod
+ def run_traffic_script_on_dut(node, script, cores, reserved=2,
+ *args, **kwargs):
+ """Copy traffic script over to the specified node and execute with
+ the provided arguments.
+
+ :param node: Node in topology.
+ :param script: Name of the script to execute.
+ :param cores: Number of processor cores to use.
+ :param reserved: Number of cores reserved for other tasks. Default is 2,
+ one for system tasks and one for VPP main thread.
+ :param args: Sequential arguments for the script.
+ :param kwargs: Named arguments for the script.
+ :type node: dict
+ :type script: str
+ :type cores: int
+ :type reserved: int
+ :type args: list
+ :type kwargs: dict
+ :returns: Line with the average requests per second from the script output.
+ :rtype: str
+ :raises HoneycombError: If the traffic script fails to execute.
+ """
+
+ path = "resources/traffic_scripts/honeycomb/{0}".format(script)
+
+ # Assemble arguments for traffic script
+ arguments = ""
+ for arg in args:
+ arguments += "{0} ".format(arg)
+
+ for key, value in kwargs.items():
+ arguments += "--{0} {1} ".format(key, value)
+
+ ssh = SSH()
+ ssh.connect(node)
+ ssh.scp(path, "/tmp")
+
+ # Use alternate scheduler, Ubuntu's default can't load-balance
+ # over isolcpus
+ scheduler = "chrt -f 99"
+ core_afi = "taskset -c {first}-{last}".format(
+ first=reserved, last=cores-1)
+
+ cmd = "{scheduler} {affinity} python /tmp/{script} {args}".format(
+ scheduler=scheduler,
+ affinity=core_afi,
+ script=script,
+ args=arguments)
+
+ ret_code, stdout, _ = ssh.exec_command_sudo(cmd, timeout=600)
+
+ ssh.exec_command("sudo pkill python ; rm /tmp/{0}".format(script))
+ if ret_code != 0:
+ raise HoneycombError("Traffic script failed to execute.")
+ for line in stdout.splitlines():
+ if "Avg. requests" in line:
+ return line
+
+ @staticmethod
+ def log_core_schedule(node, process):
+ """Determine which cores the process' threads are running on.
+
+ :param node: Honeycomb node.
+ :param process: Name of the process.
+ :type node: dict
+ :type process: str
+ """
+
+ # Get info on process and all of its children
+ cmd1 = """cat /proc/`pidof {0}`/task/*/stat""".format(process)
+
+ # Parse process ID, name and core index
+ cmd2 = """awk '{print $1" "$2" "$39}'"""
+
+ cmd = "{0} | {1}".format(cmd1, cmd2)
+
+ ssh = SSH()
+ ssh.connect(node)
+ ssh.exec_command(cmd)
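
To illustrate how these keywords combine, a hedged Python sketch (the node contents and core counts are assumptions; in the Robot suites below they come from the topology file and suite variables):

    # Illustrative only: values are assumptions, not taken from a real testbed.
    from resources.libraries.python.honeycomb.Performance import Performance

    node = {"host": "192.168.0.1", "port": 22,
            "username": "testuser", "password": "Csit1234"}

    # Set 8 Netconf threads in honeycomb.json (takes effect after HC restart).
    Performance.configure_netconf_threads(node, 8)

    # Run the Netconf read trial: 36 cores on the DUT, 2 reserved, 8 worker
    # threads sending 12500 requests each; returns the "Avg. requests/s" line.
    result = Performance.run_traffic_script_on_dut(
        node, "read_vpp_version.py", 36, 2,
        cycles=1, threads=8, requests=12500)
    print(result)

    # Log which cores the Honeycomb (java) threads ended up on.
    Performance.log_core_schedule(node, "java")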
diff --git a/resources/libraries/python/ssh.py b/resources/libraries/python/ssh.py
index db39a0701c..ca6d6556a1 100644
--- a/resources/libraries/python/ssh.py
+++ b/resources/libraries/python/ssh.py
@@ -307,17 +307,35 @@ class SSH(object):
"""
chan.close()
- def scp(self, local_path, remote_path):
- """Copy files from local_path to remote_path.
+ def scp(self, local_path, remote_path, get=False):
+ """Copy files from local_path to remote_path or vice versa.
connect() method has to be called first!
+
+ :param local_path: Path to local file that should be uploaded; or
+ path where to save remote file.
+ :param remote_path: Remote path where to place uploaded file; or
+ path to remote file which should be downloaded.
+ :param get: scp operation to perform. Default is put.
+ :type local_path: str
+ :type remote_path: str
+ :type get: bool
"""
- logger.trace('SCP {0} to {1}:{2}'.format(
- local_path, self._ssh.get_transport().getpeername(), remote_path))
+ if not get:
+ logger.trace('SCP {0} to {1}:{2}'.format(
+ local_path, self._ssh.get_transport().getpeername(),
+ remote_path))
+ else:
+ logger.trace('SCP {0}:{1} to {2}'.format(
+ self._ssh.get_transport().getpeername(), remote_path,
+ local_path))
# SCPCLient takes a paramiko transport as its only argument
scp = SCPClient(self._ssh.get_transport(), socket_timeout=10)
start = time()
- scp.put(local_path, remote_path)
+ if not get:
+ scp.put(local_path, remote_path)
+ else:
+ scp.get(remote_path, local_path)
scp.close()
end = time()
logger.trace('SCP took {0} seconds'.format(end-start))
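A short sketch of the extended scp() call in both directions (node values are illustrative); the archive_honeycomb_log() change earlier in this patch uses the get=True path to pull honeycomb.log into the working directory:

    # Illustrative node dictionary; real values come from the topology YAML.
    from resources.libraries.python.ssh import SSH

    node = {"host": "192.168.0.1", "port": 22,
            "username": "testuser", "password": "Csit1234"}

    ssh = SSH()
    ssh.connect(node)
    # Default direction (get=False): upload a local file to the remote path.
    ssh.scp("output.xml", "/tmp/output.xml")
    # New direction (get=True): download a remote file to the local path.
    ssh.scp(".", "/var/log/honeycomb/honeycomb.log", get=True)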
diff --git a/resources/libraries/robot/honeycomb/honeycomb.robot b/resources/libraries/robot/honeycomb/honeycomb.robot
index 9017584f5a..d0f9f0fbf6 100644
--- a/resources/libraries/robot/honeycomb/honeycomb.robot
+++ b/resources/libraries/robot/honeycomb/honeycomb.robot
@@ -73,6 +73,7 @@
| | ... | *Example:*
| | ...
| | ... | \| Clear persisted Honeycomb configuration \| ${nodes['DUT1']} \|
+| | ...
| | [Arguments] | @{duts}
| | Clear persisted Honeycomb config | @{duts}
@@ -86,14 +87,34 @@
| | ...
| | ... | \| Restart Honeycomb and VPP and clear persisted configuration \
| | ... | \| ${nodes['DUT1']} \|
+| | ...
| | [Arguments] | ${node}
| | Stop Honeycomb service on DUTs | ${node}
| | Clear persisted Honeycomb configuration | ${node}
| | Setup DUT | ${node}
| | Sleep | 10s | Wait 10 seconds to make sure VPP is fully up.
| | Configure Honeycomb service on DUTs | ${node}
| Restart Honeycomb and VPP
-| | [Documentation] | Restarts Honeycomb service and wait until it starts up.
+| | [Documentation] | Stops the Honeycomb service and verifies it is stopped.
+| | ... | Then restarts VPP, starts Honeycomb again and verifies it is running.
+| | ...
+| | ... | *Arguments:*
+| | ... | - node - information about a DUT node. Type: dictionary
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Restart Honeycomb and VPP \| ${nodes['DUT1']} \|
+| | ...
+| | [Arguments] | ${node}
+| | Stop Honeycomb service on DUTs | ${node}
+| | Setup DUT | ${node}
+| | Sleep | 10s | Wait 10 seconds to make sure VPP is fully up.
+| | Configure Honeycomb service on DUTs | ${node}
+
+| Restart Honeycomb and VPP in performance test
+| | [Documentation] | Stops Honeycomb and VPP and verifies HC is stopped.
+| | ... | Then restarts VPP, starts Honeycomb again and verifies it is running.
| | ...
| | ... | *Arguments:*
| | ... | - node - information about a DUT node. Type: dictionary
@@ -101,10 +122,15 @@
| | ... | *Example:*
| | ...
| | ... | \| Restart Honeycomb and VPP \| ${nodes['DUT1']} \|
+| | ...
| | [Arguments] | ${node}
| | Stop Honeycomb service on DUTs | ${node}
+| | Stop VPP service on DUT | ${node}
| | Setup DUT | ${node}
+| | Sleep | 10s | Wait 10 seconds to make sure VPP is fully up.
| | Configure Honeycomb service on DUTs | ${node}
+| | Wait until keyword succeeds | 2min | 16sec
+| | ... | Check honeycomb startup state | ${node}
| Archive Honeycomb log file
| | [Documentation] | Copy honeycomb.log file from Honeycomb node\
@@ -112,12 +138,14 @@
| | ...
| | ... | *Arguments:*
| | ... | - node - information about a DUT node. Type: dictionary
+| | ... | - perf - Whether the test is running on a performance testbed. Type: boolean
| | ...
| | ... | *Example:*
| | ...
| | ... | \| Archive Honeycomb log file \| ${nodes['DUT1']} \|
-| | [Arguments] | ${node}
-| | Archive Honeycomb log | ${node}
+| | ...
+| | [Arguments] | ${node} | ${perf}=${False}
+| | Archive Honeycomb log | ${node} | ${perf}
| Configure ODL Client Service On DUT
| | [Documentation] | Configure and start ODL client, then repeatedly check if
@@ -131,6 +159,7 @@
| | ...
| | ... | \| Configure ODL Client Service on DUT \| ${nodes['DUT1']} \
| | ... | \| carbon-SR1 \|
+| | ...
| | [Arguments] | ${node} | ${odl_name}
| | Copy ODL Client | ${node} | ${odl_name} | /mnt/common | /tmp
| | Setup ODL Client | ${node} | /tmp
@@ -153,6 +182,7 @@
| | ... | *Example:*
| | ...
| | ... | \| Configure Honeycomb for functional testing \| ${nodes['DUT1']} \|
+| | ...
| | [Arguments] | ${node}
| | Configure Restconf binding address | ${node}
| | Configure Log Level | ${node} | TRACE
@@ -172,6 +202,7 @@
| | ... | *Example:*
| | ...
| | ... | \| Configure ODL Client for functional testing \| ${nodes['DUT1']} \|
+| | ...
| | [Arguments] | ${node}
| | ${use_odl_client}= | Get Variable Value | ${HC_ODL}
| | Run Keyword If | '${use_odl_client}' != '${NONE}'
@@ -192,6 +223,7 @@
| | ... | *Example:*
| | ...
| | ... | \| Set Up Honeycomb Functional Test Suite \| ${nodes['DUT1']} \|
+| | ...
| | [Arguments] | ${node}
| | Setup DUT | ${node}
| | Configure all TGs for traffic script
@@ -209,6 +241,7 @@
| | ... | *Example:*
| | ...
| | ... | \| Tear Down Honeycomb Functional Test Suite \| ${nodes['DUT1']} \|
+| | ...
| | [Arguments] | ${node}
| | ${use_odl_client}= | Get Variable Value | ${HC_ODL}
| | Run Keyword If | '${use_odl_client}' != '${NONE}'
@@ -227,6 +260,7 @@
| | ... | *Example:*
| | ...
| | ... | \| Enable Honeycomb Feature \| ${nodes['DUT1']} \| NSH \|
+| | ...
| | [arguments] | ${node} | ${feature}
| | Manage Honeycomb Features | ${node} | ${feature}
@@ -240,5 +274,49 @@
| | ... | *Example:*
| | ...
| | ... | \| Disable Honeycomb Feature \| ${nodes['DUT1']} \| NSH \|
+| | ...
| | [arguments] | ${node} | ${feature}
-| | Manage Honeycomb Features | ${node} | ${feature} | disable=${True}
\ No newline at end of file
+| | Manage Honeycomb Features | ${node} | ${feature} | disable=${True}
+
+| Stop VPP Service on DUT
+| | [Documentation] | Stop the VPP service on the specified node.
+| | ...
+| | ... | *Arguments:*
+| | ... | - node - information about a DUT node. Type: dictionary
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Stop VPP Service on DUT \| ${nodes['DUT1']} \|
+| | ...
+| | [Arguments] | ${node}
+| | Stop VPP Service | ${node}
+
+| Honeycomb Performance Suite Setup Generic
+| | [Documentation] | Generic test suite setup for Honeycomb performance tests.
+| | ... | Performs multiple attempts to start Honeycomb+VPP stack.
+| | ...
+| | ... | *Arguments:*
+| | ... | - node - information about a DUT node. Type: dictionary
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Honeycomb Performance Suite Setup Generic \| ${nodes['DUT1']} \|
+| | ...
+| | [Arguments] | ${node}
+| | Wait until keyword succeeds | 8min | 2min
+| | ... | Restart Honeycomb and VPP in Performance test | ${node}
+
+| Honeycomb Performance Suite Teardown Generic
+| | [Documentation] | Generic test suite teardown for Honeycomb performance
+| | ... | tests. Logs CPU usage before stopping Honeycomb.
+| | ...
+| | ... | *Arguments:*
+| | ... | - node - information about a DUT node. Type: dictionary
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Honeycomb Performance Suite Teardown Generic \| ${nodes['DUT1']} \|
+| | ...
+| | [Arguments] | ${node}
+| | Log Honeycomb and VPP process distribution on cores | ${node}
+| | Stop Honeycomb service on DUTs | ${node}
diff --git a/resources/libraries/robot/honeycomb/performance.robot b/resources/libraries/robot/honeycomb/performance.robot
new file mode 100644
index 0000000000..bd906e1742
--- /dev/null
+++ b/resources/libraries/robot/honeycomb/performance.robot
@@ -0,0 +1,120 @@
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+| Library | resources.libraries.python.honeycomb.Performance
+| Library | resources.libraries.python.InterfaceUtil
+| Resource | resources/libraries/robot/honeycomb/honeycomb.robot
+| Documentation | Keywords used in Honeycomb performance testing.
+
+*** Keywords ***
+| Configure Honeycomb Netconf threads
+| | [Documentation] | Modify the thread configuration of Honeycomb's Netconf server.
+| | ... | Requires a restart of Honeycomb to take effect.
+| | ...
+| | ... | *Arguments:*
+| | ... | - node - Node to change configuration on. Type: dictionary
+| | ... | - threads - Number of threads to configure. Type: integer
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Configure Honeycomb Netconf Threads \| ${nodes[DUT1]} \| ${2} \|
+| | ...
+| | [Arguments] | ${node} | ${threads}
+| | Configure Netconf Threads | ${node} | ${threads}
+
+| Run base operational read performance trial
+| | [Documentation] | Send Netconf requests over plain TCP to obtain VPP version
+| | ... | from Honeycomb operational data.
+| | ...
+| | ... | *Arguments:*
+| | ... | - node - Node to run test on. Type: dictionary
+| | ... | - cores - Number of available processor cores. Type: integer
+| | ... | - cycles - Number of test cycles to run. Final results will\
+| | ... | be averaged across all runs. Type: integer
+| | ... | - threads - Number of threads to use for generating traffic.\
+| | ... | Type: integer
+| | ... | - requests - Number of requests to send in each thread and cycle.\
+| | ... | Type: integer
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Run base operational read performance trial \| ${nodes[DUT1]} \
+| | ... | \| ${36} \| ${1} \| ${4} \| ${10000} \|
+| | ...
+| | [Arguments] | ${node} | ${cores} | ${cycles} | ${threads} | ${requests}
+| | ${result}= | Run traffic script on DUT | ${node} | read_vpp_version.py
+| | ... | ${cores} | cycles=${cycles} | threads=${threads}
+| | ... | requests=${requests}
+| | Set Test Message | ${result}
+
+| Generate VPP Startup Configuration for Honeycomb Test on DUT
+| | [Arguments] | ${node}
+| | [Documentation] | Create VPP base startup configuration on DUT, then restart
+| | ... | VPP to apply the configuration.
+| | ...
+| | ... | *Arguments:*
+| | ... | - node - VPP node to configure. Type: dictionary
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Generate VPP Startup Configuration for Honeycomb Test on DUT \
+| | ... | \| ${nodes[DUT1]} \|
+| | ...
+| | Import Library | resources.libraries.python.VppConfigGenerator
+| | ... | WITH NAME | VPP_config
+| | Run keyword | VPP_config.Set Node | ${node}
+| | Run keyword | VPP_config.Add Unix Log
+| | Run keyword | VPP_config.Add Unix CLI Listen
+| | Run keyword | VPP_config.Add Unix Nodaemon
+| | Run keyword | VPP_config.Add CPU Main Core | ${1}
+| | Run keyword | VPP_config.Apply Config
+
+| Log Honeycomb and VPP process distribution on cores
+| | [Documentation] | Log the distribution of VPP and Honeycomb child processes
+| | ... | over the CPU cores.
+| | ...
+| | ... | *Arguments:*
+| | ... | - node - Honeycomb node. Type: dictionary
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Log Honeycomb and VPP process distribution on cores \
+| | ... | \| ${nodes[DUT1]} \|
+| | ...
+| | [Arguments] | ${node}
+| | Log Core Schedule | ${node} | vpp
+| | Log Core Schedule | ${node} | java
+
+| Generate Honeycomb startup configuration for performance test
+| | [Documentation] | Create HC startup configuration and apply to config
+| | ... | file on DUT. Requires Honeycomb restart to take effect.
+| | ...
+| | ... | *Arguments:*
+| | ... | - node - Honeycomb node. Type: dictionary
+| | ... | - cores - Number of available processor cores. Type: integer
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Generate Honeycomb startup configuration for performance test \
+| | ... | \| ${nodes[DUT1]} \| ${36} \|
+| | ...
+| | [Arguments] | ${node} | ${cores}
+| | Import Library | resources.libraries.python.honeycomb.HoneycombSetup.HoneycombStartupConfig
+| | ... | WITH NAME | HC_config
+| | Run Keyword | HC_config.Set CPU Scheduler | FIFO
+| | Run Keyword | HC_config.Set CPU Core Affinity | ${2} | ${cores}
+| | Run Keyword | HC_config.Set JIT Compiler Mode | server
+| | Run Keyword | HC_config.Set Memory Size | ${512} | ${2048}
+| | Run Keyword | HC_config.Set Metaspace Size | ${128} | ${512}
+| | Run Keyword | HC_config.Set NUMA Optimization
+| | Run Keyword | HC_config.Apply Config | ${node}
diff --git a/resources/tools/scripts/download_hc_pkgs.sh b/resources/tools/scripts/download_hc_pkgs.sh
index 23e0be4b16..1bda02505c 100755
--- a/resources/tools/scripts/download_hc_pkgs.sh
+++ b/resources/tools/scripts/download_hc_pkgs.sh
@@ -30,19 +30,19 @@ if [ "${OS}" == "ubuntu1404" ]; then
OS="ubuntu.trusty.main"
PACKAGE="deb deb.md5"
CLASS="deb"
- VPP_ARTIFACTS="vpp vpp-dbg vpp-dev vpp-lib vpp-plugins vpp-api-java"
+ VPP_ARTIFACTS="vpp vpp-dbg vpp-lib vpp-plugins"
DPDK_ARTIFACTS="vpp-dpdk-dkms"
elif [ "${OS}" == "ubuntu1604" ]; then
OS="ubuntu.xenial.main"
PACKAGE="deb deb.md5"
CLASS="deb"
- VPP_ARTIFACTS="vpp vpp-dbg vpp-dev vpp-lib vpp-plugins vpp-api-java"
+ VPP_ARTIFACTS="vpp vpp-dbg vpp-lib vpp-plugins"
DPDK_ARTIFACTS="vpp-dpdk-dkms"
elif [ "${OS}" == "centos7" ]; then
OS="centos7"
PACKAGE="rpm rpm.md5"
CLASS=""
- VPP_ARTIFACTS="vpp vpp-debuginfo vpp-devel vpp-lib vpp-plugins vpp-api-java"
+ VPP_ARTIFACTS="vpp vpp-debuginfo vpp-lib vpp-plugins"
DPDK_ARTIFACTS=""
fi
diff --git a/resources/tools/scripts/topo_installation.py b/resources/tools/scripts/topo_installation.py
index 0488bdae69..5c91abbd0f 100755
--- a/resources/tools/scripts/topo_installation.py
+++ b/resources/tools/scripts/topo_installation.py
@@ -85,15 +85,34 @@ def main():
help="Packages paths to copy")
parser.add_argument("-c", "--cancel", help="Cancel installation",
action="store_true")
+ parser.add_argument("-hc", "--honeycomb", help="Include Honeycomb package.",
+ required=False, default=False)
+
args = parser.parse_args()
topology_file = args.topo
packages = args.packages
install_dir = args.directory
cancel_installation = args.cancel
+ honeycomb = args.honeycomb
work_file = open(topology_file)
topology = load(work_file.read())['nodes']
+ def fix_interrupted(package):
+ """If there are interrupted installations, clean them up."""
+
+ cmd = "dpkg -l | grep {0}".format(package)
+ ret, _, _ = ssh.exec_command(cmd)
+ if ret == 0:
+ # Try to fix interrupted installations
+ cmd = 'dpkg --configure -a'
+ stdout = ssh_no_error(ssh, cmd, sudo=True)
+ print "###TI {}".format(stdout)
+ # Try to remove installed packages
+ cmd = 'apt-get purge -y "{0}.*"'.format(package)
+ stdout = ssh_no_error(ssh, cmd, sudo=True)
+ print "###TI {}".format(stdout)
+
ssh = SSH()
for node in topology:
if topology[node]['type'] == "DUT":
@@ -106,41 +125,44 @@ def main():
stdout = ssh_ignore_error(ssh, cmd)
print "###TI {}".format(stdout)
- cmd = "dpkg -l | grep vpp"
- ret, _, _ = ssh.exec_command(cmd)
- if ret == 0:
- # Try to fix interrupted installations
- cmd = 'dpkg --configure -a'
- stdout = ssh_no_error(ssh, cmd, sudo=True)
- print "###TI {}".format(stdout)
- # Try to remove installed vpp.* packages
- cmd = 'apt-get purge -y "vpp.*"'
- stdout = ssh_no_error(ssh, cmd, sudo=True)
+ if honeycomb:
+ fix_interrupted("honeycomb")
+ # remove HC logs
+ cmd = "rm -rf /var/log/honeycomb"
+ stdout = ssh_ignore_error(ssh, cmd, sudo=True)
print "###TI {}".format(stdout)
+ fix_interrupted("vpp")
+
else:
# Create installation directory on DUT
cmd = "rm -r {0}; mkdir {0}".format(install_dir)
stdout = ssh_no_error(ssh, cmd)
print "###TI {}".format(stdout)
- # Copy packages from local path to installation dir
- for deb in packages:
- print "###TI scp: {}".format(deb)
- ssh.scp(local_path=deb, remote_path=install_dir)
-
- cmd = "dpkg -l | grep vpp"
- ret, _, _ = ssh.exec_command(cmd)
- if ret == 0:
- # Try to fix interrupted installations
- cmd = 'dpkg --configure -a'
- stdout = ssh_no_error(ssh, cmd, sudo=True)
- print "###TI {}".format(stdout)
- # Try to remove installed vpp.* packages
- cmd = 'apt-get purge -y "vpp.*"'
- stdout = ssh_no_error(ssh, cmd, sudo=True)
- print "###TI {}".format(stdout)
-
- # Installation of VPP deb packages
+ if honeycomb:
+ smd = "ls ~/honeycomb | grep .deb"
+ stdout = ssh_ignore_error(ssh, smd)
+ if "honeycomb" in stdout:
+ # If custom honeycomb packages exist, use them
+ cmd = "cp ~/honeycomb/*.deb {0}".format(install_dir)
+ stdout = ssh_no_error(ssh, cmd)
+ print "###TI {}".format(stdout)
+ else:
+ # Copy packages from local path to installation dir
+ for deb in packages:
+ print "###TI scp: {}".format(deb)
+ ssh.scp(local_path=deb, remote_path=install_dir)
+ else:
+ # Copy packages from local path to installation dir
+ for deb in packages:
+ print "###TI scp: {}".format(deb)
+ ssh.scp(local_path=deb, remote_path=install_dir)
+
+ if honeycomb:
+ fix_interrupted("honeycomb")
+ fix_interrupted("vpp")
+
+ # Installation of deb packages
cmd = "dpkg -i --force-all {}/*.deb".format(install_dir)
stdout = ssh_no_error(ssh, cmd, sudo=True)
print "###TI {}".format(stdout)
diff --git a/resources/tools/testbed-setup/playbooks/01-host-setup.yaml b/resources/tools/testbed-setup/playbooks/01-host-setup.yaml
index 6d644e5054..8fa9d66799 100644
--- a/resources/tools/testbed-setup/playbooks/01-host-setup.yaml
+++ b/resources/tools/testbed-setup/playbooks/01-host-setup.yaml
@@ -127,3 +127,5 @@
apt: name=lxc state=present
- name: Disable 80-vpp.conf
command: ln -s /dev/null /etc/sysctl.d/80-vpp.conf
+ - name: Install java
+ apt: name=openjdk-8-jdk state=present
diff --git a/resources/traffic_scripts/honeycomb/read_vpp_version.py b/resources/traffic_scripts/honeycomb/read_vpp_version.py
new file mode 100755
index 0000000000..8a861801d6
--- /dev/null
+++ b/resources/traffic_scripts/honeycomb/read_vpp_version.py
@@ -0,0 +1,360 @@
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import socket
+import multiprocessing
+import argparse
+from time import time
+
+
+class Counter(object):
+ """Counter used for stats collection."""
+ def __init__(self, start=0):
+ """Initializer."""
+ self.lock = multiprocessing.Lock()
+ self.value = start
+
+ def increment(self, value=1):
+ """Increment counter and return the new value."""
+ self.lock.acquire()
+ val = self.value
+ try:
+ self.value += value
+ finally:
+ self.lock.release()
+ return val
+
+
+class timer(object):
+ """Timer used used during test execution."""
+ def __init__(self, verbose=False):
+ self.verbose = verbose
+
+ def __enter__(self):
+ """Start the timer."""
+ self.start = time()
+ return self
+
+ def __exit__(self, *args):
+ """Stop the timer and save current value."""
+ self.end = time()
+ self.secs = self.end - self.start
+ self.msecs = self.secs * 1000 # millisecs
+ if self.verbose:
+ print("elapsed time: {0} ms".format(self.msecs))
+
+
+class ConfigBlaster(object):
+ """Generates Netconf requests, receives replies and collects statistics."""
+
+ TIMEOUT = 10
+
+ # Hello message with capabilities list for Netconf sessions.
+ hello = u"""<hello xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"
+ message-id="m-0">
+ <capabilities>
+ <capability>urn:ietf:params:netconf:base:1.0</capability>
+ </capabilities>
+ </hello>
+ ]]>]]>"""
+
+ # RPC to retrieve VPP version (minimal processing in VPP)
+ request_template = u"""<rpc xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"
+ message-id="m-1">
+ <get>
+ <filter xmlns:ns0="urn:ietf:params:xml:ns:netconf:base:1.0"
+ ns0:type="subtree">
+ <vpp-state xmlns="urn:opendaylight:params:xml:ns:yang:vpp:management">
+ <version/>
+ </vpp-state>
+ </filter>
+ </get>
+ </rpc>
+ ]]>]]>"""
+
+ class Stats(object):
+ """Stores and further processes statistics collected by worker
+ threads during their execution.
+ """
+
+ def __init__(self):
+ """Initializer."""
+ self.ok_rqst_rate = Counter(0)
+ self.total_rqst_rate = Counter(0)
+ self.ok_rqsts = Counter(0)
+ self.total_rqsts = Counter(0)
+
+ def process_stats(self, rqst_stats, elapsed_time):
+ """Calculates the stats for request/reply throughput, and aggregates
+ statistics across all threads.
+
+ :param rqst_stats: Request statistics dictionary.
+ :param elapsed_time: Elapsed time for the test.
+ :type rqst_stats: dict
+ :type elapsed_time: float
+ :returns: Rates (requests/sec) for successfully finished requests
+ and the total number of requests.
+ :rtype: tuple
+ """
+ ok_rqsts = rqst_stats["OK"]
+ total_rqsts = sum(rqst_stats.values())
+
+ ok_rqst_rate = ok_rqsts / elapsed_time
+ total_rqst_rate = total_rqsts / elapsed_time
+
+ self.ok_rqsts.increment(ok_rqsts)
+ self.total_rqsts.increment(total_rqsts)
+
+ self.ok_rqst_rate.increment(ok_rqst_rate)
+ self.total_rqst_rate.increment(total_rqst_rate)
+
+ return ok_rqst_rate, total_rqst_rate
+
+ @property
+ def get_ok_rqst_rate(self):
+ return self.ok_rqst_rate.value
+
+ @property
+ def get_total_rqst_rate(self):
+ return self.total_rqst_rate.value
+
+ @property
+ def get_ok_rqsts(self):
+ return self.ok_rqsts.value
+
+ @property
+ def get_total_rqsts(self):
+ return self.total_rqsts.value
+
+ def __init__(self, host, port, ncycles, nthreads, nrequests):
+ """Initializer.
+
+ :param host: Target IP address.
+ :param port: Target port.
+ :param ncycles: Number of test cycles.
+ :param nthreads: Number of threads for packet generation.
+ :param nrequests: Number of requests to send per thread.
+ :type host: str
+ :type port: int
+ :type ncycles: int
+ :type nthreads: int
+ :type nrequests: int
+ """
+
+ self.host = host
+ self.port = port
+ self.ncycles = ncycles
+ self.nthreads = nthreads
+ self.nrequests = nrequests
+
+ self.stats = self.Stats()
+ self.total_ok_rqsts = 0
+
+ self.print_lock = multiprocessing.Lock()
+ self.cond = multiprocessing.Condition()
+ self.threads_done = 0
+
+ self.recv_buf = 8192
+
+ def send_request(self, sock):
+ """Send Netconf request and receive the reply.
+
+ :param sock: Socket object to use for transfer.
+ :type sock: socket object
+ :returns: Response to request or error message.
+ :rtype: str
+ """
+
+ sock.send(self.request_template)
+ try:
+ return sock.recv(self.recv_buf)
+ except socket.timeout:
+ return "timeout"
+ except socket.error:
+ return "error"
+
+ def send_requests(self, tid, stats):
+ """Read entries from the Honeycomb operational data store. This function
+ is executed by a worker thread.
+
+ :param tid: Thread ID - used to identify the Blaster thread when
+ statistics for the thread are printed out.
+ :param stats: Synchronized queue object for returning execution stats.
+ :type tid: int
+ :type stats: multiprocessing.Queue
+ """
+
+ rqst_stats = {"OK": 0, "Error": 0, "Timeout": 0}
+
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.settimeout(5)
+ # Initiate connection
+ sock.connect((self.host, self.port))
+ # Send hello message
+ sock.send(self.hello)
+ # Receive hello message
+ sock.recv(self.recv_buf)
+ # Determine length of expected responses
+ self.recv_buf = len(self.send_request(sock))
+
+ with self.print_lock:
+ print("\n Thread {0}:\n"
+ " Sending {1} requests".format(tid,
+ self.nrequests))
+
+ replies = [None]*self.nrequests
+ with timer() as t:
+ for x in range(self.nrequests):
+ sts = self.send_request(sock)
+ replies[x] = sts
+
+ for reply in replies:
+ if reply == "timeout":
+ rqst_stats["Timeout"] += 1
+ elif "error" in reply:
+ rqst_stats["Error"] += 1
+ else:
+ rqst_stats["OK"] += 1
+
+ ok_rps, total_rps = self.stats.process_stats(
+ rqst_stats, t.secs)
+
+ with self.print_lock:
+ print("\n Thread {0} results (READ): ".format(tid))
+ print(" Elapsed time: {0:.2f}s,".format(t.secs))
+ print(" Requests/s: {0:.2f} OK, {1:.2f} Total".format(
+ ok_rps, total_rps))
+ print(" Stats ({Requests}, {entries}): "),
+ print(rqst_stats)
+ self.threads_done += 1
+
+ sock.close()
+
+ stats.put({"stats": rqst_stats, "time": t.secs})
+
+ with self.cond:
+ self.cond.notify_all()
+
+ def run_cycle(self, function):
+ """Runs a test cycle. Each test consists of <cycles> test cycles, where
+ <threads> worker threads are started in each test cycle. Each thread
+ reads <requests> entries using Netconf RPCs.
+
+ :param function: Function to be executed in each thread.
+ :type function: function
+ :return: None
+ """
+
+ self.total_ok_rqsts = 0
+ stats_queue = multiprocessing.Queue()
+
+ for c in range(self.ncycles):
+ self.stats = self.Stats()
+ with self.print_lock:
+ print "\nCycle {0}:".format(c)
+
+ threads = []
+ thread_stats = []
+ for i in range(self.nthreads):
+ t = multiprocessing.Process(target=function,
+ args=(i, stats_queue))
+ threads.append(t)
+ t.start()
+
+ # Wait for all threads to finish and measure the execution time
+ with timer() as t:
+ for _ in threads:
+ thread_stats.append(stats_queue.get())
+ for thread in threads:
+ thread.join()
+
+ for item in thread_stats:
+ self.stats.process_stats(item["stats"], item["time"])
+
+ with self.print_lock:
+ print("\n*** Test summary:")
+ print(" Elapsed time: {0:.2f}s".format(t.secs))
+ print(
+ " Peak requests/s: {0:.2f} OK, {1:.2f} Total".format(
+ self.stats.get_ok_rqst_rate,
+ self.stats.get_total_rqst_rate))
+ print(
+ " Avg. requests/s: {0:.2f} OK, {1:.2f} Total ({2:.2f} "
+ "of peak total)".format(
+ self.stats.get_ok_rqsts / t.secs,
+ self.stats.get_total_rqsts / t.secs,
+ (self.stats.get_total_rqsts / t.secs * 100) /
+ self.stats.get_total_rqst_rate))
+
+ self.total_ok_rqsts += self.stats.get_ok_rqsts
+
+ self.threads_done = 0
+
+ def add_blaster(self):
+ """Run the test."""
+ self.run_cycle(self.send_requests)
+
+ @property
+ def get_ok_rqsts(self):
+ return self.total_ok_rqsts
+
+
+def create_arguments_parser():
+ """Creates argument parser for test script.
+ Shorthand to arg parser on library level in order to access and
+ eventually enhance in ancestors.
+
+ :returns: argument parser supporting arguments and parameters
+ :rtype: argparse.ArgumentParser
+ """
+ my_parser = argparse.ArgumentParser(
+ description="entry reading performance test: Reads entries from "
+ "the config tree, as specified by optional parameters.")
+
+ my_parser.add_argument(
+ "--host", default="127.0.0.1",
+ help="Host where odl controller is running (default is 127.0.0.1).")
+ my_parser.add_argument(
+ "--port", default=7777,
+ help="Port on which Honeycomb's Netconf is listening"
+ " (default is 7777 for TCP)")
+ my_parser.add_argument(
+ "--cycles", type=int, default=1,
+ help="Number of entry read cycles; default 1. <THREADS> worker threads "
+ "are started in each cycle and the cycle ends when all threads "
+ "finish. Another cycle is started when the previous cycle "
+ "is finished.")
+ my_parser.add_argument(
+ "--threads", type=int, default=1,
+ help="Number of request worker threads to start in each cycle; "
+ "default=1. Each thread will read <entries> entries.")
+ my_parser.add_argument(
+ "--requests", type=int, default=10,
+ help="Number of requests that will be made by each worker thread "
+ "in each cycle; default 10")
+
+ return my_parser
+
+if __name__ == "__main__":
+
+ parser = create_arguments_parser()
+ in_args = parser.parse_args()
+
+ fct = ConfigBlaster(in_args.host, in_args.port, in_args.cycles,
+ in_args.threads, in_args.requests)
+
+ # Run through <cycles>, where <threads> are started in each cycle and
+ # <entries> are added from each thread
+ fct.add_blaster()
+
+ print " Successful reads: {0}\n".format(fct.get_ok_rqsts)
diff --git a/tests/vpp/perf/honeycomb/__init__.robot b/tests/vpp/perf/honeycomb/__init__.robot
new file mode 100644
index 0000000000..b984993139
--- /dev/null
+++ b/tests/vpp/perf/honeycomb/__init__.robot
@@ -0,0 +1,61 @@
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Variables ***
+# Honeycomb node to run tests on.
+| ${node}= | ${nodes['DUT1']}
+
+*** Settings ***
+| Resource | resources/libraries/robot/shared/default.robot
+| Resource | resources/libraries/robot/shared/interfaces.robot
+| Resource | resources/libraries/robot/honeycomb/honeycomb.robot
+| Resource | resources/libraries/robot/honeycomb/performance.robot
+| Library | resources.libraries.python.SetupFramework
+| Library | resources.libraries.python.CpuUtils
+| Library | resources.libraries.python.honeycomb.Performance
+| Suite Setup | Setup suite for Honeycomb performance tests
+| Suite Teardown | Run Keywords
+| ... | Stop VPP Service on DUT | ${node}
+| ... | AND | Archive Honeycomb Log File | ${node} | perf=${True}
+| ... | AND | Stop honeycomb service on DUTs | ${node}
+
+*** Keywords ***
+| Setup suite for Honeycomb performance tests
+| | [Documentation] | Setup variables and configure VPP and Honeycomb
+| | ... | for performance testing.
+| | ...
+| | Set Global Variable | ${node}
+| | ${cores}= | Get Length | ${node['cpuinfo']}
+| | Set Global Variable | ${cores}
+| | Stop VPP Service on DUT | ${node}
+| | Stop Honeycomb service on DUTs | ${node}
+| | Generate VPP Startup Configuration for Honeycomb Test on DUT | ${node}
+| | Configure Restconf binding address | ${node}
+| | Configure Log Level | ${node} | INFO
+| | Configure Persistence | ${node} | disable
+| | Configure jVPP timeout | ${node} | ${14}
+| | Generate Honeycomb startup configuration for performance test
+| | ... | ${node} | ${cores}
+| | Clear Persisted Honeycomb Configuration | ${node}
+
+| Configure ODL Client for performance tests
+| | [Documentation] | Setup ODL client for performance testing.
+| | ...
+| | ${use_odl_client}= | Get Variable Value | ${HC_ODL}
+| | Run Keyword If | '${use_odl_client}' != '${NONE}'
+| | ... | Run Keywords
+| | ... | Set Global Variable | ${use_odl_client}
+| | ... | AND | Copy ODL client | ${node} | ${HC_ODL} | ~ | ${install_dir}
+| | ... | AND | Configure ODL Client Service On DUT | ${node} | ${install_dir}
+| | ... | ELSE | Log | Variable HC_ODL is not present. Not using ODL.
+| | ... | level=INFO
diff --git a/tests/vpp/perf/honeycomb/localhostp1-tcp-netconf16t-crud-read.robot b/tests/vpp/perf/honeycomb/localhostp1-tcp-netconf16t-crud-read.robot
new file mode 100644
index 0000000000..c13851cc62
--- /dev/null
+++ b/tests/vpp/perf/honeycomb/localhostp1-tcp-netconf16t-crud-read.robot
@@ -0,0 +1,87 @@
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+| Resource | resources/libraries/robot/honeycomb/performance.robot
+| ...
+| Suite Setup | Run Keywords
+| ... | Configure Honeycomb Netconf threads | ${node} | ${16}
+| ... | AND | Honeycomb Performance Suite Setup Generic | ${node}
+| ...
+| Suite Teardown | Honeycomb Performance Suite Teardown Generic | ${node}
+| ...
+| Force Tags | HC_PERF
+| ...
+| Documentation | *Base Netconf operational read test suite with sixteen Netconf
+| ... | threads.*
+| ...
+| ... | *[Top] Network Topologies:* DUT1 single-node topology.
+| ... | *[Enc] Packet Encapsulations:* Eth-IPv4-TCP.
+| ... | *[Cfg] DUT configuration:* Default
+| ... | *[Ver] verification:* DUT verifies Honeycomb's response times with zero
+| ... | error or loss tolerance. Test packets are generated by DUT1 on localhost
+| ... | loopback interface.
+| ... | Traffic profile contains flow-groups containing REST requests
+| ... | for operational reads.
+| ... | *[Ref] Applicable standard specifications:* RFC6241.
+
+*** Test Cases ***
+| TC01: Base operational read with 1 TG thread
+| | [Documentation]
+| | ... | [Ver] Measure response time using single trial throughput test
+| | ... | with 100K requests generated in a single thread.
+| | ${cycles}= | Set Variable | ${1}
+| | ${threads}= | Set Variable | ${1}
+| | ${entries}= | Set Variable | ${100000}
+| | Run base operational read performance trial
+| | ... | ${node} | ${cores} | ${cycles} | ${threads} | ${entries}
+
+| TC02: Base operational read with 2 TG threads
+| | [Documentation]
+| | ... | [Ver] Measure response time using single trial throughput test
+| | ... | with 100K requests generated in two threads.
+| | ${cycles}= | Set Variable | ${1}
+| | ${threads}= | Set Variable | ${2}
+| | ${entries}= | Set Variable | ${50000}
+| | Run base operational read performance trial
+| | ... | ${node} | ${cores} | ${cycles} | ${threads} | ${entries}
+
+| TC03: Base operational read with 4 TG threads
+| | [Documentation]
+| | ... | [Ver] Measure response time using single trial throughput test
+| | ... | with 100K requests generated in four threads.
+| | ${cycles}= | Set Variable | ${1}
+| | ${threads}= | Set Variable | ${4}
+| | ${entries}= | Set Variable | ${25000}
+| | Run base operational read performance trial
+| | ... | ${node} | ${cores} | ${cycles} | ${threads} | ${entries}
+
+| TC04: Base operational read with 8 TG threads
+| | [Documentation]
+| | ... | [Ver] Measure response time using single trial throughput test
+| | ... | with 100K requests generated in eight threads.
+| | ${cycles}= | Set Variable | ${1}
+| | ${threads}= | Set Variable | ${8}
+| | ${entries}= | Set Variable | ${12500}
+| | Run base operational read performance trial
+| | ... | ${node} | ${cores} | ${cycles} | ${threads} | ${entries}
+
+| TC05: Base operational read with 16 TG threads
+| | [Documentation]
+| | ... | [Ver] Measure response time using single trial throughput test
+| | ... | with 100K requests generated in sixteen threads.
+| | ${cycles}= | Set Variable | ${1}
+| | ${threads}= | Set Variable | ${16}
+| | ${entries}= | Set Variable | ${6250}
+| | Run base operational read performance trial
+| | ... | ${node} | ${cores} | ${cycles} | ${threads} | ${entries}
diff --git a/tests/vpp/perf/honeycomb/localhostp1-tcp-netconf1t-crud-read.robot b/tests/vpp/perf/honeycomb/localhostp1-tcp-netconf1t-crud-read.robot
new file mode 100644
index 0000000000..d46fdf795b
--- /dev/null
+++ b/tests/vpp/perf/honeycomb/localhostp1-tcp-netconf1t-crud-read.robot
@@ -0,0 +1,87 @@
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+| Resource | resources/libraries/robot/honeycomb/performance.robot
+| ...
+| Suite Setup | Run Keywords
+| ... | Configure Honeycomb Netconf threads | ${node} | ${1}
+| ... | AND | Honeycomb Performance Suite Setup Generic | ${node}
+| ...
+| Suite Teardown | Honeycomb Performance Suite Teardown Generic | ${node}
+| ...
+| Force Tags | HC_PERF
+| ...
+| Documentation | *Base Netconf operational read test suite with one Netconf
+| ... | thread.*
+| ...
+| ... | *[Top] Network Topologies:* DUT1 single-node topology.
+| ... | *[Enc] Packet Encapsulations:* Eth-IPv4-TCP.
+| ... | *[Cfg] DUT configuration:* Default
+| ... | *[Ver] verification:* DUT verifies Honeycomb's response times with zero
+| ... | error or loss tolerance. Test packets are generated by DUT1 on localhost
+| ... | loopback interface.
+| ... | Traffic profile contains flow-groups containing REST requests
+| ... | for operational reads.
+| ... | *[Ref] Applicable standard specifications:* RFC6241.
+
+*** Test Cases ***
+| TC01: Base operational read with 1 TG thread
+| | [Documentation]
+| | ... | [Ver] Measure response time using single trial throughput test
+| | ... | with 100K requests generated in a single thread.
+| | ${cycles}= | Set Variable | ${1}
+| | ${threads}= | Set Variable | ${1}
+| | ${entries}= | Set Variable | ${100000}
+| | Run base operational read performance trial
+| | ... | ${node} | ${cores} | ${cycles} | ${threads} | ${entries}
+
+| TC02: Base operational read with 2 TG threads
+| | [Documentation]
+| | ... | [Ver] Measure response time using single trial throughput test
+| | ... | with 100K requests generated in two threads.
+| | ${cycles}= | Set Variable | ${1}
+| | ${threads}= | Set Variable | ${2}
+| | ${entries}= | Set Variable | ${50000}
+| | Run base operational read performance trial
+| | ... | ${node} | ${cores} | ${cycles} | ${threads} | ${entries}
+
+| TC03: Base operational read with 4 TG threads
+| | [Documentation]
+| | ... | [Ver] Measure response time using single trial throughput test
+| | ... | with 100K requests generated in four threads.
+| | ${cycles}= | Set Variable | ${1}
+| | ${threads}= | Set Variable | ${4}
+| | ${entries}= | Set Variable | ${25000}
+| | Run base operational read performance trial
+| | ... | ${node} | ${cores} | ${cycles} | ${threads} | ${entries}
+
+| TC04: Base operational read with 8 TG threads
+| | [Documentation]
+| | ... | [Ver] Measure response time using single trial throughput test
+| | ... | with 100K requests generated in eight threads.
+| | ${cycles}= | Set Variable | ${1}
+| | ${threads}= | Set Variable | ${8}
+| | ${entries}= | Set Variable | ${12500}
+| | Run base operational read performance trial
+| | ... | ${node} | ${cores} | ${cycles} | ${threads} | ${entries}
+
+| TC05: Base operational read with 16 TG threads
+| | [Documentation]
+| | ... | [Ver] Measure response time using single trial throughput test
+| | ... | with 100K requests generated in sixteen threads.
+| | ${cycles}= | Set Variable | ${1}
+| | ${threads}= | Set Variable | ${16}
+| | ${entries}= | Set Variable | ${6250}
+| | Run base operational read performance trial
+| | ... | ${node} | ${cores} | ${cycles} | ${threads} | ${entries}
diff --git a/tests/vpp/perf/honeycomb/localhostp1-tcp-netconf8t-crud-read.robot b/tests/vpp/perf/honeycomb/localhostp1-tcp-netconf8t-crud-read.robot
new file mode 100644
index 0000000000..9928f15f1d
--- /dev/null
+++ b/tests/vpp/perf/honeycomb/localhostp1-tcp-netconf8t-crud-read.robot
@@ -0,0 +1,97 @@
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+| Resource | resources/libraries/robot/honeycomb/performance.robot
+| ...
+| Suite Setup | Run Keywords
+| ... | Configure Honeycomb Netconf threads | ${node} | ${8}
+| ... | AND | Honeycomb Performance Suite Setup Generic | ${node}
+| ...
+| Suite Teardown | Honeycomb Performance Suite Teardown Generic | ${node}
+| ...
+| Force Tags | HC_PERF
+| ...
+| Documentation | *Base Netconf operational read test suite with eight Netconf
+| ... | threads.*
+| ...
+| ... | *[Top] Network Topologies:* DUT1 single-node topology.
+| ... | *[Enc] Packet Encapsulations:* Eth-IPv4-TCP.
+| ... | *[Cfg] DUT configuration:* Default
+| ... | *[Ver] verification:* DUT verifies Honeycomb's response times with zero
+| ... | error or loss tolerance. Test packets are generated by DUT1 on localhost
+| ... | loopback interface.
+| ... | Traffic profile contains flow-groups containing REST requests
+| ... | for operational reads.
+| ... | *[Ref] Applicable standard specifications:* RFC6241.
+
+*** Test Cases ***
+| TC01: Base operational read with 1 TG thread
+| | [Documentation]
+| | ... | [Ver] Measure response time using single trial throughput test
+| | ... | with 100K requests generated in a single thread.
+| | ${cycles}= | Set Variable | ${1}
+| | ${threads}= | Set Variable | ${1}
+| | ${entries}= | Set Variable | ${100000}
+| | Run base operational read performance trial
+| | ... | ${node} | ${cores} | ${cycles} | ${threads} | ${entries}
+
+| TC02: Base operational read with 2 TG threads
+| | [Documentation]
+| | ... | [Ver] Measure response time using single trial throughput test
+| | ... | with 100K requests generated in two threads.
+| | ${cycles}= | Set Variable | ${1}
+| | ${threads}= | Set Variable | ${2}
+| | ${entries}= | Set Variable | ${50000}
+| | Run base operational read performance trial
+| | ... | ${node} | ${cores} | ${cycles} | ${threads} | ${entries}
+
+| TC03: Base operational read with 4 TG threads
+| | [Documentation]
+| | ... | [Ver] Measure response time using single trial throughput test
+| | ... | with 100K requests generated in four threads.
+| | ${cycles}= | Set Variable | ${1}
+| | ${threads}= | Set Variable | ${4}
+| | ${entries}= | Set Variable | ${25000}
+| | Run base operational read performance trial
+| | ... | ${node} | ${cores} | ${cycles} | ${threads} | ${entries}
+
+| TC04: Base operational read with 8 TG threads
+| | [Documentation]
+| | ... | [Ver] Measure response time using single trial throughput test
+| | ... | with 100K requests generated in eight threads.
+| | ${cycles}= | Set Variable | ${1}
+| | ${threads}= | Set Variable | ${8}
+| | ${entries}= | Set Variable | ${12500}
+| | Run base operational read performance trial
+| | ... | ${node} | ${cores} | ${cycles} | ${threads} | ${entries}
+
+| TC05: Base operational read with 16 TG threads
+| | [Documentation]
+| | ... | [Ver] Measure response time using single trial throughput test
+| | ... | with 100K requests generated in sixteen threads.
+| | ${cycles}= | Set Variable | ${1}
+| | ${threads}= | Set Variable | ${16}
+| | ${entries}= | Set Variable | ${6250}
+| | Run base operational read performance trial
+| | ... | ${node} | ${cores} | ${cycles} | ${threads} | ${entries}
+
+| TC06: Base operational read with 24 TG threads
+| | [Documentation]
+| | ... | [Ver] Measure response time using single trial throughput test
+| | ... | with 150K requests generated in twenty-four threads.
+| | ${cycles}= | Set Variable | ${1}
+| | ${threads}= | Set Variable | ${24}
+| | ${entries}= | Set Variable | ${6250}
+| | Run base operational read performance trial
+| | ... | ${node} | ${cores} | ${cycles} | ${threads} | ${entries}
diff --git a/topologies/available/lf_testbed1.yaml b/topologies/available/lf_testbed1.yaml
index b7ad638f2a..673940c063 100644
--- a/topologies/available/lf_testbed1.yaml
+++ b/topologies/available/lf_testbed1.yaml
@@ -107,6 +107,11 @@ nodes:
password: Csit1234
cryptodev: 0000:86:00.0
uio_driver: igb_uio
+ honeycomb:
+ user: admin
+ passwd: admin
+ port: 8183
+ netconf_port: 2831
interfaces:
port1:
# t1-sut1-c1/p1 - 10GE port1 on Intel NIC x520 2p10GE.
@@ -176,6 +181,11 @@ nodes:
password: Csit1234
cryptodev: 0000:86:00.0
uio_driver: igb_uio
+ honeycomb:
+ user: admin
+ passwd: admin
+ port: 8183
+ netconf_port: 2831
interfaces:
port1:
# t1-sut2-c1/p1 - 10GE port1 on Intel NIC x520 2p10GE.
diff --git a/topologies/available/lf_testbed2.yaml b/topologies/available/lf_testbed2.yaml
index 908797c479..e046fa5670 100644
--- a/topologies/available/lf_testbed2.yaml
+++ b/topologies/available/lf_testbed2.yaml
@@ -107,6 +107,11 @@ nodes:
password: Csit1234
cryptodev: 0000:86:00.0
uio_driver: igb_uio
+ honeycomb:
+ user: admin
+ passwd: admin
+ port: 8183
+ netconf_port: 2831
interfaces:
port1:
# t2-sut1-c1/p1 - 10GE port1 on Intel NIC x520 2p10GE.
@@ -176,6 +181,11 @@ nodes:
password: Csit1234
cryptodev: 0000:86:00.0
uio_driver: igb_uio
+ honeycomb:
+ user: admin
+ passwd: admin
+ port: 8183
+ netconf_port: 2831
interfaces:
port1:
# t2-sut2-c1/p1 - 10GE port1 on Intel NIC x520 2p10GE.
diff --git a/topologies/available/lf_testbed3.yaml b/topologies/available/lf_testbed3.yaml
index e68a9872f0..85cc8fe3c6 100644
--- a/topologies/available/lf_testbed3.yaml
+++ b/topologies/available/lf_testbed3.yaml
@@ -107,6 +107,11 @@ nodes:
password: Csit1234
cryptodev: 0000:86:00.0
uio_driver: igb_uio
+ honeycomb:
+ user: admin
+ passwd: admin
+ port: 8183
+ netconf_port: 2831
interfaces:
port1:
# t3-sut1-c1/p1 - 10GE port1 on Intel NIC x520 2p10GE.
@@ -176,6 +181,11 @@ nodes:
password: Csit1234
cryptodev: 0000:86:00.0
uio_driver: igb_uio
+ honeycomb:
+ user: admin
+ passwd: admin
+ port: 8183
+ netconf_port: 2831
interfaces:
port1:
# t3-sut3-c1/p1 - 10GE port1 on Intel NIC x520 2p10GE.