author     selias <samelias@cisco.com>         2017-05-26 14:51:50 +0200
committer  Samuel Eliáš <samelias@cisco.com>   2017-06-30 11:05:53 +0000
commit     d32194f3afb0ec725d178effe6ae589571287602 (patch)
tree       285fca263e6208c86b81f1202c99dcd0982b9781 /resources
parent     4a946c16a4935e52b3e9039e4661813c256a8934 (diff)
CSIT-619 HC Test: Honeycomb performance testing - initial commit
- keywords and scripts for HC performance testing setup
- basic performance suite: operational data read
- traffic script and keywords used in tests

Change-Id: Ic0290be73a7c925ea2561f8cd2524c5cb83fcda2
Signed-off-by: selias <samelias@cisco.com>
Diffstat (limited to 'resources')
-rw-r--r--  resources/libraries/python/honeycomb/HoneycombSetup.py       176
-rw-r--r--  resources/libraries/python/honeycomb/HoneycombUtil.py         15
-rw-r--r--  resources/libraries/python/honeycomb/Performance.py          129
-rw-r--r--  resources/libraries/python/ssh.py                             28
-rw-r--r--  resources/libraries/robot/honeycomb/honeycomb.robot           86
-rw-r--r--  resources/libraries/robot/honeycomb/performance.robot        120
-rwxr-xr-x  resources/tools/scripts/download_hc_pkgs.sh                    6
-rwxr-xr-x  resources/tools/scripts/topo_installation.py                  78
-rw-r--r--  resources/tools/testbed-setup/playbooks/01-host-setup.yaml     2
-rwxr-xr-x  resources/traffic_scripts/honeycomb/read_vpp_version.py      360
10 files changed, 951 insertions(+), 49 deletions(-)
diff --git a/resources/libraries/python/honeycomb/HoneycombSetup.py b/resources/libraries/python/honeycomb/HoneycombSetup.py
index 53130f405b..13b8b971b6 100644
--- a/resources/libraries/python/honeycomb/HoneycombSetup.py
+++ b/resources/libraries/python/honeycomb/HoneycombSetup.py
@@ -25,7 +25,6 @@ from resources.libraries.python.honeycomb.HoneycombUtil \
import HoneycombUtil as HcUtil
from resources.libraries.python.ssh import SSH
from resources.libraries.python.topology import NodeType
-from resources.libraries.python.DUTSetup import DUTSetup
class HoneycombSetup(object):
@@ -107,6 +106,34 @@ class HoneycombSetup(object):
format(errors))
@staticmethod
+ def restart_honeycomb_on_dut(node):
+ """Restart Honeycomb on specified DUT nodes.
+
+ This keyword restarts the Honeycomb service on specified DUTs. Use the
+ keyword "Check Honeycomb Startup State" to check if the Honeycomb is up
+ and running.
+
+ :param node: Node to restart Honeycomb on.
+ :type node: dict
+ :raises HoneycombError: If Honeycomb fails to start.
+ """
+
+ logger.console("\n(re)Starting Honeycomb service ...")
+
+ cmd = "sudo service honeycomb restart"
+
+ ssh = SSH()
+ ssh.connect(node)
+ (ret_code, _, _) = ssh.exec_command_sudo(cmd)
+ if int(ret_code) != 0:
+ raise HoneycombError('Node {0} failed to restart Honeycomb.'.
+ format(node['host']))
+ else:
+ logger.info(
+ "Honeycomb service restart is in progress on node {0}".format(
+ node['host']))
+
+ @staticmethod
def check_honeycomb_startup_state(*nodes):
"""Check state of Honeycomb service during startup on specified nodes.
@@ -130,8 +157,14 @@ class HoneycombSetup(object):
for node in nodes:
if node['type'] == NodeType.DUT:
HoneycombSetup.print_ports(node)
- status_code, _ = HTTPRequest.get(node, path,
- enable_logging=False)
+ try:
+ status_code, _ = HTTPRequest.get(node, path,
+ enable_logging=False)
+ except HTTPRequestError:
+ ssh = SSH()
+ ssh.connect(node)
+ ssh.exec_command("tail -n 100 /var/log/syslog")
+ raise
if status_code == HTTPCodes.OK:
logger.info("Honeycomb on node {0} is up and running".
format(node['host']))
@@ -479,7 +512,7 @@ class HoneycombSetup(object):
for feature in features:
cmd += " {0}".format(feature)
- ret_code, _, stderr = ssh.exec_command_sudo(cmd, timeout=120)
+ ret_code, _, _ = ssh.exec_command_sudo(cmd, timeout=120)
if int(ret_code) != 0:
raise HoneycombError("Feature install did not succeed.")
@@ -590,7 +623,140 @@ class HoneycombSetup(object):
ssh = SSH()
ssh.connect(node)
cmd = "service vpp stop"
- ret_code, _, _ = ssh.exec_command_sudo(cmd)
+ ret_code, _, _ = ssh.exec_command_sudo(cmd, timeout=80)
if int(ret_code) != 0:
raise RuntimeError("Could not stop VPP service on node {0}".format(
node['host']))
+
+
+class HoneycombStartupConfig(object):
+ """Generator for Honeycomb startup configuration.
+ """
+ def __init__(self):
+ """Initializer."""
+
+ self.template = """
+ #!/bin/sh -
+ STATUS=100
+
+ while [ $STATUS -eq 100 ]
+ do
+ {java_call} -jar $(dirname $0)/{jar_filename}
+ STATUS=$?
+ echo "Honeycomb exited with status: $STATUS"
+ if [ $STATUS -eq 100 ]
+ then
+ echo "Restarting..."
+ fi
+ done
+ """
+
+ self.java_call = "{scheduler} {affinity} java {jit_mode} {params}"
+
+ self.scheduler = ""
+ self.core_affinity = ""
+ self.jit_mode = ""
+ self.params = ""
+ self.numa = ""
+
+ self.config = ""
+ self.ssh = SSH()
+
+ def apply_config(self, node):
+ """Generate configuration file /opt/honeycomb/honeycomb on the specified
+ node.
+
+ :param node: Honeycomb node.
+ :type node: dict
+ """
+
+ self.ssh.connect(node)
+ _, filename, _ = self.ssh.exec_command("ls /opt/honeycomb | grep .jar")
+
+ java_call = self.java_call.format(scheduler=self.scheduler,
+ affinity=self.core_affinity,
+ jit_mode=self.jit_mode,
+ params=self.params)
+ self.config = self.template.format(java_call=java_call,
+ jar_filename=filename)
+
+ self.ssh.connect(node)
+ cmd = "echo '{config}' > /tmp/honeycomb " \
+ "&& chmod +x /tmp/honeycomb " \
+ "&& sudo mv -f /tmp/honeycomb /opt/honeycomb".format(
+ config=self.config)
+ self.ssh.exec_command(cmd)
+
+ def set_cpu_scheduler(self, scheduler="FIFO"):
+ """Use alternate CPU scheduler.
+
+ Note: OTHER scheduler doesn't load-balance over isolcpus.
+
+ :param scheduler: CPU scheduler to use.
+ :type scheduler: str
+ """
+
+ schedulers = {"FIFO": "-f 99", # First In, First Out
+ "RR": "-r 99", # Round Robin
+ "OTHER": "-o", # Ubuntu default
+ }
+ self.scheduler = "chrt {0}".format(schedulers[scheduler])
+
+ def set_cpu_core_affinity(self, low, high=None):
+ """Set core affinity for the honeycomb process and subprocesses.
+
+ :param low: Lowest core ID number.
+ :param high: Highest core ID number. Leave empty to use a single core.
+ :type low: int
+ :type high: int
+ """
+
+ self.core_affinity = "taskset -c {low}-{high}".format(
+ low=low, high=high if high else low)
+
+ def set_jit_compiler_mode(self, jit_mode):
+ """Set running mode for Java's JIT compiler.
+
+ :param jit_mode: Desired JIT mode.
+ :type jit_mode: str
+ """
+
+ modes = {"client": "-client", # Default
+ "server": "-server", # Higher performance but longer warmup
+ "classic": "-classic" # Disables JIT compiler
+ }
+
+ self.jit_mode = modes[jit_mode]
+
+ def set_memory_size(self, mem_min, mem_max=None):
+ """Set minimum and maximum memory use for the JVM.
+
+ :param mem_min: Minimum amount of memory (MB).
+ :param mem_max: Maximum amount of memory (MB). Default is 4 times
+ minimum value.
+ :type mem_min: int
+ :type mem_max: int
+ """
+
+ self.params += " -Xms{min}m -Xmx{max}m".format(
+ min=mem_min, max=mem_max if mem_max else mem_min*4)
+
+ def set_metaspace_size(self, mem_min, mem_max=None):
+ """Set minimum and maximum memory used for class metadata in the JVM.
+
+ :param mem_min: Minimum metaspace size (MB).
+ :param mem_max: Maximum metaspace size (MB). Default is 4 times
+ minimum value.
+ :type mem_min: int
+ :type mem_max: int
+ """
+
+ self.params += " -XX:MetaspaceSize={min}m " \
+ "-XX:MaxMetaspaceSize={max}m".format(
+ min=mem_min, max=mem_max if mem_max else mem_min*4)
+
+ def set_numa_optimization(self):
+ """Use optimization of memory use and garbage collection for NUMA
+ architectures."""
+
+ self.params += " -XX:+UseNUMA -XX:+UseParallelGC"
diff --git a/resources/libraries/python/honeycomb/HoneycombUtil.py b/resources/libraries/python/honeycomb/HoneycombUtil.py
index a718a242f2..24f81af7b3 100644
--- a/resources/libraries/python/honeycomb/HoneycombUtil.py
+++ b/resources/libraries/python/honeycomb/HoneycombUtil.py
@@ -399,16 +399,23 @@ class HoneycombUtil(object):
return HTTPRequest.delete(node, path)
@staticmethod
- def archive_honeycomb_log(node):
+ def archive_honeycomb_log(node, perf=False):
"""Copy honeycomb log file from DUT node to VIRL for archiving.
:param node: Honeycomb node.
+ :param perf: Alternate handling, for use with performance test topology.
:type node: dict
+ :type perf: bool
"""
ssh = SSH()
ssh.connect(node)
- cmd = "cp /var/log/honeycomb/honeycomb.log /scratch/"
-
- ssh.exec_command_sudo(cmd)
+ if not perf:
+ cmd = "cp /var/log/honeycomb/honeycomb.log /scratch/"
+ ssh.exec_command_sudo(cmd)
+ else:
+ ssh.scp(
+ ".",
+ "/var/log/honeycomb/honeycomb.log",
+ get=True)
diff --git a/resources/libraries/python/honeycomb/Performance.py b/resources/libraries/python/honeycomb/Performance.py
new file mode 100644
index 0000000000..1c6b0bc522
--- /dev/null
+++ b/resources/libraries/python/honeycomb/Performance.py
@@ -0,0 +1,129 @@
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Implementation of keywords for testing Honeycomb performance."""
+
+from resources.libraries.python.ssh import SSH
+from resources.libraries.python.constants import Constants as Const
+from resources.libraries.python.honeycomb.HoneycombUtil import HoneycombError
+
+
+class Performance(object):
+ """Keywords used in Honeycomb performance testing."""
+
+ def __init__(self):
+ """Initializer."""
+ pass
+
+ @staticmethod
+ def configure_netconf_threads(node, threads):
+ """Set Honeycomb's Netconf thread count in configuration.
+
+ :param node: Honeycomb node.
+ :param threads: Number of threads.
+ :type node: dict
+ :type threads: int
+ :raises HoneycombError: If the operation fails.
+ """
+
+ find = "netconf-netty-threads"
+ replace = '\\"netconf-netty-threads\\": {0},'.format(threads)
+
+ argument = '"/{0}/c\\ {1}"'.format(find, replace)
+ path = "{0}/config/honeycomb.json".format(Const.REMOTE_HC_DIR)
+ command = "sed -i {0} {1}".format(argument, path)
+
+ ssh = SSH()
+ ssh.connect(node)
+ (ret_code, _, stderr) = ssh.exec_command_sudo(command)
+ if ret_code != 0:
+ raise HoneycombError("Failed to modify configuration on "
+ "node {0}, {1}".format(node, stderr))
+
+ @staticmethod
+ def run_traffic_script_on_dut(node, script, cores, reserved=2,
+ *args, **kwargs):
+ """Copy traffic script over to the specified node and execute with
+ the provided arguments.
+
+ :param node: Node in topology.
+ :param script: Name of the script to execute.
+ :param cores: Number of processor cores to use.
+ :param reserved: Number of cores reserved for other tasks. Default is 2,
+ one for system tasks and one for VPP main thread.
+ :param args: Sequential arguments for the script.
+ :param kwargs: Named arguments for the script.
+ :type node: dict
+ :type script: str
+ :type cores: int
+ :type reserved: int
+ :type args: list
+ :type kwargs: dict
+ """
+
+ path = "resources/traffic_scripts/honeycomb/{0}".format(script)
+
+ # Assemble arguments for traffic script
+ arguments = ""
+ for arg in args:
+ arguments += "{0} ".format(arg)
+
+ for key, value in kwargs.items():
+ arguments += "--{0} {1} ".format(key, value)
+
+ ssh = SSH()
+ ssh.connect(node)
+ ssh.scp(path, "/tmp")
+
+ # Use alternate scheduler, Ubuntu's default can't load-balance
+ # over isolcpus
+ scheduler = "chrt -f 99"
+ core_afi = "taskset -c {first}-{last}".format(
+ first=reserved, last=cores-1)
+
+ cmd = "{scheduler} {affinity} python /tmp/{script} {args}".format(
+ scheduler=scheduler,
+ affinity=core_afi,
+ script=script,
+ args=arguments)
+
+ ret_code, stdout, _ = ssh.exec_command_sudo(cmd, timeout=600)
+
+ ssh.exec_command("sudo pkill python ; rm /tmp/{0}".format(script))
+ if ret_code != 0:
+ raise HoneycombError("Traffic script failed to execute.")
+ for line in stdout.splitlines():
+ if "Avg. requests" in line:
+ return line
+
+ @staticmethod
+ def log_core_schedule(node, process):
+ """Determine which cores the process' threads are running on.
+
+ :param node: Honeycomb node.
+ :param process: Name of the process.
+ :type node: dict
+ :type process: str
+ """
+
+ # Get info on process and all of its children
+ cmd1 = """cat /proc/`pidof {0}`/task/*/stat""".format(process)
+
+ # Parse process ID, name and core index
+ cmd2 = """awk '{print $1" "$2" "$39}'"""
+
+ cmd = "{0} | {1}".format(cmd1, cmd2)
+
+ ssh = SSH()
+ ssh.connect(node)
+ ssh.exec_command(cmd)
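configure_netconf_threads() above rewrites a single line of honeycomb.json in place with sed. A worked example of the command it builds for threads=2, assuming Constants.REMOTE_HC_DIR is /opt/honeycomb (the actual prefix comes from constants.py):

    # Reproduces the string assembly from configure_netconf_threads().
    threads = 2
    find = "netconf-netty-threads"
    replace = '\\"netconf-netty-threads\\": {0},'.format(threads)
    argument = '"/{0}/c\\ {1}"'.format(find, replace)
    command = "sed -i {0} {1}".format(
        argument, "/opt/honeycomb/config/honeycomb.json")
    # Resulting command (sed's "c\" replaces the matching line):
    # sed -i "/netconf-netty-threads/c\ \"netconf-netty-threads\": 2," /opt/honeycomb/config/honeycomb.json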
diff --git a/resources/libraries/python/ssh.py b/resources/libraries/python/ssh.py
index db39a0701c..ca6d6556a1 100644
--- a/resources/libraries/python/ssh.py
+++ b/resources/libraries/python/ssh.py
@@ -307,17 +307,35 @@ class SSH(object):
"""
chan.close()
- def scp(self, local_path, remote_path):
- """Copy files from local_path to remote_path.
+ def scp(self, local_path, remote_path, get=False):
+ """Copy files from local_path to remote_path or vice versa.
connect() method has to be called first!
+
+ :param local_path: Path to local file that should be uploaded; or
+ path where to save remote file.
+ :param remote_path: Remote path where to place uploaded file; or
+ path to remote file which should be downloaded.
+ :param get: If True, download (get) the remote file; otherwise upload (put). Default is put.
+ :type local_path: str
+ :type remote_path: str
+ :type get: bool
"""
- logger.trace('SCP {0} to {1}:{2}'.format(
- local_path, self._ssh.get_transport().getpeername(), remote_path))
+ if not get:
+ logger.trace('SCP {0} to {1}:{2}'.format(
+ local_path, self._ssh.get_transport().getpeername(),
+ remote_path))
+ else:
+ logger.trace('SCP {0}:{1} to {2}'.format(
+ self._ssh.get_transport().getpeername(), remote_path,
+ local_path))
# SCPCLient takes a paramiko transport as its only argument
scp = SCPClient(self._ssh.get_transport(), socket_timeout=10)
start = time()
- scp.put(local_path, remote_path)
+ if not get:
+ scp.put(local_path, remote_path)
+ else:
+ scp.get(remote_path, local_path)
scp.close()
end = time()
logger.trace('SCP took {0} seconds'.format(end-start))
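With the new get flag, the same scp() call handles both transfer directions. A short sketch, assuming a reachable node dictionary of the shape used elsewhere in this library (all values here are placeholders):

    # Sketch only; the node dictionary and paths are placeholders.
    from resources.libraries.python.ssh import SSH

    node = {"host": "10.0.0.1", "port": 22,
            "username": "testuser", "password": "test"}

    ssh = SSH()
    ssh.connect(node)
    # Upload (default, get=False): local path -> remote path.
    ssh.scp("resources/traffic_scripts/honeycomb/read_vpp_version.py", "/tmp")
    # Download (get=True): remote path -> local path, as used by
    # HoneycombUtil.archive_honeycomb_log(node, perf=True).
    ssh.scp(".", "/var/log/honeycomb/honeycomb.log", get=True)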
diff --git a/resources/libraries/robot/honeycomb/honeycomb.robot b/resources/libraries/robot/honeycomb/honeycomb.robot
index 9017584f5a..d0f9f0fbf6 100644
--- a/resources/libraries/robot/honeycomb/honeycomb.robot
+++ b/resources/libraries/robot/honeycomb/honeycomb.robot
@@ -73,6 +73,7 @@
| | ... | *Example:*
| | ...
| | ... | \| Clear persisted Honeycomb configuration \| ${nodes['DUT1']} \|
+| | ...
| | [Arguments] | @{duts}
| | Clear persisted Honeycomb config | @{duts}
@@ -86,14 +87,34 @@
| | ...
| | ... | \| Restart Honeycomb and VPP and clear persisted configuration \
| | ... | \| ${nodes['DUT1']} \|
+| | ...
| | [Arguments] | ${node}
| | Stop Honeycomb service on DUTs | ${node}
| | Clear persisted Honeycomb configuration | ${node}
| | Setup DUT | ${node}
| | Sleep | 10s | Wait 10 seconds to make sure VPP is up.
| | Configure Honeycomb service on DUTs | ${node}
| Restart Honeycomb and VPP
-| | [Documentation] | Restarts Honeycomb service and wait until it starts up.
+| | [Documentation] | Stops the Honeycomb service and verifies it is stopped.
+| | ... | Then restarts VPP, starts Honeycomb again and verifies it is running.
+| | ...
+| | ... | *Arguments:*
+| | ... | - node - information about a DUT node. Type: dictionary
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Restart Honeycomb and VPP \| ${nodes['DUT1']} \|
+| | ...
+| | [Arguments] | ${node}
+| | Stop Honeycomb service on DUTs | ${node}
+| | Setup DUT | ${node}
+| | Sleep | 10s | Wait 10 seconds to make sure VPP is up.
+| | Configure Honeycomb service on DUTs | ${node}
+
+| Restart Honeycomb and VPP in performance test
+| | [Documentation] | Stops Honeycomb and VPP and verifies HC is stopped.
+| | ... | Then restarts VPP, starts Honeycomb again and verifies it is running.
| | ...
| | ... | *Arguments:*
| | ... | - node - information about a DUT node. Type: dictionary
@@ -101,10 +122,15 @@
| | ... | *Example:*
| | ...
| | ... | \| Restart Honeycomb and VPP \| ${nodes['DUT1']} \|
+| | ...
| | [Arguments] | ${node}
| | Stop Honeycomb service on DUTs | ${node}
+| | Stop VPP service on DUT | ${node}
| | Setup DUT | ${node}
+| | Sleep | 10s | Wait 10 seconds to make sure VPP is up.
| | Configure Honeycomb service on DUTs | ${node}
+| | Wait until keyword succeeds | 2min | 16sec
+| | ... | Check honeycomb startup state | ${node}
| Archive Honeycomb log file
| | [Documentation] | Copy honeycomb.log file from Honeycomb node\
@@ -112,12 +138,14 @@
| | ...
| | ... | *Arguments:*
| | ... | - node - information about a DUT node. Type: dictionary
+| | ... | - perf - Running on performance testbed (True/False). Type: boolean
| | ...
| | ... | *Example:*
| | ...
| | ... | \| Archive Honeycomb log file \| ${nodes['DUT1']} \|
-| | [Arguments] | ${node}
-| | Archive Honeycomb log | ${node}
+| | ...
+| | [Arguments] | ${node} | ${perf}=${False}
+| | Archive Honeycomb log | ${node} | ${perf}
| Configure ODL Client Service On DUT
| | [Documentation] | Configure and start ODL client, then repeatedly check if
@@ -131,6 +159,7 @@
| | ...
| | ... | \| Configure ODL Client Service on DUT \| ${nodes['DUT1']} \
| | ... | \| carbon-SR1 \|
+| | ...
| | [Arguments] | ${node} | ${odl_name}
| | Copy ODL Client | ${node} | ${odl_name} | /mnt/common | /tmp
| | Setup ODL Client | ${node} | /tmp
@@ -153,6 +182,7 @@
| | ... | *Example:*
| | ...
| | ... | \| Configure Honeycomb for functional testing \| ${nodes['DUT1']} \|
+| | ...
| | [Arguments] | ${node}
| | Configure Restconf binding address | ${node}
| | Configure Log Level | ${node} | TRACE
@@ -172,6 +202,7 @@
| | ... | *Example:*
| | ...
| | ... | \| Configure ODL Client for functional testing \| ${nodes['DUT1']} \|
+| | ...
| | [Arguments] | ${node}
| | ${use_odl_client}= | Get Variable Value | ${HC_ODL}
| | Run Keyword If | '${use_odl_client}' != '${NONE}'
@@ -192,6 +223,7 @@
| | ... | *Example:*
| | ...
| | ... | \| Set Up Honeycomb Functional Test Suite \| ${nodes['DUT1']} \|
+| | ...
| | [Arguments] | ${node}
| | Setup DUT | ${node}
| | Configure all TGs for traffic script
@@ -209,6 +241,7 @@
| | ... | *Example:*
| | ...
| | ... | \| Tear Down Honeycomb Functional Test Suite \| ${nodes['DUT1']} \|
+| | ...
| | [Arguments] | ${node}
| | ${use_odl_client}= | Get Variable Value | ${HC_ODL}
| | Run Keyword If | '${use_odl_client}' != '${NONE}'
@@ -227,6 +260,7 @@
| | ... | *Example:*
| | ...
| | ... | \| Enable Honeycomb Feature \| ${nodes['DUT1']} \| NSH \|
+| | ...
| | [arguments] | ${node} | ${feature}
| | Manage Honeycomb Features | ${node} | ${feature}
@@ -240,5 +274,49 @@
| | ... | *Example:*
| | ...
| | ... | \| Disable Honeycomb Feature \| ${nodes['DUT1']} \| NSH \|
+| | ...
| | [arguments] | ${node} | ${feature}
-| | Manage Honeycomb Features | ${node} | ${feature} | disable=${True}
\ No newline at end of file
+| | Manage Honeycomb Features | ${node} | ${feature} | disable=${True}
+
+| Stop VPP Service on DUT
+| | [Documentation] | Stop the VPP service on the specified node.
+| | ...
+| | ... | *Arguments:*
+| | ... | - node - information about a DUT node. Type: dictionary
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Stop VPP Service on DUT \| ${nodes['DUT1']} \|
+| | ...
+| | [Arguments] | ${node}
+| | Stop VPP Service | ${node}
+
+| Honeycomb Performance Suite Setup Generic
+| | [Documentation] | Generic test suite setup for Honeycomb performance tests.
+| | ... | Performs multiple attempts to start Honeycomb+VPP stack.
+| | ...
+| | ... | *Arguments:*
+| | ... | - node - information about a DUT node. Type: dictionary
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Honeycomb Performance Suite Setup Generic \| ${nodes['DUT1']} \|
+| | ...
+| | [Arguments] | ${node}
+| | Wait until keyword succeeds | 8min | 2min
+| | ... | Restart Honeycomb and VPP in Performance test | ${node}
+
+| Honeycomb Performance Suite Teardown Generic
+| | [Documentation] | Generic test suite teardown for Honeycomb performance
+| | ... | tests. Logs CPU usage before stopping Honeycomb.
+| | ...
+| | ... | *Arguments:*
+| | ... | - node - information about a DUT node. Type: dictionary
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Honeycomb Performance Suite Teardown Generic \| ${nodes['DUT1']} \|
+| | ...
+| | [Arguments] | ${node}
+| | Log Honeycomb and VPP process distribution on cores | ${node}
+| | Stop Honeycomb service on DUTs | ${node}
diff --git a/resources/libraries/robot/honeycomb/performance.robot b/resources/libraries/robot/honeycomb/performance.robot
new file mode 100644
index 0000000000..bd906e1742
--- /dev/null
+++ b/resources/libraries/robot/honeycomb/performance.robot
@@ -0,0 +1,120 @@
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+| Library | resources.libraries.python.honeycomb.Performance
+| Library | resources.libraries.python.InterfaceUtil
+| Resource | resources/libraries/robot/honeycomb/honeycomb.robot
+| Documentation | Keywords used in Honeycomb performance testing.
+
+*** Keywords ***
+| Configure Honeycomb Netconf threads
+| | [Documentation] | Modify thread configuration of Honeycomb's Netconf server.
+| | ... | Requires a restart of Honeycomb to take effect.
+| | ...
+| | ... | *Arguments:*
+| | ... | - node - Node to change configuration on. Type: dictionary
+| | ... | - threads - Number of threads to configure. Type: integer
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Configure Honeycomb Netconf Threads \| ${nodes[DUT1]} \| ${2} \|
+| | ...
+| | [Arguments] | ${node} | ${threads}
+| | Configure Netconf Threads | ${node} | ${threads}
+
+| Run base operational read performance trial
+| | [Documentation] | Send Netconf requests over plain TCP to obtain VPP version
+| | ... | from Honeycomb operational data.
+| | ...
+| | ... | *Arguments:*
+| | ... | - node - Node to run test on. Type: dictionary
+| | ... | - cores - Number of available processor cores. Type: integer
+| | ... | - cycles - Number of test cycles to run. Final results will\
+| | ... | be averaged across all runs. Type: integer
+| | ... | - threads - Number of threads to use for generating traffic.\
+| | ... | Type: integer
+| | ... | - requests - Number of requests to send in each thread and cycle.\
+| | ... | Type: integer
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Run base operational read performance trial \| ${nodes[DUT1]} \
+| | ... | \| ${36} \| ${1} \| ${4} \| ${10000} \|
+| | ...
+| | [Arguments] | ${node} | ${cores} | ${cycles} | ${threads} | ${requests}
+| | ${result}= | Run traffic script on DUT | ${node} | read_vpp_version.py
+| | ... | ${cores} | cycles=${cycles} | threads=${threads}
+| | ... | requests=${requests}
+| | Set Test Message | ${result}
+
+| Generate VPP Startup Configuration for Honeycomb Test on DUT
+| | [Arguments] | ${node}
+| | [Documentation] | Create VPP base startup configuration on DUT, then restart
+| | ... | VPP to apply the configuration.
+| | ...
+| | ... | *Arguments:*
+| | ... | - node - VPP node to configure. Type: dictionary
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Generate VPP Startup Configuration for Honeycomb Test on DUT \
+| | ... | \| ${nodes[DUT1]} \|
+| | ...
+| | Import Library | resources.libraries.python.VppConfigGenerator
+| | ... | WITH NAME | VPP_config
+| | Run keyword | VPP_config.Set Node | ${node}
+| | Run keyword | VPP_config.Add Unix Log
+| | Run keyword | VPP_config.Add Unix CLI Listen
+| | Run keyword | VPP_config.Add Unix Nodaemon
+| | Run keyword | VPP_config.Add CPU Main Core | ${1}
+| | Run keyword | VPP_config.Apply Config
+
+| Log Honeycomb and VPP process distribution on cores
+| | [Documentation] | Log the distribution of VPP and Honeycomb child processes
+| | ... | over the CPU cores.
+| | ...
+| | ... | *Arguments:*
+| | ... | - node - Honeycomb node. Type: dictionary
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Log Honeycomb and VPP process distribution on cores \
+| | ... | \| ${nodes[DUT1]} \|
+| | ...
+| | [Arguments] | ${node}
+| | Log Core Schedule | ${node} | vpp
+| | Log Core Schedule | ${node} | java
+
+| Generate Honeycomb startup configuration for performance test
+| | [Documentation] | Create HC startup configuration and apply to config
+| | ... | file on DUT. Requires Honeycomb restart to take effect.
+| | ...
+| | ... | *Arguments:*
+| | ... | - node - Honeycomb node. Type: dictionary
+| | ... | - cores - Number of available processor cores. Type: integer
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Generate Honeycomb startup configuration for performance test \
+| | ... | \| ${nodes[DUT1]} \|
+| | ...
+| | [Arguments] | ${node} | ${cores}
+| | Import Library | resources.libraries.python.honeycomb.HoneycombSetup.HoneycombStartupConfig
+| | ... | WITH NAME | HC_config
+| | Run Keyword | HC_config.Set CPU Scheduler | FIFO
+| | Run Keyword | HC_config.Set CPU Core Affinity | ${2} | ${cores}
+| | Run Keyword | HC_config.Set JIT Compiler Mode | server
+| | Run Keyword | HC_config.Set Memory Size | ${512} | ${2048}
+| | Run Keyword | HC_config.Set Metaspace Size | ${128} | ${512}
+| | Run Keyword | HC_config.Set NUMA Optimization
+| | Run Keyword | HC_config.apply config | ${node}
diff --git a/resources/tools/scripts/download_hc_pkgs.sh b/resources/tools/scripts/download_hc_pkgs.sh
index 23e0be4b16..1bda02505c 100755
--- a/resources/tools/scripts/download_hc_pkgs.sh
+++ b/resources/tools/scripts/download_hc_pkgs.sh
@@ -30,19 +30,19 @@ if [ "${OS}" == "ubuntu1404" ]; then
OS="ubuntu.trusty.main"
PACKAGE="deb deb.md5"
CLASS="deb"
- VPP_ARTIFACTS="vpp vpp-dbg vpp-dev vpp-lib vpp-plugins vpp-api-java"
+ VPP_ARTIFACTS="vpp vpp-dbg vpp-lib vpp-plugins"
DPDK_ARTIFACTS="vpp-dpdk-dkms"
elif [ "${OS}" == "ubuntu1604" ]; then
OS="ubuntu.xenial.main"
PACKAGE="deb deb.md5"
CLASS="deb"
- VPP_ARTIFACTS="vpp vpp-dbg vpp-dev vpp-lib vpp-plugins vpp-api-java"
+ VPP_ARTIFACTS="vpp vpp-dbg vpp-lib vpp-plugins"
DPDK_ARTIFACTS="vpp-dpdk-dkms"
elif [ "${OS}" == "centos7" ]; then
OS="centos7"
PACKAGE="rpm rpm.md5"
CLASS=""
- VPP_ARTIFACTS="vpp vpp-debuginfo vpp-devel vpp-lib vpp-plugins vpp-api-java"
+ VPP_ARTIFACTS="vpp vpp-debuginfo vpp-lib vpp-plugins"
DPDK_ARTIFACTS=""
fi
diff --git a/resources/tools/scripts/topo_installation.py b/resources/tools/scripts/topo_installation.py
index 0488bdae69..5c91abbd0f 100755
--- a/resources/tools/scripts/topo_installation.py
+++ b/resources/tools/scripts/topo_installation.py
@@ -85,15 +85,34 @@ def main():
help="Packages paths to copy")
parser.add_argument("-c", "--cancel", help="Cancel installation",
action="store_true")
+ parser.add_argument("-hc", "--honeycomb", help="Include Honeycomb package.",
+ required=False, default=False)
+
args = parser.parse_args()
topology_file = args.topo
packages = args.packages
install_dir = args.directory
cancel_installation = args.cancel
+ honeycomb = args.honeycomb
work_file = open(topology_file)
topology = load(work_file.read())['nodes']
+ def fix_interrupted(package):
+ """If there are interrupted installations, clean them up."""
+
+ cmd = "dpkg -l | grep {0}".format(package)
+ ret, _, _ = ssh.exec_command(cmd)
+ if ret == 0:
+ # Try to fix interrupted installations
+ cmd = 'dpkg --configure -a'
+ stdout = ssh_no_error(ssh, cmd, sudo=True)
+ print "###TI {}".format(stdout)
+ # Try to remove installed packages
+ cmd = 'apt-get purge -y "{0}.*"'.format(package)
+ stdout = ssh_no_error(ssh, cmd, sudo=True)
+ print "###TI {}".format(stdout)
+
ssh = SSH()
for node in topology:
if topology[node]['type'] == "DUT":
@@ -106,41 +125,44 @@ def main():
stdout = ssh_ignore_error(ssh, cmd)
print "###TI {}".format(stdout)
- cmd = "dpkg -l | grep vpp"
- ret, _, _ = ssh.exec_command(cmd)
- if ret == 0:
- # Try to fix interrupted installations
- cmd = 'dpkg --configure -a'
- stdout = ssh_no_error(ssh, cmd, sudo=True)
- print "###TI {}".format(stdout)
- # Try to remove installed vpp.* packages
- cmd = 'apt-get purge -y "vpp.*"'
- stdout = ssh_no_error(ssh, cmd, sudo=True)
+ if honeycomb:
+ fix_interrupted("honeycomb")
+ # remove HC logs
+ cmd = "rm -rf /var/log/honeycomb"
+ stdout = ssh_ignore_error(ssh, cmd, sudo=True)
print "###TI {}".format(stdout)
+ fix_interrupted("vpp")
+
else:
# Create installation directory on DUT
cmd = "rm -r {0}; mkdir {0}".format(install_dir)
stdout = ssh_no_error(ssh, cmd)
print "###TI {}".format(stdout)
- # Copy packages from local path to installation dir
- for deb in packages:
- print "###TI scp: {}".format(deb)
- ssh.scp(local_path=deb, remote_path=install_dir)
-
- cmd = "dpkg -l | grep vpp"
- ret, _, _ = ssh.exec_command(cmd)
- if ret == 0:
- # Try to fix interrupted installations
- cmd = 'dpkg --configure -a'
- stdout = ssh_no_error(ssh, cmd, sudo=True)
- print "###TI {}".format(stdout)
- # Try to remove installed vpp.* packages
- cmd = 'apt-get purge -y "vpp.*"'
- stdout = ssh_no_error(ssh, cmd, sudo=True)
- print "###TI {}".format(stdout)
-
- # Installation of VPP deb packages
+ if honeycomb:
+ smd = "ls ~/honeycomb | grep .deb"
+ stdout = ssh_ignore_error(ssh, smd)
+ if "honeycomb" in stdout:
+ # If custom honeycomb packages exist, use them
+ cmd = "cp ~/honeycomb/*.deb {0}".format(install_dir)
+ stdout = ssh_no_error(ssh, cmd)
+ print "###TI {}".format(stdout)
+ else:
+ # Copy packages from local path to installation dir
+ for deb in packages:
+ print "###TI scp: {}".format(deb)
+ ssh.scp(local_path=deb, remote_path=install_dir)
+ else:
+ # Copy packages from local path to installation dir
+ for deb in packages:
+ print "###TI scp: {}".format(deb)
+ ssh.scp(local_path=deb, remote_path=install_dir)
+
+ if honeycomb:
+ fix_interrupted("honeycomb")
+ fix_interrupted("vpp")
+
+ # Installation of deb packages
cmd = "dpkg -i --force-all {}/*.deb".format(install_dir)
stdout = ssh_no_error(ssh, cmd, sudo=True)
print "###TI {}".format(stdout)
diff --git a/resources/tools/testbed-setup/playbooks/01-host-setup.yaml b/resources/tools/testbed-setup/playbooks/01-host-setup.yaml
index 6d644e5054..8fa9d66799 100644
--- a/resources/tools/testbed-setup/playbooks/01-host-setup.yaml
+++ b/resources/tools/testbed-setup/playbooks/01-host-setup.yaml
@@ -127,3 +127,5 @@
apt: name=lxc state=present
- name: Disable 80-vpp.conf
command: ln -s /dev/null /etc/sysctl.d/80-vpp.conf
+ - name: Install java
+ apt: name=openjdk-8-jdk state=present
diff --git a/resources/traffic_scripts/honeycomb/read_vpp_version.py b/resources/traffic_scripts/honeycomb/read_vpp_version.py
new file mode 100755
index 0000000000..8a861801d6
--- /dev/null
+++ b/resources/traffic_scripts/honeycomb/read_vpp_version.py
@@ -0,0 +1,360 @@
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import socket
+import multiprocessing
+import argparse
+from time import time
+
+
+class Counter(object):
+ """Counter used for stats collection."""
+ def __init__(self, start=0):
+ """Initializer."""
+ self.lock = multiprocessing.Lock()
+ self.value = start
+
+ def increment(self, value=1):
+ """Increment counter and return the new value."""
+ self.lock.acquire()
+ val = self.value
+ try:
+ self.value += value
+ finally:
+ self.lock.release()
+ return val
+
+
+class timer(object):
+ """Timer used used during test execution."""
+ def __init__(self, verbose=False):
+ self.verbose = verbose
+
+ def __enter__(self):
+ """Start the timer."""
+ self.start = time()
+ return self
+
+ def __exit__(self, *args):
+ """Stop the timer and save current value."""
+ self.end = time()
+ self.secs = self.end - self.start
+ self.msecs = self.secs * 1000 # millisecs
+ if self.verbose:
+ print("elapsed time: {0} ms".format(self.msecs))
+
+
+class ConfigBlaster(object):
+ """Generates Netconf requests, receives replies and collects statistics."""
+
+ TIMEOUT = 10
+
+ # Hello message with capabilities list for Netconf sessions.
+ hello = u"""<hello xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"
+ message-id="m-0">
+ <capabilities>
+ <capability>urn:ietf:params:netconf:base:1.0</capability>
+ </capabilities>
+ </hello>
+ ]]>]]>"""
+
+ # RPC to retrieve VPP version (minimal processing in VPP)
+ request_template = u"""<rpc xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"
+ message-id="m-1">
+ <get>
+ <filter xmlns:ns0="urn:ietf:params:xml:ns:netconf:base:1.0"
+ ns0:type="subtree">
+ <vpp-state xmlns="urn:opendaylight:params:xml:ns:yang:vpp:management">
+ <version/>
+ </vpp-state>
+ </filter>
+ </get>
+ </rpc>
+ ]]>]]>"""
+
+ class Stats(object):
+ """Stores and further processes statistics collected by worker
+ threads during their execution.
+ """
+
+ def __init__(self):
+ """Initializer."""
+ self.ok_rqst_rate = Counter(0)
+ self.total_rqst_rate = Counter(0)
+ self.ok_rqsts = Counter(0)
+ self.total_rqsts = Counter(0)
+
+ def process_stats(self, rqst_stats, elapsed_time):
+ """Calculates the stats for request/reply throughput, and aggregates
+ statistics across all threads.
+
+ :param rqst_stats: Request statistics dictionary.
+ :param elapsed_time: Elapsed time for the test.
+ :type rqst_stats: dict
+ :type elapsed_time: int
+ :returns: Rates (requests/sec) for successfully finished requests
+ and the total number of requests.
+ :rtype: tuple
+ """
+ ok_rqsts = rqst_stats["OK"]
+ total_rqsts = sum(rqst_stats.values())
+
+ ok_rqst_rate = ok_rqsts / elapsed_time
+ total_rqst_rate = total_rqsts / elapsed_time
+
+ self.ok_rqsts.increment(ok_rqsts)
+ self.total_rqsts.increment(total_rqsts)
+
+ self.ok_rqst_rate.increment(ok_rqst_rate)
+ self.total_rqst_rate.increment(total_rqst_rate)
+
+ return ok_rqst_rate, total_rqst_rate
+
+ @property
+ def get_ok_rqst_rate(self):
+ return self.ok_rqst_rate.value
+
+ @property
+ def get_total_rqst_rate(self):
+ return self.total_rqst_rate.value
+
+ @property
+ def get_ok_rqsts(self):
+ return self.ok_rqsts.value
+
+ @property
+ def get_total_rqsts(self):
+ return self.total_rqsts.value
+
+ def __init__(self, host, port, ncycles, nthreads, nrequests):
+ """Initializer.
+
+ :param host: Target IP address.
+ :param port: Target port.
+ :param ncycles: Number of test cycles.
+ :param nthreads: Number of threads for packet generation.
+ :param nrequests: Number of requests to send per thread.
+ :type host: str
+ :type port: int
+ :type ncycles: int
+ :type nthreads: int
+ :type nrequests: int
+ """
+
+ self.host = host
+ self.port = port
+ self.ncycles = ncycles
+ self.nthreads = nthreads
+ self.nrequests = nrequests
+
+ self.stats = self.Stats()
+ self.total_ok_rqsts = 0
+
+ self.print_lock = multiprocessing.Lock()
+ self.cond = multiprocessing.Condition()
+ self.threads_done = 0
+
+ self.recv_buf = 8192
+
+ def send_request(self, sock):
+ """Send Netconf request and receive the reply.
+
+ :param sock: Socket object to use for transfer.
+ :type sock: socket object
+ :returns: Response to request or error message.
+ :rtype: str
+ """
+
+ sock.send(self.request_template)
+ try:
+ return sock.recv(self.recv_buf)
+ except socket.timeout:
+ return "timeout"
+ except socket.error:
+ return "error"
+
+ def send_requests(self, tid, stats):
+ """Read entries from the Honeycomb operational data store. This function
+ is executed by a worker thread.
+
+ :param tid: Thread ID - used to identify the Blaster thread when
+ statistics for the thread are printed out.
+ :param stats: Synchronized queue object for returning execution stats.
+ :type tid: int
+ :type stats: multiprocessing.Queue
+ """
+
+ rqst_stats = {"OK": 0, "Error": 0, "Timeout": 0}
+
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.settimeout(5)
+ # Initiate connection
+ sock.connect((self.host, self.port))
+ # Send hello message
+ sock.send(self.hello)
+ # Receive hello message
+ sock.recv(self.recv_buf)
+ # Determine length of expected responses
+ self.recv_buf = len(self.send_request(sock))
+
+ with self.print_lock:
+ print("\n Thread {0}:\n"
+ " Sending {1} requests".format(tid,
+ self.nrequests))
+
+ replies = [None]*self.nrequests
+ with timer() as t:
+ for x in range(self.nrequests):
+ sts = self.send_request(sock)
+ replies[x] = sts
+
+ for reply in replies:
+ if reply == "timeout":
+ rqst_stats["Timeout"] += 1
+ elif "error" in reply:
+ rqst_stats["Error"] += 1
+ else:
+ rqst_stats["OK"] += 1
+
+ ok_rps, total_rps = self.stats.process_stats(
+ rqst_stats, t.secs)
+
+ with self.print_lock:
+ print("\n Thread {0} results (READ): ".format(tid))
+ print(" Elapsed time: {0:.2f}s,".format(t.secs))
+ print(" Requests/s: {0:.2f} OK, {1:.2f} Total".format(
+ ok_rps, total_rps))
+ print(" Stats ({Requests}, {entries}): "),
+ print(rqst_stats)
+ self.threads_done += 1
+
+ sock.close()
+
+ stats.put({"stats": rqst_stats, "time": t.secs})
+
+ with self.cond:
+ self.cond.notify_all()
+
+ def run_cycle(self, function):
+ """Runs a test cycle. Each test consists of <cycles> test cycles, where
+ <threads> worker threads are started in each test cycle. Each thread
+ reads <requests> entries using Netconf RPCs.
+
+ :param function: Function to be executed in each thread.
+ :type function: function
+ :return: None
+ """
+
+ self.total_ok_rqsts = 0
+ stats_queue = multiprocessing.Queue()
+
+ for c in range(self.ncycles):
+ self.stats = self.Stats()
+ with self.print_lock:
+ print "\nCycle {0}:".format(c)
+
+ threads = []
+ thread_stats = []
+ for i in range(self.nthreads):
+ t = multiprocessing.Process(target=function,
+ args=(i, stats_queue))
+ threads.append(t)
+ t.start()
+
+ # Wait for all threads to finish and measure the execution time
+ with timer() as t:
+ for _ in threads:
+ thread_stats.append(stats_queue.get())
+ for thread in threads:
+ thread.join()
+
+ for item in thread_stats:
+ self.stats.process_stats(item["stats"], item["time"])
+
+ with self.print_lock:
+ print("\n*** Test summary:")
+ print(" Elapsed time: {0:.2f}s".format(t.secs))
+ print(
+ " Peak requests/s: {0:.2f} OK, {1:.2f} Total".format(
+ self.stats.get_ok_rqst_rate,
+ self.stats.get_total_rqst_rate))
+ print(
+ " Avg. requests/s: {0:.2f} OK, {1:.2f} Total ({2:.2f} "
+ "of peak total)".format(
+ self.stats.get_ok_rqsts / t.secs,
+ self.stats.get_total_rqsts / t.secs,
+ (self.stats.get_total_rqsts / t.secs * 100) /
+ self.stats.get_total_rqst_rate))
+
+ self.total_ok_rqsts += self.stats.get_ok_rqsts
+
+ self.threads_done = 0
+
+ def add_blaster(self):
+ """Run the test."""
+ self.run_cycle(self.send_requests)
+
+ @property
+ def get_ok_rqsts(self):
+ return self.total_ok_rqsts
+
+
+def create_arguments_parser():
+ """Creates argument parser for test script.
+ Shorthand to arg parser on library level in order to access and
+ eventually enhance in ancestors.
+
+ :returns: argument parser supporting arguments and parameters
+ :rtype: argparse.ArgumentParser
+ """
+ my_parser = argparse.ArgumentParser(
+ description="entry reading performance test: Reads entries from "
+ "the config tree, as specified by optional parameters.")
+
+ my_parser.add_argument(
+ "--host", default="127.0.0.1",
+ help="Host where odl controller is running (default is 127.0.0.1).")
+ my_parser.add_argument(
+ "--port", default=7777,
+ help="Port on which Honeycomb's Netconf is listening"
+ " (default is 7777 for TCP)")
+ my_parser.add_argument(
+ "--cycles", type=int, default=1,
+ help="Number of entry read cycles; default 1. <THREADS> worker threads "
+ "are started in each cycle and the cycle ends when all threads "
+ "finish. Another cycle is started when the previous cycle "
+ "is finished.")
+ my_parser.add_argument(
+ "--threads", type=int, default=1,
+ help="Number of request worker threads to start in each cycle; "
+ "default=1. Each thread will read <entries> entries.")
+ my_parser.add_argument(
+ "--requests", type=int, default=10,
+ help="Number of requests that will be made by each worker thread "
+ "in each cycle; default 10")
+
+ return my_parser
+
+if __name__ == "__main__":
+
+ parser = create_arguments_parser()
+ in_args = parser.parse_args()
+
+ fct = ConfigBlaster(in_args.host, in_args.port, in_args.cycles,
+ in_args.threads, in_args.requests)
+
+ # Run through <cycles>, where <threads> are started in each cycle and
+ # <entries> are added from each thread
+ fct.add_blaster()
+
+ print " Successful reads: {0}\n".format(fct.get_ok_rqsts)