Diffstat (limited to 'resources/libraries/python/honeycomb')
-rw-r--r--  resources/libraries/python/honeycomb/HoneycombSetup.py  | 176
-rw-r--r--  resources/libraries/python/honeycomb/HoneycombUtil.py   |  15
-rw-r--r--  resources/libraries/python/honeycomb/Performance.py     | 129
3 files changed, 311 insertions, 9 deletions
diff --git a/resources/libraries/python/honeycomb/HoneycombSetup.py b/resources/libraries/python/honeycomb/HoneycombSetup.py
index 53130f405b..13b8b971b6 100644
--- a/resources/libraries/python/honeycomb/HoneycombSetup.py
+++ b/resources/libraries/python/honeycomb/HoneycombSetup.py
@@ -25,7 +25,6 @@ from resources.libraries.python.honeycomb.HoneycombUtil \
import HoneycombUtil as HcUtil
from resources.libraries.python.ssh import SSH
from resources.libraries.python.topology import NodeType
-from resources.libraries.python.DUTSetup import DUTSetup
class HoneycombSetup(object):
@@ -107,6 +106,34 @@ class HoneycombSetup(object):
format(errors))
@staticmethod
+ def restart_honeycomb_on_dut(node):
+ """Restart Honeycomb on specified DUT nodes.
+
+ This keyword restarts the Honeycomb service on specified DUTs. Use the
+ keyword "Check Honeycomb Startup State" to check if the Honeycomb is up
+ and running.
+
+ :param node: Node to restart Honeycomb on.
+ :type node: dict
+ :raises HoneycombError: If Honeycomb fails to start.
+ """
+
+ logger.console("\n(re)Starting Honeycomb service ...")
+
+ cmd = "sudo service honeycomb restart"
+
+ ssh = SSH()
+ ssh.connect(node)
+ (ret_code, _, _) = ssh.exec_command_sudo(cmd)
+ if int(ret_code) != 0:
+ raise HoneycombError('Node {0} failed to restart Honeycomb.'.
+ format(node['host']))
+ else:
+ logger.info(
+ "Honeycomb service restart is in progress on node {0}".format(
+ node['host']))
+
+ @staticmethod
def check_honeycomb_startup_state(*nodes):
"""Check state of Honeycomb service during startup on specified nodes.
@@ -130,8 +157,14 @@ class HoneycombSetup(object):
for node in nodes:
if node['type'] == NodeType.DUT:
HoneycombSetup.print_ports(node)
- status_code, _ = HTTPRequest.get(node, path,
- enable_logging=False)
+ try:
+ status_code, _ = HTTPRequest.get(node, path,
+ enable_logging=False)
+ except HTTPRequestError:
+ ssh = SSH()
+ ssh.connect(node)
+ ssh.exec_command("tail -n 100 /var/log/syslog")
+ raise
if status_code == HTTPCodes.OK:
logger.info("Honeycomb on node {0} is up and running".
format(node['host']))
@@ -479,7 +512,7 @@ class HoneycombSetup(object):
for feature in features:
cmd += " {0}".format(feature)
- ret_code, _, stderr = ssh.exec_command_sudo(cmd, timeout=120)
+ ret_code, _, _ = ssh.exec_command_sudo(cmd, timeout=120)
if int(ret_code) != 0:
raise HoneycombError("Feature install did not succeed.")
@@ -590,7 +623,140 @@ class HoneycombSetup(object):
ssh = SSH()
ssh.connect(node)
cmd = "service vpp stop"
- ret_code, _, _ = ssh.exec_command_sudo(cmd)
+ ret_code, _, _ = ssh.exec_command_sudo(cmd, timeout=80)
if int(ret_code) != 0:
raise RuntimeError("Could not stop VPP service on node {0}".format(
node['host']))
+
+
+class HoneycombStartupConfig(object):
+ """Generator for Honeycomb startup configuration.
+ """
+ def __init__(self):
+ """Initializer."""
+
+ self.template = """
+ #!/bin/sh -
+ STATUS=100
+
+ while [ $STATUS -eq 100 ]
+ do
+ {java_call} -jar $(dirname $0)/{jar_filename}
+ STATUS=$?
+ echo "Honeycomb exited with status: $STATUS"
+ if [ $STATUS -eq 100 ]
+ then
+ echo "Restarting..."
+ fi
+ done
+ """
+
+ self.java_call = "{scheduler} {affinity} java {jit_mode} {params}"
+
+ self.scheduler = ""
+ self.core_affinity = ""
+ self.jit_mode = ""
+ self.params = ""
+ self.numa = ""
+
+ self.config = ""
+ self.ssh = SSH()
+
+ def apply_config(self, node):
+ """Generate configuration file /opt/honeycomb/honeycomb on the specified
+ node.
+
+ :param node: Honeycomb node.
+ :type node: dict
+ """
+
+ self.ssh.connect(node)
+ _, filename, _ = self.ssh.exec_command("ls /opt/honeycomb | grep .jar")
+
+ java_call = self.java_call.format(scheduler=self.scheduler,
+ affinity=self.core_affinity,
+ jit_mode=self.jit_mode,
+ params=self.params)
+ self.config = self.template.format(java_call=java_call,
+ jar_filename=filename)
+
+ self.ssh.connect(node)
+ cmd = "echo '{config}' > /tmp/honeycomb " \
+ "&& chmod +x /tmp/honeycomb " \
+ "&& sudo mv -f /tmp/honeycomb /opt/honeycomb".format(
+ config=self.config)
+ self.ssh.exec_command(cmd)
+
+ def set_cpu_scheduler(self, scheduler="FIFO"):
+ """Use alternate CPU scheduler.
+
+ Note: OTHER scheduler doesn't load-balance over isolcpus.
+
+ :param scheduler: CPU scheduler to use.
+ :type scheduler: str
+ """
+
+ schedulers = {"FIFO": "-f 99", # First In, First Out
+ "RR": "-r 99", # Round Robin
+ "OTHER": "-o", # Ubuntu default
+ }
+ self.scheduler = "chrt {0}".format(schedulers[scheduler])
+
+ def set_cpu_core_affinity(self, low, high=None):
+ """Set core affinity for the honeycomb process and subprocesses.
+
+ :param low: Lowest core ID number.
+ :param high: Highest core ID number. Leave empty to use a single core.
+ :type low: int
+ :type high: int
+ """
+
+ self.core_affinity = "taskset -c {low}-{high}".format(
+ low=low, high=high if high else low)
+
+ def set_jit_compiler_mode(self, jit_mode):
+ """Set running mode for Java's JIT compiler.
+
+ :param jit_mode: Desired JIT mode.
+ :type jit_mode: str
+ """
+
+ modes = {"client": "-client", # Default
+ "server": "-server", # Higher performance but longer warmup
+ "classic": "-classic" # Disables JIT compiler
+ }
+
+ self.jit_mode = modes[jit_mode]
+
+ def set_memory_size(self, mem_min, mem_max=None):
+ """Set minimum and maximum memory use for the JVM.
+
+ :param mem_min: Minimum amount of memory (MB).
+ :param mem_max: Maximum amount of memory (MB). Default is 4 times
+ minimum value.
+ :type mem_min: int
+ :type mem_max: int
+ """
+
+ self.params += " -Xms{min}m -Xmx{max}m".format(
+ min=mem_min, max=mem_max if mem_max else mem_min*4)
+
+ def set_metaspace_size(self, mem_min, mem_max=None):
+ """Set minimum and maximum memory used for class metadata in the JVM.
+
+ :param mem_min: Minimum metaspace size (MB).
+ :param mem_max: Maximum metaspace size (MB). Default is 4 times
+ minimum value.
+ :type mem_min: int
+ :type mem_max: int
+ """
+
+ self.params += " -XX:MetaspaceSize={min}m " \
+ "-XX:MaxMetaspaceSize={max}m".format(
+ min=mem_min, max=mem_max if mem_max else mem_min*4)
+
+ def set_numa_optimization(self):
+ """Use optimization of memory use and garbage collection for NUMA
+ architectures."""
+
+ self.params += " -XX:+UseNUMA -XX:+UseParallelGC"
diff --git a/resources/libraries/python/honeycomb/HoneycombUtil.py b/resources/libraries/python/honeycomb/HoneycombUtil.py
index a718a242f2..24f81af7b3 100644
--- a/resources/libraries/python/honeycomb/HoneycombUtil.py
+++ b/resources/libraries/python/honeycomb/HoneycombUtil.py
@@ -399,16 +399,23 @@ class HoneycombUtil(object):
return HTTPRequest.delete(node, path)
@staticmethod
- def archive_honeycomb_log(node):
+ def archive_honeycomb_log(node, perf=False):
"""Copy honeycomb log file from DUT node to VIRL for archiving.
:param node: Honeycomb node.
+ :param perf: Alternate handling, for use with performance test topology.
:type node: dict
+ :type perf: bool
"""
ssh = SSH()
ssh.connect(node)
- cmd = "cp /var/log/honeycomb/honeycomb.log /scratch/"
-
- ssh.exec_command_sudo(cmd)
+ if not perf:
+ cmd = "cp /var/log/honeycomb/honeycomb.log /scratch/"
+ ssh.exec_command_sudo(cmd)
+ else:
+ ssh.scp(
+ ".",
+ "/var/log/honeycomb/honeycomb.log",
+ get=True)
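
A short hedged sketch of how the extended archive keyword might be invoked in both modes; the node dict is assumed to follow the CSIT topology format.

    from resources.libraries.python.honeycomb.HoneycombUtil import HoneycombUtil

    # VIRL functional topology: copy the log to /scratch/ on the DUT.
    HoneycombUtil.archive_honeycomb_log(node)
    # Performance topology: scp the log from the DUT to the local directory.
    HoneycombUtil.archive_honeycomb_log(node, perf=True)
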
diff --git a/resources/libraries/python/honeycomb/Performance.py b/resources/libraries/python/honeycomb/Performance.py
new file mode 100644
index 0000000000..1c6b0bc522
--- /dev/null
+++ b/resources/libraries/python/honeycomb/Performance.py
@@ -0,0 +1,129 @@
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Implementation of keywords for testing Honeycomb performance."""
+
+from resources.libraries.python.ssh import SSH
+from resources.libraries.python.constants import Constants as Const
+from resources.libraries.python.honeycomb.HoneycombUtil import HoneycombError
+
+
+class Performance(object):
+ """Keywords used in Honeycomb performance testing."""
+
+ def __init__(self):
+ """Initializer."""
+ pass
+
+ @staticmethod
+ def configure_netconf_threads(node, threads):
+ """Set Honeycomb's Netconf thread count in configuration.
+
+ :param node: Honeycomb node.
+ :param threads: Number of threads.
+ :type node: dict
+ :type threads: int
+ :raises HoneycombError: If the operation fails.
+ """
+
+ find = "netconf-netty-threads"
+ replace = '\\"netconf-netty-threads\\": {0},'.format(threads)
+
+ argument = '"/{0}/c\\ {1}"'.format(find, replace)
+ path = "{0}/config/honeycomb.json".format(Const.REMOTE_HC_DIR)
+ command = "sed -i {0} {1}".format(argument, path)
+
+ ssh = SSH()
+ ssh.connect(node)
+ (ret_code, _, stderr) = ssh.exec_command_sudo(command)
+ if ret_code != 0:
+ raise HoneycombError("Failed to modify configuration on "
+ "node {0}, {1}".format(node, stderr))
+
+ @staticmethod
+ def run_traffic_script_on_dut(node, script, cores, reserved=2,
+ *args, **kwargs):
+ """Copy traffic script over to the specified node and execute with
+ the provided arguments.
+
+ :param node: Node in topology.
+ :param script: Name of the script to execute.
+ :param cores: Number of processor cores to use.
+ :param reserved: Number of cores reserved for other tasks. Default is 2,
+ one for system tasks and one for VPP main thread.
+ :param args: Sequential arguments for the script.
+ :param kwargs: Named arguments for the script.
+ :type node: dict
+ :type script: str
+ :type cores: int
+ :type reserved: int
+ :type args: list
+ :type kwargs: dict
+ """
+
+ path = "resources/traffic_scripts/honeycomb/{0}".format(script)
+
+ # Assemble arguments for traffic script
+ arguments = ""
+ for arg in args:
+ arguments += "{0} ".format(arg)
+
+ for key, value in kwargs.items():
+ arguments += "--{0} {1} ".format(key, value)
+
+ ssh = SSH()
+ ssh.connect(node)
+ ssh.scp(path, "/tmp")
+
+ # Use alternate scheduler, Ubuntu's default can't load-balance
+ # over isolcpus
+ scheduler = "chrt -f 99"
+ core_afi = "taskset -c {first}-{last}".format(
+ first=reserved, last=cores-1)
+
+ cmd = "{scheduler} {affinity} python /tmp/{script} {args}".format(
+ scheduler=scheduler,
+ affinity=core_afi,
+ script=script,
+ args=arguments)
+
+ ret_code, stdout, _ = ssh.exec_command_sudo(cmd, timeout=600)
+
+ ssh.exec_command("sudo pkill python ; rm /tmp/{0}".format(script))
+ if ret_code != 0:
+ raise HoneycombError("Traffic script failed to execute.")
+ for line in stdout.splitlines():
+ if "Avg. requests" in line:
+ return line
+
+ @staticmethod
+ def log_core_schedule(node, process):
+ """Determine which cores the process' threads are running on.
+
+ :param node: Honeycomb node.
+ :param process: Name of the process.
+ :type node: dict
+ :type process: str
+ """
+
+ # Get info on process and all of its children
+ cmd1 = """cat /proc/`pidof {0}`/task/*/stat""".format(process)
+
+ # Parse process ID, name and core index
+ cmd2 = """awk '{print $1" "$2" "$39}'"""
+
+ cmd = "{0} | {1}".format(cmd1, cmd2)
+
+ ssh = SSH()
+ ssh.connect(node)
+ ssh.exec_command(cmd)
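
Finally, a hedged sketch of how the new Performance keywords might fit together in a test; the traffic script name and its --requests option are hypothetical, only the keyword signatures come from the file above.

    from resources.libraries.python.honeycomb.HoneycombSetup import HoneycombSetup
    from resources.libraries.python.honeycomb.Performance import Performance

    # Raise Honeycomb's Netconf thread count, then restart to apply it.
    Performance.configure_netconf_threads(node, threads=4)
    HoneycombSetup.restart_honeycomb_on_dut(node)

    # Run a traffic script pinned to cores 2..7 (cores=8, reserved=2);
    # "netconf_rpc_blaster.py" and the requests argument are hypothetical.
    result = Performance.run_traffic_script_on_dut(
        node, "netconf_rpc_blaster.py", cores=8, reserved=2, requests=10000)

    # Log which cores the Honeycomb (java) threads are scheduled on.
    Performance.log_core_schedule(node, "java")
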