Diffstat (limited to 'csit.infra.dash/app/cdash/utils')
-rw-r--r--  csit.infra.dash/app/cdash/utils/__init__.py         12
-rw-r--r--  csit.infra.dash/app/cdash/utils/anomalies.py        69
-rw-r--r--  csit.infra.dash/app/cdash/utils/constants.py       469
-rw-r--r--  csit.infra.dash/app/cdash/utils/control_panel.py    87
-rw-r--r--  csit.infra.dash/app/cdash/utils/telemetry_data.py  362
-rw-r--r--  csit.infra.dash/app/cdash/utils/tooltips.yaml       51
-rw-r--r--  csit.infra.dash/app/cdash/utils/trigger.py          65
-rw-r--r--  csit.infra.dash/app/cdash/utils/url_processing.py   99
-rw-r--r--  csit.infra.dash/app/cdash/utils/utils.py           895
9 files changed, 2109 insertions, 0 deletions
diff --git a/csit.infra.dash/app/cdash/utils/__init__.py b/csit.infra.dash/app/cdash/utils/__init__.py
new file mode 100644
index 0000000000..c6a5f639fe
--- /dev/null
+++ b/csit.infra.dash/app/cdash/utils/__init__.py
@@ -0,0 +1,12 @@
+# Copyright (c) 2024 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/csit.infra.dash/app/cdash/utils/anomalies.py b/csit.infra.dash/app/cdash/utils/anomalies.py
new file mode 100644
index 0000000000..3deece2e04
--- /dev/null
+++ b/csit.infra.dash/app/cdash/utils/anomalies.py
@@ -0,0 +1,69 @@
+# Copyright (c) 2024 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Functions used by Dash applications to detect anomalies.
+"""
+
+from numpy import isnan
+
+from ..jumpavg import classify
+
+
+def classify_anomalies(data):
+ """Process the data and return anomalies and trending values.
+
+ Gather the data into groups, using the group average as the trend value.
+ Mark values within a group as normal; the first value of a group with
+ a changed average is marked as a regression or a progression.
+
+ :param data: Full data set with unavailable samples replaced by nan.
+ :type data: OrderedDict
+ :returns: Classification and trend values.
+ :rtype: tuple(list of str, list of float, list of float)
+ """
+ # NaN means something went wrong.
+ # Use 0.0 so such a sample is reported as a severe regression.
+ bare_data = [0.0 if isnan(sample) else sample for sample in data.values()]
+ # TODO: Make BitCountingGroupList a subclass of list again?
+ group_list = classify(bare_data).group_list
+ group_list.reverse() # Just to use .pop() for FIFO.
+ classification = list()
+ avgs = list()
+ stdevs = list()
+ active_group = None
+ values_left = 0
+ avg = 0.0
+ stdv = 0.0
+ for sample in data.values():
+ if isnan(sample):
+ classification.append("outlier")
+ avgs.append(sample)
+ stdevs.append(sample)
+ continue
+ if values_left < 1 or active_group is None:
+ values_left = 0
+ while values_left < 1: # Ignore empty groups (should not happen).
+ active_group = group_list.pop()
+ values_left = len(active_group.run_list)
+ avg = active_group.stats.avg
+ stdv = active_group.stats.stdev
+ classification.append(active_group.comment)
+ avgs.append(avg)
+ stdevs.append(stdv)
+ values_left -= 1
+ continue
+ classification.append("normal")
+ avgs.append(avg)
+ stdevs.append(stdv)
+ values_left -= 1
+ return classification, avgs, stdevs
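+
+# A minimal usage sketch (illustrative only; the sample values are made up
+# and the exact classification depends on the jumpavg classifier):
+#
+#     from collections import OrderedDict
+#     samples = OrderedDict(
+#         (("build-1", 2.0e6), ("build-2", float("nan")), ("build-3", 2.1e6))
+#     )
+#     classification, avgs, stdevs = classify_anomalies(samples)
+#     # classification items are "normal", "outlier", "regression" or
+#     # "progression"; avgs and stdevs hold the trend value and its stdev
+#     # for each sample.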
diff --git a/csit.infra.dash/app/cdash/utils/constants.py b/csit.infra.dash/app/cdash/utils/constants.py
new file mode 100644
index 0000000000..840766488a
--- /dev/null
+++ b/csit.infra.dash/app/cdash/utils/constants.py
@@ -0,0 +1,469 @@
+# Copyright (c) 2024 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Constants used in CDash.
+
+"Constant" means a value that keeps its value since initialization. The value
+does not need to be hard coded here, but can be read from environment variables.
+"""
+
+import logging
+
+from dash import html
+
+
+class Constants:
+ """Constants used in CDash.
+ """
+
+ ############################################################################
+ # General, application wide constants.
+
+ # Logging settings.
+ LOG_LEVEL = logging.INFO
+ LOG_FORMAT = "%(asctime)s: %(levelname)s: %(message)s"
+ LOG_DATE_FORMAT = "%Y/%m/%d %H:%M:%S"
+
+ # The application title.
+ TITLE = "FD.io CSIT"
+ BRAND = "CSIT-Dash"
+
+ # The application description.
+ DESCRIPTION = "Performance Dashboard"
+
+ # External stylesheets.
+ EXTERNAL_STYLESHEETS = ["/static/dist/css/bootstrap.css", ]
+
+ # URL to Jenkins
+ URL_JENKINS = "https://jenkins.fd.io/job/"
+
+ # URL to logs
+ URL_LOGS = "https://s3-logs.fd.io/vex-yul-rot-jenkins-1/"
+
+ # URL to the documentation
+ URL_DOC_TRENDING = "https://csit.fd.io/cdocs/methodology/trending/analysis/"
+ URL_DOC_REL_NOTES = "https://csit.fd.io/cdocs/release_notes/current/"
+
+ # Path and name of the file specifying the HTML layout of the dash
+ # application.
+ MAIN_HTML_LAYOUT_FILE = "base_layout.jinja2"
+
+ # Path and name of the file specifying the HTML layout of the dash
+ # application.
+ HTML_LAYOUT_FILE = "cdash/templates/dash_layout.jinja2"
+
+ # Application root.
+ APPLICATIN_ROOT = "/"
+
+ # Data to be downloaded from the parquets specification file.
+ DATA_SPEC_FILE = "cdash/data/data.yaml"
+
+ # Path to schemas to use when reading data from the parquet.
+ PATH_TO_SCHEMAS = "cdash/data/_metadata/"
+
+ # The file with tooltips.
+ TOOLTIP_FILE = "cdash/utils/tooltips.yaml"
+
+ # Maximum value of TIME_PERIOD (in days) for data read from the parquets.
+ # Do not change without a good reason.
+ MAX_TIME_PERIOD = 250
+
+ # Time period (in days, counted back from now) for data read from the
+ # parquets.
+ # TIME_PERIOD = None means all data (at most MAX_TIME_PERIOD days) is read.
+ # TIME_PERIOD = MAX_TIME_PERIOD is the default value.
+ TIME_PERIOD = MAX_TIME_PERIOD # [days]
+
+ ############################################################################
+ # General, application wide, layout affecting constants.
+
+ # Time delay (in ms) before the spinner is shown.
+ SPINNER_DELAY = 500
+
+ # If True, clear all inputs in control panel when button "ADD SELECTED" is
+ # pressed.
+ CLEAR_ALL_INPUTS = False
+
+ # The element is disabled.
+ STYLE_DISABLED = {"visibility": "hidden"}
+
+ # The element is enabled and visible.
+ STYLE_ENABLED = {"visibility": "visible"}
+
+ # The element is not displayed.
+ STYLE_DONT_DISPLAY = {"display": "none"}
+
+ # The element is displayed.
+ STYLE_DISPLAY = {"display": "flex"}
+
+ # Checklist "All" is disabled.
+ CL_ALL_DISABLED = [
+ {
+ "label": "All",
+ "value": "all",
+ "disabled": True
+ }
+ ]
+
+ # Checklist "All" is enabled, visible and unchecked.
+ CL_ALL_ENABLED = [
+ {
+ "label": "All",
+ "value": "all",
+ "disabled": False
+ }
+ ]
+
+ # Placeholder for any element in the layout.
+ PLACEHOLDER = html.Nobr("")
+
+ # List of drivers used in CSIT.
+ DRIVERS = ("avf", "af-xdp", "rdma", "dpdk", "mlx5")
+
+ # Labels for input elements (dropdowns, ...).
+ LABELS = {
+ "dpdk": "DPDK",
+ "container_memif": "LXC/DRC Container Memif",
+ "crypto": "IPSec IPv4 Routing",
+ "gso": "GSO",
+ "ip4": "IPv4 Routing",
+ "ip4_tunnels": "IPv4 Tunnels",
+ "ip6": "IPv6 Routing",
+ "ip6_tunnels": "IPv6 Tunnels",
+ "l2": "L2 Ethernet Switching",
+ "lb": "Load Balancer",
+ "srv6": "SRv6 Routing",
+ "vm_vhost": "VMs vhost-user",
+ "nfv_density.dcr_memif.chain_ipsec": "CNF Service Chains Routing IPSec",
+ "nfv_density.vm_vhost.chain_dot1qip4vxlan":"VNF Service Chains Tunnels",
+ "nfv_density.vm_vhost.chain": "VNF Service Chains Routing",
+ "nfv_density.dcr_memif.pipeline": "CNF Service Pipelines Routing",
+ "nfv_density.dcr_memif.chain": "CNF Service Chains Routing",
+ "hoststack": "Hoststack",
+ "flow": "Flow",
+ "l2bd": "L2 Bridge Domain",
+ "crypto.ethip4": "IPSec IPv4 Routing",
+ "crypto.ethip6": "IPSec IPv6 Routing",
+ "interfaces": "Interfaces",
+ "ip4_tunnels.lisp": "IPv4 Tunnels LISP",
+ "ip6_tunnels.lisp": "IPv6 Tunnels LISP",
+ "l2patch": "L2 Patch",
+ "l2xc": "L2 Cross Connect",
+ "vm_vhost.ethip4": "VMs vhost-user IPv4 Routing",
+ "vm_vhost.ethip6": "VMs vhost-user IPv6 Routing"
+ }
+
+ # URL style.
+ URL_STYLE = {
+ "background-color": "#d2ebf5",
+ "border-color": "#bce1f1",
+ "color": "#135d7c"
+ }
+
+ ############################################################################
+ # General, normalization constants.
+
+ NORM_FREQUENCY = 2.0 # [GHz]
+ FREQUENCY = { # [GHz]
+ "1n-aws": 3.400,
+ "2n-aws": 3.400,
+ "2n-c6in": 3.500,
+ "2n-clx": 2.300,
+ "2n-icx": 2.600,
+ "2n-spr": 2.800,
+ "2n-tx2": 2.500,
+ "2n-zn2": 2.900,
+ "3n-alt": 3.000,
+ "3n-icx": 2.600,
+ "3n-icxd": 2.000,
+ "3n-snr": 2.200,
+ "3n-tsh": 2.200,
+ "3na-spr": 2.800,
+ "3nb-spr": 2.800
+ }
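+
+ # A sketch of how these constants can be used (hypothetical helper, not
+ # defined in this module): results are normalized to NORM_FREQUENCY by
+ # scaling with the ratio of frequencies, e.g.
+ #
+ #     def normalization_factor(test_bed: str) -> float:
+ #         return Constants.NORM_FREQUENCY / Constants.FREQUENCY[test_bed]
+ #
+ #     # normalization_factor("2n-icx") == 2.0 / 2.6, i.e. ~0.77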
+
+ ############################################################################
+ # General, plots and tables constants.
+
+ PLOT_COLORS = (
+ "#1A1110", "#DA2647", "#214FC6", "#01786F", "#BD8260", "#FFD12A",
+ "#A6E7FF", "#738276", "#C95A49", "#FC5A8D", "#CEC8EF", "#391285",
+ "#6F2DA8", "#FF878D", "#45A27D", "#FFD0B9", "#FD5240", "#DB91EF",
+ "#44D7A8", "#4F86F7", "#84DE02", "#FFCFF1", "#614051"
+ )
+
+ # Trending, anomalies.
+ ANOMALY_COLOR = {
+ "regression": 0.0,
+ "normal": 0.5,
+ "progression": 1.0
+ }
+
+ COLORSCALE_TPUT = [
+ [0.00, "red"],
+ [0.33, "red"],
+ [0.33, "white"],
+ [0.66, "white"],
+ [0.66, "green"],
+ [1.00, "green"]
+ ]
+
+ TICK_TEXT_TPUT = ["Regression", "Normal", "Progression"]
+
+ COLORSCALE_LAT = [
+ [0.00, "green"],
+ [0.33, "green"],
+ [0.33, "white"],
+ [0.66, "white"],
+ [0.66, "red"],
+ [1.00, "red"]
+ ]
+
+ TICK_TEXT_LAT = ["Progression", "Normal", "Regression"]
+
+ # Access to the results.
+ VALUE = {
+ "mrr": "result_receive_rate_rate_avg",
+ "ndr": "result_ndr_lower_rate_value",
+ "pdr": "result_pdr_lower_rate_value",
+ "mrr-bandwidth": "result_receive_rate_bandwidth_avg",
+ "ndr-bandwidth": "result_ndr_lower_bandwidth_value",
+ "pdr-bandwidth": "result_pdr_lower_bandwidth_value",
+ "latency": "result_latency_forward_pdr_50_avg",
+ "hoststack-cps": "result_rate_value",
+ "hoststack-rps": "result_rate_value",
+ "hoststack-cps-bandwidth": "result_bandwidth_value",
+ "hoststack-rps-bandwidth": "result_bandwidth_value",
+ "hoststack-bps": "result_bandwidth_value",
+ "hoststack-latency": "result_latency_value",
+ "soak": "result_critical_rate_lower_rate_value",
+ "soak-bandwidth": "result_critical_rate_lower_bandwidth_value"
+ }
+
+ VALUE_ITER = {
+ "mrr": "result_receive_rate_rate_values",
+ "ndr": "result_ndr_lower_rate_value",
+ "pdr": "result_pdr_lower_rate_value",
+ "mrr-bandwidth": "result_receive_rate_bandwidth_avg",
+ "ndr-bandwidth": "result_ndr_lower_bandwidth_value",
+ "pdr-bandwidth": "result_pdr_lower_bandwidth_value",
+ "latency": "result_latency_forward_pdr_50_avg",
+ "hoststack-cps": "result_rate_value",
+ "hoststack-rps": "result_rate_value",
+ "hoststack-cps-bandwidth": "result_bandwidth_value",
+ "hoststack-rps-bandwidth": "result_bandwidth_value",
+ "hoststack-bps": "result_bandwidth_value",
+ "hoststack-latency": "result_latency_value",
+ "soak": "result_critical_rate_lower_rate_value",
+ "soak-bandwidth": "result_critical_rate_lower_bandwidth_value"
+ }
+
+ UNIT = {
+ "mrr": "result_receive_rate_rate_unit",
+ "ndr": "result_ndr_lower_rate_unit",
+ "pdr": "result_pdr_lower_rate_unit",
+ "mrr-bandwidth": "result_receive_rate_bandwidth_unit",
+ "ndr-bandwidth": "result_ndr_lower_bandwidth_unit",
+ "pdr-bandwidth": "result_pdr_lower_bandwidth_unit",
+ "latency": "result_latency_forward_pdr_50_unit",
+ "hoststack-cps": "result_rate_unit",
+ "hoststack-rps": "result_rate_unit",
+ "hoststack-cps-bandwidth": "result_bandwidth_unit",
+ "hoststack-rps-bandwidth": "result_bandwidth_unit",
+ "hoststack-bps": "result_bandwidth_unit",
+ "hoststack-latency": "result_latency_unit",
+ "soak": "result_critical_rate_lower_rate_unit",
+ "soak-bandwidth": "result_critical_rate_lower_bandwidth_unit"
+ }
+
+ TESTS_WITH_BANDWIDTH = (
+ "ndr",
+ "pdr",
+ "mrr",
+ "hoststack-cps",
+ "hoststack-rps",
+ "soak"
+ )
+ TESTS_WITH_LATENCY = (
+ "pdr",
+ "hoststack-cps",
+ "hoststack-rps"
+ )
+
+ # Latencies.
+ LAT_HDRH = ( # Do not change the order
+ "result_latency_forward_pdr_0_hdrh",
+ "result_latency_reverse_pdr_0_hdrh",
+ "result_latency_forward_pdr_10_hdrh",
+ "result_latency_reverse_pdr_10_hdrh",
+ "result_latency_forward_pdr_50_hdrh",
+ "result_latency_reverse_pdr_50_hdrh",
+ "result_latency_forward_pdr_90_hdrh",
+ "result_latency_reverse_pdr_90_hdrh",
+ )
+
+ # This value depends on the latency stream rate (9001 pps) and duration (5 s).
+ # Keep it slightly higher so that rounding errors do not remove the tick mark.
+ PERCENTILE_MAX = 99.999501
+
+ GRAPH_LAT_HDRH_DESC = {
+ "result_latency_forward_pdr_0_hdrh": "No-load.",
+ "result_latency_reverse_pdr_0_hdrh": "No-load.",
+ "result_latency_forward_pdr_10_hdrh": "Low-load, 10% PDR.",
+ "result_latency_reverse_pdr_10_hdrh": "Low-load, 10% PDR.",
+ "result_latency_forward_pdr_50_hdrh": "Mid-load, 50% PDR.",
+ "result_latency_reverse_pdr_50_hdrh": "Mid-load, 50% PDR.",
+ "result_latency_forward_pdr_90_hdrh": "High-load, 90% PDR.",
+ "result_latency_reverse_pdr_90_hdrh": "High-load, 90% PDR."
+ }
+
+ # Operators used to filter data in comparison tables.
+ OPERATORS = (
+ ("contains ", ),
+ ("lt ", "<"),
+ ("gt ", ">"),
+ ("eq ", "="),
+ ("ge ", ">="),
+ ("le ", "<="),
+ ("ne ", "!="),
+ ("datestartswith ", )
+ )
+
+ ############################################################################
+ # News.
+
+ # The title.
+ NEWS_TITLE = "Failures and Anomalies"
+
+ # The pathname prefix for the application.
+ NEWS_ROUTES_PATHNAME_PREFIX = "/news/"
+
+ # Time period for regressions and progressions.
+ NEWS_TIME_PERIOD = TIME_PERIOD # [days]
+
+ # Time periods for summary tables.
+ NEWS_LAST = 1 # [days]
+ NEWS_SHORT = 7 # [days]
+ NEWS_LONG = NEWS_TIME_PERIOD # [days]
+
+ ############################################################################
+ # Report.
+
+ # The title.
+ REPORT_TITLE = "Per Release Performance"
+
+ # The pathname prefix for the application.
+ REPORT_ROUTES_PATHNAME_PREFIX = "/report/"
+
+ # Layout of plot.ly graphs.
+ REPORT_GRAPH_LAYOUT_FILE = "cdash/report/layout.yaml"
+
+ # Default name of downloaded file with selected data.
+ REPORT_DOWNLOAD_FILE_NAME = "iterative_data.csv"
+
+ ############################################################################
+ # Comparisons.
+
+ # The title.
+ COMP_TITLE = "Per Release Performance Comparisons"
+
+ # The pathname prefix for the application.
+ COMP_ROUTES_PATHNAME_PREFIX = "/comparisons/"
+
+ # Default name of downloaded file with selected data.
+ COMP_DOWNLOAD_FILE_NAME = "comparison_data.csv"
+
+ # This parameter specifies the method to use for estimating the percentile.
+ # Possible values:
+ # - inverted_cdf
+ # - averaged_inverted_cdf
+ # - closest_observation
+ # - interpolated_inverted_cdf
+ # - hazen
+ # - weibull
+ # - linear (default)
+ # - median_unbiased
+ # - normal_unbiased
+ COMP_PERCENTILE_METHOD = "linear"
+
+ # Extreme or mild outlier?
+ OUTLIER_EXTREME = 3
+ OUTLIER_MILD = 1.5
+ COMP_OUTLIER_TYPE = OUTLIER_EXTREME
+
+ ############################################################################
+ # Statistics.
+
+ # The title.
+ STATS_TITLE = "Test Job Statistics"
+
+ # The pathname prefix for the application.
+ STATS_ROUTES_PATHNAME_PREFIX = "/stats/"
+
+ # Layout of plot.ly graphs.
+ STATS_GRAPH_LAYOUT_FILE = "cdash/stats/layout.yaml"
+
+ # The default job displayed when the page is loaded for the first time.
+ STATS_DEFAULT_JOB = "csit-vpp-perf-mrr-daily-master-2n-icx"
+
+ # Default name of downloaded file with selected data.
+ STATS_DOWNLOAD_FILE_NAME = "stats.csv"
+
+ # The width of the bar in the graph in milliseconds.
+ STATS_BAR_WIDTH_DAILY = 1000 * 3600 * 15
+ STATS_BAR_WIDTH_WEEKLY = 1000 * 3600 * 24
+
+ ############################################################################
+ # Trending.
+
+ # The title.
+ TREND_TITLE = "Performance Trending"
+
+ # The pathname prefix for the application.
+ TREND_ROUTES_PATHNAME_PREFIX = "/trending/"
+
+ # Layout of plot.ly graphs.
+ TREND_GRAPH_LAYOUT_FILE = "cdash/trending/layout.yaml"
+
+ # Default name of downloaded file with selected data.
+ TREND_DOWNLOAD_FILE_NAME = "trending_data.csv"
+ TELEMETRY_DOWNLOAD_FILE_NAME = "telemetry_data.csv"
+
+ ############################################################################
+ # Coverage data.
+
+ # The title.
+ COVERAGE_TITLE = "Per Release Coverage Data"
+
+ # The pathname prefix for the application.
+ COVERAGE_ROUTES_PATHNAME_PREFIX = "/coverage/"
+
+ # Default name of downloaded file with selected data.
+ COVERAGE_DOWNLOAD_FILE_NAME = "coverage_data.csv"
+
+ ############################################################################
+ # Search tests.
+
+ # The title.
+ SEARCH_TITLE = "Search Tests"
+
+ # The pathname prefix for the application.
+ SEARCH_ROUTES_PATHNAME_PREFIX = "/search/"
+
+ # Layout of plot.ly graphs.
+ SEARCH_GRAPH_LAYOUT_FILE = "cdash/search/layout.yaml"
+
+ # Default name of downloaded file with selected data.
+ SEARCH_DOWNLOAD_FILE_NAME = "search_data.csv"
+
+ ############################################################################
diff --git a/csit.infra.dash/app/cdash/utils/control_panel.py b/csit.infra.dash/app/cdash/utils/control_panel.py
new file mode 100644
index 0000000000..3da44e3901
--- /dev/null
+++ b/csit.infra.dash/app/cdash/utils/control_panel.py
@@ -0,0 +1,87 @@
+# Copyright (c) 2024 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A module implementing the control panel data structure.
+"""
+
+from copy import deepcopy
+from typing import Any
+
+class ControlPanel:
+ """A class representing the control panel.
+ """
+
+ def __init__(self, params: dict, panel: dict) -> None:
+ """Initialisation of the control pannel by default values. If
+ particular values are provided (parameter "panel") they are set
+ afterwards.
+
+ :param params: Default values to be set to the control panel. This
+ dictionary also defines the full set of the control panel's
+ parameters and their order.
+ :param panel: Custom values to be set to the control panel.
+ :type params: dict
+ :type panel: dict
+ """
+
+ if not params:
+ raise ValueError("The params must be defined.")
+ self._panel = deepcopy(params)
+ if panel:
+ for key in panel:
+ if key in self._panel:
+ self._panel[key] = panel[key]
+ else:
+ raise AttributeError(
+ f"The parameter {key} is not defined in the list of "
+ f"parameters."
+ )
+
+ @property
+ def panel(self) -> dict:
+ """Return the whole control panel as a dictionary.
+
+ :returns: The control panel.
+ :rtype: dict
+ """
+ return self._panel
+
+ @property
+ def values(self) -> tuple:
+ """Returns the values from the Control panel as a tuple.
+
+ :returns: The values from the Control panel.
+ :rtype: tuple
+ """
+ return tuple(self._panel.values())
+
+ def set(self, kwargs: dict=dict()) -> None:
+ """Set the values of the Control panel.
+
+ :param kwargs: key - value pairs to be set.
+ :type kwargs: dict
+ :raises KeyError: If the key in kwargs is not present in the Control
+ panel.
+ """
+ for key, val in kwargs.items():
+ if key in self._panel:
+ self._panel[key] = val
+ else:
+ raise KeyError(f"The key {key} is not defined.")
+
+ def get(self, key: str) -> Any:
+ """Returns the value of a key from the Control panel.
+
+ :param key: The key whose value should be returned.
+ :type key: str
+ :returns: The value of the key.
+ :rtype: Any
+ :raises KeyError: If the key is not present in the Control
+ panel.
+ """
+ return self._panel[key]
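+
+# A minimal usage sketch (the parameter names and values are made up):
+#
+#     defaults = {"dut": "vpp", "ttype": "mrr"}
+#     panel = ControlPanel(defaults, {"ttype": "ndr"})
+#     panel.get("ttype")          # -> "ndr"
+#     panel.set({"dut": "dpdk"})
+#     panel.values                # -> ("dpdk", "ndr")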
diff --git a/csit.infra.dash/app/cdash/utils/telemetry_data.py b/csit.infra.dash/app/cdash/utils/telemetry_data.py
new file mode 100644
index 0000000000..9975874d96
--- /dev/null
+++ b/csit.infra.dash/app/cdash/utils/telemetry_data.py
@@ -0,0 +1,362 @@
+# Copyright (c) 2024 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A module implementing the parsing of OpenMetrics data and elementary
+operations with it.
+"""
+
+
+import binascii
+import zlib
+import pandas as pd
+
+from ..trending.graphs import select_trending_data
+
+
+class TelemetryData:
+ """A class to store and manipulate the telemetry data.
+ """
+
+ def __init__(self, tests: list=list()) -> None:
+ """Initialize the object.
+
+ :param tests: List of selected tests.
+ :type tests: list
+ """
+
+ self._tests = tests
+ self._data = None
+ self._unique_metrics = list()
+ self._unique_metrics_labels = pd.DataFrame()
+ self._selected_metrics_labels = pd.DataFrame()
+
+ def from_dataframe(self, in_data: pd.DataFrame=pd.DataFrame()) -> None:
+ """Read the input from pandas DataFrame.
+
+ This method must be called at the beginning to create all data
+ structures.
+ """
+
+ if in_data.empty:
+ return
+
+ metrics = set() # A set of unique metrics
+
+ # Create a dataframe with metrics for selected tests:
+ lst_items = list()
+ for itm in self._tests:
+ sel_data = select_trending_data(in_data, itm)
+ if sel_data is not None:
+ sel_data["test_name"] = itm["id"]
+ lst_items.append(sel_data)
+ df = pd.concat(lst_items, ignore_index=True, copy=False)
+
+ # Use only necessary data:
+ df = df[[
+ "job",
+ "build",
+ "dut_type",
+ "dut_version",
+ "start_time",
+ "passed",
+ "test_name",
+ "test_type",
+ "result_receive_rate_rate_avg",
+ "result_receive_rate_rate_stdev",
+ "result_receive_rate_rate_unit",
+ "result_pdr_lower_rate_value",
+ "result_pdr_lower_rate_unit",
+ "result_ndr_lower_rate_value",
+ "result_ndr_lower_rate_unit",
+ "telemetry"
+ ]]
+ # Transform metrics from strings to dataframes:
+ lst_telemetry = list()
+ for _, row in df.iterrows():
+ d_telemetry = {
+ "metric": list(),
+ "labels": list(), # list of tuple(label, value)
+ "value": list(),
+ "timestamp": list()
+ }
+
+ # If there is no telemetry data, use empty dictionary
+ if row["telemetry"] is None or isinstance(row["telemetry"], float):
+ lst_telemetry.append(pd.DataFrame(data=d_telemetry))
+ continue
+
+ # Read telemetry data
+ # - list of uncompressed strings List[str, ...], or
+ # - list with only one compressed string List[str]
+ try:
+ tm_data = zlib.decompress(
+ binascii.a2b_base64(row["telemetry"][0].encode())
+ ).decode().split("\n")
+ except (binascii.Error, zlib.error, AttributeError, IndexError):
+ tm_data = row["telemetry"]
+
+ # Pre-process telemetry data
+ for itm in tm_data:
+ itm_lst = itm.replace("'", "").rsplit(" ", maxsplit=2)
+ metric, labels = itm_lst[0].split("{")
+ d_telemetry["metric"].append(metric)
+ d_telemetry["labels"].append(
+ [tuple(x.split("=")) for x in labels[:-1].split(",")]
+ )
+ d_telemetry["value"].append(itm_lst[1])
+ d_telemetry["timestamp"].append(itm_lst[2])
+
+ metrics.update(d_telemetry["metric"])
+ lst_telemetry.append(pd.DataFrame(data=d_telemetry))
+ df["telemetry"] = lst_telemetry
+
+ self._data = df
+ self._unique_metrics = sorted(metrics)
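+
+ # A sketch of the OpenMetrics input format parsed above (the metric name
+ # and labels are made up):
+ #
+ #     calls_total{name='api',state='active'} 123 1658759101
+ #
+ # After the quotes are removed, rsplit(" ", maxsplit=2) separates the
+ # value ("123") and the timestamp ("1658759101"), and split("{") separates
+ # the metric name from its comma-separated label=value pairs.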
+
+ def from_json(self, in_data: dict) -> None:
+ """Read the input data from json.
+ """
+
+ df = pd.read_json(in_data)
+ lst_telemetry = list()
+ metrics = set() # A set of unique metrics
+ for _, row in df.iterrows():
+ telemetry = pd.DataFrame(row["telemetry"])
+ lst_telemetry.append(telemetry)
+ metrics.update(telemetry["metric"].to_list())
+ df["telemetry"] = lst_telemetry
+
+ self._data = df
+ self._unique_metrics = sorted(metrics)
+
+ def from_metrics(self, in_data: set) -> None:
+ """Read only the metrics.
+ """
+ self._unique_metrics = in_data
+
+ def from_metrics_with_labels(self, in_data: dict) -> None:
+ """Read only metrics with labels.
+ """
+ self._unique_metrics_labels = pd.DataFrame.from_dict(in_data)
+
+ def to_json(self) -> str:
+ """Return the data transformed from dataframe to json.
+
+ :returns: Telemetry data transformed to a json string.
+ :rtype: str
+ """
+ return self._data.to_json()
+
+ @property
+ def unique_metrics(self) -> list:
+ """Return a set of unique metrics.
+
+ :returns: A set of unique metrics.
+ :rtype: set
+ """
+ return self._unique_metrics
+
+ @property
+ def unique_metrics_with_labels(self) -> dict:
+ """
+ """
+ return self._unique_metrics_labels.to_dict()
+
+ def get_selected_labels(self, metrics: list) -> dict:
+ """Return a dictionary with labels (keys) and all their possible values
+ (values) for all selected 'metrics'.
+
+ :param metrics: List of metrics we are interested in.
+ :type metrics: list
+ :returns: A dictionary with labels and all their possible values.
+ :rtype: dict
+ """
+
+ lst_labels = list()
+ tmp_labels = dict()
+ for _, row in self._data.iterrows():
+ telemetry = row["telemetry"]
+ for itm in metrics:
+ df = telemetry.loc[(telemetry["metric"] == itm)]
+ lst_labels.append(df)
+ for _, tm in df.iterrows():
+ for label in tm["labels"]:
+ if label[0] not in tmp_labels:
+ tmp_labels[label[0]] = set()
+ tmp_labels[label[0]].add(label[1])
+
+ df_labels = pd.concat(lst_labels, ignore_index=True, copy=False)
+ selected_labels = dict()
+ for key in sorted(tmp_labels):
+ selected_labels[key] = sorted(tmp_labels[key])
+
+ self._unique_metrics_labels = df_labels[["metric", "labels"]].\
+ loc[df_labels[["metric", "labels"]].astype(str).\
+ drop_duplicates().index]
+
+ return selected_labels
+
+ @property
+ def str_metrics(self) -> str:
+ """Returns all unique metrics as a string.
+ """
+ return TelemetryData.metrics_to_str(self._unique_metrics_labels)
+
+ @staticmethod
+ def metrics_to_str(in_data: pd.DataFrame) -> str:
+ """Convert metrics from pandas dataframe to string. Metrics in string
+ are separated by '\n'.
+
+ :param in_data: Metrics to be converted to a string.
+ :type in_data: pandas.DataFrame
+ :returns: Metrics as a string.
+ :rtype: str
+ """
+ metrics = str()
+ for _, row in in_data.iterrows():
+ labels = ','.join([f"{itm[0]}='{itm[1]}'" for itm in row["labels"]])
+ metrics += f"{row['metric']}{{{labels}}}\n"
+ return metrics[:-1]
+
+ def search_unique_metrics(self, string: str) -> list:
+ """Return a list of metrics which name includes the given string.
+
+ :param string: A string which must be in the name of metric.
+ :type string: str
+ :returns: A list of metrics which name includes the given string.
+ :rtype: list
+ """
+ return [itm for itm in self._unique_metrics if string in itm]
+
+ def filter_selected_metrics_by_labels(
+ self,
+ selection: dict
+ ) -> pd.DataFrame:
+ """Filter selected unique metrics by labels and their values.
+
+ :param selection: Labels and their values specified by the user.
+ :type selection: dict
+ :returns: Pandas dataframe with filtered metrics.
+ :rtype: pandas.DataFrame
+ """
+
+ def _is_selected(labels: list, sel: dict) -> bool:
+ """Check if the provided 'labels' are selected by the user.
+
+ :param labels: List of labels and their values from a metric. The
+ items in this list are two-item lists where the first item is
+ the label and the second one is its value.
+ :param sel: User selection. The keys are the selected labels and the
+ values are lists with label values.
+ :type labels: list
+ :type sel: dict
+ :returns: True if the 'labels' are selected by the user.
+ :rtype: bool
+ """
+ passed = list()
+ labels = dict(labels)
+ for key in sel.keys():
+ if key in list(labels.keys()):
+ if sel[key]:
+ passed.append(labels[key] in sel[key])
+ else:
+ passed.append(True)
+ else:
+ passed.append(False)
+ return bool(passed and all(passed))
+
+ self._selected_metrics_labels = pd.DataFrame()
+ lst_items = list()
+ for _, row in self._unique_metrics_labels.iterrows():
+ if _is_selected(row["labels"], selection):
+ lst_items.append(row.to_frame().T)
+ self._selected_metrics_labels = \
+ pd.concat(lst_items, ignore_index=True, axis=0, copy=False)
+ return self._selected_metrics_labels
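+
+ # A sketch of the expected "selection" structure for the filter above
+ # (label names and values are made up):
+ #
+ #     selection = {
+ #         "name": ["tap0", "tap1"],  # label value must be one of these
+ #         "state": []                # an empty list accepts any value
+ #     }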
+
+ def select_tm_trending_data(
+ self,
+ selection: dict,
+ ignore_host: bool = False
+ ) -> pd.DataFrame:
+ """Select telemetry data for trending based on user's 'selection'.
+
+ The output dataframe includes these columns:
+ - "job",
+ - "build",
+ - "dut_type",
+ - "dut_version",
+ - "start_time",
+ - "passed",
+ - "test_name",
+ - "test_id",
+ - "test_type",
+ - "result_receive_rate_rate_avg",
+ - "result_receive_rate_rate_stdev",
+ - "result_receive_rate_rate_unit",
+ - "result_pdr_lower_rate_value",
+ - "result_pdr_lower_rate_unit",
+ - "result_ndr_lower_rate_value",
+ - "result_ndr_lower_rate_unit",
+ - "tm_metric",
+ - "tm_value".
+
+ :param selection: User's selection (metrics and labels).
+ :param ignore_host: Ignore 'hostname' and 'hook' labels in metrics.
+ :type selection: dict
+ :type ignore_host: bool
+ :returns: Dataframe with selected data.
+ :rtype: pandas.DataFrame
+ """
+
+ if self._data is None:
+ return pd.DataFrame()
+ if self._data.empty:
+ return pd.DataFrame()
+ if not selection:
+ return pd.DataFrame()
+
+ df_sel = pd.DataFrame.from_dict(selection)
+ lst_rows = list()
+ for _, row in self._data.iterrows():
+ tm_row = row["telemetry"]
+ for _, tm_sel in df_sel.iterrows():
+ df_tmp = tm_row.loc[tm_row["metric"] == tm_sel["metric"]]
+ for _, tm in df_tmp.iterrows():
+ do_it = False
+ if ignore_host:
+ if tm["labels"][2:] == tm_sel["labels"][2:]:
+ labels = ','.join(
+ [f"{i[0]}='{i[1]}'" for i in tm["labels"][2:]]
+ )
+ do_it = True
+ else:
+ if tm["labels"] == tm_sel["labels"]:
+ labels = ','.join(
+ [f"{i[0]}='{i[1]}'" for i in tm["labels"]]
+ )
+ do_it = True
+ if do_it:
+ row["tm_metric"] = f"{tm['metric']}{{{labels}}}"
+ row["tm_value"] = tm["value"]
+ lst_rows.append(
+ row.drop(labels=["telemetry", ]).to_frame().T
+ )
+ if lst_rows:
+ return pd.concat(
+ lst_rows, ignore_index=True, axis=0, copy=False
+ ).drop_duplicates()
+ else:
+ return pd.DataFrame()
diff --git a/csit.infra.dash/app/cdash/utils/tooltips.yaml b/csit.infra.dash/app/cdash/utils/tooltips.yaml
new file mode 100644
index 0000000000..a51e9ffae4
--- /dev/null
+++ b/csit.infra.dash/app/cdash/utils/tooltips.yaml
@@ -0,0 +1,51 @@
+help-area:
+ The area defines a DUT packet path and lookup type.
+help-cadence:
+ The cadence of the Jenkins job which runs the tests.
+help-cmp-parameter:
+ The parameter to be used for comparison.
+help-cmp-value:
+ The value of parameter to be used for comparison.
+help-cores:
+ Number of cores the DUT uses during the test.
+help-csit-dut:
+ The version of CSIT (the part in front of the first hyphen) and the version of
+ Device under Test (the rest).
+help-data-type:
+ The type of collected data.
+help-download:
+ Download the selected data as a csv file.
+help-dut:
+ Device Under Test (DUT) - In software networking, “device” denotes a specific
+ piece of software tasked with packet processing. Such device is surrounded
+ with other software components (such as operating system kernel).
+help-dut-ver:
+ The version of the Device under Test.
+help-framesize:
+ Frame size - size of an Ethernet Layer-2 frame on the wire, including any VLAN
+ tags (dot1q, dot1ad) and Ethernet FCS, but excluding Ethernet preamble and
+ inter-frame gap. Measured in Bytes.
+help-infra:
+ Infrastructure is defined by the topology (number of nodes), processor
+ architecture, NIC and driver.
+help-measurement:
+ The measured quantity in interest.
+help-normalize:
+ Normalize the results to CPU frequency 2GHz. The results from AWS environment
+ are not normalized as we do not know the exact value of CPU frequency.
+help-release:
+ The CSIT release.
+help-show-latency:
+ If selected, the latency is included in tables.
+help-tbed:
+ The test bed is defined by topology (number of nodes) and processor
+ architecture.
+help-test:
+ The test specification consists of packet encapsulation, VPP packet processing
+ (packet forwarding mode and packet processing function(s)) and packet
+ forwarding path.
+help-ttype:
+ Main measured variable.
+help-url:
+ URL with current configuration. If there is no "Copy URL" button, use triple
+ click.
diff --git a/csit.infra.dash/app/cdash/utils/trigger.py b/csit.infra.dash/app/cdash/utils/trigger.py
new file mode 100644
index 0000000000..da0768b070
--- /dev/null
+++ b/csit.infra.dash/app/cdash/utils/trigger.py
@@ -0,0 +1,65 @@
+# Copyright (c) 2024 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A module implementing the processing of a trigger.
+"""
+
+from typing import Any
+
+from json import loads, JSONDecodeError
+
+
+class Trigger:
+ """
+ """
+ def __init__(self, trigger) -> None:
+ """
+ """
+ self._id = trigger[0]["prop_id"].split(".")
+ self._param = self._id[1]
+ try:
+ self._id = loads(self._id[0])
+ except (JSONDecodeError, TypeError):
+ # It is a string
+ self._id = {"type": self._id[0], "index": None}
+ self._val = trigger[0]["value"]
+
+ def __str__(self) -> str:
+ return (
+ f"\nTrigger:\n"
+ f" ID: {self._id}\n"
+ f" Type: {self._id['type']}\n"
+ f" Index: {self._id['index']}\n"
+ f" Parameter: {self._param}\n"
+ f" Value: {self._val}\n"
+ )
+
+ @property
+ def id(self) -> dict:
+ return self._id
+
+ @property
+ def type(self) -> str:
+ return self._id["type"]
+
+ @property
+ def idx(self) -> Any:
+ return self._id["index"]
+
+ @property
+ def parameter(self) -> str:
+ return self._param
+
+ @property
+ def value(self) -> Any:
+ return self._val
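+
+# A minimal usage sketch, assuming a dash callback context (the component ID
+# is made up):
+#
+#     from dash import callback_context
+#     trigger = Trigger(callback_context.triggered)
+#     # For prop_id '{"type":"ctrl-dd","index":"dut"}.value' the parsed
+#     # trigger yields:
+#     #     trigger.type       -> "ctrl-dd"
+#     #     trigger.idx        -> "dut"
+#     #     trigger.parameter  -> "value"
+#     #     trigger.value      -> the current value of the component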
diff --git a/csit.infra.dash/app/cdash/utils/url_processing.py b/csit.infra.dash/app/cdash/utils/url_processing.py
new file mode 100644
index 0000000000..c436ebc830
--- /dev/null
+++ b/csit.infra.dash/app/cdash/utils/url_processing.py
@@ -0,0 +1,99 @@
+# Copyright (c) 2024 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""URL decoding and parsing and URL encoding.
+"""
+
+import logging
+
+from base64 import urlsafe_b64encode, urlsafe_b64decode
+from urllib.parse import urlencode, urlunparse, urlparse, parse_qs
+from zlib import compress, decompress
+from zlib import error as ZlibErr
+from binascii import Error as BinasciiErr
+
+
+def url_encode(params: dict) -> str:
+ """Encode the URL parameters and zip them and create the whole URL using
+ given data.
+
+ :param params: All data necessary to create the URL:
+ - scheme,
+ - network location,
+ - path,
+ - query,
+ - parameters.
+ :type params: dict
+ :returns: Encoded URL.
+ :rtype: str
+ """
+
+ url_params = params.get("params", None)
+ if url_params:
+ encoded_params = urlsafe_b64encode(
+ compress(urlencode(url_params).encode("utf-8"), level=9)
+ ).rstrip(b"=").decode("utf-8")
+ else:
+ encoded_params = str()
+
+ return urlunparse((
+ params.get("scheme", "http"),
+ params.get("netloc", str()),
+ params.get("path", str()),
+ str(), # params
+ params.get("query", str()),
+ encoded_params
+ ))
+
+
+def url_decode(url: str) -> dict:
+ """Parse the given URL and decode the parameters.
+
+ :param url: URL to be parsed and decoded.
+ :type url: str
+ :returns: Parsed URL.
+ :rtype: dict
+ """
+
+ try:
+ parsed_url = urlparse(url)
+ except ValueError as err:
+ logging.warning(f"\nThe url {url} is not valid, ignoring.\n{repr(err)}")
+ return dict()
+
+ if parsed_url.fragment:
+ try:
+ padding = b"=" * (4 - (len(parsed_url.fragment) % 4))
+ params = parse_qs(decompress(
+ urlsafe_b64decode(
+ (parsed_url.fragment.encode("utf-8") + padding)
+ )).decode("utf-8")
+ )
+ except (BinasciiErr, UnicodeDecodeError, ZlibErr) as err:
+ logging.warning(
+ f"\nNot possible to decode the parameters from url: {url}"
+ f"\nEncoded parameters: '{parsed_url.fragment}'"
+ f"\n{repr(err)}"
+ )
+ return dict()
+ else:
+ params = None
+
+ return {
+ "scheme": parsed_url.scheme,
+ "netloc": parsed_url.netloc,
+ "path": parsed_url.path,
+ "query": parsed_url.query,
+ "fragment": parsed_url.fragment,
+ "params": params
+ }
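+
+# A minimal round-trip sketch (the parameter values are made up):
+#
+#     url = url_encode({
+#         "scheme": "https",
+#         "netloc": "csit.fd.io",
+#         "path": "/trending",
+#         "params": {"dut": "vpp", "ttype": "mrr"}
+#     })
+#     decoded = url_decode(url)
+#     # decoded["params"] -> {"dut": ["vpp"], "ttype": ["mrr"]}
+#     # (parse_qs returns each value as a list)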
diff --git a/csit.infra.dash/app/cdash/utils/utils.py b/csit.infra.dash/app/cdash/utils/utils.py
new file mode 100644
index 0000000000..306b4f60d1
--- /dev/null
+++ b/csit.infra.dash/app/cdash/utils/utils.py
@@ -0,0 +1,895 @@
+# Copyright (c) 2024 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Functions used by Dash applications.
+"""
+
+import pandas as pd
+import plotly.graph_objects as go
+import dash_bootstrap_components as dbc
+
+import hdrh.histogram
+import hdrh.codec
+
+from math import sqrt
+from dash import dcc, no_update, html
+from datetime import datetime
+
+from ..utils.constants import Constants as C
+from ..utils.url_processing import url_encode
+from ..utils.trigger import Trigger
+
+
+def get_color(idx: int) -> str:
+ """Returns a color from the list defined in Constants.PLOT_COLORS defined by
+ its index.
+
+ :param idx: Index of the color.
+ :type idx: int
+ :returns: Color defined by hex code.
+ :trype: str
+ """
+ return C.PLOT_COLORS[idx % len(C.PLOT_COLORS)]
+
+
+def show_tooltip(tooltips: dict, id: str, title: str,
+ clipboard_id: str=None) -> list:
+ """Generate a list of elements to display a text (e.g. a title) with a
+ tooltip and optionally with a Copy&Paste icon and the clipboard
+ functionality enabled.
+
+ :param tooltips: Dictionary with tooltips.
+ :param id: Tooltip ID.
+ :param title: A text for which the tooltip will be displayed.
+ :param clipboard_id: If defined, a Copy&Paste icon is displayed and the
+ clipboard functionality is enabled.
+ :type tooltips: dict
+ :type id: str
+ :type title: str
+ :type clipboard_id: str
+ :returns: List of elements to display a text with a tooltip and
+ optionally with a Copy&Paste icon.
+ :rtype: list
+ """
+
+ return [
+ dcc.Clipboard(target_id=clipboard_id, title="Copy URL") \
+ if clipboard_id else str(),
+ f"{title} ",
+ dbc.Badge(
+ id=id,
+ children="?",
+ pill=True,
+ color="white",
+ text_color="info",
+ class_name="border ms-1",
+ ),
+ dbc.Tooltip(
+ children=tooltips.get(id, str()),
+ target=id,
+ placement="auto"
+ )
+ ]
+
+
+def label(key: str) -> str:
+ """Returns a label for input elements (dropdowns, ...).
+
+ If the label is not defined, the function returns the provided key.
+
+ :param key: The key to the label defined in Constants.LABELS.
+ :type key: str
+ :returns: Label.
+ :rtype: str
+ """
+ return C.LABELS.get(key, key)
+
+
+def sync_checklists(options: list, sel: list, all: list, id: str) -> tuple:
+ """Synchronize a checklist with defined "options" with its "All" checklist.
+
+ :param options: List of options for the cheklist.
+ :param sel: List of selected options.
+ :param all: List of selected option from "All" checklist.
+ :param id: ID of a checklist to be used for synchronization.
+ :returns: Tuple of lists with otions for both checklists.
+ :rtype: tuple of lists
+ """
+ opts = {v["value"] for v in options}
+ if id =="all":
+ sel = list(opts) if all else list()
+ else:
+ all = ["all", ] if set(sel) == opts else list()
+ return sel, all
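+
+# A minimal usage sketch (the option values are made up):
+#
+#     options = [{"label": "A", "value": "a"}, {"label": "B", "value": "b"}]
+#     sync_checklists(options, sel=[], all=["all"], id="all")
+#     # -> (["a", "b"] in some order, ["all"]); checking "All" selects all
+#     sync_checklists(options, sel=["a"], all=[], id="sel")
+#     # -> (["a"], []); a partial selection unchecks "All"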
+
+
+def list_tests(selection: list) -> list:
+ """Transform list of tests to a list of dictionaries usable by checkboxes.
+
+ :param selection: List of tests to be displayed in "Selected tests" window.
+ :type selection: list
+ :returns: List of dictionaries with "label", "value" pairs for a checkbox.
+ :rtype: list
+ """
+ if selection:
+ return [{"label": v["id"], "value": v["id"]} for v in selection]
+ else:
+ return list()
+
+
+def get_date(s_date: str) -> datetime:
+ """Transform string reprezentation of date to datetime.datetime data type.
+
+ :param s_date: String reprezentation of date.
+ :type s_date: str
+ :returns: Date as datetime.datetime.
+ :rtype: datetime.datetime
+ """
+ return datetime(int(s_date[0:4]), int(s_date[5:7]), int(s_date[8:10]))
+
+
+def gen_new_url(url_components: dict, params: dict) -> str:
+ """Generate a new URL with encoded parameters.
+
+ :param url_components: Dictionary with URL elements. It should contain
+ "scheme", "netloc" and "path".
+ :param params: URL parameters to be encoded into the URL.
+ :type url_components: dict
+ :type params: dict
+ :returns: Encoded URL with parameters.
+ :rtype: str
+ """
+
+ if url_components:
+ return url_encode(
+ {
+ "scheme": url_components.get("scheme", ""),
+ "netloc": url_components.get("netloc", ""),
+ "path": url_components.get("path", ""),
+ "params": params
+ }
+ )
+ else:
+ return str()
+
+
+def get_duts(df: pd.DataFrame) -> list:
+ """Get the list of DUTs from the pre-processed information about jobs.
+
+ :param df: DataFrame with information about jobs.
+ :type df: pandas.DataFrame
+ :returns: Alphabetically sorted list of DUTs.
+ :rtype: list
+ """
+ return sorted(list(df["dut"].unique()))
+
+
+def get_ttypes(df: pd.DataFrame, dut: str) -> list:
+ """Get the list of test types from the pre-processed information about
+ jobs.
+
+ :param df: DataFrame with information about jobs.
+ :param dut: The DUT for which the list of test types will be populated.
+ :type df: pandas.DataFrame
+ :type dut: str
+ :returns: Alphabetically sorted list of test types.
+ :rtype: list
+ """
+ return sorted(list(df.loc[(df["dut"] == dut)]["ttype"].unique()))
+
+
+def get_cadences(df: pd.DataFrame, dut: str, ttype: str) -> list:
+ """Get the list of cadences from the pre-processed information about
+ jobs.
+
+ :param df: DataFrame with information about jobs.
+ :param dut: The DUT for which the list of cadences will be populated.
+ :param ttype: The test type for which the list of cadences will be
+ populated.
+ :type df: pandas.DataFrame
+ :type dut: str
+ :type ttype: str
+ :returns: Alphabetically sorted list of cadences.
+ :rtype: list
+ """
+ return sorted(list(df.loc[(
+ (df["dut"] == dut) &
+ (df["ttype"] == ttype)
+ )]["cadence"].unique()))
+
+
+def get_test_beds(df: pd.DataFrame, dut: str, ttype: str, cadence: str) -> list:
+ """Get the list of test beds from the pre-processed information about
+ jobs.
+
+ :param df: DataFrame with information about jobs.
+ :param dut: The DUT for which the list of test beds will be populated.
+ :param ttype: The test type for which the list of test beds will be
+ populated.
+ :param cadence: The cadence for which the list of test beds will be
+ populated.
+ :type df: pandas.DataFrame
+ :type dut: str
+ :type ttype: str
+ :type cadence: str
+ :returns: Alphabetically sorted list of test beds.
+ :rtype: list
+ """
+ return sorted(list(df.loc[(
+ (df["dut"] == dut) &
+ (df["ttype"] == ttype) &
+ (df["cadence"] == cadence)
+ )]["tbed"].unique()))
+
+
+def get_job(df: pd.DataFrame, dut, ttype, cadence, testbed):
+ """Get the name of a job defined by dut, ttype, cadence, test bed.
+ Input information comes from the control panel.
+
+ :param df: DataFrame with information about jobs.
+ :param dut: The DUT for which the job name will be created.
+ :param ttype: The test type for which the job name will be created.
+ :param cadence: The cadence for which the job name will be created.
+ :param testbed: The test bed for which the job name will be created.
+ :type df: pandas.DataFrame
+ :type dut: str
+ :type ttype: str
+ :type cadence: str
+ :type testbed: str
+ :returns: Job name.
+ :rtype: str
+ """
+ return df.loc[(
+ (df["dut"] == dut) &
+ (df["ttype"] == ttype) &
+ (df["cadence"] == cadence) &
+ (df["tbed"] == testbed)
+ )]["job"].item()
+
+
+def generate_options(opts: list, sort: bool=True) -> list:
+ """Return list of options for radio items in control panel. The items in
+ the list are dictionaries with keys "label" and "value".
+
+ :param opts: List of options (str) to be used for the generated list.
+ :param sort: If True, the options are sorted alphabetically.
+ :type opts: list
+ :type sort: bool
+ :returns: List of options (dict).
+ :rtype: list
+ """
+ if sort:
+ opts = sorted(opts)
+ return [{"label": i, "value": i} for i in opts]
+
+
+def set_job_params(df: pd.DataFrame, job: str) -> dict:
+ """Create a dictionary with all options and values for (and from) the
+ given job.
+
+ :param df: DataFrame with information about jobs.
+ :param job: The name of the job for and from which the dictionary will be
+ created.
+ :type df: pandas.DataFrame
+ :type job: str
+ :returns: Dictionary with all options and values for (and from) the
+ given job.
+ :rtype: dict
+ """
+
+ l_job = job.split("-")
+ return {
+ "job": job,
+ "dut": l_job[1],
+ "ttype": l_job[3],
+ "cadence": l_job[4],
+ "tbed": "-".join(l_job[-2:]),
+ "duts": generate_options(get_duts(df)),
+ "ttypes": generate_options(get_ttypes(df, l_job[1])),
+ "cadences": generate_options(get_cadences(df, l_job[1], l_job[3])),
+ "tbeds": generate_options(
+ get_test_beds(df, l_job[1], l_job[3], l_job[4]))
+ }
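+
+# A sketch of the expected job name structure, using the default job from
+# Constants as an example:
+#
+#     set_job_params(df, "csit-vpp-perf-mrr-daily-master-2n-icx")
+#     # -> dut = "vpp", ttype = "mrr", cadence = "daily", tbed = "2n-icx",
+#     #    plus the option lists generated from "df".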
+
+
+def get_list_group_items(
+ items: list,
+ type: str,
+ colorize: bool=True,
+ add_index: bool=False
+ ) -> list:
+ """Generate list of ListGroupItems with checkboxes with selected items.
+
+ :param items: List of items to be displayed in the ListGroup.
+ :param type: The type part of an element ID.
+ :param colorize: If True, the color of labels is set, otherwise the default
+ color is used.
+ :param add_index: Add index to the list items.
+ :type items: list
+ :type type: str
+ :type colorize: bool
+ :type add_index: bool
+ :returns: List of ListGroupItems with checkboxes for the selected items.
+ :rtype: list
+ """
+
+ children = list()
+ for i, l in enumerate(items):
+ idx = f"{i + 1}. " if add_index else str()
+ label = f"{idx}{l['id']}" if isinstance(l, dict) else f"{idx}{l}"
+ children.append(
+ dbc.ListGroupItem(
+ children=[
+ dbc.Checkbox(
+ id={"type": type, "index": i},
+ label=label,
+ value=False,
+ label_class_name="m-0 p-0",
+ label_style={
+ "font-size": ".875em",
+ "color": get_color(i) if colorize else "#55595c"
+ },
+ class_name="info"
+ )
+ ],
+ class_name="p-0"
+ )
+ )
+
+ return children
+
+
+def relative_change_stdev(mean1, mean2, std1, std2):
+ """Compute relative standard deviation of change of two values.
+
+ The "1" values are the base for comparison.
+ Results are returned as percentage (and percentual points for stdev).
+ Linearized theory is used, so results are wrong for relatively large stdev.
+
+ :param mean1: Mean of the first number.
+ :param mean2: Mean of the second number.
+ :param std1: Standard deviation estimate of the first number.
+ :param std2: Standard deviation estimate of the second number.
+ :type mean1: float
+ :type mean2: float
+ :type std1: float
+ :type std2: float
+ :returns: Relative change and its stdev.
+ :rtype: tuple(float, float)
+ """
+ mean1, mean2 = float(mean1), float(mean2)
+ quotient = mean2 / mean1
+ first = std1 / mean1
+ second = std2 / mean2
+ std = quotient * sqrt(first * first + second * second)
+ return (quotient - 1) * 100, std * 100
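+
+# A worked example (the numbers are made up): comparing mean2 = 110.0 against
+# mean1 = 100.0 with std1 = std2 = 5.0:
+#
+#     relative_change_stdev(100.0, 110.0, 5.0, 5.0)
+#     # quotient = 1.1, so the relative change is 10.0 percent;
+#     # std = 1.1 * sqrt(0.05**2 + (5.0 / 110.0)**2), i.e. ~7.43
+#     # percentual points.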
+
+
+def get_hdrh_latencies(row: pd.Series, name: str) -> dict:
+ """Get the HDRH latencies from the test data.
+
+ :param row: A row from the data frame with test data.
+ :param name: The test name to be displayed as the graph title.
+ :type row: pandas.Series
+ :type name: str
+ :returns: Dictionary with HDRH latencies.
+ :rtype: dict
+ """
+
+ latencies = {"name": name}
+ for key in C.LAT_HDRH:
+ try:
+ latencies[key] = row[key]
+ except KeyError:
+ return None
+
+ return latencies
+
+
+def graph_hdrh_latency(data: dict, layout: dict) -> go.Figure:
+ """Generate HDR Latency histogram graphs.
+
+ :param data: HDRH data.
+ :param layout: Layout of plot.ly graph.
+ :type data: dict
+ :type layout: dict
+ :returns: HDR latency Histogram.
+ :rtype: plotly.graph_objects.Figure
+ """
+
+ fig = None
+
+ traces = list()
+ for idx, (lat_name, lat_hdrh) in enumerate(data.items()):
+ try:
+ decoded = hdrh.histogram.HdrHistogram.decode(lat_hdrh)
+ except (hdrh.codec.HdrLengthException, TypeError):
+ continue
+ previous_x = 0.0
+ prev_perc = 0.0
+ xaxis = list()
+ yaxis = list()
+ hovertext = list()
+ for item in decoded.get_recorded_iterator():
+ # The real value is "percentile".
+ # For 100%, we cut that down to "x_perc" to avoid
+ # infinity.
+ percentile = item.percentile_level_iterated_to
+ x_perc = min(percentile, C.PERCENTILE_MAX)
+ xaxis.append(previous_x)
+ yaxis.append(item.value_iterated_to)
+ hovertext.append(
+ f"<b>{C.GRAPH_LAT_HDRH_DESC[lat_name]}</b><br>"
+ f"Direction: {('W-E', 'E-W')[idx % 2]}<br>"
+ f"Percentile: {prev_perc:.5f}-{percentile:.5f}%<br>"
+ f"Latency: {item.value_iterated_to}uSec"
+ )
+ next_x = 100.0 / (100.0 - x_perc)
+ xaxis.append(next_x)
+ yaxis.append(item.value_iterated_to)
+ hovertext.append(
+ f"<b>{C.GRAPH_LAT_HDRH_DESC[lat_name]}</b><br>"
+ f"Direction: {('W-E', 'E-W')[idx % 2]}<br>"
+ f"Percentile: {prev_perc:.5f}-{percentile:.5f}%<br>"
+ f"Latency: {item.value_iterated_to}uSec"
+ )
+ previous_x = next_x
+ prev_perc = percentile
+
+ traces.append(
+ go.Scatter(
+ x=xaxis,
+ y=yaxis,
+ name=C.GRAPH_LAT_HDRH_DESC[lat_name],
+ mode="lines",
+ legendgroup=C.GRAPH_LAT_HDRH_DESC[lat_name],
+ showlegend=bool(idx % 2),
+ line=dict(
+ color=get_color(int(idx/2)),
+ dash="solid",
+ width=1 if idx % 2 else 2
+ ),
+ hovertext=hovertext,
+ hoverinfo="text"
+ )
+ )
+ if traces:
+ fig = go.Figure()
+ fig.add_traces(traces)
+ layout_hdrh = layout.get("plot-hdrh-latency", None)
+ if layout_hdrh:
+ fig.update_layout(layout_hdrh)
+
+ return fig
+
+
+def navbar_trending(active: tuple):
+ """Add nav element with navigation panel. It is placed on the top.
+
+ :param active: Tuple of boolean values defining the active items in the
+ navbar. True == active
+ :type active: tuple
+ :returns: Navigation bar.
+ :rtype: dbc.NavbarSimple
+ """
+ return dbc.NavbarSimple(
+ children=[
+ dbc.NavItem(dbc.NavLink(
+ C.TREND_TITLE,
+ active=active[0],
+ external_link=True,
+ href="/trending"
+ )),
+ dbc.NavItem(dbc.NavLink(
+ C.NEWS_TITLE,
+ active=active[1],
+ external_link=True,
+ href="/news"
+ )),
+ dbc.NavItem(dbc.NavLink(
+ C.STATS_TITLE,
+ active=active[2],
+ external_link=True,
+ href="/stats"
+ )),
+ dbc.NavItem(dbc.NavLink(
+ C.SEARCH_TITLE,
+ active=active[3],
+ external_link=True,
+ href="/search"
+ )),
+ dbc.NavItem(dbc.NavLink(
+ "Documentation",
+ id="btn-documentation",
+ ))
+ ],
+ id="navbarsimple-main",
+ brand=C.BRAND,
+ brand_href="/",
+ brand_external_link=True,
+ class_name="p-2",
+ fluid=True
+ )
+
+
+def navbar_report(active: tuple):
+ """Add nav element with navigation panel. It is placed on the top.
+
+ :param active: Tuple of boolean values defining the active items in the
+ navbar. True == active
+ :type active: tuple
+ :returns: Navigation bar.
+ :rtype: dbc.NavbarSimple
+ """
+ return dbc.NavbarSimple(
+ id="navbarsimple-main",
+ children=[
+ dbc.NavItem(dbc.NavLink(
+ C.REPORT_TITLE,
+ active=active[0],
+ external_link=True,
+ href="/report"
+ )),
+ dbc.NavItem(dbc.NavLink(
+ "Comparisons",
+ active=active[1],
+ external_link=True,
+ href="/comparisons"
+ )),
+ dbc.NavItem(dbc.NavLink(
+ "Coverage Data",
+ active=active[2],
+ external_link=True,
+ href="/coverage"
+ )),
+ dbc.NavItem(dbc.NavLink(
+ C.SEARCH_TITLE,
+ active=active[3],
+ external_link=True,
+ href="/search"
+ )),
+ dbc.NavItem(dbc.NavLink(
+ "Documentation",
+ id="btn-documentation",
+ ))
+ ],
+ brand=C.BRAND,
+ brand_href="/",
+ brand_external_link=True,
+ class_name="p-2",
+ fluid=True
+ )
+
+
+def filter_table_data(
+ store_table_data: list,
+ table_filter: str
+ ) -> list:
+ """Filter table data using user specified filter.
+
+ :param store_table_data: Table data represented as a list of records.
+ :param table_filter: User specified filter.
+ :type store_table_data: list
+ :type table_filter: str
+ :returns: A new table created by filtering of table data represented as
+ a list of records.
+ :rtype: list
+ """
+
+ # Checks:
+ if not any((table_filter, store_table_data, )):
+ return store_table_data
+
+ def _split_filter_part(filter_part: str) -> tuple:
+ """Split a part of filter into column name, operator and value.
+ A "part of filter" is a sting berween "&&" operator.
+
+ :param filter_part: A part of filter.
+ :type filter_part: str
+ :returns: Column name, operator, value
+ :rtype: tuple[str, str, str|float]
+ """
+ for operator_type in C.OPERATORS:
+ for operator in operator_type:
+ if operator in filter_part:
+ name_p, val_p = filter_part.split(operator, 1)
+ name = name_p[name_p.find("{") + 1 : name_p.rfind("}")]
+ val_p = val_p.strip()
+ if (val_p[0] == val_p[-1] and val_p[0] in ("'", '"', '`')):
+ value = val_p[1:-1].replace("\\" + val_p[0], val_p[0])
+ else:
+ try:
+ value = float(val_p)
+ except ValueError:
+ value = val_p
+
+ return name, operator_type[0].strip(), value
+ return (None, None, None)
+
+ df = pd.DataFrame.from_records(store_table_data)
+ for filter_part in table_filter.split(" && "):
+ col_name, operator, filter_value = _split_filter_part(filter_part)
+ if operator == "contains":
+ df = df.loc[df[col_name].str.contains(filter_value, regex=True)]
+ elif operator in ("eq", "ne", "lt", "le", "gt", "ge"):
+ # These operators match pandas series operator method names.
+ df = df.loc[getattr(df[col_name], operator)(filter_value)]
+ elif operator == "datestartswith":
+ # This is a simplification of the front-end filtering logic,
+ # only works with complete fields in standard format.
+ # Currently not used in comparison tables.
+ df = df.loc[df[col_name].str.startswith(filter_value)]
+
+ return df.to_dict("records")
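+
+# A sketch of the filter syntax handled above (the column names and values
+# are made up):
+#
+#     filter_table_data(
+#         store_table_data,
+#         '{Test Name} contains "ethip4" && {Throughput} ge 10.5'
+#     )
+#     # Parts are split on " && "; in each part, the column name is taken
+#     # from inside "{...}", followed by the operator and the value.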
+
+
+def sort_table_data(
+ store_table_data: list,
+ sort_by: list
+ ) -> list:
+ """Sort table data using user specified order.
+
+ :param store_table_data: Table data represented as a list of records.
+ :param sort_by: User specified sorting order (multicolumn).
+ :type store_table_data: list
+ :type sort_by: list
+ :returns: A new table created by sorting the table data represented as
+ a list of records.
+ :rtype: list
+ """
+
+ # Checks:
+ if not any((sort_by, store_table_data, )):
+ return store_table_data
+
+ df = pd.DataFrame.from_records(store_table_data)
+ if len(sort_by):
+ dff = df.sort_values(
+ [col["column_id"] for col in sort_by],
+ ascending=[col["direction"] == "asc" for col in sort_by],
+ inplace=False
+ )
+ else:
+ # No sort is applied
+ dff = df
+
+ return dff.to_dict("records")
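+
+# A sketch of the "sort_by" structure, as produced by a dash DataTable in
+# multi-column sorting mode (the column name is made up):
+#
+#     sort_by = [{"column_id": "Throughput", "direction": "asc"}]
+#     sorted_records = sort_table_data(store_table_data, sort_by)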
+
+
+def show_trending_graph_data(
+ trigger: Trigger,
+ data: dict,
+ graph_layout: dict
+ ) -> tuple:
+ """Generates the data for the offcanvas displayed when a particular point in
+ a trending graph (daily data) is clicked on.
+
+ :param trigger: The information from the trigger when the data point is
+ clicked on.
+ :param data: The data from the clicked point in the graph.
+ :param graph_layout: The layout of the HDRH latency graph.
+ :type trigger: Trigger
+ :type data: dict
+ :type graph_layout: dict
+ :returns: The data to be displayed on the offcanvas and the information to
+ show the offcanvas.
+ :rtype: tuple(list, list, bool)
+ """
+
+ if trigger.idx == "tput":
+ idx = 0
+ elif trigger.idx == "bandwidth":
+ idx = 1
+ elif trigger.idx == "lat":
+ idx = len(data) - 1
+ else:
+ return list(), list(), False
+ try:
+ data = data[idx]["points"][0]
+ except (IndexError, KeyError, ValueError, TypeError):
+ return list(), list(), False
+
+ metadata = no_update
+ graph = list()
+
+ list_group_items = list()
+ for itm in data.get("text", None).split("<br>"):
+ if not itm:
+ continue
+ lst_itm = itm.split(": ")
+ if lst_itm[0] == "csit-ref":
+ list_group_item = dbc.ListGroupItem([
+ dbc.Badge(lst_itm[0]),
+ html.A(
+ lst_itm[1],
+ href=f"{C.URL_LOGS}{lst_itm[1]}",
+ target="_blank"
+ )
+ ])
+ else:
+ list_group_item = dbc.ListGroupItem([
+ dbc.Badge(lst_itm[0]),
+ lst_itm[1]
+ ])
+ list_group_items.append(list_group_item)
+
+ if trigger.idx == "tput":
+ title = "Throughput"
+ elif trigger.idx == "bandwidth":
+ title = "Bandwidth"
+ elif trigger.idx == "lat":
+ title = "Latency"
+ hdrh_data = data.get("customdata", None)
+ if hdrh_data:
+ graph = [dbc.Card(
+ class_name="gy-2 p-0",
+ children=[
+ dbc.CardHeader(hdrh_data.pop("name")),
+ dbc.CardBody(
+ dcc.Graph(
+ id="hdrh-latency-graph",
+ figure=graph_hdrh_latency(hdrh_data, graph_layout)
+ )
+ )
+ ])
+ ]
+
+ metadata = [
+ dbc.Card(
+ class_name="gy-2 p-0",
+ children=[
+ dbc.CardHeader(children=[
+ dcc.Clipboard(
+ target_id="tput-lat-metadata",
+ title="Copy",
+ style={"display": "inline-block"}
+ ),
+ title
+ ]),
+ dbc.CardBody(
+ dbc.ListGroup(list_group_items, flush=True),
+ id="tput-lat-metadata",
+ class_name="p-0",
+ )
+ ]
+ )
+ ]
+
+ return metadata, graph, True
+
+
+def show_iterative_graph_data(
+ trigger: Trigger,
+ data: dict,
+ graph_layout: dict
+ ) -> tuple:
+ """Generates the data for the offcanvas displayed when a particular point in
+ a box graph (iterative data) is clicked on.
+
+ :param trigger: The information from the trigger when the data point is
+ clicked on.
+ :param data: The data from the clicked point in the graph.
+ :param graph_layout: The layout of the HDRH latency graph.
+ :type trigger: Trigger
+ :type data: dict
+ :type graph_layout: dict
+ :returns: The data to be displayed on the offcanvas and the information to
+ show the offcanvas.
+ :rtype: tuple(list, list, bool)
+ """
+
+ if trigger.idx == "tput":
+ idx = 0
+ elif trigger.idx == "bandwidth":
+ idx = 1
+ elif trigger.idx == "lat":
+ idx = len(data) - 1
+ else:
+ return list(), list(), False
+
+ try:
+ data = data[idx]["points"]
+ except (IndexError, KeyError, ValueError, TypeError):
+ return list(), list(), False
+
+ def _process_stats(data: list, param: str) -> list:
+ """Process statistical data provided by plot.ly box graph.
+
+ :param data: Statistical data provided by plot.ly box graph.
+ :param param: Parameter saying if the data come from the "tput",
+ "bandwidth" or "lat" graph.
+ :type data: list
+ :type param: str
+ :returns: List of tuples where the first value is the
+ statistic's name and the second one is its value.
+ :rtype: list
+ """
+ if len(data) == 7:
+ stats = ("max", "upper fence", "q3", "median", "q1",
+ "lower fence", "min")
+ elif len(data) == 9:
+ stats = ("outlier", "max", "upper fence", "q3", "median",
+ "q1", "lower fence", "min", "outlier")
+ elif len(data) == 1:
+ if param == "lat":
+ stats = ("average latency at 50% PDR", )
+ elif param == "bandwidth":
+ stats = ("bandwidth", )
+ else:
+ stats = ("throughput", )
+ else:
+ return list()
+ unit = " [us]" if param == "lat" else str()
+ return [(f"{stat}{unit}", f"{value['y']:,.0f}")
+ for stat, value in zip(stats, data)]
+
+ customdata = data[0].get("customdata", dict())
+ datapoint = customdata.get("metadata", dict())
+ hdrh_data = customdata.get("hdrh", dict())
+
+ list_group_items = list()
+ for k, v in datapoint.items():
+ if k == "csit-ref":
+ if len(data) > 1:
+ continue
+ list_group_item = dbc.ListGroupItem([
+ dbc.Badge(k),
+ html.A(v, href=f"{C.URL_LOGS}{v}", target="_blank")
+ ])
+ else:
+ list_group_item = dbc.ListGroupItem([dbc.Badge(k), v])
+ list_group_items.append(list_group_item)
+
+ graph = list()
+ if trigger.idx == "tput":
+ title = "Throughput"
+ elif trigger.idx == "bandwidth":
+ title = "Bandwidth"
+ elif trigger.idx == "lat":
+ title = "Latency"
+ if len(data) == 1:
+ if hdrh_data:
+ graph = [dbc.Card(
+ class_name="gy-2 p-0",
+ children=[
+ dbc.CardHeader(hdrh_data.pop("name")),
+ dbc.CardBody(dcc.Graph(
+ id="hdrh-latency-graph",
+ figure=graph_hdrh_latency(hdrh_data, graph_layout)
+ ))
+ ])
+ ]
+
+ for k, v in _process_stats(data, trigger.idx):
+ list_group_items.append(dbc.ListGroupItem([dbc.Badge(k), v]))
+
+ metadata = [
+ dbc.Card(
+ class_name="gy-2 p-0",
+ children=[
+ dbc.CardHeader(children=[
+ dcc.Clipboard(
+ target_id="tput-lat-metadata",
+ title="Copy",
+ style={"display": "inline-block"}
+ ),
+ title
+ ]),
+ dbc.CardBody(
+ dbc.ListGroup(list_group_items, flush=True),
+ id="tput-lat-metadata",
+ class_name="p-0"
+ )
+ ]
+ )
+ ]
+
+ return metadata, graph, True