author     Tibor Frank <tifrank@cisco.com>  2022-10-25 10:04:48 +0200
committer  Tibor Frank <tifrank@cisco.com>  2023-01-27 07:48:41 +0100
commit     4d03dd53c2d77bf2e35a07ed3a5a95f323c3a370 (patch)
tree       2593036a7827709dd9f7b0f1e773da947a149529 /csit.infra.dash/app/cdash
parent     73d84097f413bf9727f5a2fa91cd803b25bf5315 (diff)
C-Dash: Add telemetry panel
Signed-off-by: Tibor Frank <tifrank@cisco.com>
Change-Id: Idee88c1da9bebd433fa47f5d983d432c54b5fbae
Diffstat (limited to 'csit.infra.dash/app/cdash')
-rw-r--r--  csit.infra.dash/app/cdash/data/data.py  7
-rw-r--r--  csit.infra.dash/app/cdash/data/data.yaml  2
-rw-r--r--  csit.infra.dash/app/cdash/news/layout.py  6
-rw-r--r--  csit.infra.dash/app/cdash/report/layout.py  5
-rw-r--r--  csit.infra.dash/app/cdash/stats/layout.py  6
-rw-r--r--  csit.infra.dash/app/cdash/trending/graphs.py  547
-rw-r--r--  csit.infra.dash/app/cdash/trending/layout.py  654
-rw-r--r--  csit.infra.dash/app/cdash/trending/layout.yaml  45
-rw-r--r--  csit.infra.dash/app/cdash/utils/constants.py  5
-rw-r--r--  csit.infra.dash/app/cdash/utils/control_panel.py  4
-rw-r--r--  csit.infra.dash/app/cdash/utils/telemetry_data.py  330
-rw-r--r--  csit.infra.dash/app/cdash/utils/trigger.py  6
-rw-r--r--  csit.infra.dash/app/cdash/utils/url_processing.py  4
-rw-r--r--  csit.infra.dash/app/cdash/utils/utils.py  25
14 files changed, 1366 insertions, 280 deletions
diff --git a/csit.infra.dash/app/cdash/data/data.py b/csit.infra.dash/app/cdash/data/data.py
index 8d2ae965dd..7ddb44311a 100644
--- a/csit.infra.dash/app/cdash/data/data.py
+++ b/csit.infra.dash/app/cdash/data/data.py
@@ -135,8 +135,9 @@ class Data:
:type last_modified_end: datetime, optional
:type days: integer, optional
:returns: List of file names.
- :rtype: List
+ :rtype: list
"""
+ file_list = list()
if days:
last_modified_begin = datetime.now(tz=UTC) - timedelta(days=days)
try:
@@ -215,9 +216,7 @@ class Data:
if self._debug:
df.info(verbose=True, memory_usage='deep')
logging.info(
- u"\n"
- f"Creation of dataframe {path} took: {time() - start}"
- u"\n"
+ f"\nCreation of dataframe {path} took: {time() - start}\n"
)
except NoFilesFound as err:
logging.error(f"No parquets found.\n{err}")
diff --git a/csit.infra.dash/app/cdash/data/data.yaml b/csit.infra.dash/app/cdash/data/data.yaml
index 396f1b1638..ec7f7ef1dd 100644
--- a/csit.infra.dash/app/cdash/data/data.yaml
+++ b/csit.infra.dash/app/cdash/data/data.yaml
@@ -51,6 +51,7 @@ trending-mrr:
- result_receive_rate_rate_avg
- result_receive_rate_rate_stdev
- result_receive_rate_rate_unit
+ - telemetry
trending-ndrpdr:
path: s3://fdio-docs-s3-cloudfront-index/csit/parquet/trending
columns:
@@ -77,6 +78,7 @@ trending-ndrpdr:
- result_latency_forward_pdr_50_unit
- result_latency_forward_pdr_10_hdrh
- result_latency_forward_pdr_0_hdrh
+ - telemetry
iterative-mrr:
path: s3://fdio-docs-s3-cloudfront-index/csit/parquet/iterative_{release}
columns:
diff --git a/csit.infra.dash/app/cdash/news/layout.py b/csit.infra.dash/app/cdash/news/layout.py
index 31712d6902..11151d727a 100644
--- a/csit.infra.dash/app/cdash/news/layout.py
+++ b/csit.infra.dash/app/cdash/news/layout.py
@@ -71,7 +71,11 @@ class Layout:
debug=True
).read_stats(days=C.NEWS_TIME_PERIOD)
- df_tst_info = pd.concat([data_mrr, data_ndrpdr], ignore_index=True)
+ df_tst_info = pd.concat(
+ [data_mrr, data_ndrpdr],
+ ignore_index=True,
+ copy=False
+ )
# Prepare information for the control panel:
self._jobs = sorted(list(df_tst_info["job"].unique()))
diff --git a/csit.infra.dash/app/cdash/report/layout.py b/csit.infra.dash/app/cdash/report/layout.py
index 64e3b8bcde..50cf092ae1 100644
--- a/csit.infra.dash/app/cdash/report/layout.py
+++ b/csit.infra.dash/app/cdash/report/layout.py
@@ -122,7 +122,8 @@ class Layout:
data_ndrpdr["release"] = rls
self._data = pd.concat(
[self._data, data_mrr, data_ndrpdr],
- ignore_index=True
+ ignore_index=True,
+ copy=False
)
# Get structure of tests:
@@ -1251,7 +1252,7 @@ class Layout:
if on_draw:
if store_sel:
- lg_selected = get_list_group_items(store_sel)
+ lg_selected = get_list_group_items(store_sel, "sel-cl")
plotting_area = self._get_plotting_area(
store_sel,
bool(normalize),
diff --git a/csit.infra.dash/app/cdash/stats/layout.py b/csit.infra.dash/app/cdash/stats/layout.py
index 2b88caaf04..116185d62c 100644
--- a/csit.infra.dash/app/cdash/stats/layout.py
+++ b/csit.infra.dash/app/cdash/stats/layout.py
@@ -83,7 +83,11 @@ class Layout:
debug=True
).read_stats(days=self._time_period)
- df_tst_info = pd.concat([data_mrr, data_ndrpdr], ignore_index=True)
+ df_tst_info = pd.concat(
+ [data_mrr, data_ndrpdr],
+ ignore_index=True,
+ copy=False
+ )
# Pre-process the data:
data_stats = data_stats[~data_stats.job.str.contains("-verify-")]
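
Note: the switch to pd.concat(..., copy=False) recurs in the news, stats, report and
trending layouts above and below. A minimal sketch of the intent, using throwaway
stand-in frames instead of the real parquet-backed trending data:

    import pandas as pd

    # Stand-in frames; in the dashboard these are the MRR and NDR/PDR
    # trending dataframes read from parquet.
    data_mrr = pd.DataFrame({"job": ["job-mrr"], "build": [1]})
    data_ndrpdr = pd.DataFrame({"job": ["job-ndrpdr"], "build": [2]})

    # copy=False asks pandas not to duplicate the underlying data blocks when
    # it does not have to, which matters for the large trending dataframes.
    df_tst_info = pd.concat([data_mrr, data_ndrpdr], ignore_index=True, copy=False)
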
diff --git a/csit.infra.dash/app/cdash/trending/graphs.py b/csit.infra.dash/app/cdash/trending/graphs.py
index fdad73b8c3..79e2697f54 100644
--- a/csit.infra.dash/app/cdash/trending/graphs.py
+++ b/csit.infra.dash/app/cdash/trending/graphs.py
@@ -45,14 +45,14 @@ def _get_hdrh_latencies(row: pd.Series, name: str) -> dict:
return latencies
-def select_trending_data(data: pd.DataFrame, itm:dict) -> pd.DataFrame:
+def select_trending_data(data: pd.DataFrame, itm: dict) -> pd.DataFrame:
"""Select the data for graphs from the provided data frame.
:param data: Data frame with data for graphs.
:param itm: Item (in this case job name) which data will be selected from
the input data frame.
:type data: pandas.DataFrame
- :type itm: str
+ :type itm: dict
:returns: A data frame with selected data.
:rtype: pandas.DataFrame
"""
@@ -84,206 +84,217 @@ def select_trending_data(data: pd.DataFrame, itm:dict) -> pd.DataFrame:
return df
-def _generate_trending_traces(ttype: str, name: str, df: pd.DataFrame,
- color: str, norm_factor: float) -> list:
- """Generate the trending traces for the trending graph.
+def graph_trending(
+ data: pd.DataFrame,
+ sel: dict,
+ layout: dict,
+ normalize: bool
+ ) -> tuple:
+ """Generate the trending graph(s) - MRR, NDR, PDR and for PDR also Latences
+ (result_latency_forward_pdr_50_avg).
- :param ttype: Test type (MRR, NDR, PDR).
- :param name: The test name to be displayed as the graph title.
- :param df: Data frame with test data.
- :param color: The color of the trace (samples and trend line).
- :param norm_factor: The factor used for normalization of the results to CPU
- frequency set to Constants.NORM_FREQUENCY.
- :type ttype: str
- :type name: str
- :type df: pandas.DataFrame
- :type color: str
- :type norm_factor: float
- :returns: Traces (samples, trending line, anomalies)
- :rtype: list
+ :param data: Data frame with test results.
+ :param sel: Selected tests.
+ :param layout: Layout of plot.ly graph.
+ :param normalize: If True, the data is normalized to CPU frequency
+ Constants.NORM_FREQUENCY.
+ :type data: pandas.DataFrame
+ :type sel: dict
+ :type layout: dict
+ :type normalize: bool
+ :returns: Trending graph(s)
+ :rtype: tuple(plotly.graph_objects.Figure, plotly.graph_objects.Figure)
"""
- df = df.dropna(subset=[C.VALUE[ttype], ])
- if df.empty:
- return list()
+ if not sel:
+ return None, None
- x_axis = df["start_time"].tolist()
- if ttype == "pdr-lat":
- y_data = [(itm / norm_factor) for itm in df[C.VALUE[ttype]].tolist()]
- else:
- y_data = [(itm * norm_factor) for itm in df[C.VALUE[ttype]].tolist()]
-
- anomalies, trend_avg, trend_stdev = classify_anomalies(
- {k: v for k, v in zip(x_axis, y_data)}
- )
-
- hover = list()
- customdata = list()
- customdata_samples = list()
- for idx, (_, row) in enumerate(df.iterrows()):
- d_type = "trex" if row["dut_type"] == "none" else row["dut_type"]
- hover_itm = (
- f"date: {row['start_time'].strftime('%Y-%m-%d %H:%M:%S')}<br>"
- f"<prop> [{row[C.UNIT[ttype]]}]: {y_data[idx]:,.0f}<br>"
- f"<stdev>"
- f"{d_type}-ref: {row['dut_version']}<br>"
- f"csit-ref: {row['job']}/{row['build']}<br>"
- f"hosts: {', '.join(row['hosts'])}"
- )
- if ttype == "mrr":
- stdev = (
- f"stdev [{row['result_receive_rate_rate_unit']}]: "
- f"{row['result_receive_rate_rate_stdev']:,.0f}<br>"
- )
- else:
- stdev = ""
- hover_itm = hover_itm.replace(
- "<prop>", "latency" if ttype == "pdr-lat" else "average"
- ).replace("<stdev>", stdev)
- hover.append(hover_itm)
+
+ def _generate_trending_traces(
+ ttype: str,
+ name: str,
+ df: pd.DataFrame,
+ color: str,
+ norm_factor: float
+ ) -> list:
+ """Generate the trending traces for the trending graph.
+
+ :param ttype: Test type (MRR, NDR, PDR).
+ :param name: The test name to be displayed as the graph title.
+ :param df: Data frame with test data.
+ :param color: The color of the trace (samples and trend line).
+ :param norm_factor: The factor used for normalization of the results to
+ CPU frequency set to Constants.NORM_FREQUENCY.
+ :type ttype: str
+ :type name: str
+ :type df: pandas.DataFrame
+ :type color: str
+ :type norm_factor: float
+ :returns: Traces (samples, trending line, anomalies)
+ :rtype: list
+ """
+
+ df = df.dropna(subset=[C.VALUE[ttype], ])
+ if df.empty:
+ return list()
+
+ x_axis = df["start_time"].tolist()
if ttype == "pdr-lat":
- customdata_samples.append(_get_hdrh_latencies(row, name))
- customdata.append({"name": name})
+ y_data = [(v / norm_factor) for v in df[C.VALUE[ttype]].tolist()]
else:
- customdata_samples.append({"name": name, "show_telemetry": True})
- customdata.append({"name": name})
-
- hover_trend = list()
- for avg, stdev, (_, row) in zip(trend_avg, trend_stdev, df.iterrows()):
- d_type = "trex" if row["dut_type"] == "none" else row["dut_type"]
- hover_itm = (
- f"date: {row['start_time'].strftime('%Y-%m-%d %H:%M:%S')}<br>"
- f"trend [pps]: {avg:,.0f}<br>"
- f"stdev [pps]: {stdev:,.0f}<br>"
- f"{d_type}-ref: {row['dut_version']}<br>"
- f"csit-ref: {row['job']}/{row['build']}<br>"
- f"hosts: {', '.join(row['hosts'])}"
- )
- if ttype == "pdr-lat":
- hover_itm = hover_itm.replace("[pps]", "[us]")
- hover_trend.append(hover_itm)
-
- traces = [
- go.Scatter( # Samples
- x=x_axis,
- y=y_data,
- name=name,
- mode="markers",
- marker={
- "size": 5,
- "color": color,
- "symbol": "circle",
- },
- text=hover,
- hoverinfo="text+name",
- showlegend=True,
- legendgroup=name,
- customdata=customdata_samples
- ),
- go.Scatter( # Trend line
- x=x_axis,
- y=trend_avg,
- name=name,
- mode="lines",
- line={
- "shape": "linear",
- "width": 1,
- "color": color,
- },
- text=hover_trend,
- hoverinfo="text+name",
- showlegend=False,
- legendgroup=name,
- customdata=customdata
+ y_data = [(v * norm_factor) for v in df[C.VALUE[ttype]].tolist()]
+
+ anomalies, trend_avg, trend_stdev = classify_anomalies(
+ {k: v for k, v in zip(x_axis, y_data)}
)
- ]
- if anomalies:
- anomaly_x = list()
- anomaly_y = list()
- anomaly_color = list()
hover = list()
- for idx, anomaly in enumerate(anomalies):
- if anomaly in ("regression", "progression"):
- anomaly_x.append(x_axis[idx])
- anomaly_y.append(trend_avg[idx])
- anomaly_color.append(C.ANOMALY_COLOR[anomaly])
- hover_itm = (
- f"date: {x_axis[idx].strftime('%Y-%m-%d %H:%M:%S')}<br>"
- f"trend [pps]: {trend_avg[idx]:,.0f}<br>"
- f"classification: {anomaly}"
+ customdata = list()
+ customdata_samples = list()
+ for idx, (_, row) in enumerate(df.iterrows()):
+ d_type = "trex" if row["dut_type"] == "none" else row["dut_type"]
+ hover_itm = (
+ f"date: {row['start_time'].strftime('%Y-%m-%d %H:%M:%S')}<br>"
+ f"<prop> [{row[C.UNIT[ttype]]}]: {y_data[idx]:,.0f}<br>"
+ f"<stdev>"
+ f"{d_type}-ref: {row['dut_version']}<br>"
+ f"csit-ref: {row['job']}/{row['build']}<br>"
+ f"hosts: {', '.join(row['hosts'])}"
+ )
+ if ttype == "mrr":
+ stdev = (
+ f"stdev [{row['result_receive_rate_rate_unit']}]: "
+ f"{row['result_receive_rate_rate_stdev']:,.0f}<br>"
)
- if ttype == "pdr-lat":
- hover_itm = hover_itm.replace("[pps]", "[us]")
- hover.append(hover_itm)
- anomaly_color.extend([0.0, 0.5, 1.0])
- traces.append(
- go.Scatter(
- x=anomaly_x,
- y=anomaly_y,
+ else:
+ stdev = ""
+ hover_itm = hover_itm.replace(
+ "<prop>", "latency" if ttype == "pdr-lat" else "average"
+ ).replace("<stdev>", stdev)
+ hover.append(hover_itm)
+ if ttype == "pdr-lat":
+ customdata_samples.append(_get_hdrh_latencies(row, name))
+ customdata.append({"name": name})
+ else:
+ customdata_samples.append(
+ {"name": name, "show_telemetry": True}
+ )
+ customdata.append({"name": name})
+
+ hover_trend = list()
+ for avg, stdev, (_, row) in zip(trend_avg, trend_stdev, df.iterrows()):
+ d_type = "trex" if row["dut_type"] == "none" else row["dut_type"]
+ hover_itm = (
+ f"date: {row['start_time'].strftime('%Y-%m-%d %H:%M:%S')}<br>"
+ f"trend [pps]: {avg:,.0f}<br>"
+ f"stdev [pps]: {stdev:,.0f}<br>"
+ f"{d_type}-ref: {row['dut_version']}<br>"
+ f"csit-ref: {row['job']}/{row['build']}<br>"
+ f"hosts: {', '.join(row['hosts'])}"
+ )
+ if ttype == "pdr-lat":
+ hover_itm = hover_itm.replace("[pps]", "[us]")
+ hover_trend.append(hover_itm)
+
+ traces = [
+ go.Scatter( # Samples
+ x=x_axis,
+ y=y_data,
+ name=name,
mode="markers",
+ marker={
+ "size": 5,
+ "color": color,
+ "symbol": "circle",
+ },
text=hover,
hoverinfo="text+name",
- showlegend=False,
+ showlegend=True,
legendgroup=name,
+ customdata=customdata_samples
+ ),
+ go.Scatter( # Trend line
+ x=x_axis,
+ y=trend_avg,
name=name,
- customdata=customdata,
- marker={
- "size": 15,
- "symbol": "circle-open",
- "color": anomaly_color,
- "colorscale": C.COLORSCALE_LAT \
- if ttype == "pdr-lat" else C.COLORSCALE_TPUT,
- "showscale": True,
- "line": {
- "width": 2
- },
- "colorbar": {
- "y": 0.5,
- "len": 0.8,
- "title": "Circles Marking Data Classification",
- "titleside": "right",
- "tickmode": "array",
- "tickvals": [0.167, 0.500, 0.833],
- "ticktext": C.TICK_TEXT_LAT \
- if ttype == "pdr-lat" else C.TICK_TEXT_TPUT,
- "ticks": "",
- "ticklen": 0,
- "tickangle": -90,
- "thickness": 10
+ mode="lines",
+ line={
+ "shape": "linear",
+ "width": 1,
+ "color": color,
+ },
+ text=hover_trend,
+ hoverinfo="text+name",
+ showlegend=False,
+ legendgroup=name,
+ customdata=customdata
+ )
+ ]
+
+ if anomalies:
+ anomaly_x = list()
+ anomaly_y = list()
+ anomaly_color = list()
+ hover = list()
+ for idx, anomaly in enumerate(anomalies):
+ if anomaly in ("regression", "progression"):
+ anomaly_x.append(x_axis[idx])
+ anomaly_y.append(trend_avg[idx])
+ anomaly_color.append(C.ANOMALY_COLOR[anomaly])
+ hover_itm = (
+ f"date: {x_axis[idx].strftime('%Y-%m-%d %H:%M:%S')}<br>"
+ f"trend [pps]: {trend_avg[idx]:,.0f}<br>"
+ f"classification: {anomaly}"
+ )
+ if ttype == "pdr-lat":
+ hover_itm = hover_itm.replace("[pps]", "[us]")
+ hover.append(hover_itm)
+ anomaly_color.extend([0.0, 0.5, 1.0])
+ traces.append(
+ go.Scatter(
+ x=anomaly_x,
+ y=anomaly_y,
+ mode="markers",
+ text=hover,
+ hoverinfo="text+name",
+ showlegend=False,
+ legendgroup=name,
+ name=name,
+ customdata=customdata,
+ marker={
+ "size": 15,
+ "symbol": "circle-open",
+ "color": anomaly_color,
+ "colorscale": C.COLORSCALE_LAT \
+ if ttype == "pdr-lat" else C.COLORSCALE_TPUT,
+ "showscale": True,
+ "line": {
+ "width": 2
+ },
+ "colorbar": {
+ "y": 0.5,
+ "len": 0.8,
+ "title": "Circles Marking Data Classification",
+ "titleside": "right",
+ "tickmode": "array",
+ "tickvals": [0.167, 0.500, 0.833],
+ "ticktext": C.TICK_TEXT_LAT \
+ if ttype == "pdr-lat" else C.TICK_TEXT_TPUT,
+ "ticks": "",
+ "ticklen": 0,
+ "tickangle": -90,
+ "thickness": 10
+ }
}
- }
+ )
)
- )
- return traces
+ return traces
-def graph_trending(data: pd.DataFrame, sel:dict, layout: dict,
- normalize: bool) -> tuple:
- """Generate the trending graph(s) - MRR, NDR, PDR and for PDR also Latences
- (result_latency_forward_pdr_50_avg).
-
- :param data: Data frame with test results.
- :param sel: Selected tests.
- :param layout: Layout of plot.ly graph.
- :param normalize: If True, the data is normalized to CPU frquency
- Constants.NORM_FREQUENCY.
- :type data: pandas.DataFrame
- :type sel: dict
- :type layout: dict
- :type normalize: bool
- :returns: Trending graph(s)
- :rtype: tuple(plotly.graph_objects.Figure, plotly.graph_objects.Figure)
- """
-
- if not sel:
- return None, None
-
fig_tput = None
fig_lat = None
for idx, itm in enumerate(sel):
-
df = select_trending_data(data, itm)
if df is None or df.empty:
continue
@@ -393,3 +404,181 @@ def graph_hdrh_latency(data: dict, layout: dict) -> go.Figure:
fig.update_layout(layout_hdrh)
return fig
+
+
+def graph_tm_trending(data: pd.DataFrame, layout: dict) -> list:
+ """Generates one trending graph per test, each graph includes all selected
+ metrics.
+
+ :param data: Data frame with telemetry data.
+ :param layout: Layout of plot.ly graph.
+ :type data: pandas.DataFrame
+ :type layout: dict
+ :returns: List of generated graphs together with test names.
+ list(tuple(plotly.graph_objects.Figure(), str()), tuple(...), ...)
+ :rtype: list
+ """
+
+
+ def _generate_graph(
+ data: pd.DataFrame,
+ test: str,
+ layout: dict
+ ) -> go.Figure:
+ """Generates a trending graph for given test with all metrics.
+
+ :param data: Data frame with telemetry data for the given test.
+ :param test: The name of the test.
+ :param layout: Layout of plot.ly graph.
+ :type data: pandas.DataFrame
+ :type test: str
+ :type layout: dict
+ :returns: A trending graph.
+ :rtype: plotly.graph_objects.Figure
+ """
+ graph = None
+ traces = list()
+ for idx, metric in enumerate(data.tm_metric.unique()):
+ if "-pdr" in test and "='pdr'" not in metric:
+ continue
+ if "-ndr" in test and "='ndr'" not in metric:
+ continue
+
+ df = data.loc[(data["tm_metric"] == metric)]
+ x_axis = df["start_time"].tolist()
+ y_data = [float(itm) for itm in df["tm_value"].tolist()]
+ hover = list()
+ for i, (_, row) in enumerate(df.iterrows()):
+ hover.append(
+ f"date: "
+ f"{row['start_time'].strftime('%Y-%m-%d %H:%M:%S')}<br>"
+ f"value: {y_data[i]:,.0f}<br>"
+ f"{row['dut_type']}-ref: {row['dut_version']}<br>"
+ f"csit-ref: {row['job']}/{row['build']}<br>"
+ )
+ if any(y_data):
+ anomalies, trend_avg, trend_stdev = classify_anomalies(
+ {k: v for k, v in zip(x_axis, y_data)}
+ )
+ hover_trend = list()
+ for avg, stdev, (_, row) in \
+ zip(trend_avg, trend_stdev, df.iterrows()):
+ hover_trend.append(
+ f"date: "
+ f"{row['start_time'].strftime('%Y-%m-%d %H:%M:%S')}<br>"
+ f"trend: {avg:,.0f}<br>"
+ f"stdev: {stdev:,.0f}<br>"
+ f"{row['dut_type']}-ref: {row['dut_version']}<br>"
+ f"csit-ref: {row['job']}/{row['build']}"
+ )
+ else:
+ anomalies = None
+ color = get_color(idx)
+ traces.append(
+ go.Scatter( # Samples
+ x=x_axis,
+ y=y_data,
+ name=metric,
+ mode="markers",
+ marker={
+ "size": 5,
+ "color": color,
+ "symbol": "circle",
+ },
+ text=hover,
+ hoverinfo="text+name",
+ showlegend=True,
+ legendgroup=metric
+ )
+ )
+ if anomalies:
+ traces.append(
+ go.Scatter( # Trend line
+ x=x_axis,
+ y=trend_avg,
+ name=metric,
+ mode="lines",
+ line={
+ "shape": "linear",
+ "width": 1,
+ "color": color,
+ },
+ text=hover_trend,
+ hoverinfo="text+name",
+ showlegend=False,
+ legendgroup=metric
+ )
+ )
+
+ anomaly_x = list()
+ anomaly_y = list()
+ anomaly_color = list()
+ hover = list()
+ for idx, anomaly in enumerate(anomalies):
+ if anomaly in ("regression", "progression"):
+ anomaly_x.append(x_axis[idx])
+ anomaly_y.append(trend_avg[idx])
+ anomaly_color.append(C.ANOMALY_COLOR[anomaly])
+ hover_itm = (
+ f"date: {x_axis[idx].strftime('%Y-%m-%d %H:%M:%S')}"
+ f"<br>trend: {trend_avg[idx]:,.0f}"
+ f"<br>classification: {anomaly}"
+ )
+ hover.append(hover_itm)
+ anomaly_color.extend([0.0, 0.5, 1.0])
+ traces.append(
+ go.Scatter(
+ x=anomaly_x,
+ y=anomaly_y,
+ mode="markers",
+ text=hover,
+ hoverinfo="text+name",
+ showlegend=False,
+ legendgroup=metric,
+ name=metric,
+ marker={
+ "size": 15,
+ "symbol": "circle-open",
+ "color": anomaly_color,
+ "colorscale": C.COLORSCALE_TPUT,
+ "showscale": True,
+ "line": {
+ "width": 2
+ },
+ "colorbar": {
+ "y": 0.5,
+ "len": 0.8,
+ "title": "Circles Marking Data Classification",
+ "titleside": "right",
+ "tickmode": "array",
+ "tickvals": [0.167, 0.500, 0.833],
+ "ticktext": C.TICK_TEXT_TPUT,
+ "ticks": "",
+ "ticklen": 0,
+ "tickangle": -90,
+ "thickness": 10
+ }
+ }
+ )
+ )
+
+ if traces:
+ graph = go.Figure()
+ graph.add_traces(traces)
+ graph.update_layout(layout.get("plot-trending-telemetry", dict()))
+
+ return graph
+
+
+ tm_trending_graphs = list()
+
+ if data.empty:
+ return tm_trending_graphs
+
+ for test in data.test_name.unique():
+ df = data.loc[(data["test_name"] == test)]
+ graph = _generate_graph(df, test, layout)
+ if graph:
+ tm_trending_graphs.append((graph, test, ))
+
+ return tm_trending_graphs
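
Note: a hedged sketch of driving the new graph_tm_trending() in isolation. The frame
below is a hypothetical stand-in for the output of
TelemetryData.select_tm_trending_data(), and the import path is assumed from the
package layout:

    import pandas as pd
    from cdash.trending.graphs import graph_tm_trending  # import path assumed

    # Two samples of one metric for one test; all-zero values deliberately skip
    # the anomaly-classification branch so the sketch stays self-contained.
    tm_df = pd.DataFrame({
        "test_name": ["example-test-mrr"] * 2,               # hypothetical name
        "tm_metric": ["calls_total{name='ip4-lookup'}"] * 2, # hypothetical metric
        "tm_value": ["0", "0"],
        "start_time": pd.to_datetime(["2022-10-01", "2022-10-02"]),
        "dut_type": ["vpp"] * 2,
        "dut_version": ["22.10"] * 2,
        "job": ["example-job"] * 2,
        "build": ["1", "2"]
    })
    layout = {"plot-trending-telemetry": {"autosize": True}}

    # One (figure, test name) tuple per test; layout.py wraps each figure in an
    # accordion item titled "Telemetry: <test>".
    for figure, test in graph_tm_trending(tm_df, layout):
        print(test, len(figure.data))
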
diff --git a/csit.infra.dash/app/cdash/trending/layout.py b/csit.infra.dash/app/cdash/trending/layout.py
index 48b480193b..1866183da0 100644
--- a/csit.infra.dash/app/cdash/trending/layout.py
+++ b/csit.infra.dash/app/cdash/trending/layout.py
@@ -27,15 +27,18 @@ from dash import Input, Output, State
from dash.exceptions import PreventUpdate
from yaml import load, FullLoader, YAMLError
from ast import literal_eval
+from copy import deepcopy
from ..utils.constants import Constants as C
from ..utils.control_panel import ControlPanel
from ..utils.trigger import Trigger
+from ..utils.telemetry_data import TelemetryData
from ..utils.utils import show_tooltip, label, sync_checklists, gen_new_url, \
generate_options, get_list_group_items
from ..utils.url_processing import url_decode
from ..data.data import Data
-from .graphs import graph_trending, graph_hdrh_latency, select_trending_data
+from .graphs import graph_trending, graph_hdrh_latency, select_trending_data, \
+ graph_tm_trending
# Control panel parameters and their default values.
@@ -119,7 +122,11 @@ class Layout:
debug=True
).read_trending_ndrpdr(days=self._time_period)
- self._data = pd.concat([data_mrr, data_ndrpdr], ignore_index=True)
+ self._data = pd.concat(
+ [data_mrr, data_ndrpdr],
+ ignore_index=True,
+ copy=False
+ )
# Get structure of tests:
tbs = dict()
@@ -251,6 +258,8 @@ class Layout:
children=[
dcc.Store(id="store-selected-tests"),
dcc.Store(id="store-control-panel"),
+ dcc.Store(id="store-telemetry-data"),
+ dcc.Store(id="store-telemetry-user"),
dcc.Location(id="url", refresh=False),
dbc.Row(
id="row-navbar",
@@ -278,7 +287,8 @@ class Layout:
dbc.Row(id="metadata-tput-lat"),
dbc.Row(id="metadata-hdrh-graph")
]
- )
+ ),
+ delay_show=C.SPINNER_DELAY
)
]
)
@@ -333,30 +343,6 @@ class Layout:
)
])
- def _add_plotting_col(self) -> dbc.Col:
- """Add column with plots and tables. It is placed on the right side.
-
- :returns: Column with tables.
- :rtype: dbc.Col
- """
- return dbc.Col(
- id="col-plotting-area",
- children=[
- dbc.Spinner(
- children=[
- dbc.Row(
- id="plotting-area",
- class_name="g-0 p-0",
- children=[
- C.PLACEHOLDER
- ]
- )
- ]
- )
- ],
- width=9
- )
-
def _add_ctrl_panel(self) -> list:
"""Add control panel.
@@ -671,13 +657,167 @@ class Layout:
)
]
- def _get_plotting_area(
+ def _add_plotting_col(self) -> dbc.Col:
+ """Add column with plots. It is placed on the right side.
+
+ :returns: Column with plots.
+ :rtype: dbc.Col
+ """
+ return dbc.Col(
+ id="col-plotting-area",
+ children=[
+ dbc.Spinner(
+ dbc.Row(
+ id="plotting-area-trending",
+ class_name="g-0 p-0",
+ children=C.PLACEHOLDER
+ ),
+ delay_show=C.SPINNER_DELAY
+ ),
+ dbc.Row(
+ id="plotting-area-telemetry",
+ class_name="g-0 p-0",
+ children=C.PLACEHOLDER
+ ),
+ dbc.Row(
+ id="plotting-area-buttons",
+ class_name="g-0 p-0",
+ children=C.PLACEHOLDER
+ )
+ ],
+ width=9
+ )
+
+ def _get_plotting_area_buttons(self) -> dbc.Col:
+ """Add buttons and modals to the plotting area.
+
+ :returns: A column with buttons and modals for telemetry.
+ :rtype: dbc.Col
+ """
+ return dbc.Col([
+ html.Div(
+ [
+ dbc.Button(
+ id={"type": "telemetry-btn", "index": "open"},
+ children="Add Panel with Telemetry",
+ class_name="me-1",
+ color="info",
+ style={
+ "text-transform": "none",
+ "padding": "0rem 1rem"
+ }
+ ),
+ dbc.Modal(
+ [
+ dbc.ModalHeader(
+ dbc.ModalTitle(
+ "Select a Metric"
+ ),
+ close_button=False
+ ),
+ dbc.Spinner(
+ dbc.ModalBody(
+ id="plot-mod-telemetry-body-1",
+ children=self._get_telemetry_step_1()
+ ),
+ delay_show=2*C.SPINNER_DELAY
+ ),
+ dbc.ModalFooter([
+ dbc.Button(
+ "Select",
+ id={
+ "type": "telemetry-btn",
+ "index": "select"
+ },
+ disabled=True
+ ),
+ dbc.Button(
+ "Cancel",
+ id={
+ "type": "telemetry-btn",
+ "index": "cancel"
+ },
+ disabled=False
+ )
+ ])
+ ],
+ id="plot-mod-telemetry-1",
+ size="lg",
+ is_open=False,
+ scrollable=False,
+ backdrop="static",
+ keyboard=False
+ ),
+ dbc.Modal(
+ [
+ dbc.ModalHeader(
+ dbc.ModalTitle(
+ "Select Labels"
+ ),
+ close_button=False
+ ),
+ dbc.Spinner(
+ dbc.ModalBody(
+ id="plot-mod-telemetry-body-2",
+ children=self._get_telemetry_step_2()
+ ),
+ delay_show=2*C.SPINNER_DELAY
+ ),
+ dbc.ModalFooter([
+ dbc.Button(
+ "Back",
+ id={
+ "type": "telemetry-btn",
+ "index": "back"
+ },
+ disabled=False
+ ),
+ dbc.Button(
+ "Add Telemetry",
+ id={
+ "type": "telemetry-btn",
+ "index": "add"
+ },
+ disabled=True
+ ),
+ dbc.Button(
+ "Cancel",
+ id={
+ "type": "telemetry-btn",
+ "index": "cancel"
+ },
+ disabled=False
+ )
+ ])
+ ],
+ id="plot-mod-telemetry-2",
+ size="xl",
+ is_open=False,
+ scrollable=False,
+ backdrop="static",
+ keyboard=False
+ )
+ ],
+ className="d-grid gap-0 d-md-flex justify-content-md-end"
+ )
+ ])
+
+ def _get_plotting_area_trending(
self,
tests: list,
normalize: bool,
url: str
- ) -> list:
+ ) -> dbc.Col:
"""Generate the plotting area with all its content.
+
+ :param tests: A list of tests to be displayed in the trending graphs.
+ :param normalize: If True, the data in graphs is normalized.
+ :param url: A URL to be displayed in the modal window.
+ :type tests: list
+ :type normalize: bool
+ :type url: str
+ :returns: A column with trending graphs (tput and latency) in tabs.
+ :rtype: dbc.Col
"""
if not tests:
return C.PLACEHOLDER
@@ -711,13 +851,13 @@ class Layout:
)
trending = [
- dbc.Row(
- children=dbc.Tabs(
+ dbc.Row(children=[
+ dbc.Tabs(
children=tab_items,
id="tabs",
active_tab="tab-tput",
)
- ),
+ ]),
dbc.Row(
[
dbc.Col([html.Div(
@@ -762,12 +902,43 @@ class Layout:
)
]
- acc_items = [
- dbc.AccordionItem(
- title="Trending",
- children=trending
+ return dbc.Col(
+ children=[
+ dbc.Row(
+ dbc.Accordion(
+ children=[
+ dbc.AccordionItem(
+ title="Trending",
+ children=trending
+ )
+ ],
+ class_name="g-0 p-1",
+ start_collapsed=False,
+ always_open=True,
+ active_item=["item-0", ]
+ ),
+ class_name="g-0 p-0",
+ )
+ ]
+ )
+
+ def _get_plotting_area_telemetry(self, graphs: list) -> dbc.Col:
+ """Generate the plotting area with telemetry.
+ """
+ if not graphs:
+ return C.PLACEHOLDER
+
+ acc_items = list()
+ for graph in graphs:
+ acc_items.append(
+ dbc.AccordionItem(
+ title=f"Telemetry: {graph[1]}",
+ children=dcc.Graph(
+ id={"type": "graph-telemetry", "index": graph[1]},
+ figure=graph[0]
+ )
+ )
)
- ]
return dbc.Col(
children=[
@@ -780,45 +951,88 @@ class Layout:
active_item=[f"item-{i}" for i in range(len(acc_items))]
),
class_name="g-0 p-0",
- ),
- # dbc.Row(
- # dbc.Col([html.Div(
- # [
- # dbc.Button(
- # id="btn-add-telemetry",
- # children="Add Panel with Telemetry",
- # class_name="me-1",
- # color="info",
- # style={
- # "text-transform": "none",
- # "padding": "0rem 1rem"
- # }
- # )
- # ],
- # className=\
- # "d-grid gap-0 d-md-flex justify-content-md-end"
- # )]),
- # class_name="g-0 p-0"
- # )
+ )
]
)
+ @staticmethod
+ def _get_telemetry_step_1() -> list:
+ """Return the content of the modal window used in the step 1 of metrics
+ selection.
+
+ :returns: A list of dbc rows with 'input' and 'search output'.
+ :rtype: list
+ """
+ return [
+ dbc.Row(
+ class_name="g-0 p-1",
+ children=[
+ dbc.Input(
+ id="telemetry-search-in",
+ placeholder="Start typing a metric name...",
+ type="text"
+ )
+ ]
+ ),
+ dbc.Row(
+ class_name="g-0 p-1",
+ children=[
+ dbc.ListGroup(
+ class_name="overflow-auto p-0",
+ id="telemetry-search-out",
+ children=[],
+ style={"max-height": "14em"},
+ flush=True
+ )
+ ]
+ )
+ ]
+
+ @staticmethod
+ def _get_telemetry_step_2() -> list:
+ """Return the content of the modal window used in the step 2 of metrics
+ selection.
+
+ :returns: A list of dbc rows with 'container with dynamic dropdowns' and
+ 'search output'.
+ :rtype: list
+ """
+ return [
+ dbc.Row(
+ id="telemetry-dd",
+ class_name="g-0 p-1",
+ children=["Add content here."]
+ ),
+ dbc.Row(
+ class_name="g-0 p-1",
+ children=[
+ dbc.Textarea(
+ id="telemetry-list-metrics",
+ rows=20,
+ size="sm",
+ wrap="off",
+ readonly=True
+ )
+ ]
+ )
+ ]
+
def callbacks(self, app):
"""Callbacks for the whole application.
:param app: The application.
:type app: Flask
"""
-
+
@app.callback(
[
Output("store-control-panel", "data"),
Output("store-selected-tests", "data"),
- Output("plotting-area", "children"),
+ Output("plotting-area-trending", "children"),
+ Output("plotting-area-buttons", "children"),
Output("row-card-sel-tests", "style"),
Output("row-btns-sel-tests", "style"),
Output("lg-selected", "children"),
-
Output({"type": "ctrl-dd", "index": "dut"}, "value"),
Output({"type": "ctrl-dd", "index": "phy"}, "options"),
Output({"type": "ctrl-dd", "index": "phy"}, "disabled"),
@@ -852,11 +1066,11 @@ class Layout:
[
Input("url", "href"),
Input("normalize", "value"),
-
Input({"type": "ctrl-dd", "index": ALL}, "value"),
Input({"type": "ctrl-cl", "index": ALL}, "value"),
Input({"type": "ctrl-btn", "index": ALL}, "n_clicks")
- ]
+ ],
+ prevent_initial_call=True
)
def _update_application(
control_panel: dict,
@@ -879,11 +1093,6 @@ class Layout:
else:
url_params = None
- plotting_area = no_update
- row_card_sel_tests = no_update
- row_btns_sel_tests = no_update
- lg_selected = no_update
-
trigger = Trigger(callback_context.triggered)
if trigger.type == "url" and url_params:
@@ -1124,11 +1333,11 @@ class Layout:
store_sel = new_store_sel
elif trigger.idx == "rm-test-all":
store_sel = list()
-
+
if on_draw:
if store_sel:
- lg_selected = get_list_group_items(store_sel)
- plotting_area = self._get_plotting_area(
+ lg_selected = get_list_group_items(store_sel, "sel-cl")
+ plotting_area_trending = self._get_plotting_area_trending(
store_sel,
bool(normalize),
gen_new_url(
@@ -1136,18 +1345,28 @@ class Layout:
{"store_sel": store_sel, "norm": normalize}
)
)
+ plotting_area_buttons = self._get_plotting_area_buttons()
row_card_sel_tests = C.STYLE_ENABLED
row_btns_sel_tests = C.STYLE_ENABLED
else:
- plotting_area = C.PLACEHOLDER
+ plotting_area_trending = C.PLACEHOLDER
+ plotting_area_buttons = C.PLACEHOLDER
row_card_sel_tests = C.STYLE_DISABLED
row_btns_sel_tests = C.STYLE_DISABLED
+ lg_selected = no_update
store_sel = list()
+ else:
+ plotting_area_trending = no_update
+ plotting_area_buttons = no_update
+ row_card_sel_tests = no_update
+ row_btns_sel_tests = no_update
+ lg_selected = no_update
ret_val = [
ctrl_panel.panel,
store_sel,
- plotting_area,
+ plotting_area_trending,
+ plotting_area_buttons,
row_card_sel_tests,
row_btns_sel_tests,
lg_selected
@@ -1157,8 +1376,8 @@ class Layout:
@app.callback(
Output("plot-mod-url", "is_open"),
- [Input("plot-btn-url", "n_clicks")],
- [State("plot-mod-url", "is_open")],
+ Input("plot-btn-url", "n_clicks"),
+ State("plot-mod-url", "is_open")
)
def toggle_plot_mod_url(n, is_open):
"""Toggle the modal window with url.
@@ -1168,6 +1387,289 @@ class Layout:
return is_open
@app.callback(
+ Output("store-telemetry-data", "data"),
+ Output("store-telemetry-user", "data"),
+ Output("telemetry-search-in", "value"),
+ Output("telemetry-search-out", "children"),
+ Output("telemetry-list-metrics", "value"),
+ Output("telemetry-dd", "children"),
+ Output("plotting-area-telemetry", "children"),
+ Output("plot-mod-telemetry-1", "is_open"),
+ Output("plot-mod-telemetry-2", "is_open"),
+ Output({"type": "telemetry-btn", "index": "select"}, "disabled"),
+ Output({"type": "telemetry-btn", "index": "add"}, "disabled"),
+ State("store-telemetry-data", "data"),
+ State("store-telemetry-user", "data"),
+ State("store-selected-tests", "data"),
+ Input({"type": "tele-cl", "index": ALL}, "value"),
+ Input("telemetry-search-in", "value"),
+ Input({"type": "telemetry-btn", "index": ALL}, "n_clicks"),
+ Input({"type": "tm-dd", "index": ALL}, "value"),
+ prevent_initial_call=True
+ )
+ def _update_plot_mod_telemetry(
+ tm_data: dict,
+ tm_user: dict,
+ store_sel: list,
+ cl_metrics: list,
+ search_in: str,
+ n_clicks: list,
+ tm_dd_in: list
+ ) -> tuple:
+ """Toggle the modal window with telemetry.
+ """
+
+ if not any(n_clicks):
+ raise PreventUpdate
+
+ if tm_user is None:
+ # Telemetry user data
+ # The data provided by user or result of user action
+ tm_user = {
+ # List of unique metrics:
+ "unique_metrics": list(),
+ # List of metrics selected by user:
+ "selected_metrics": list(),
+ # Labels from metrics selected by user (key: label name,
+ # value: list of all possible values):
+ "unique_labels": dict(),
+ # Labels selected by the user (subset of 'unique_labels'):
+ "selected_labels": dict(),
+ # All unique metrics with labels (output from the step 1)
+ # converted from pandas dataframe to dictionary.
+ "unique_metrics_with_labels": dict(),
+ # Metrics with labels selected by the user using dropdowns.
+ "selected_metrics_with_labels": dict()
+ }
+
+ tm = TelemetryData(tests=store_sel)
+ tm_json = no_update
+ search_out = no_update
+ list_metrics = no_update
+ tm_dd = no_update
+ plotting_area_telemetry = no_update
+ is_open = (False, False)
+ is_btn_disabled = (True, True)
+
+ trigger = Trigger(callback_context.triggered)
+ if trigger.type == "telemetry-btn":
+ if trigger.idx in ("open", "back"):
+ tm.from_dataframe(self._data)
+ tm_json = tm.to_json()
+ tm_user["unique_metrics"] = tm.unique_metrics
+ tm_user["selected_metrics"] = list()
+ tm_user["unique_labels"] = dict()
+ tm_user["selected_labels"] = dict()
+ search_in = str()
+ search_out = get_list_group_items(
+ tm_user["unique_metrics"],
+ "tele-cl",
+ False
+ )
+ is_open = (True, False)
+ elif trigger.idx == "select":
+ tm.from_json(tm_data)
+ if any(cl_metrics):
+ if not tm_user["selected_metrics"]:
+ tm_user["selected_metrics"] = \
+ tm_user["unique_metrics"]
+ metrics = [a for a, b in \
+ zip(tm_user["selected_metrics"], cl_metrics) if b]
+ tm_user["selected_metrics"] = metrics
+ tm_user["unique_labels"] = \
+ tm.get_selected_labels(metrics)
+ tm_user["unique_metrics_with_labels"] = \
+ tm.unique_metrics_with_labels
+ list_metrics = tm.str_metrics
+ tm_dd = _get_dd_container(tm_user["unique_labels"])
+ if list_metrics:
+ is_btn_disabled = (True, False)
+ is_open = (False, True)
+ else:
+ tm_user = None
+ is_open = (False, False)
+ elif trigger.idx == "add":
+ tm.from_json(tm_data)
+ plotting_area_telemetry = self._get_plotting_area_telemetry(
+ graph_tm_trending(
+ tm.select_tm_trending_data(
+ tm_user["selected_metrics_with_labels"]
+ ),
+ self._graph_layout)
+ )
+ tm_user = None
+ is_open = (False, False)
+ elif trigger.idx == "cancel":
+ tm_user = None
+ is_open = (False, False)
+ elif trigger.type == "telemetry-search-in":
+ tm.from_metrics(tm_user["unique_metrics"])
+ tm_user["selected_metrics"] = \
+ tm.search_unique_metrics(search_in)
+ search_out = get_list_group_items(
+ tm_user["selected_metrics"],
+ type="tele-cl",
+ colorize=False
+ )
+ is_open = (True, False)
+ elif trigger.type == "tele-cl":
+ if any(cl_metrics):
+ is_btn_disabled = (False, True)
+ is_open = (True, False)
+ elif trigger.type == "tm-dd":
+ tm.from_metrics_with_labels(
+ tm_user["unique_metrics_with_labels"]
+ )
+ selected = dict()
+ previous_itm = None
+ for itm in tm_dd_in:
+ if itm is None:
+ show_new = True
+ elif isinstance(itm, str):
+ show_new = False
+ selected[itm] = list()
+ elif isinstance(itm, list):
+ if previous_itm is not None:
+ selected[previous_itm] = itm
+ show_new = True
+ previous_itm = itm
+
+ tm_dd = _get_dd_container(
+ tm_user["unique_labels"],
+ selected,
+ show_new
+ )
+ sel_metrics = tm.filter_selected_metrics_by_labels(selected)
+ tm_user["selected_metrics_with_labels"] = sel_metrics.to_dict()
+ if not sel_metrics.empty:
+ list_metrics = tm.metrics_to_str(sel_metrics)
+ else:
+ list_metrics = str()
+ if list_metrics:
+ is_btn_disabled = (True, False)
+ is_open = (False, True)
+
+ # Return values:
+ ret_val = [
+ tm_json,
+ tm_user,
+ search_in,
+ search_out,
+ list_metrics,
+ tm_dd,
+ plotting_area_telemetry
+ ]
+ ret_val.extend(is_open)
+ ret_val.extend(is_btn_disabled)
+ return ret_val
+
+ def _get_dd_container(
+ all_labels: dict,
+ selected_labels: dict=dict(),
+ show_new=True
+ ) -> list:
+ """Generate a container with dropdown selection boxes depenting on
+ the input data.
+
+ :param all_labels: A dictionary with unique labels and their
+ possible values.
+ :param selected_labels: A dictionary with user selected labels and
+ their values.
+ :param show_new: If True, a dropdown selection box to add a new
+ label is displayed.
+ :type all_labels: dict
+ :type selected_labels: dict
+ :type show_new: bool
+ :returns: A list of dbc rows with dropdown selection boxes.
+ :rtype: list
+ """
+
+ def _row(
+ id: str,
+ lopts: list=list(),
+ lval: str=str(),
+ vopts: list=list(),
+ vvals: list=list()
+ ) -> dbc.Row:
+ """Generates a dbc row with dropdown boxes.
+
+ :param id: A string added to the dropdown ID.
+ :param lopts: A list of options for 'label' dropdown.
+ :param lval: Value of 'label' dropdown.
+ :param vopts: A list of options for 'value' dropdown.
+ :param vvals: A list of values for 'value' dropdown.
+ :type id: str
+ :type lopts: list
+ :type lval: str
+ :type vopts: list
+ :type vvals: list
+ :returns: dbc row with dropdown boxes.
+ :rtype: dbc.Row
+ """
+ children = list()
+ if lopts:
+ children.append(
+ dbc.Col(
+ width=6,
+ children=[
+ dcc.Dropdown(
+ id={
+ "type": "tm-dd",
+ "index": f"label-{id}"
+ },
+ placeholder="Select a label...",
+ optionHeight=20,
+ multi=False,
+ options=lopts,
+ value=lval if lval else None
+ )
+ ]
+ )
+ )
+ if vopts:
+ children.append(
+ dbc.Col(
+ width=6,
+ children=[
+ dcc.Dropdown(
+ id={
+ "type": "tm-dd",
+ "index": f"value-{id}"
+ },
+ placeholder="Select a value...",
+ optionHeight=20,
+ multi=True,
+ options=vopts,
+ value=vvals if vvals else None
+ )
+ ]
+ )
+ )
+
+ return dbc.Row(class_name="g-0 p-1", children=children)
+
+ container = list()
+
+ # Display rows with items in 'selected_labels'; label on the left,
+ # values on the right:
+ keys_left = list(all_labels.keys())
+ for idx, label in enumerate(selected_labels.keys()):
+ container.append(_row(
+ id=idx,
+ lopts=deepcopy(keys_left),
+ lval=label,
+ vopts=all_labels[label],
+ vvals=selected_labels[label]
+ ))
+ keys_left.remove(label)
+
+ # Display row with dd with labels on the left, right side is empty:
+ if show_new and keys_left:
+ container.append(_row(id="new", lopts=keys_left))
+
+ return container
+
+ @app.callback(
Output("metadata-tput-lat", "children"),
Output("metadata-hdrh-graph", "children"),
Output("offcanvas-metadata", "is_open"),
@@ -1253,7 +1755,7 @@ class Layout:
Input("plot-btn-download", "n_clicks"),
prevent_initial_call=True
)
- def _download_trending_data(store_sel, _):
+ def _download_trending_data(store_sel: list, _) -> dict:
"""Download the data
:param store_sel: List of tests selected by user stored in the
@@ -1272,6 +1774,6 @@ class Layout:
sel_data = select_trending_data(self._data, itm)
if sel_data is None:
continue
- df = pd.concat([df, sel_data], ignore_index=True)
+ df = pd.concat([df, sel_data], ignore_index=True, copy=False)
return dcc.send_data_frame(df.to_csv, C.TREND_DOWNLOAD_FILE_NAME)
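
Note: the telemetry callback above is keyed off Dash pattern-matching component IDs
such as {"type": "telemetry-btn", "index": "open"}. A small sketch, under the
assumption that utils/trigger.py simply splits the fired property from
callback_context.triggered and JSON-decodes the component id:

    from json import loads

    # Shape of dash.callback_context.triggered when the "open" telemetry button
    # fires; the value is illustrative.
    triggered = [{
        "prop_id": '{"index":"open","type":"telemetry-btn"}.n_clicks',
        "value": 1
    }]

    # Split the component id from the property name and decode it from JSON.
    prop_id, _, prop = triggered[0]["prop_id"].rpartition(".")
    component_id = loads(prop_id)

    print(component_id["type"])   # "telemetry-btn" -> exposed as trigger.type
    print(component_id["index"])  # "open"          -> exposed as trigger.idx
    print(prop)                   # "n_clicks"
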
diff --git a/csit.infra.dash/app/cdash/trending/layout.yaml b/csit.infra.dash/app/cdash/trending/layout.yaml
index 0eada51fe3..bc11dde61f 100644
--- a/csit.infra.dash/app/cdash/trending/layout.yaml
+++ b/csit.infra.dash/app/cdash/trending/layout.yaml
@@ -115,3 +115,48 @@ plot-hdrh-latency:
autosize: True
paper_bgcolor: "white"
plot_bgcolor: "white"
+
+plot-trending-telemetry:
+ autosize: True
+ showlegend: True
+ yaxis:
+ showticklabels: True
+ tickformat: ".3s"
+ title: "Metric"
+ hoverformat: ".5s"
+ gridcolor: "rgb(238, 238, 238)"
+ linecolor: "rgb(238, 238, 238)"
+ showline: True
+ zeroline: False
+ tickcolor: "rgb(238, 238, 238)"
+ linewidth: 1
+ showgrid: True
+ xaxis:
+ title: 'Date [MMDD]'
+ type: "date"
+ autorange: True
+ fixedrange: False
+ showgrid: True
+ gridcolor: "rgb(238, 238, 238)"
+ showline: True
+ linecolor: "rgb(238, 238, 238)"
+ zeroline: False
+ linewidth: 1
+ showticklabels: True
+ tickcolor: "rgb(238, 238, 238)"
+ tickmode: "auto"
+ tickformat: "%m%d"
+ margin:
+ r: 20
+ b: 0
+ t: 5
+ l: 70
+ paper_bgcolor: "#fff"
+ plot_bgcolor: "#fff"
+ hoverlabel:
+ namelength: 50
+ legend:
+ orientation: "h"
+ y: -0.2
+ font:
+ size: 12
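
Note: a minimal sketch of how a named block such as plot-trending-telemetry is meant
to reach a figure; the YAML path is assumed relative to the repository root, and
layout.py is assumed to load the file once at start-up into self._graph_layout:

    import plotly.graph_objects as go
    from yaml import load, FullLoader

    with open("csit.infra.dash/app/cdash/trending/layout.yaml", "rt") as f:
        graph_layout = load(f, Loader=FullLoader)

    # graphs.py applies exactly one named block per generated figure.
    fig = go.Figure(go.Scatter(x=[1, 2, 3], y=[3, 1, 2]))
    fig.update_layout(graph_layout.get("plot-trending-telemetry", dict()))
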
diff --git a/csit.infra.dash/app/cdash/utils/constants.py b/csit.infra.dash/app/cdash/utils/constants.py
index 135f06f4d4..95acc07c47 100644
--- a/csit.infra.dash/app/cdash/utils/constants.py
+++ b/csit.infra.dash/app/cdash/utils/constants.py
@@ -63,7 +63,7 @@ class Constants:
# Maximal value of TIME_PERIOD for data read from the parquets in days.
# Do not change without a good reason.
- MAX_TIME_PERIOD = 180
+ MAX_TIME_PERIOD = 150 # 180
# It defines the time period for data read from the parquets in days from
# now back to the past.
@@ -79,6 +79,9 @@ class Constants:
############################################################################
# General, application wide, layout affecting constants.
+ # Add a time delay (in ms) to the spinner being shown
+ SPINNER_DELAY = 500
+
# If True, clear all inputs in control panel when button "ADD SELECTED" is
# pressed.
CLEAR_ALL_INPUTS = False
diff --git a/csit.infra.dash/app/cdash/utils/control_panel.py b/csit.infra.dash/app/cdash/utils/control_panel.py
index 723f404313..a81495e30c 100644
--- a/csit.infra.dash/app/cdash/utils/control_panel.py
+++ b/csit.infra.dash/app/cdash/utils/control_panel.py
@@ -15,7 +15,7 @@
"""
from copy import deepcopy
-
+from typing import Any
class ControlPanel:
"""A class representing the control panel.
@@ -74,7 +74,7 @@ class ControlPanel:
else:
raise KeyError(f"The key {key} is not defined.")
- def get(self, key: str) -> any:
+ def get(self, key: str) -> Any:
"""Returns the value of a key from the Control panel.
:param key: The key which value should be returned.
diff --git a/csit.infra.dash/app/cdash/utils/telemetry_data.py b/csit.infra.dash/app/cdash/utils/telemetry_data.py
new file mode 100644
index 0000000000..e88b8eed06
--- /dev/null
+++ b/csit.infra.dash/app/cdash/utils/telemetry_data.py
@@ -0,0 +1,330 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A module implementing the parsing of OpenMetrics data and elementary
+operations with it.
+"""
+
+
+import pandas as pd
+
+from ..trending.graphs import select_trending_data
+
+
+class TelemetryData:
+ """A class to store and manipulate the telemetry data.
+ """
+
+ def __init__(self, tests: list=list()) -> None:
+ """Initialize the object.
+
+ :param tests: List of selected tests.
+ :type tests: list
+ """
+
+ self._tests = tests
+ self._data = None
+ self._unique_metrics = list()
+ self._unique_metrics_labels = pd.DataFrame()
+ self._selected_metrics_labels = pd.DataFrame()
+
+ def from_dataframe(self, in_data: pd.DataFrame=pd.DataFrame()) -> None:
+ """Read the input from pandas DataFrame.
+
+ This method must be called at the beginning to create all data structures.
+ """
+
+ if in_data.empty:
+ return
+
+ df = pd.DataFrame()
+ metrics = set() # A set of unique metrics
+
+ # Create a dataframe with metrics for selected tests:
+ for itm in self._tests:
+ sel_data = select_trending_data(in_data, itm)
+ if sel_data is not None:
+ sel_data["test_name"] = itm["id"]
+ df = pd.concat([df, sel_data], ignore_index=True, copy=False)
+ # Use only necessary data:
+ df = df[[
+ "job",
+ "build",
+ "dut_type",
+ "dut_version",
+ "start_time",
+ "passed",
+ "test_name",
+ "test_type",
+ "result_receive_rate_rate_avg",
+ "result_receive_rate_rate_stdev",
+ "result_receive_rate_rate_unit",
+ "result_pdr_lower_rate_value",
+ "result_pdr_lower_rate_unit",
+ "result_ndr_lower_rate_value",
+ "result_ndr_lower_rate_unit",
+ "telemetry"
+ ]]
+ # Transform metrics from strings to dataframes:
+ lst_telemetry = list()
+ for _, row in df.iterrows():
+ d_telemetry = {
+ "metric": list(),
+ "labels": list(), # list of tuple(label, value)
+ "value": list(),
+ "timestamp": list()
+ }
+ if row["telemetry"] is not None and \
+ not isinstance(row["telemetry"], float):
+ for itm in row["telemetry"]:
+ itm_lst = itm.replace("'", "").rsplit(" ", maxsplit=2)
+ metric, labels = itm_lst[0].split("{")
+ d_telemetry["metric"].append(metric)
+ d_telemetry["labels"].append(
+ [tuple(x.split("=")) for x in labels[:-1].split(",")]
+ )
+ d_telemetry["value"].append(itm_lst[1])
+ d_telemetry["timestamp"].append(itm_lst[2])
+ metrics.update(d_telemetry["metric"])
+ lst_telemetry.append(pd.DataFrame(data=d_telemetry))
+ df["telemetry"] = lst_telemetry
+
+ self._data = df
+ self._unique_metrics = sorted(metrics)
+
+ def from_json(self, in_data: dict) -> None:
+ """Read the input data from json.
+ """
+
+ df = pd.read_json(in_data)
+ lst_telemetry = list()
+ metrics = set() # A set of unique metrics
+ for _, row in df.iterrows():
+ telemetry = pd.DataFrame(row["telemetry"])
+ lst_telemetry.append(telemetry)
+ metrics.update(telemetry["metric"].to_list())
+ df["telemetry"] = lst_telemetry
+
+ self._data = df
+ self._unique_metrics = sorted(metrics)
+
+ def from_metrics(self, in_data: set) -> None:
+ """Read only the metrics.
+ """
+ self._unique_metrics = in_data
+
+ def from_metrics_with_labels(self, in_data: dict) -> None:
+ """Read only metrics with labels.
+ """
+ self._unique_metrics_labels = pd.DataFrame.from_dict(in_data)
+
+ def to_json(self) -> str:
+ """Return the data transformed from dataframe to json.
+
+ :returns: Telemetry data transformed to a json structure.
+ :rtype: str
+ """
+ return self._data.to_json()
+
+ @property
+ def unique_metrics(self) -> list:
+ """Return a set of unique metrics.
+
+ :returns: A set of unique metrics.
+ :rtype: set
+ """
+ return self._unique_metrics
+
+ @property
+ def unique_metrics_with_labels(self) -> dict:
+ """
+ """
+ return self._unique_metrics_labels.to_dict()
+
+ def get_selected_labels(self, metrics: list) -> dict:
+ """Return a dictionary with labels (keys) and all their possible values
+ (values) for all selected 'metrics'.
+
+ :param metrics: List of metrics we are interested in.
+ :type metrics: list
+ :returns: A dictionary with labels and all their possible values.
+ :rtype: dict
+ """
+
+ df_labels = pd.DataFrame()
+ tmp_labels = dict()
+ for _, row in self._data.iterrows():
+ telemetry = row["telemetry"]
+ for itm in metrics:
+ df = telemetry.loc[(telemetry["metric"] == itm)]
+ df_labels = pd.concat(
+ [df_labels, df],
+ ignore_index=True,
+ copy=False
+ )
+ for _, tm in df.iterrows():
+ for label in tm["labels"]:
+ if label[0] not in tmp_labels:
+ tmp_labels[label[0]] = set()
+ tmp_labels[label[0]].add(label[1])
+
+ selected_labels = dict()
+ for key in sorted(tmp_labels):
+ selected_labels[key] = sorted(tmp_labels[key])
+
+ self._unique_metrics_labels = df_labels[["metric", "labels"]].\
+ loc[df_labels[["metric", "labels"]].astype(str).\
+ drop_duplicates().index]
+
+ return selected_labels
+
+ @property
+ def str_metrics(self) -> str:
+ """Returns all unique metrics as a string.
+ """
+ return TelemetryData.metrics_to_str(self._unique_metrics_labels)
+
+ @staticmethod
+ def metrics_to_str(in_data: pd.DataFrame) -> str:
+ """Convert metrics from pandas dataframe to string. Metrics in string
+ are separated by '\n'.
+
+ :param in_data: Metrics to be converted to a string.
+ :type in_data: pandas.DataFrame
+ :returns: Metrics as a string.
+ :rtype: str
+ """
+ metrics = str()
+ for _, row in in_data.iterrows():
+ labels = ','.join([f"{itm[0]}='{itm[1]}'" for itm in row["labels"]])
+ metrics += f"{row['metric']}{{{labels}}}\n"
+ return metrics[:-1]
+
+ def search_unique_metrics(self, string: str) -> list:
+ """Return a list of metrics which name includes the given string.
+
+ :param string: A string which must be in the name of metric.
+ :type string: str
+ :returns: A list of metrics which name includes the given string.
+ :rtype: list
+ """
+ return [itm for itm in self._unique_metrics if string in itm]
+
+ def filter_selected_metrics_by_labels(
+ self,
+ selection: dict
+ ) -> pd.DataFrame:
+ """Filter selected unique metrics by labels and their values.
+
+ :param selection: Labels and their values specified by the user.
+ :type selection: dict
+ :returns: Pandas dataframe with filtered metrics.
+ :rtype: pandas.DataFrame
+ """
+
+ def _is_selected(labels: list, sel: dict) -> bool:
+ """Check if the provided 'labels' are selected by the user.
+
+ :param labels: List of labels and their values from a metric. The
+ items in this list are two-item lists where the first item is
+ the label and the second one is its value.
+ :param sel: User selection. The keys are the selected labels and the
+ values are lists with label values.
+ :type labels: list
+ :type sel: dict
+ :returns: True if the 'labels' are selected by the user.
+ :rtype: bool
+ """
+ passed = list()
+ labels = dict(labels)
+ for key in sel.keys():
+ if key in list(labels.keys()):
+ if sel[key]:
+ passed.append(labels[key] in sel[key])
+ else:
+ passed.append(True)
+ else:
+ passed.append(False)
+ return bool(passed and all(passed))
+
+ self._selected_metrics_labels = pd.DataFrame()
+ for _, row in self._unique_metrics_labels.iterrows():
+ if _is_selected(row["labels"], selection):
+ self._selected_metrics_labels = pd.concat(
+ [self._selected_metrics_labels, row.to_frame().T],
+ ignore_index=True,
+ axis=0,
+ copy=False
+ )
+ return self._selected_metrics_labels
+
+ def select_tm_trending_data(self, selection: dict) -> pd.DataFrame:
+ """Select telemetry data for trending based on user's 'selection'.
+
+ The output dataframe includes these columns:
+ - "job",
+ - "build",
+ - "dut_type",
+ - "dut_version",
+ - "start_time",
+ - "passed",
+ - "test_name",
+ - "test_id",
+ - "test_type",
+ - "result_receive_rate_rate_avg",
+ - "result_receive_rate_rate_stdev",
+ - "result_receive_rate_rate_unit",
+ - "result_pdr_lower_rate_value",
+ - "result_pdr_lower_rate_unit",
+ - "result_ndr_lower_rate_value",
+ - "result_ndr_lower_rate_unit",
+ - "tm_metric",
+ - "tm_value".
+
+ :param selection: User's selection (metrics and labels).
+ :type selection: dict
+ :returns: Dataframe with selected data.
+ :rtype: pandas.DataFrame
+ """
+
+ df = pd.DataFrame()
+
+ if self._data is None:
+ return df
+ if self._data.empty:
+ return df
+ if not selection:
+ return df
+
+ df_sel = pd.DataFrame.from_dict(selection)
+ for _, row in self._data.iterrows():
+ tm_row = row["telemetry"]
+ for _, tm_sel in df_sel.iterrows():
+ df_tmp = tm_row.loc[tm_row["metric"] == tm_sel["metric"]]
+ for _, tm in df_tmp.iterrows():
+ if tm["labels"] == tm_sel["labels"]:
+ labels = ','.join(
+ [f"{itm[0]}='{itm[1]}'" for itm in tm["labels"]]
+ )
+ row["tm_metric"] = f"{tm['metric']}{{{labels}}}"
+ row["tm_value"] = tm["value"]
+ new_row = row.drop(labels=["telemetry", ])
+ df = pd.concat(
+ [df, new_row.to_frame().T],
+ ignore_index=True,
+ axis=0,
+ copy=False
+ )
+ return df
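
Note: a worked example of the string shape from_dataframe() expects in every entry of
the "telemetry" column, mirroring the parsing code above; the metric name and labels
are illustrative only:

    # One raw telemetry entry: metric{labels} value timestamp
    line = "calls_total{name='ip4-lookup',thread_name='vpp_wk_0'} 12345 1666680288"

    # The same steps the class applies to each entry.
    itm_lst = line.replace("'", "").rsplit(" ", maxsplit=2)
    metric, labels = itm_lst[0].split("{")

    parsed = {
        "metric": metric,                                  # "calls_total"
        "labels": [tuple(x.split("=")) for x in labels[:-1].split(",")],
        "value": itm_lst[1],                               # "12345"
        "timestamp": itm_lst[2]                            # "1666680288"
    }
    # parsed["labels"] == [("name", "ip4-lookup"), ("thread_name", "vpp_wk_0")]
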
diff --git a/csit.infra.dash/app/cdash/utils/trigger.py b/csit.infra.dash/app/cdash/utils/trigger.py
index 60ef9a3f91..ac303b6b0b 100644
--- a/csit.infra.dash/app/cdash/utils/trigger.py
+++ b/csit.infra.dash/app/cdash/utils/trigger.py
@@ -14,6 +14,8 @@
"""A module implementing the processing of a trigger.
"""
+from typing import Any
+
from json import loads, JSONDecodeError
@@ -51,7 +53,7 @@ class Trigger:
return self._id["type"]
@property
- def idx(self) -> any:
+ def idx(self) -> Any:
return self._id["index"]
@property
@@ -59,5 +61,5 @@ class Trigger:
return self._param
@property
- def value(self) -> any:
+ def value(self) -> Any:
return self._val
diff --git a/csit.infra.dash/app/cdash/utils/url_processing.py b/csit.infra.dash/app/cdash/utils/url_processing.py
index 7f0121ef34..c90c54c41f 100644
--- a/csit.infra.dash/app/cdash/utils/url_processing.py
+++ b/csit.infra.dash/app/cdash/utils/url_processing.py
@@ -69,7 +69,7 @@ def url_decode(url: str) -> dict:
parsed_url = urlparse(url)
except ValueError as err:
logging.warning(f"\nThe url {url} is not valid, ignoring.\n{repr(err)}")
- return None
+ return dict()
if parsed_url.fragment:
try:
@@ -85,7 +85,7 @@ def url_decode(url: str) -> dict:
f"\nEncoded parameters: '{parsed_url.fragment}'"
f"\n{repr(err)}"
)
- return None
+ return dict()
else:
params = None
diff --git a/csit.infra.dash/app/cdash/utils/utils.py b/csit.infra.dash/app/cdash/utils/utils.py
index 8584dee067..63e13ce141 100644
--- a/csit.infra.dash/app/cdash/utils/utils.py
+++ b/csit.infra.dash/app/cdash/utils/utils.py
@@ -346,29 +346,34 @@ def set_job_params(df: pd.DataFrame, job: str) -> dict:
}
-def get_list_group_items(tests: list) -> list:
- """Generate list of ListGroupItems with checkboxes with selected tests.
-
- :param tests: List of tests to be displayed in the ListGroup.
- :type tests: list
- :returns: List of ListGroupItems with checkboxes with selected tests.
+def get_list_group_items(items: list, type: str, colorize: bool=True) -> list:
+ """Generate list of ListGroupItems with checkboxes with selected items.
+
+ :param items: List of items to be displayed in the ListGroup.
+ :param type: The type part of an element ID.
+ :param colorize: If True, the color of labels is set, otherwise the default
+ color is used.
+ :type items: list
+ :type type: str
+ :type colorize: bool
+ :returns: List of ListGroupItems with checkboxes with selected items.
:rtype: list
"""
return [
dbc.ListGroupItem(
children=[
dbc.Checkbox(
- id={"type": "sel-cl", "index": i},
- label=l["id"],
+ id={"type": type, "index": i},
+ label=l["id"] if isinstance(l, dict) else l,
value=False,
label_class_name="m-0 p-0",
label_style={
"font-size": ".875em",
- "color": get_color(i)
+ "color": get_color(i) if colorize else "#55595c"
},
class_name="info"
)
],
class_name="p-0"
- ) for i, l in enumerate(tests)
+ ) for i, l in enumerate(items)
]
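
Note: a hedged usage sketch of the generalized helper, matching the two call sites in
trending/layout.py; the item values are placeholders and the import path is assumed:

    from cdash.utils.utils import get_list_group_items  # import path assumed

    # Selected tests are dicts with an "id" key and get per-item colors.
    selected_tests = [{"id": "placeholder-test-name-mrr"}]
    lg_tests = get_list_group_items(selected_tests, "sel-cl")

    # Telemetry metric names are plain strings and keep the default label color.
    metrics = ["calls_total", "vectors_total"]
    lg_metrics = get_list_group_items(metrics, "tele-cl", colorize=False)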