author     Tibor Frank <tifrank@cisco.com>  2022-05-17 14:30:37 +0200
committer  Tibor Frank <tifrank@cisco.com>  2022-05-19 11:58:43 +0000
commit     3343fe81729eb4005319ca15b1e6881630d38c5b (patch)
tree       fd172c3ba1146d5ddffb3ced53e4bed0e7ad2a1c /resources
parent     099b961a0aa234f870ff60e36492e324bb2abe11 (diff)
feat(uti): Add iterative data
Change-Id: Iaa7253b377f019235289f6bbf48eafd850a2dfc8
Signed-off-by: Tibor Frank <tifrank@cisco.com>
Diffstat (limited to 'resources')
-rw-r--r--  resources/tools/dash/app/pal/__init__.py                        7
-rw-r--r--  resources/tools/dash/app/pal/data/data.py                       36
-rw-r--r--  resources/tools/dash/app/pal/data/data.yaml                     88
-rw-r--r--  resources/tools/dash/app/pal/data/tooltips.yaml                  4
-rw-r--r--  resources/tools/dash/app/pal/report/data.py                    268
-rw-r--r--  resources/tools/dash/app/pal/report/graphs.py                  224
-rw-r--r--  resources/tools/dash/app/pal/report/layout.py                 1485
-rw-r--r--  resources/tools/dash/app/pal/report/layout.yaml                150
-rw-r--r--  resources/tools/dash/app/pal/report/report.py                   89
-rw-r--r--  resources/tools/dash/app/pal/stats/layout.py                     3
-rw-r--r--  resources/tools/dash/app/pal/stats/stats.py                      1
-rw-r--r--  resources/tools/dash/app/pal/templates/report_layout.jinja2     17
-rw-r--r--  resources/tools/dash/app/pal/trending/layout.py                 84
-rw-r--r--  resources/tools/dash/app/pal/trending/trending.py                1
14 files changed, 1969 insertions, 488 deletions
diff --git a/resources/tools/dash/app/pal/__init__.py b/resources/tools/dash/app/pal/__init__.py
index 4e32598147..bb110a254d 100644
--- a/resources/tools/dash/app/pal/__init__.py
+++ b/resources/tools/dash/app/pal/__init__.py
@@ -30,6 +30,10 @@ MAX_TIME_PERIOD = 180
# TIME_PERIOD = MAX_TIME_PERIOD is the default value
TIME_PERIOD = MAX_TIME_PERIOD # [days]
+# List of releases used for iterative data processing.
+# The releases MUST be in the order from the current (newest) to the last
+# (oldest).
+RELEASES=["rls2202", ]
def init_app():
"""Construct core Flask application with embedded Dash app.
@@ -65,6 +69,9 @@ def init_app():
from .trending.trending import init_trending
app = init_trending(app, time_period=time_period)
+ from .report.report import init_report
+ app = init_report(app, releases=RELEASES)
+
return app
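
The new report application is registered by the same factory that already wires the trending application. A minimal usage sketch of init_app() (the top-level import path and the run() arguments are illustrative assumptions, not taken from this patch):

    # Hypothetical usage of the factory; "pal" as the import path and the
    # run() arguments are assumptions for illustration only.
    from pal import init_app

    app = init_app()                    # registers the trending and report Dash apps
    app.run(host="0.0.0.0", port=8080)  # Flask development server
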
diff --git a/resources/tools/dash/app/pal/data/data.py b/resources/tools/dash/app/pal/data/data.py
index 3d9b8b1664..efe2a2d1b6 100644
--- a/resources/tools/dash/app/pal/data/data.py
+++ b/resources/tools/dash/app/pal/data/data.py
@@ -13,14 +13,16 @@
"""Prepare data for Plotly Dash."""
-from datetime import datetime, timedelta
import logging
+
+from yaml import load, FullLoader, YAMLError
+from datetime import datetime, timedelta
from time import time
+from pytz import UTC
+from pandas import DataFrame
import awswrangler as wr
-from pytz import UTC
-from yaml import load, FullLoader, YAMLError
from awswrangler.exceptions import EmptyDataFrame, NoFilesFound
@@ -28,7 +30,7 @@ class Data:
"""
"""
- def __init__(self, data_spec_file, debug=False):
+ def __init__(self, data_spec_file: str, debug: bool=False) -> None:
"""
"""
@@ -61,7 +63,7 @@ class Data:
def data(self):
return self._data
- def _get_columns(self, parquet):
+ def _get_columns(self, parquet: str) -> list:
try:
return self._data_spec[parquet]["columns"]
except KeyError as err:
@@ -71,7 +73,7 @@ class Data:
f"specified.\n{err}"
)
- def _get_path(self, parquet):
+ def _get_path(self, parquet: str) -> str:
try:
return self._data_spec[parquet]["path"]
except KeyError as err:
@@ -84,7 +86,7 @@ class Data:
def _create_dataframe_from_parquet(self,
path, partition_filter=None, columns=None,
validate_schema=False, last_modified_begin=None,
- last_modified_end=None, days=None):
+ last_modified_end=None, days=None) -> DataFrame:
"""Read parquet stored in S3 compatible storage and returns Pandas
Dataframe.
@@ -148,7 +150,7 @@ class Data:
self._data = df
return df
- def read_stats(self, days=None):
+ def read_stats(self, days: int=None) -> tuple:
"""Read Suite Result Analysis data partition from parquet.
"""
l_stats = lambda part: True if part["stats_type"] == "sra" else False
@@ -176,7 +178,7 @@ class Data:
)
)
- def read_trending_mrr(self, days=None):
+ def read_trending_mrr(self, days: int=None) -> DataFrame:
"""Read MRR data partition from parquet.
"""
lambda_f = lambda part: True if part["test_type"] == "mrr" else False
@@ -188,7 +190,7 @@ class Data:
days=days
)
- def read_trending_ndrpdr(self, days=None):
+ def read_trending_ndrpdr(self, days: int=None) -> DataFrame:
"""Read NDRPDR data partition from iterative parquet.
"""
lambda_f = lambda part: True if part["test_type"] == "ndrpdr" else False
@@ -200,26 +202,24 @@ class Data:
days=days
)
- def read_iterative_mrr(self, days=None):
+ def read_iterative_mrr(self, release: str) -> DataFrame:
"""Read MRR data partition from iterative parquet.
"""
lambda_f = lambda part: True if part["test_type"] == "mrr" else False
return self._create_dataframe_from_parquet(
- path=self._get_path("iterative-mrr"),
+ path=self._get_path("iterative-mrr").format(release=release),
partition_filter=lambda_f,
- columns=self._get_columns("iterative-mrr"),
- days=days
+ columns=self._get_columns("iterative-mrr")
)
- def read_iterative_ndrpdr(self, days=None):
+ def read_iterative_ndrpdr(self, release: str) -> DataFrame:
"""Read NDRPDR data partition from parquet.
"""
lambda_f = lambda part: True if part["test_type"] == "ndrpdr" else False
return self._create_dataframe_from_parquet(
- path=self._get_path("iterative-ndrpdr"),
+ path=self._get_path("iterative-ndrpdr").format(release=release),
partition_filter=lambda_f,
- columns=self._get_columns("iterative-ndrpdr"),
- days=days
+ columns=self._get_columns("iterative-ndrpdr")
)
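
With the days argument replaced by release, the iterative readers are now called once per release. A minimal sketch mirroring how report/layout.py (below) uses the new API; the spec-file path and the absolute import form are illustrative:

    # Sketch of the per-release readers; the data spec path is an assumption.
    from pal.data.data import Data

    data = Data(data_spec_file="pal/data/data.yaml", debug=True)
    df_mrr = data.read_iterative_mrr(release="rls2202")
    df_ndrpdr = data.read_iterative_ndrpdr(release="rls2202")
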
diff --git a/resources/tools/dash/app/pal/data/data.yaml b/resources/tools/dash/app/pal/data/data.yaml
index 92cd659f48..15fad711ba 100644
--- a/resources/tools/dash/app/pal/data/data.yaml
+++ b/resources/tools/dash/app/pal/data/data.yaml
@@ -104,7 +104,7 @@ trending-ndrpdr:
# - result_latency_forward_pdr_0_min
# - result_latency_forward_pdr_0_unit
iterative-mrr:
- path: s3://fdio-docs-s3-cloudfront-index/csit/parquet/iterative_rls2202
+ path: s3://fdio-docs-s3-cloudfront-index/csit/parquet/iterative_{release}
columns:
- job
- build
@@ -122,7 +122,7 @@ iterative-mrr:
- result_receive_rate_rate_unit
- result_receive_rate_rate_values
iterative-ndrpdr:
- path: s3://fdio-docs-s3-cloudfront-index/csit/parquet/iterative_rls2202
+ path: s3://fdio-docs-s3-cloudfront-index/csit/parquet/iterative_{release}
columns:
- job
- build
@@ -135,62 +135,62 @@ iterative-ndrpdr:
- test_name_long
- test_name_short
- version
- - result_pdr_upper_rate_unit
- - result_pdr_upper_rate_value
- - result_pdr_upper_bandwidth_unit
- - result_pdr_upper_bandwidth_value
+ # - result_pdr_upper_rate_unit
+ # - result_pdr_upper_rate_value
+ # - result_pdr_upper_bandwidth_unit
+ # - result_pdr_upper_bandwidth_value
- result_pdr_lower_rate_unit
- result_pdr_lower_rate_value
- - result_pdr_lower_bandwidth_unit
- - result_pdr_lower_bandwidth_value
- - result_ndr_upper_rate_unit
- - result_ndr_upper_rate_value
- - result_ndr_upper_bandwidth_unit
- - result_ndr_upper_bandwidth_value
+ # - result_pdr_lower_bandwidth_unit
+ # - result_pdr_lower_bandwidth_value
+ # - result_ndr_upper_rate_unit
+ # - result_ndr_upper_rate_value
+ # - result_ndr_upper_bandwidth_unit
+ # - result_ndr_upper_bandwidth_value
- result_ndr_lower_rate_unit
- result_ndr_lower_rate_value
- - result_ndr_lower_bandwidth_unit
- - result_ndr_lower_bandwidth_value
- - result_latency_reverse_pdr_90_avg
+ # - result_ndr_lower_bandwidth_unit
+ # - result_ndr_lower_bandwidth_value
+ # - result_latency_reverse_pdr_90_avg
- result_latency_reverse_pdr_90_hdrh
- - result_latency_reverse_pdr_90_max
- - result_latency_reverse_pdr_90_min
- - result_latency_reverse_pdr_90_unit
- - result_latency_reverse_pdr_50_avg
+ # - result_latency_reverse_pdr_90_max
+ # - result_latency_reverse_pdr_90_min
+ # - result_latency_reverse_pdr_90_unit
+ # - result_latency_reverse_pdr_50_avg
- result_latency_reverse_pdr_50_hdrh
- - result_latency_reverse_pdr_50_max
- - result_latency_reverse_pdr_50_min
- - result_latency_reverse_pdr_50_unit
- - result_latency_reverse_pdr_10_avg
+ # - result_latency_reverse_pdr_50_max
+ # - result_latency_reverse_pdr_50_min
+ # - result_latency_reverse_pdr_50_unit
+ # - result_latency_reverse_pdr_10_avg
- result_latency_reverse_pdr_10_hdrh
- - result_latency_reverse_pdr_10_max
- - result_latency_reverse_pdr_10_min
- - result_latency_reverse_pdr_10_unit
- - result_latency_reverse_pdr_0_avg
+ # - result_latency_reverse_pdr_10_max
+ # - result_latency_reverse_pdr_10_min
+ # - result_latency_reverse_pdr_10_unit
+ # - result_latency_reverse_pdr_0_avg
- result_latency_reverse_pdr_0_hdrh
- - result_latency_reverse_pdr_0_max
- - result_latency_reverse_pdr_0_min
- - result_latency_reverse_pdr_0_unit
- - result_latency_forward_pdr_90_avg
+ # - result_latency_reverse_pdr_0_max
+ # - result_latency_reverse_pdr_0_min
+ # - result_latency_reverse_pdr_0_unit
+ # - result_latency_forward_pdr_90_avg
- result_latency_forward_pdr_90_hdrh
- - result_latency_forward_pdr_90_max
- - result_latency_forward_pdr_90_min
- - result_latency_forward_pdr_90_unit
+ # - result_latency_forward_pdr_90_max
+ # - result_latency_forward_pdr_90_min
+ # - result_latency_forward_pdr_90_unit
- result_latency_forward_pdr_50_avg
- result_latency_forward_pdr_50_hdrh
- - result_latency_forward_pdr_50_max
- - result_latency_forward_pdr_50_min
+ # - result_latency_forward_pdr_50_max
+ # - result_latency_forward_pdr_50_min
- result_latency_forward_pdr_50_unit
- - result_latency_forward_pdr_10_avg
+ # - result_latency_forward_pdr_10_avg
- result_latency_forward_pdr_10_hdrh
- - result_latency_forward_pdr_10_max
- - result_latency_forward_pdr_10_min
- - result_latency_forward_pdr_10_unit
- - result_latency_forward_pdr_0_avg
+ # - result_latency_forward_pdr_10_max
+ # - result_latency_forward_pdr_10_min
+ # - result_latency_forward_pdr_10_unit
+ # - result_latency_forward_pdr_0_avg
- result_latency_forward_pdr_0_hdrh
- - result_latency_forward_pdr_0_max
- - result_latency_forward_pdr_0_min
- - result_latency_forward_pdr_0_unit
+ # - result_latency_forward_pdr_0_max
+ # - result_latency_forward_pdr_0_min
+ # - result_latency_forward_pdr_0_unit
# coverage-ndrpdr:
# path: str
# columns:
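
The hard-coded rls2202 path is replaced by a {release} placeholder that data.py expands with str.format(); for example:

    # How the placeholder from data.yaml is expanded (see _get_path() usage above).
    path = "s3://fdio-docs-s3-cloudfront-index/csit/parquet/iterative_{release}"
    print(path.format(release="rls2202"))
    # -> s3://fdio-docs-s3-cloudfront-index/csit/parquet/iterative_rls2202
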
diff --git a/resources/tools/dash/app/pal/data/tooltips.yaml b/resources/tools/dash/app/pal/data/tooltips.yaml
index e5259b4d7b..5a830e4b68 100644
--- a/resources/tools/dash/app/pal/data/tooltips.yaml
+++ b/resources/tools/dash/app/pal/data/tooltips.yaml
@@ -10,6 +10,8 @@ help-dut:
Device Under Test (DUT) - In software networking, “device” denotes a specific
piece of software tasked with packet processing. Such device is surrounded
with other software components (such as operating system kernel).
+help-dut-ver:
+ The version of the Device under Test.
help-framesize:
Frame size - size of an Ethernet Layer-2 frame on the wire, including any VLAN
tags (dot1q, dot1ad) and Ethernet FCS, but excluding Ethernet preamble and
@@ -17,6 +19,8 @@ help-framesize:
help-infra:
  Infrastructure is defined by the topology (number of nodes), processor
architecture, NIC and driver.
+help-release:
+ The CSIT release.
help-tbed:
  The test bed is defined by topology (number of nodes) and processor
architecture.
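
The two new keys are consumed by Layout._show_tooltip() in report/layout.py below; a minimal sketch of the lookup, with an illustrative file path:

    # The tooltips file is loaded with yaml.load(..., Loader=FullLoader) in
    # Layout.__init__(); keys map badge ids to their help texts.
    from yaml import load, FullLoader

    with open("pal/data/tooltips.yaml") as file_read:   # path is an assumption
        tooltips = load(file_read, Loader=FullLoader)
    print(tooltips["help-release"])                     # -> The CSIT release.
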
diff --git a/resources/tools/dash/app/pal/report/data.py b/resources/tools/dash/app/pal/report/data.py
deleted file mode 100644
index 848259be4a..0000000000
--- a/resources/tools/dash/app/pal/report/data.py
+++ /dev/null
@@ -1,268 +0,0 @@
-# Copyright (c) 2022 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Prepare data for Plotly Dash."""
-
-from logging import info
-from time import time
-
-import awswrangler as wr
-from awswrangler.exceptions import EmptyDataFrame, NoFilesFound
-from boto3 import session
-
-
-S3_DOCS_BUCKET="fdio-docs-s3-cloudfront-index"
-
-def create_dataframe_from_parquet(
- path, partition_filter=None, columns=None,
- validate_schema=False, last_modified_begin=None,
- last_modified_end=None):
- """Read parquet stored in S3 compatible storage and returns Pandas
- Dataframe.
-
- :param path: S3 prefix (accepts Unix shell-style wildcards) (e.g.
- s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0,
- s3://bucket/key1]).
- :param partition_filter: Callback Function filters to apply on PARTITION
- columns (PUSH-DOWN filter). This function MUST receive a single argument
- (Dict[str, str]) where keys are partitions names and values are
- partitions values. Partitions values will be always strings extracted
- from S3. This function MUST return a bool, True to read the partition or
- False to ignore it. Ignored if dataset=False.
- :param columns: Names of columns to read from the file(s).
- :param validate_schema: Check that individual file schemas are all the
- same / compatible. Schemas within a folder prefix should all be the
- same. Disable if you have schemas that are different and want to disable
- this check.
- :param last_modified_begin: Filter the s3 files by the Last modified date of
- the object. The filter is applied only after list all s3 files.
- :param last_modified_end: Filter the s3 files by the Last modified date of
- the object. The filter is applied only after list all s3 files.
- :type path: Union[str, List[str]]
- :type partition_filter: Callable[[Dict[str, str]], bool], optional
- :type columns: List[str], optional
- :type validate_schema: bool, optional
- :type last_modified_begin: datetime, optional
- :type last_modified_end: datetime, optional
- :returns: Pandas DataFrame or None if DataFrame cannot be fetched.
- :rtype: DataFrame
- """
- df = None
- start = time()
- try:
- df = wr.s3.read_parquet(
- path=path,
- path_suffix="parquet",
- ignore_empty=True,
- validate_schema=validate_schema,
- use_threads=True,
- dataset=True,
- columns=columns,
- partition_filter=partition_filter,
- last_modified_begin=last_modified_begin,
- last_modified_end=last_modified_end
- )
- info(f"Create dataframe {path} took: {time() - start}")
- info(df)
- info(df.info(memory_usage="deep"))
- except NoFilesFound:
- return df
-
- return df
-
-
-def read_stats():
- """Read Suite Result Analysis data partition from parquet.
- """
- lambda_f = lambda part: True if part["stats_type"] == "sra" else False
-
- return create_dataframe_from_parquet(
- path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/stats",
- partition_filter=lambda_f
- )
-
-def read_trending_mrr():
- """Read MRR data partition from parquet.
- """
- lambda_f = lambda part: True if part["test_type"] == "mrr" else False
-
- return create_dataframe_from_parquet(
- path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/trending",
- partition_filter=lambda_f,
- columns=["job", "build", "dut_type", "dut_version", "hosts",
- "start_time", "passed", "test_id", "test_name_long",
- "test_name_short", "version",
- "result_receive_rate_rate_avg",
- "result_receive_rate_rate_stdev",
- "result_receive_rate_rate_unit",
- "result_receive_rate_rate_values"
- ]
- )
-
-def read_iterative_mrr():
- """Read MRR data partition from iterative parquet.
- """
- lambda_f = lambda part: True if part["test_type"] == "mrr" else False
-
- return create_dataframe_from_parquet(
- path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/iterative_rls2202",
- partition_filter=lambda_f,
- columns=["job", "build", "dut_type", "dut_version", "hosts",
- "start_time", "passed", "test_id", "test_name_long",
- "test_name_short", "version",
- "result_receive_rate_rate_avg",
- "result_receive_rate_rate_stdev",
- "result_receive_rate_rate_unit",
- "result_receive_rate_rate_values"
- ]
- )
-
-def read_trending_ndrpdr():
- """Read NDRPDR data partition from iterative parquet.
- """
- lambda_f = lambda part: True if part["test_type"] == "ndrpdr" else False
-
- return create_dataframe_from_parquet(
- path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/trending",
- partition_filter=lambda_f,
- columns=["job", "build", "dut_type", "dut_version", "hosts",
- "start_time", "passed", "test_id", "test_name_long",
- "test_name_short", "version",
- "result_pdr_upper_rate_unit",
- "result_pdr_upper_rate_value",
- "result_pdr_upper_bandwidth_unit",
- "result_pdr_upper_bandwidth_value",
- "result_pdr_lower_rate_unit",
- "result_pdr_lower_rate_value",
- "result_pdr_lower_bandwidth_unit",
- "result_pdr_lower_bandwidth_value",
- "result_ndr_upper_rate_unit",
- "result_ndr_upper_rate_value",
- "result_ndr_upper_bandwidth_unit",
- "result_ndr_upper_bandwidth_value",
- "result_ndr_lower_rate_unit",
- "result_ndr_lower_rate_value",
- "result_ndr_lower_bandwidth_unit",
- "result_ndr_lower_bandwidth_value",
- "result_latency_reverse_pdr_90_avg",
- "result_latency_reverse_pdr_90_hdrh",
- "result_latency_reverse_pdr_90_max",
- "result_latency_reverse_pdr_90_min",
- "result_latency_reverse_pdr_90_unit",
- "result_latency_reverse_pdr_50_avg",
- "result_latency_reverse_pdr_50_hdrh",
- "result_latency_reverse_pdr_50_max",
- "result_latency_reverse_pdr_50_min",
- "result_latency_reverse_pdr_50_unit",
- "result_latency_reverse_pdr_10_avg",
- "result_latency_reverse_pdr_10_hdrh",
- "result_latency_reverse_pdr_10_max",
- "result_latency_reverse_pdr_10_min",
- "result_latency_reverse_pdr_10_unit",
- "result_latency_reverse_pdr_0_avg",
- "result_latency_reverse_pdr_0_hdrh",
- "result_latency_reverse_pdr_0_max",
- "result_latency_reverse_pdr_0_min",
- "result_latency_reverse_pdr_0_unit",
- "result_latency_forward_pdr_90_avg",
- "result_latency_forward_pdr_90_hdrh",
- "result_latency_forward_pdr_90_max",
- "result_latency_forward_pdr_90_min",
- "result_latency_forward_pdr_90_unit",
- "result_latency_forward_pdr_50_avg",
- "result_latency_forward_pdr_50_hdrh",
- "result_latency_forward_pdr_50_max",
- "result_latency_forward_pdr_50_min",
- "result_latency_forward_pdr_50_unit",
- "result_latency_forward_pdr_10_avg",
- "result_latency_forward_pdr_10_hdrh",
- "result_latency_forward_pdr_10_max",
- "result_latency_forward_pdr_10_min",
- "result_latency_forward_pdr_10_unit",
- "result_latency_forward_pdr_0_avg",
- "result_latency_forward_pdr_0_hdrh",
- "result_latency_forward_pdr_0_max",
- "result_latency_forward_pdr_0_min",
- "result_latency_forward_pdr_0_unit"
- ]
- )
-
-def read_iterative_ndrpdr():
- """Read NDRPDR data partition from parquet.
- """
- lambda_f = lambda part: True if part["test_type"] == "ndrpdr" else False
-
- return create_dataframe_from_parquet(
- path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/iterative_rls2202",
- partition_filter=lambda_f,
- columns=["job", "build", "dut_type", "dut_version", "hosts",
- "start_time", "passed", "test_id", "test_name_long",
- "test_name_short", "version",
- "result_pdr_upper_rate_unit",
- "result_pdr_upper_rate_value",
- "result_pdr_upper_bandwidth_unit",
- "result_pdr_upper_bandwidth_value",
- "result_pdr_lower_rate_unit",
- "result_pdr_lower_rate_value",
- "result_pdr_lower_bandwidth_unit",
- "result_pdr_lower_bandwidth_value",
- "result_ndr_upper_rate_unit",
- "result_ndr_upper_rate_value",
- "result_ndr_upper_bandwidth_unit",
- "result_ndr_upper_bandwidth_value",
- "result_ndr_lower_rate_unit",
- "result_ndr_lower_rate_value",
- "result_ndr_lower_bandwidth_unit",
- "result_ndr_lower_bandwidth_value",
- "result_latency_reverse_pdr_90_avg",
- "result_latency_reverse_pdr_90_hdrh",
- "result_latency_reverse_pdr_90_max",
- "result_latency_reverse_pdr_90_min",
- "result_latency_reverse_pdr_90_unit",
- "result_latency_reverse_pdr_50_avg",
- "result_latency_reverse_pdr_50_hdrh",
- "result_latency_reverse_pdr_50_max",
- "result_latency_reverse_pdr_50_min",
- "result_latency_reverse_pdr_50_unit",
- "result_latency_reverse_pdr_10_avg",
- "result_latency_reverse_pdr_10_hdrh",
- "result_latency_reverse_pdr_10_max",
- "result_latency_reverse_pdr_10_min",
- "result_latency_reverse_pdr_10_unit",
- "result_latency_reverse_pdr_0_avg",
- "result_latency_reverse_pdr_0_hdrh",
- "result_latency_reverse_pdr_0_max",
- "result_latency_reverse_pdr_0_min",
- "result_latency_reverse_pdr_0_unit",
- "result_latency_forward_pdr_90_avg",
- "result_latency_forward_pdr_90_hdrh",
- "result_latency_forward_pdr_90_max",
- "result_latency_forward_pdr_90_min",
- "result_latency_forward_pdr_90_unit",
- "result_latency_forward_pdr_50_avg",
- "result_latency_forward_pdr_50_hdrh",
- "result_latency_forward_pdr_50_max",
- "result_latency_forward_pdr_50_min",
- "result_latency_forward_pdr_50_unit",
- "result_latency_forward_pdr_10_avg",
- "result_latency_forward_pdr_10_hdrh",
- "result_latency_forward_pdr_10_max",
- "result_latency_forward_pdr_10_min",
- "result_latency_forward_pdr_10_unit",
- "result_latency_forward_pdr_0_avg",
- "result_latency_forward_pdr_0_hdrh",
- "result_latency_forward_pdr_0_max",
- "result_latency_forward_pdr_0_min",
- "result_latency_forward_pdr_0_unit"
- ]
- )
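
This module is superseded by the generic reader in pal/data/data.py; the partition filter it documents behaves the same way there. A minimal sketch of such a filter with awswrangler (column list shortened for brevity):

    # A partition_filter as described in the removed docstring: it receives
    # partition names/values as strings and returns True to read the partition.
    import awswrangler as wr

    def only_mrr(partition: dict) -> bool:
        return partition["test_type"] == "mrr"

    df = wr.s3.read_parquet(
        path="s3://fdio-docs-s3-cloudfront-index/csit/parquet/iterative_rls2202",
        dataset=True,
        partition_filter=only_mrr,
        columns=["job", "build", "test_id", "result_receive_rate_rate_avg"]
    )
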
diff --git a/resources/tools/dash/app/pal/report/graphs.py b/resources/tools/dash/app/pal/report/graphs.py
new file mode 100644
index 0000000000..751eb34006
--- /dev/null
+++ b/resources/tools/dash/app/pal/report/graphs.py
@@ -0,0 +1,224 @@
+# Copyright (c) 2022 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+"""
+
+import plotly.graph_objects as go
+import pandas as pd
+
+import hdrh.histogram
+import hdrh.codec
+
+
+_COLORS = (
+ u"#1A1110", u"#DA2647", u"#214FC6", u"#01786F", u"#BD8260", u"#FFD12A",
+ u"#A6E7FF", u"#738276", u"#C95A49", u"#FC5A8D", u"#CEC8EF", u"#391285",
+ u"#6F2DA8", u"#FF878D", u"#45A27D", u"#FFD0B9", u"#FD5240", u"#DB91EF",
+ u"#44D7A8", u"#4F86F7", u"#84DE02", u"#FFCFF1", u"#614051"
+)
+_VALUE = {
+ "mrr": "result_receive_rate_rate_avg",
+ "ndr": "result_ndr_lower_rate_value",
+ "pdr": "result_pdr_lower_rate_value",
+ "pdr-lat": "result_latency_forward_pdr_50_avg"
+}
+_UNIT = {
+ "mrr": "result_receive_rate_rate_unit",
+ "ndr": "result_ndr_lower_rate_unit",
+ "pdr": "result_pdr_lower_rate_unit",
+ "pdr-lat": "result_latency_forward_pdr_50_unit"
+}
+_LAT_HDRH = ( # Do not change the order
+ "result_latency_forward_pdr_0_hdrh",
+ "result_latency_reverse_pdr_0_hdrh",
+ "result_latency_forward_pdr_10_hdrh",
+ "result_latency_reverse_pdr_10_hdrh",
+ "result_latency_forward_pdr_50_hdrh",
+ "result_latency_reverse_pdr_50_hdrh",
+ "result_latency_forward_pdr_90_hdrh",
+ "result_latency_reverse_pdr_90_hdrh",
+)
+# This value depends on latency stream rate (9001 pps) and duration (5s).
+# Keep it slightly higher so that rounding errors do not remove the tick mark.
+PERCENTILE_MAX = 99.999501
+
+_GRAPH_LAT_HDRH_DESC = {
+ u"result_latency_forward_pdr_0_hdrh": u"No-load.",
+ u"result_latency_reverse_pdr_0_hdrh": u"No-load.",
+ u"result_latency_forward_pdr_10_hdrh": u"Low-load, 10% PDR.",
+ u"result_latency_reverse_pdr_10_hdrh": u"Low-load, 10% PDR.",
+ u"result_latency_forward_pdr_50_hdrh": u"Mid-load, 50% PDR.",
+ u"result_latency_reverse_pdr_50_hdrh": u"Mid-load, 50% PDR.",
+ u"result_latency_forward_pdr_90_hdrh": u"High-load, 90% PDR.",
+ u"result_latency_reverse_pdr_90_hdrh": u"High-load, 90% PDR."
+}
+
+
+def select_iterative_data(data: pd.DataFrame, itm:dict) -> pd.DataFrame:
+ """
+ """
+
+ phy = itm["phy"].split("-")
+ if len(phy) == 4:
+ topo, arch, nic, drv = phy
+ if drv == "dpdk":
+ drv = ""
+ else:
+ drv += "-"
+ drv = drv.replace("_", "-")
+ else:
+ return None
+
+ core = str() if itm["dut"] == "trex" else f"{itm['core']}"
+ ttype = "ndrpdr" if itm["testtype"] in ("ndr", "pdr") else itm["testtype"]
+ dut = "none" if itm["dut"] == "trex" else itm["dut"].upper()
+
+ df = data.loc[(
+ (data["dut_type"] == dut) &
+ (data["test_type"] == ttype) &
+ (data["passed"] == True)
+ )]
+ df = df[df.job.str.endswith(f"{topo}-{arch}")]
+ df = df[df.test_id.str.contains(
+ f"^.*[.|-]{nic}.*{itm['framesize']}-{core}-{drv}{itm['test']}-{ttype}$",
+ regex=True
+ )].sort_values(by="start_time", ignore_index=True)
+
+ return df
+
+
+def graph_iterative(data: pd.DataFrame, sel:dict, layout: dict) -> tuple:
+ """
+ """
+
+ fig_tput = go.Figure()
+ fig_tsa = go.Figure()
+
+ return fig_tput, fig_tsa
+
+
+def table_comparison(data: pd.DataFrame, sel:dict) -> pd.DataFrame:
+ """
+ """
+ table = pd.DataFrame(
+ {
+ "Test Case": [
+ "64b-2t1c-avf-eth-l2xcbase-eth-2memif-1dcr",
+ "64b-2t1c-avf-eth-l2xcbase-eth-2vhostvr1024-1vm-vppl2xc",
+ "64b-2t1c-avf-ethip4udp-ip4base-iacl50sl-10kflows",
+ "78b-2t1c-avf-ethip6-ip6scale2m-rnd "],
+ "2106.0-8": [
+ "14.45 +- 0.08",
+ "9.63 +- 0.05",
+ "9.7 +- 0.02",
+ "8.95 +- 0.06"],
+ "2110.0-8": [
+ "14.45 +- 0.08",
+ "9.63 +- 0.05",
+ "9.7 +- 0.02",
+ "8.95 +- 0.06"],
+ "2110.0-9": [
+ "14.45 +- 0.08",
+ "9.63 +- 0.05",
+ "9.7 +- 0.02",
+ "8.95 +- 0.06"],
+ "2202.0-9": [
+ "14.45 +- 0.08",
+ "9.63 +- 0.05",
+ "9.7 +- 0.02",
+ "8.95 +- 0.06"],
+ "2110.0-9 vs 2110.0-8": [
+ "-0.23 +- 0.62",
+ "-1.37 +- 1.3",
+ "+0.08 +- 0.2",
+ "-2.16 +- 0.83"],
+ "2202.0-9 vs 2110.0-9": [
+ "+6.95 +- 0.72",
+ "+5.35 +- 1.26",
+ "+4.48 +- 1.48",
+ "+4.09 +- 0.95"]
+ }
+    )
+
+ return table
+
+
+def graph_hdrh_latency(data: dict, layout: dict) -> go.Figure:
+ """
+ """
+
+ fig = None
+
+ traces = list()
+ for idx, (lat_name, lat_hdrh) in enumerate(data.items()):
+ try:
+ decoded = hdrh.histogram.HdrHistogram.decode(lat_hdrh)
+ except (hdrh.codec.HdrLengthException, TypeError) as err:
+ continue
+ previous_x = 0.0
+ prev_perc = 0.0
+ xaxis = list()
+ yaxis = list()
+ hovertext = list()
+ for item in decoded.get_recorded_iterator():
+ # The real value is "percentile".
+ # For 100%, we cut that down to "x_perc" to avoid
+ # infinity.
+ percentile = item.percentile_level_iterated_to
+ x_perc = min(percentile, PERCENTILE_MAX)
+ xaxis.append(previous_x)
+ yaxis.append(item.value_iterated_to)
+ hovertext.append(
+ f"<b>{_GRAPH_LAT_HDRH_DESC[lat_name]}</b><br>"
+ f"Direction: {(u'W-E', u'E-W')[idx % 2]}<br>"
+ f"Percentile: {prev_perc:.5f}-{percentile:.5f}%<br>"
+ f"Latency: {item.value_iterated_to}uSec"
+ )
+ next_x = 100.0 / (100.0 - x_perc)
+ xaxis.append(next_x)
+ yaxis.append(item.value_iterated_to)
+ hovertext.append(
+ f"<b>{_GRAPH_LAT_HDRH_DESC[lat_name]}</b><br>"
+ f"Direction: {(u'W-E', u'E-W')[idx % 2]}<br>"
+ f"Percentile: {prev_perc:.5f}-{percentile:.5f}%<br>"
+ f"Latency: {item.value_iterated_to}uSec"
+ )
+ previous_x = next_x
+ prev_perc = percentile
+
+ traces.append(
+ go.Scatter(
+ x=xaxis,
+ y=yaxis,
+ name=_GRAPH_LAT_HDRH_DESC[lat_name],
+ mode=u"lines",
+ legendgroup=_GRAPH_LAT_HDRH_DESC[lat_name],
+ showlegend=bool(idx % 2),
+ line=dict(
+ color=_COLORS[int(idx/2)],
+ dash=u"solid",
+ width=1 if idx % 2 else 2
+ ),
+ hovertext=hovertext,
+ hoverinfo=u"text"
+ )
+ )
+ if traces:
+ fig = go.Figure()
+ fig.add_traces(traces)
+ layout_hdrh = layout.get("plot-hdrh-latency", None)
+        if layout_hdrh:
+ fig.update_layout(layout_hdrh)
+
+ return fig
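
The x-axis used by graph_hdrh_latency() is the inverse of the remaining tail, x = 100 / (100 - percentile), so every additional "nine" adds a decade; PERCENTILE_MAX caps the last point instead of letting 100 % run to infinity. A quick check:

    # Percentile -> x-axis position used in graph_hdrh_latency().
    for p in (90.0, 99.0, 99.9, 99.999501):
        print(p, 100.0 / (100.0 - p))
    # -> 10, 100, ~1000, ~2.0e5
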
diff --git a/resources/tools/dash/app/pal/report/layout.py b/resources/tools/dash/app/pal/report/layout.py
index 70fe727efc..26b9a9f4b5 100644
--- a/resources/tools/dash/app/pal/report/layout.py
+++ b/resources/tools/dash/app/pal/report/layout.py
@@ -11,36 +11,1457 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-"""Plotly Dash HTML layout override."""
-
-html_layout = u"""
-<!DOCTYPE html>
- <html>
- <head>
- {%metas%}
- <title>{%title%}</title>
- {%favicon%}
- {%css%}
- </head>
- <body class="dash-template">
- <header>
- <div class="nav-wrapper">
- <a href="/">
- <h1>FD.io CSIT</h1>
- </a>
- <a href="">
- <h1>Report</h1>
- </a>
- <nav>
- </nav>
- </div>
- </header>
- {%app_entry%}
- <footer>
- {%config%}
- {%scripts%}
- {%renderer%}
- </footer>
- </body>
- </html>
+"""Plotly Dash HTML layout override.
"""
+
+import logging
+import pandas as pd
+import dash_bootstrap_components as dbc
+
+from flask import Flask
+from dash import dcc
+from dash import html
+from dash import callback_context, no_update, ALL
+from dash import Input, Output, State
+from dash.exceptions import PreventUpdate
+from yaml import load, FullLoader, YAMLError
+from copy import deepcopy
+from json import loads, JSONDecodeError
+from ast import literal_eval
+
+from pprint import pformat
+
+from ..data.data import Data
+from ..data.url_processing import url_decode, url_encode
+from .graphs import graph_iterative, table_comparison
+
+
+class Layout:
+ """
+ """
+
+ # If True, clear all inputs in control panel when button "ADD SELECTED" is
+ # pressed.
+ CLEAR_ALL_INPUTS = False
+
+ STYLE_DISABLED = {"display": "none"}
+ STYLE_ENABLED = {"display": "inherit"}
+
+ CL_ALL_DISABLED = [{
+ "label": "All",
+ "value": "all",
+ "disabled": True
+ }]
+ CL_ALL_ENABLED = [{
+ "label": "All",
+ "value": "all",
+ "disabled": False
+ }]
+
+ PLACEHOLDER = html.Nobr("")
+
+ DRIVERS = ("avf", "af-xdp", "rdma", "dpdk")
+
+ LABELS = {
+ "dpdk": "DPDK",
+ "container_memif": "LXC/DRC Container Memif",
+ "crypto": "IPSec IPv4 Routing",
+ "ip4": "IPv4 Routing",
+ "ip6": "IPv6 Routing",
+ "ip4_tunnels": "IPv4 Tunnels",
+ "l2": "L2 Ethernet Switching",
+ "srv6": "SRv6 Routing",
+ "vm_vhost": "VMs vhost-user",
+ "nfv_density-dcr_memif-chain_ipsec": "CNF Service Chains Routing IPSec",
+ "nfv_density-vm_vhost-chain_dot1qip4vxlan":"VNF Service Chains Tunnels",
+ "nfv_density-vm_vhost-chain": "VNF Service Chains Routing",
+ "nfv_density-dcr_memif-pipeline": "CNF Service Pipelines Routing",
+ "nfv_density-dcr_memif-chain": "CNF Service Chains Routing",
+ }
+
+ URL_STYLE = {
+ "background-color": "#d2ebf5",
+ "border-color": "#bce1f1",
+ "color": "#135d7c"
+ }
+
+ def __init__(self, app: Flask, releases: list, html_layout_file: str,
+ graph_layout_file: str, data_spec_file: str, tooltip_file: str) -> None:
+ """
+ """
+
+ # Inputs
+ self._app = app
+ self.releases = releases
+ self._html_layout_file = html_layout_file
+ self._graph_layout_file = graph_layout_file
+ self._data_spec_file = data_spec_file
+ self._tooltip_file = tooltip_file
+
+ # Read the data:
+ self._data = pd.DataFrame()
+ for rls in releases:
+ data_mrr = Data(self._data_spec_file, True).\
+ read_iterative_mrr(release=rls)
+ data_mrr["release"] = rls
+ data_ndrpdr = Data(self._data_spec_file, True).\
+ read_iterative_ndrpdr(release=rls)
+ data_ndrpdr["release"] = rls
+ self._data = pd.concat(
+ [self._data, data_mrr, data_ndrpdr], ignore_index=True)
+
+ # Get structure of tests:
+ tbs = dict()
+ cols = ["job", "test_id", "test_type", "dut_version", "release"]
+ for _, row in self._data[cols].drop_duplicates().iterrows():
+ rls = row["release"]
+ ttype = row["test_type"]
+ d_ver = row["dut_version"]
+ lst_job = row["job"].split("-")
+ dut = lst_job[1]
+ tbed = "-".join(lst_job[-2:])
+ lst_test_id = row["test_id"].split(".")
+ if dut == "dpdk":
+ area = "dpdk"
+ else:
+ area = "-".join(lst_test_id[3:-2])
+ suite = lst_test_id[-2].replace("2n1l-", "").replace("1n1l-", "").\
+ replace("2n-", "")
+ test = lst_test_id[-1]
+ nic = suite.split("-")[0]
+ for drv in self.DRIVERS:
+ if drv in test:
+ if drv == "af-xdp":
+ driver = "af_xdp"
+ else:
+ driver = drv
+ test = test.replace(f"{drv}-", "")
+ break
+ else:
+ driver = "dpdk"
+ infra = "-".join((tbed, nic, driver))
+ lst_test = test.split("-")
+ framesize = lst_test[0]
+ core = lst_test[1] if lst_test[1] else "8C"
+ test = "-".join(lst_test[2: -1])
+
+ if tbs.get(rls, None) is None:
+ tbs[rls] = dict()
+ if tbs[rls].get(dut, None) is None:
+ tbs[rls][dut] = dict()
+ if tbs[rls][dut].get(d_ver, None) is None:
+ tbs[rls][dut][d_ver] = dict()
+ if tbs[rls][dut][d_ver].get(infra, None) is None:
+ tbs[rls][dut][d_ver][infra] = dict()
+ if tbs[rls][dut][d_ver][infra].get(area, None) is None:
+ tbs[rls][dut][d_ver][infra][area] = dict()
+ if tbs[rls][dut][d_ver][infra][area].get(test, None) is None:
+ tbs[rls][dut][d_ver][infra][area][test] = dict()
+ tbs_test = tbs[rls][dut][d_ver][infra][area][test]
+ tbs_test["core"] = list()
+ tbs_test["frame-size"] = list()
+ tbs_test["test-type"] = list()
+ if core.upper() not in tbs_test["core"]:
+ tbs_test["core"].append(core.upper())
+ if framesize.upper() not in tbs_test["frame-size"]:
+ tbs_test["frame-size"].append(framesize.upper())
+ if ttype == "mrr":
+ if "MRR" not in tbs_test["test-type"]:
+ tbs_test["test-type"].append("MRR")
+ elif ttype == "ndrpdr":
+ if "NDR" not in tbs_test["test-type"]:
+ tbs_test["test-type"].extend(("NDR", "PDR", ))
+ self._spec_tbs = tbs
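        # Illustrative sketch (hypothetical values) of the structure built
        # above and stored in self._spec_tbs; it drives the cascading
        # dropdowns in the callbacks:
        #   {"rls2202": {"vpp": {"22.02-release": {"2n-icx-e810cq-avf": {
        #       "ip4": {"ethip4-ip4base": {
        #           "core": ["1C", "2C", "4C"],
        #           "frame-size": ["64B", "1518B"],
        #           "test-type": ["MRR", "NDR", "PDR"]}}}}}}}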
+
+ # Read from files:
+ self._html_layout = ""
+ self._graph_layout = None
+ self._tooltips = dict()
+
+ try:
+ with open(self._html_layout_file, "r") as file_read:
+ self._html_layout = file_read.read()
+ except IOError as err:
+ raise RuntimeError(
+ f"Not possible to open the file {self._html_layout_file}\n{err}"
+ )
+
+ try:
+ with open(self._graph_layout_file, "r") as file_read:
+ self._graph_layout = load(file_read, Loader=FullLoader)
+ except IOError as err:
+ raise RuntimeError(
+ f"Not possible to open the file {self._graph_layout_file}\n"
+ f"{err}"
+ )
+ except YAMLError as err:
+ raise RuntimeError(
+ f"An error occurred while parsing the specification file "
+ f"{self._graph_layout_file}\n{err}"
+ )
+
+ try:
+ with open(self._tooltip_file, "r") as file_read:
+ self._tooltips = load(file_read, Loader=FullLoader)
+ except IOError as err:
+ logging.warning(
+ f"Not possible to open the file {self._tooltip_file}\n{err}"
+ )
+ except YAMLError as err:
+ logging.warning(
+ f"An error occurred while parsing the specification file "
+ f"{self._tooltip_file}\n{err}"
+ )
+
+ # Callbacks:
+ if self._app is not None and hasattr(self, 'callbacks'):
+ self.callbacks(self._app)
+
+ @property
+ def html_layout(self):
+ return self._html_layout
+
+ @property
+ def spec_tbs(self):
+ return self._spec_tbs
+
+ @property
+ def data(self):
+ return self._data
+
+ @property
+ def layout(self):
+ return self._graph_layout
+
+ def label(self, key: str) -> str:
+ return self.LABELS.get(key, key)
+
+ def _show_tooltip(self, id: str, title: str,
+ clipboard_id: str=None) -> list:
+ """
+ """
+ return [
+ dcc.Clipboard(target_id=clipboard_id, title="Copy URL") \
+ if clipboard_id else str(),
+ f"{title} ",
+ dbc.Badge(
+ id=id,
+ children="?",
+ pill=True,
+ color="white",
+ text_color="info",
+ class_name="border ms-1",
+ ),
+ dbc.Tooltip(
+ children=self._tooltips.get(id, str()),
+ target=id,
+ placement="auto"
+ )
+ ]
+
+ def add_content(self):
+ """
+ """
+ if self.html_layout and self.spec_tbs:
+ return html.Div(
+ id="div-main",
+ children=[
+ dbc.Row(
+ id="row-navbar",
+ class_name="g-0",
+ children=[
+ self._add_navbar(),
+ ]
+ ),
+ dcc.Loading(
+ dbc.Offcanvas(
+ class_name="w-50",
+ id="offcanvas-metadata",
+ title="Throughput And Latency",
+ placement="end",
+ is_open=False,
+ children=[
+ dbc.Row(id="metadata-tput-lat"),
+ dbc.Row(id="metadata-hdrh-graph"),
+ ]
+ )
+ ),
+ dbc.Row(
+ id="row-main",
+ class_name="g-0",
+ children=[
+ dcc.Store(id="selected-tests"),
+ dcc.Store(id="control-panel"),
+ dcc.Location(id="url", refresh=False),
+ self._add_ctrl_col(),
+ self._add_plotting_col(),
+ ]
+ )
+ ]
+ )
+ else:
+ return html.Div(
+ id="div-main-error",
+ children=[
+ dbc.Alert(
+ [
+                        "An Error Occurred",
+ ],
+ color="danger",
+ ),
+ ]
+ )
+
+ def _add_navbar(self):
+ """Add nav element with navigation panel. It is placed on the top.
+ """
+ return dbc.NavbarSimple(
+ id="navbarsimple-main",
+ children=[
+ dbc.NavItem(
+ dbc.NavLink(
+ "Iterative Test Runs",
+ disabled=True,
+ external_link=True,
+ href="#"
+ )
+ )
+ ],
+ brand="Dashboard",
+ brand_href="/",
+ brand_external_link=True,
+ class_name="p-2",
+ fluid=True,
+ )
+
+ def _add_ctrl_col(self) -> dbc.Col:
+ """Add column with controls. It is placed on the left side.
+ """
+ return dbc.Col(
+ id="col-controls",
+ children=[
+ self._add_ctrl_panel(),
+ ],
+ )
+
+ def _add_plotting_col(self) -> dbc.Col:
+ """Add column with plots and tables. It is placed on the right side.
+ """
+ return dbc.Col(
+ id="col-plotting-area",
+ children=[
+ dcc.Loading(
+ children=[
+ dbc.Row( # Graphs
+ class_name="g-0 p-2",
+ children=[
+ dbc.Col(
+ dbc.Row( # Throughput
+ id="row-graph-tput",
+ class_name="g-0 p-2",
+ children=[
+ self.PLACEHOLDER
+ ]
+ ),
+ width=6
+ ),
+ dbc.Col(
+ dbc.Row( # TSA
+ id="row-graph-tsa",
+ class_name="g-0 p-2",
+ children=[
+ self.PLACEHOLDER
+ ]
+ ),
+ width=6
+ )
+ ]
+ ),
+ dbc.Row( # Tables
+ id="row-table",
+ class_name="g-0 p-2",
+ children=[
+ self.PLACEHOLDER
+ ]
+ ),
+ dbc.Row( # Download
+ id="row-btn-download",
+ class_name="g-0 p-2",
+ children=[
+ self.PLACEHOLDER
+ ]
+ )
+ ]
+ )
+ ],
+ width=9
+ )
+
+ def _add_ctrl_panel(self) -> dbc.Row:
+ """
+ """
+ return dbc.Row(
+ id="row-ctrl-panel",
+ class_name="g-0 p-2",
+ children=[
+ dbc.Row(
+ class_name="g-0",
+ children=[
+ dbc.InputGroup(
+ [
+ dbc.InputGroupText(
+ children=self._show_tooltip(
+ "help-release", "Release")
+ ),
+ dbc.Select(
+ id="dd-ctrl-rls",
+ placeholder=("Select a Release..."),
+ options=sorted(
+ [
+ {"label": k, "value": k} \
+ for k in self.spec_tbs.keys()
+ ],
+ key=lambda d: d["label"]
+ )
+ )
+ ],
+ class_name="mb-3",
+ size="sm",
+ ),
+ ]
+ ),
+ dbc.Row(
+ class_name="g-0",
+ children=[
+ dbc.InputGroup(
+ [
+ dbc.InputGroupText(
+ children=self._show_tooltip(
+ "help-dut", "DUT")
+ ),
+ dbc.Select(
+ id="dd-ctrl-dut",
+ placeholder=(
+ "Select a Device under Test..."
+ )
+ )
+ ],
+ class_name="mb-3",
+ size="sm",
+ ),
+ ]
+ ),
+ dbc.Row(
+ class_name="g-0",
+ children=[
+ dbc.InputGroup(
+ [
+ dbc.InputGroupText(
+ children=self._show_tooltip(
+ "help-dut-ver", "DUT Version")
+ ),
+ dbc.Select(
+ id="dd-ctrl-dutver",
+ placeholder=(
+ "Select a Version of "
+ "Device under Test..."
+ )
+ )
+ ],
+ class_name="mb-3",
+ size="sm",
+ ),
+ ]
+ ),
+ dbc.Row(
+ class_name="g-0",
+ children=[
+ dbc.InputGroup(
+ [
+ dbc.InputGroupText(
+ children=self._show_tooltip(
+ "help-infra", "Infra")
+ ),
+ dbc.Select(
+ id="dd-ctrl-phy",
+ placeholder=(
+ "Select a Physical Test Bed "
+ "Topology..."
+ )
+ )
+ ],
+ class_name="mb-3",
+ size="sm",
+ ),
+ ]
+ ),
+ dbc.Row(
+ class_name="g-0",
+ children=[
+ dbc.InputGroup(
+ [
+ dbc.InputGroupText(
+ children=self._show_tooltip(
+ "help-area", "Area")
+ ),
+ dbc.Select(
+ id="dd-ctrl-area",
+ placeholder="Select an Area...",
+ disabled=True,
+ ),
+ ],
+ class_name="mb-3",
+ size="sm",
+ ),
+ ]
+ ),
+ dbc.Row(
+ class_name="g-0",
+ children=[
+ dbc.InputGroup(
+ [
+ dbc.InputGroupText(
+ children=self._show_tooltip(
+ "help-test", "Test")
+ ),
+ dbc.Select(
+ id="dd-ctrl-test",
+ placeholder="Select a Test...",
+ disabled=True,
+ ),
+ ],
+ class_name="mb-3",
+ size="sm",
+ ),
+ ]
+ ),
+ dbc.Row(
+ id="row-ctrl-framesize",
+ class_name="gy-1",
+ children=[
+ dbc.Label(
+ children=self._show_tooltip(
+ "help-framesize", "Frame Size"),
+ class_name="p-0"
+ ),
+ dbc.Col(
+ children=[
+ dbc.Checklist(
+ id="cl-ctrl-framesize-all",
+ options=self.CL_ALL_DISABLED,
+ inline=True,
+ switch=False
+ ),
+ ],
+ width=3
+ ),
+ dbc.Col(
+ children=[
+ dbc.Checklist(
+ id="cl-ctrl-framesize",
+ inline=True,
+ switch=False
+ )
+ ]
+ )
+ ]
+ ),
+ dbc.Row(
+ id="row-ctrl-core",
+ class_name="gy-1",
+ children=[
+ dbc.Label(
+ children=self._show_tooltip(
+ "help-cores", "Number of Cores"),
+ class_name="p-0"
+ ),
+ dbc.Col(
+ children=[
+ dbc.Checklist(
+ id="cl-ctrl-core-all",
+ options=self.CL_ALL_DISABLED,
+ inline=False,
+ switch=False
+ )
+ ],
+ width=3
+ ),
+ dbc.Col(
+ children=[
+ dbc.Checklist(
+ id="cl-ctrl-core",
+ inline=True,
+ switch=False
+ )
+ ]
+ )
+ ]
+ ),
+ dbc.Row(
+ id="row-ctrl-testtype",
+ class_name="gy-1",
+ children=[
+ dbc.Label(
+ children=self._show_tooltip(
+ "help-ttype", "Test Type"),
+ class_name="p-0"
+ ),
+ dbc.Col(
+ children=[
+ dbc.Checklist(
+ id="cl-ctrl-testtype-all",
+ options=self.CL_ALL_DISABLED,
+ inline=True,
+ switch=False
+ ),
+ ],
+ width=3
+ ),
+ dbc.Col(
+ children=[
+ dbc.Checklist(
+ id="cl-ctrl-testtype",
+ inline=True,
+ switch=False
+ )
+ ]
+ )
+ ]
+ ),
+ dbc.Row(
+ class_name="gy-1 p-0",
+ children=[
+ dbc.ButtonGroup(
+ [
+ dbc.Button(
+ id="btn-ctrl-add",
+ children="Add Selected",
+ class_name="me-1",
+ color="info"
+ )
+ ],
+ size="md",
+ )
+ ]
+ ),
+ dbc.Row(
+ id="row-card-sel-tests",
+ class_name="gy-1",
+ style=self.STYLE_DISABLED,
+ children=[
+ dbc.Label(
+ "Selected tests",
+ class_name="p-0"
+ ),
+ dbc.Checklist(
+ class_name="overflow-auto",
+ id="cl-selected",
+ options=[],
+ inline=False,
+ style={"max-height": "12em"},
+ )
+ ],
+ ),
+ dbc.Row(
+ id="row-btns-sel-tests",
+ style=self.STYLE_DISABLED,
+ children=[
+ dbc.ButtonGroup(
+ class_name="gy-2",
+ children=[
+ dbc.Button(
+ id="btn-sel-remove",
+ children="Remove Selected",
+ class_name="w-100 me-1",
+ color="info",
+ disabled=False
+ ),
+ dbc.Button(
+ id="btn-sel-remove-all",
+ children="Remove All",
+ class_name="w-100 me-1",
+ color="info",
+ disabled=False
+ ),
+ ],
+ size="md",
+ )
+ ]
+ ),
+ ]
+ )
+
+ class ControlPanel:
+ def __init__(self, panel: dict) -> None:
+
+ CL_ALL_DISABLED = [{
+ "label": "All",
+ "value": "all",
+ "disabled": True
+ }]
+
+ # Defines also the order of keys
+ self._defaults = {
+ "dd-rls-value": str(),
+ "dd-dut-options": list(),
+ "dd-dut-disabled": True,
+ "dd-dut-value": str(),
+ "dd-dutver-options": list(),
+ "dd-dutver-disabled": True,
+ "dd-dutver-value": str(),
+ "dd-phy-options": list(),
+ "dd-phy-disabled": True,
+ "dd-phy-value": str(),
+ "dd-area-options": list(),
+ "dd-area-disabled": True,
+ "dd-area-value": str(),
+ "dd-test-options": list(),
+ "dd-test-disabled": True,
+ "dd-test-value": str(),
+ "cl-core-options": list(),
+ "cl-core-value": list(),
+ "cl-core-all-value": list(),
+ "cl-core-all-options": CL_ALL_DISABLED,
+ "cl-framesize-options": list(),
+ "cl-framesize-value": list(),
+ "cl-framesize-all-value": list(),
+ "cl-framesize-all-options": CL_ALL_DISABLED,
+ "cl-testtype-options": list(),
+ "cl-testtype-value": list(),
+ "cl-testtype-all-value": list(),
+ "cl-testtype-all-options": CL_ALL_DISABLED,
+ "btn-add-disabled": True,
+ "cl-selected-options": list()
+ }
+
+ self._panel = deepcopy(self._defaults)
+ if panel:
+ for key in self._defaults:
+ self._panel[key] = panel[key]
+
+ @property
+ def defaults(self) -> dict:
+ return self._defaults
+
+ @property
+ def panel(self) -> dict:
+ return self._panel
+
+ def set(self, kwargs: dict) -> None:
+ for key, val in kwargs.items():
+ if key in self._panel:
+ self._panel[key] = val
+ else:
+ raise KeyError(f"The key {key} is not defined.")
+
+ def get(self, key: str) -> any:
+ return self._panel[key]
+
+ def values(self) -> tuple:
+ return tuple(self._panel.values())
+
+ @staticmethod
+ def _sync_checklists(opt: list, sel: list, all: list, id: str) -> tuple:
+ """
+ """
+ options = {v["value"] for v in opt}
+        if id == "all":
+ sel = list(options) if all else list()
+ else:
+ all = ["all", ] if set(sel) == options else list()
+ return sel, all
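        # Illustrative behaviour of the synchronization above (hypothetical
        # values):
        #   opt = [{"value": "1C"}, {"value": "2C"}]
        #   id="all", all=["all"]      -> sel becomes both options, all stays ["all"]
        #   id="",    sel=["1C", "2C"] -> all becomes ["all"]
        #   id="",    sel=["1C"]       -> all becomes []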
+
+ @staticmethod
+ def _list_tests(selection: dict) -> list:
+ """Display selected tests with checkboxes
+ """
+ if selection:
+ return [{"label": v["id"], "value": v["id"]} for v in selection]
+ else:
+ return list()
+
+ def callbacks(self, app):
+
+ def _generate_plotting_area(figs: tuple, table: pd.DataFrame,
+ url: str) -> tuple:
+ """
+ """
+
+ (fig_tput, fig_tsa) = figs
+
+ row_fig_tput = self.PLACEHOLDER
+ row_fig_tsa = self.PLACEHOLDER
+ row_table = self.PLACEHOLDER
+ row_btn_dwnld = self.PLACEHOLDER
+
+ if fig_tput:
+ row_fig_tput = [
+ dcc.Graph(
+ id={"type": "graph", "index": "tput"},
+ figure=fig_tput
+ )
+ ]
+ row_btn_dwnld = [
+ dbc.Col( # Download
+ width=2,
+ children=[
+ dcc.Loading(children=[
+ dbc.Button(
+ id="btn-download-data",
+ children=self._show_tooltip(
+ "help-download", "Download Data"),
+ class_name="me-1",
+ color="info"
+ ),
+ dcc.Download(id="download-data")
+ ]),
+ ]
+ ),
+ dbc.Col( # Show URL
+ width=10,
+ children=[
+ dbc.InputGroup(
+ class_name="me-1",
+ children=[
+ dbc.InputGroupText(
+ style=self.URL_STYLE,
+ children=self._show_tooltip(
+ "help-url", "URL", "input-url")
+ ),
+ dbc.Input(
+ id="input-url",
+ readonly=True,
+ type="url",
+ style=self.URL_STYLE,
+ value=url
+ )
+ ]
+ )
+ ]
+ )
+ ]
+ if fig_tsa:
+ row_fig_tsa = [
+ dcc.Graph(
+ id={"type": "graph", "index": "lat"},
+ figure=fig_tsa
+ )
+ ]
+ if not table.empty:
+ row_table = [
+ dbc.Table.from_dataframe(
+ table,
+ id={"type": "table", "index": "compare"},
+ striped=True,
+ bordered=True,
+ hover=True
+ )
+ ]
+
+ return row_fig_tput, row_fig_tsa, row_table, row_btn_dwnld
+
+ @app.callback(
+ Output("control-panel", "data"), # Store
+ Output("selected-tests", "data"), # Store
+ Output("row-graph-tput", "children"),
+ Output("row-graph-tsa", "children"),
+ Output("row-table", "children"),
+ Output("row-btn-download", "children"),
+ Output("row-card-sel-tests", "style"),
+ Output("row-btns-sel-tests", "style"),
+ Output("dd-ctrl-rls", "value"),
+ Output("dd-ctrl-dut", "options"),
+ Output("dd-ctrl-dut", "disabled"),
+ Output("dd-ctrl-dut", "value"),
+ Output("dd-ctrl-dutver", "options"),
+ Output("dd-ctrl-dutver", "disabled"),
+ Output("dd-ctrl-dutver", "value"),
+ Output("dd-ctrl-phy", "options"),
+ Output("dd-ctrl-phy", "disabled"),
+ Output("dd-ctrl-phy", "value"),
+ Output("dd-ctrl-area", "options"),
+ Output("dd-ctrl-area", "disabled"),
+ Output("dd-ctrl-area", "value"),
+ Output("dd-ctrl-test", "options"),
+ Output("dd-ctrl-test", "disabled"),
+ Output("dd-ctrl-test", "value"),
+ Output("cl-ctrl-core", "options"),
+ Output("cl-ctrl-core", "value"),
+ Output("cl-ctrl-core-all", "value"),
+ Output("cl-ctrl-core-all", "options"),
+ Output("cl-ctrl-framesize", "options"),
+ Output("cl-ctrl-framesize", "value"),
+ Output("cl-ctrl-framesize-all", "value"),
+ Output("cl-ctrl-framesize-all", "options"),
+ Output("cl-ctrl-testtype", "options"),
+ Output("cl-ctrl-testtype", "value"),
+ Output("cl-ctrl-testtype-all", "value"),
+ Output("cl-ctrl-testtype-all", "options"),
+ Output("btn-ctrl-add", "disabled"),
+ Output("cl-selected", "options"), # User selection
+ State("control-panel", "data"), # Store
+ State("selected-tests", "data"), # Store
+ State("cl-selected", "value"), # User selection
+ Input("dd-ctrl-rls", "value"),
+ Input("dd-ctrl-dut", "value"),
+ Input("dd-ctrl-dutver", "value"),
+ Input("dd-ctrl-phy", "value"),
+ Input("dd-ctrl-area", "value"),
+ Input("dd-ctrl-test", "value"),
+ Input("cl-ctrl-core", "value"),
+ Input("cl-ctrl-core-all", "value"),
+ Input("cl-ctrl-framesize", "value"),
+ Input("cl-ctrl-framesize-all", "value"),
+ Input("cl-ctrl-testtype", "value"),
+ Input("cl-ctrl-testtype-all", "value"),
+ Input("btn-ctrl-add", "n_clicks"),
+ Input("btn-sel-remove", "n_clicks"),
+ Input("btn-sel-remove-all", "n_clicks"),
+ Input("url", "href")
+ )
+ def _update_ctrl_panel(cp_data: dict, store_sel: list, list_sel: list,
+ dd_rls: str, dd_dut: str, dd_dutver: str, dd_phy: str, dd_area: str,
+ dd_test: str, cl_core: list, cl_core_all: list, cl_framesize: list,
+ cl_framesize_all: list, cl_testtype: list, cl_testtype_all: list,
+ btn_add: int, btn_remove: int, btn_remove_all: int,
+ href: str) -> tuple:
+ """
+ """
+
+ def _gen_new_url(parsed_url: dict, store_sel: list) -> str:
+
+ if parsed_url:
+ new_url = url_encode({
+ "scheme": parsed_url["scheme"],
+ "netloc": parsed_url["netloc"],
+ "path": parsed_url["path"],
+ "params": {
+ "store_sel": store_sel,
+ }
+ })
+ else:
+ new_url = str()
+ return new_url
+
+
+ ctrl_panel = self.ControlPanel(cp_data)
+
+ # Parse the url:
+ parsed_url = url_decode(href)
+
+ row_fig_tput = no_update
+ row_fig_tsa = no_update
+ row_table = no_update
+ row_btn_dwnld = no_update
+ row_card_sel_tests = no_update
+ row_btns_sel_tests = no_update
+
+ trigger_id = callback_context.triggered[0]["prop_id"].split(".")[0]
+
+ if trigger_id == "dd-ctrl-rls":
+ try:
+ rls = self.spec_tbs[dd_rls]
+ options = sorted(
+ [{"label": v, "value": v} for v in rls.keys()],
+ key=lambda d: d["label"]
+ )
+ disabled = False
+ except KeyError:
+ options = list()
+ disabled = True
+ ctrl_panel.set({
+ "dd-rls-value": dd_rls,
+ "dd-dut-value": str(),
+ "dd-dut-options": options,
+ "dd-dut-disabled": disabled,
+ "dd-dutver-value": str(),
+ "dd-dutver-options": list(),
+ "dd-dutver-disabled": True,
+ "dd-phy-value": str(),
+ "dd-phy-options": list(),
+ "dd-phy-disabled": True,
+ "dd-area-value": str(),
+ "dd-area-options": list(),
+ "dd-area-disabled": True,
+ "dd-test-value": str(),
+ "dd-test-options": list(),
+ "dd-test-disabled": True,
+ "cl-core-options": list(),
+ "cl-core-value": list(),
+ "cl-core-all-value": list(),
+ "cl-core-all-options": self.CL_ALL_DISABLED,
+ "cl-framesize-options": list(),
+ "cl-framesize-value": list(),
+ "cl-framesize-all-value": list(),
+ "cl-framesize-all-options": self.CL_ALL_DISABLED,
+ "cl-testtype-options": list(),
+ "cl-testtype-value": list(),
+ "cl-testtype-all-value": list(),
+ "cl-testtype-all-options": self.CL_ALL_DISABLED
+ })
+ if trigger_id == "dd-ctrl-dut":
+ try:
+ rls = ctrl_panel.get("dd-rls-value")
+ dut = self.spec_tbs[rls][dd_dut]
+ options = sorted(
+ [{"label": v, "value": v} for v in dut.keys()],
+ key=lambda d: d["label"]
+ )
+ disabled = False
+ except KeyError:
+ options = list()
+ disabled = True
+ ctrl_panel.set({
+ "dd-dut-value": dd_dut,
+ "dd-dutver-value": str(),
+ "dd-dutver-options": options,
+ "dd-dutver-disabled": disabled,
+ "dd-phy-value": str(),
+ "dd-phy-options": list(),
+ "dd-phy-disabled": True,
+ "dd-area-value": str(),
+ "dd-area-options": list(),
+ "dd-area-disabled": True,
+ "dd-test-value": str(),
+ "dd-test-options": list(),
+ "dd-test-disabled": True,
+ "cl-core-options": list(),
+ "cl-core-value": list(),
+ "cl-core-all-value": list(),
+ "cl-core-all-options": self.CL_ALL_DISABLED,
+ "cl-framesize-options": list(),
+ "cl-framesize-value": list(),
+ "cl-framesize-all-value": list(),
+ "cl-framesize-all-options": self.CL_ALL_DISABLED,
+ "cl-testtype-options": list(),
+ "cl-testtype-value": list(),
+ "cl-testtype-all-value": list(),
+ "cl-testtype-all-options": self.CL_ALL_DISABLED
+ })
+ elif trigger_id == "dd-ctrl-dutver":
+ try:
+ rls = ctrl_panel.get("dd-rls-value")
+ dut = ctrl_panel.get("dd-dut-value")
+ dutver = self.spec_tbs[rls][dut][dd_dutver]
+ options = sorted(
+ [{"label": v, "value": v} for v in dutver.keys()],
+ key=lambda d: d["label"]
+ )
+ disabled = False
+ except KeyError:
+ options = list()
+ disabled = True
+ ctrl_panel.set({
+ "dd-dutver-value": dd_dutver,
+ "dd-phy-value": str(),
+ "dd-phy-options": options,
+ "dd-phy-disabled": disabled,
+ "dd-area-value": str(),
+ "dd-area-options": list(),
+ "dd-area-disabled": True,
+ "dd-test-value": str(),
+ "dd-test-options": list(),
+ "dd-test-disabled": True,
+ "cl-core-options": list(),
+ "cl-core-value": list(),
+ "cl-core-all-value": list(),
+ "cl-core-all-options": self.CL_ALL_DISABLED,
+ "cl-framesize-options": list(),
+ "cl-framesize-value": list(),
+ "cl-framesize-all-value": list(),
+ "cl-framesize-all-options": self.CL_ALL_DISABLED,
+ "cl-testtype-options": list(),
+ "cl-testtype-value": list(),
+ "cl-testtype-all-value": list(),
+ "cl-testtype-all-options": self.CL_ALL_DISABLED
+ })
+ elif trigger_id == "dd-ctrl-phy":
+ try:
+ rls = ctrl_panel.get("dd-rls-value")
+ dut = ctrl_panel.get("dd-dut-value")
+ dutver = ctrl_panel.get("dd-dutver-value")
+ phy = self.spec_tbs[rls][dut][dutver][dd_phy]
+ options = sorted(
+ [{"label": self.label(v), "value": v}
+ for v in phy.keys()],
+ key=lambda d: d["label"]
+ )
+ disabled = False
+ except KeyError:
+ options = list()
+ disabled = True
+ ctrl_panel.set({
+ "dd-phy-value": dd_phy,
+ "dd-area-value": str(),
+ "dd-area-options": options,
+ "dd-area-disabled": disabled,
+ "dd-test-value": str(),
+ "dd-test-options": list(),
+ "dd-test-disabled": True,
+ "cl-core-options": list(),
+ "cl-core-value": list(),
+ "cl-core-all-value": list(),
+ "cl-core-all-options": self.CL_ALL_DISABLED,
+ "cl-framesize-options": list(),
+ "cl-framesize-value": list(),
+ "cl-framesize-all-value": list(),
+ "cl-framesize-all-options": self.CL_ALL_DISABLED,
+ "cl-testtype-options": list(),
+ "cl-testtype-value": list(),
+ "cl-testtype-all-value": list(),
+ "cl-testtype-all-options": self.CL_ALL_DISABLED
+ })
+ elif trigger_id == "dd-ctrl-area":
+ try:
+ rls = ctrl_panel.get("dd-rls-value")
+ dut = ctrl_panel.get("dd-dut-value")
+ dutver = ctrl_panel.get("dd-dutver-value")
+ phy = ctrl_panel.get("dd-phy-value")
+ area = self.spec_tbs[rls][dut][dutver][phy][dd_area]
+ options = sorted(
+ [{"label": v, "value": v} for v in area.keys()],
+ key=lambda d: d["label"]
+ )
+ disabled = False
+ except KeyError:
+ options = list()
+ disabled = True
+ ctrl_panel.set({
+ "dd-area-value": dd_area,
+ "dd-test-value": str(),
+ "dd-test-options": options,
+ "dd-test-disabled": disabled,
+ "cl-core-options": list(),
+ "cl-core-value": list(),
+ "cl-core-all-value": list(),
+ "cl-core-all-options": self.CL_ALL_DISABLED,
+ "cl-framesize-options": list(),
+ "cl-framesize-value": list(),
+ "cl-framesize-all-value": list(),
+ "cl-framesize-all-options": self.CL_ALL_DISABLED,
+ "cl-testtype-options": list(),
+ "cl-testtype-value": list(),
+ "cl-testtype-all-value": list(),
+ "cl-testtype-all-options": self.CL_ALL_DISABLED
+ })
+ elif trigger_id == "dd-ctrl-test":
+ rls = ctrl_panel.get("dd-rls-value")
+ dut = ctrl_panel.get("dd-dut-value")
+ dutver = ctrl_panel.get("dd-dutver-value")
+ phy = ctrl_panel.get("dd-phy-value")
+ area = ctrl_panel.get("dd-area-value")
+ test = self.spec_tbs[rls][dut][dutver][phy][area][dd_test]
+ if dut and phy and area and dd_test:
+ ctrl_panel.set({
+ "dd-test-value": dd_test,
+ "cl-core-options": [{"label": v, "value": v}
+ for v in sorted(test["core"])],
+ "cl-core-value": list(),
+ "cl-core-all-value": list(),
+ "cl-core-all-options": self.CL_ALL_ENABLED,
+ "cl-framesize-options": [{"label": v, "value": v}
+ for v in sorted(test["frame-size"])],
+ "cl-framesize-value": list(),
+ "cl-framesize-all-value": list(),
+ "cl-framesize-all-options": self.CL_ALL_ENABLED,
+ "cl-testtype-options": [{"label": v, "value": v}
+ for v in sorted(test["test-type"])],
+ "cl-testtype-value": list(),
+ "cl-testtype-all-value": list(),
+ "cl-testtype-all-options": self.CL_ALL_ENABLED,
+ })
+ elif trigger_id == "cl-ctrl-core":
+ val_sel, val_all = self._sync_checklists(
+ opt=ctrl_panel.get("cl-core-options"),
+ sel=cl_core,
+ all=list(),
+ id=""
+ )
+ ctrl_panel.set({
+ "cl-core-value": val_sel,
+ "cl-core-all-value": val_all,
+ })
+ elif trigger_id == "cl-ctrl-core-all":
+ val_sel, val_all = self._sync_checklists(
+ opt = ctrl_panel.get("cl-core-options"),
+ sel=list(),
+ all=cl_core_all,
+ id="all"
+ )
+ ctrl_panel.set({
+ "cl-core-value": val_sel,
+ "cl-core-all-value": val_all,
+ })
+ elif trigger_id == "cl-ctrl-framesize":
+ val_sel, val_all = self._sync_checklists(
+ opt = ctrl_panel.get("cl-framesize-options"),
+ sel=cl_framesize,
+ all=list(),
+ id=""
+ )
+ ctrl_panel.set({
+ "cl-framesize-value": val_sel,
+ "cl-framesize-all-value": val_all,
+ })
+ elif trigger_id == "cl-ctrl-framesize-all":
+ val_sel, val_all = self._sync_checklists(
+ opt = ctrl_panel.get("cl-framesize-options"),
+ sel=list(),
+ all=cl_framesize_all,
+ id="all"
+ )
+ ctrl_panel.set({
+ "cl-framesize-value": val_sel,
+ "cl-framesize-all-value": val_all,
+ })
+ elif trigger_id == "cl-ctrl-testtype":
+ val_sel, val_all = self._sync_checklists(
+ opt = ctrl_panel.get("cl-testtype-options"),
+ sel=cl_testtype,
+ all=list(),
+ id=""
+ )
+ ctrl_panel.set({
+ "cl-testtype-value": val_sel,
+ "cl-testtype-all-value": val_all,
+ })
+ elif trigger_id == "cl-ctrl-testtype-all":
+ val_sel, val_all = self._sync_checklists(
+ opt = ctrl_panel.get("cl-testtype-options"),
+ sel=list(),
+ all=cl_testtype_all,
+ id="all"
+ )
+ ctrl_panel.set({
+ "cl-testtype-value": val_sel,
+ "cl-testtype-all-value": val_all,
+ })
+ elif trigger_id == "btn-ctrl-add":
+ _ = btn_add
+ rls = ctrl_panel.get("dd-rls-value")
+ dut = ctrl_panel.get("dd-dut-value")
+ dutver = ctrl_panel.get("dd-dutver-value")
+ phy = ctrl_panel.get("dd-phy-value")
+ area = ctrl_panel.get("dd-area-value")
+ test = ctrl_panel.get("dd-test-value")
+ cores = ctrl_panel.get("cl-core-value")
+ framesizes = ctrl_panel.get("cl-framesize-value")
+ testtypes = ctrl_panel.get("cl-testtype-value")
+ # Add selected test to the list of tests in store:
+ if all((rls, dut, dutver, phy, area, test, cores, framesizes,
+ testtypes)):
+ if store_sel is None:
+ store_sel = list()
+ for core in cores:
+ for framesize in framesizes:
+ for ttype in testtypes:
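+                            # TRex tests do not use the core designation, so
+                            # it is left empty in the test ID and in the
+                            # stored selection.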
+ if dut == "trex":
+ core = str()
+ tid = "-".join((rls, dut, dutver,
+                                phy.replace("af_xdp", "af-xdp"), area,
+ framesize.lower(), core.lower(), test,
+ ttype.lower()))
+ if tid not in [itm["id"] for itm in store_sel]:
+ store_sel.append({
+ "id": tid,
+ "rls": rls,
+ "dut": dut,
+ "dutver": dutver,
+ "phy": phy,
+ "area": area,
+ "test": test,
+ "framesize": framesize.lower(),
+ "core": core.lower(),
+ "testtype": ttype.lower()
+ })
+ store_sel = sorted(store_sel, key=lambda d: d["id"])
+ row_card_sel_tests = self.STYLE_ENABLED
+ row_btns_sel_tests = self.STYLE_ENABLED
+ if self.CLEAR_ALL_INPUTS:
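+                # Optionally reset the control panel to its defaults after
+                # the selection has been added.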
+ ctrl_panel.set(ctrl_panel.defaults)
+ ctrl_panel.set({
+ "cl-selected-options": self._list_tests(store_sel)
+ })
+ row_fig_tput, row_fig_tsa, row_table, row_btn_dwnld = \
+ _generate_plotting_area(
+ graph_iterative(self.data, store_sel, self.layout),
+ table_comparison(self.data, store_sel),
+ _gen_new_url(parsed_url, store_sel)
+ )
+ elif trigger_id == "btn-sel-remove-all":
+ _ = btn_remove_all
+ row_fig_tput = self.PLACEHOLDER
+ row_fig_tsa = self.PLACEHOLDER
+ row_table = self.PLACEHOLDER
+ row_btn_dwnld = self.PLACEHOLDER
+ row_card_sel_tests = self.STYLE_DISABLED
+ row_btns_sel_tests = self.STYLE_DISABLED
+ store_sel = list()
+ ctrl_panel.set({"cl-selected-options": list()})
+ elif trigger_id == "btn-sel-remove":
+ _ = btn_remove
+ if list_sel:
+ new_store_sel = list()
+ for item in store_sel:
+ if item["id"] not in list_sel:
+ new_store_sel.append(item)
+ store_sel = new_store_sel
+ if store_sel:
+ row_fig_tput, row_fig_tsa, row_table, row_btn_dwnld = \
+ _generate_plotting_area(
+ graph_iterative(self.data, store_sel, self.layout),
+ table_comparison(self.data, store_sel),
+ _gen_new_url(parsed_url, store_sel)
+ )
+ ctrl_panel.set({
+ "cl-selected-options": self._list_tests(store_sel)
+ })
+ else:
+ row_fig_tput = self.PLACEHOLDER
+ row_fig_tsa = self.PLACEHOLDER
+ row_table = self.PLACEHOLDER
+ row_btn_dwnld = self.PLACEHOLDER
+ row_card_sel_tests = self.STYLE_DISABLED
+ row_btns_sel_tests = self.STYLE_DISABLED
+ store_sel = list()
+ ctrl_panel.set({"cl-selected-options": list()})
+ elif trigger_id == "url":
+ # TODO: Add verification
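+            # Re-create the stored test selection and the plots from the URL
+            # parameters.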
+ url_params = parsed_url["params"]
+ if url_params:
+ store_sel = literal_eval(
+ url_params.get("store_sel", list())[0])
+ if store_sel:
+ row_fig_tput, row_fig_tsa, row_table, row_btn_dwnld = \
+ _generate_plotting_area(
+ graph_iterative(self.data, store_sel,
+ self.layout),
+ table_comparison(self.data, store_sel),
+ _gen_new_url(parsed_url, store_sel)
+ )
+ row_card_sel_tests = self.STYLE_ENABLED
+ row_btns_sel_tests = self.STYLE_ENABLED
+ ctrl_panel.set({
+ "cl-selected-options": self._list_tests(store_sel)
+ })
+ else:
+ row_fig_tput = self.PLACEHOLDER
+ row_fig_tsa = self.PLACEHOLDER
+ row_table = self.PLACEHOLDER
+ row_btn_dwnld = self.PLACEHOLDER
+ row_card_sel_tests = self.STYLE_DISABLED
+ row_btns_sel_tests = self.STYLE_DISABLED
+ store_sel = list()
+ ctrl_panel.set({"cl-selected-options": list()})
+
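+        # The "Add" button is enabled only when cores, frame sizes and test
+        # types are all selected.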
+ if ctrl_panel.get("cl-core-value") and \
+ ctrl_panel.get("cl-framesize-value") and \
+ ctrl_panel.get("cl-testtype-value"):
+ disabled = False
+ else:
+ disabled = True
+ ctrl_panel.set({"btn-add-disabled": disabled})
+
+ ret_val = [
+ ctrl_panel.panel, store_sel,
+ row_fig_tput, row_fig_tsa, row_table, row_btn_dwnld,
+ row_card_sel_tests, row_btns_sel_tests
+ ]
+ ret_val.extend(ctrl_panel.values())
+ return ret_val
+
+ # @app.callback(
+ # Output("metadata-tput-lat", "children"),
+ # Output("metadata-hdrh-graph", "children"),
+ # Output("offcanvas-metadata", "is_open"),
+ # Input({"type": "graph", "index": ALL}, "clickData"),
+ # prevent_initial_call=True
+ # )
+ # def _show_metadata_from_graphs(graph_data: dict) -> tuple:
+ # """
+ # """
+ # try:
+ # trigger_id = loads(
+ # callback_context.triggered[0]["prop_id"].split(".")[0]
+ # )["index"]
+ # idx = 0 if trigger_id == "tput" else 1
+ # graph_data = graph_data[idx]["points"][0]
+ # except (JSONDecodeError, IndexError, KeyError, ValueError,
+ # TypeError):
+ # raise PreventUpdate
+
+ # metadata = no_update
+ # graph = list()
+
+ # children = [
+ # dbc.ListGroupItem(
+ # [dbc.Badge(x.split(":")[0]), x.split(": ")[1]]
+ # ) for x in graph_data.get("text", "").split("<br>")
+ # ]
+ # if trigger_id == "tput":
+ # title = "Throughput"
+ # elif trigger_id == "lat":
+ # title = "Latency"
+ # hdrh_data = graph_data.get("customdata", None)
+ # if hdrh_data:
+ # graph = [dbc.Card(
+ # class_name="gy-2 p-0",
+ # children=[
+ # dbc.CardHeader(hdrh_data.pop("name")),
+ # dbc.CardBody(children=[
+ # dcc.Graph(
+ # id="hdrh-latency-graph",
+ # figure=graph_hdrh_latency(
+ # hdrh_data, self.layout
+ # )
+ # )
+ # ])
+ # ])
+ # ]
+ # metadata = [
+ # dbc.Card(
+ # class_name="gy-2 p-0",
+ # children=[
+ # dbc.CardHeader(children=[
+ # dcc.Clipboard(
+ # target_id="tput-lat-metadata",
+ # title="Copy",
+ # style={"display": "inline-block"}
+ # ),
+ # title
+ # ]),
+ # dbc.CardBody(
+ # id="tput-lat-metadata",
+ # class_name="p-0",
+ # children=[dbc.ListGroup(children, flush=True), ]
+ # )
+ # ]
+ # )
+ # ]
+
+ # return metadata, graph, True
+
+ # @app.callback(
+ # Output("download-data", "data"),
+ # State("selected-tests", "data"),
+ # Input("btn-download-data", "n_clicks"),
+ # prevent_initial_call=True
+ # )
+ # def _download_data(store_sel, n_clicks):
+ # """
+ # """
+
+ # if not n_clicks:
+ # raise PreventUpdate
+
+ # if not store_sel:
+ # raise PreventUpdate
+
+ # df = pd.DataFrame()
+ # for itm in store_sel:
+ # sel_data = select_trending_data(self.data, itm)
+ # if sel_data is None:
+ # continue
+ # df = pd.concat([df, sel_data], ignore_index=True)
+
+ # return dcc.send_data_frame(df.to_csv, "trending_data.csv")
diff --git a/resources/tools/dash/app/pal/report/layout.yaml b/resources/tools/dash/app/pal/report/layout.yaml
new file mode 100644
index 0000000000..6fa91f31f1
--- /dev/null
+++ b/resources/tools/dash/app/pal/report/layout.yaml
@@ -0,0 +1,150 @@
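+# Plotly layout definitions for the graphs in the report application.
+
+# Layout of the packet throughput graph (throughput per test case).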
+plot-throughput:
+ titlefont:
+ size: 16
+ xaxis:
+ title: "<b>Test Cases [Index]</b>"
+ titlefont:
+ size: 14
+ autorange: True
+ fixedrange: False
+ gridcolor: "rgb(230, 230, 230)"
+ linecolor: "rgb(220, 220, 220)"
+ linewidth: 1
+ showgrid: True
+ showline: True
+ showticklabels: True
+ tickcolor: "rgb(220, 220, 220)"
+ tickmode: "array"
+ tickfont:
+ size: 14
+ zeroline: False
+ yaxis:
+ title: "<b>Packet Throughput [Mpps]</b>"
+ titlefont:
+ size: 14
+ gridcolor: "rgb(230, 230, 230)"
+ hoverformat: ".4r"
+ tickformat: ".3r"
+ linecolor: "rgb(220, 220, 220)"
+ linewidth: 1
+ showgrid: True
+ showline: True
+ showticklabels: True
+ tickcolor: "rgb(220, 220, 220)"
+ tickfont:
+ size: 14
+ zeroline: False
+ range: [0, 50]
+ autosize: False
+ margin:
+ t: 50
+ b: 0
+ l: 80
+ r: 20
+ showlegend: True
+ legend:
+ orientation: "h"
+ font:
+ size: 14
+ width: 700
+ height: 900
+ paper_bgcolor: "#fff"
+ plot_bgcolor: "#fff"
+ hoverlabel:
+ namelength: -1
+
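+# Layout of the throughput speedup analysis graph (throughput versus the
+# number of cores).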
+plot-throughput-speedup-analysis:
+ titlefont:
+ size: 16
+ xaxis:
+ title: "<b>Number of Cores [Qty]</b>"
+ titlefont:
+ size: 14
+ autorange: True
+ fixedrange: False
+ gridcolor: "rgb(230, 230, 230)"
+ linecolor: "rgb(220, 220, 220)"
+ linewidth: 1
+ showgrid: True
+ showline: True
+ showticklabels: True
+ tickcolor: "rgb(238, 238, 238)"
+ tickmode: "linear"
+ tickfont:
+ size: 14
+ zeroline: False
+ yaxis:
+ title: "<b>Packet Throughput [Mpps]</b>"
+ titlefont:
+ size: 14
+ type: "linear"
+ gridcolor: "rgb(230, 230, 230)"
+ hoverformat: ".4s"
+ linecolor: "rgb(220, 220, 220)"
+ linewidth: 1
+ showgrid: True
+ showline: True
+ showticklabels: True
+ tickcolor: "rgb(220, 220, 220)"
+ tickformat: ".4s"
+ tickfont:
+ size: 14
+ zeroline: True
+ rangemode: "tozero"
+ range: [0, 100]
+ legend:
+ orientation: "h"
+ font:
+ size: 14
+ xanchor: "left"
+ yanchor: "top"
+ x: 0
+ y: -0.2
+ bgcolor: "rgba(255, 255, 255, 0)"
+ bordercolor: "rgba(255, 255, 255, 0)"
+ traceorder: "normal"
+ autosize: False
+ margin:
+    t: 50
+    b: 150
+    l: 85
+    r: 10
+ showlegend: True
+ width: 700
+ height: 700
+ paper_bgcolor: "#fff"
+ plot_bgcolor: "#fff"
+ hoverlabel:
+ namelength: -1
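+  # Annotations placed below the graph, used as a legend for the line styles
+  # ("Perfect", "Measured", "Limit").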
+ annotations: [
+ {
+ text: "_ _ __ ...",
+ align: "left",
+ showarrow: False,
+ xref: "paper",
+ yref: "paper",
+ xanchor: "left",
+ yanchor: "top",
+ x: 0,
+ y: -0.14,
+ font: {
+ family: "Consolas, Courier New",
+ size: 13
+ },
+ },
+ {
+ text: " Perfect Measured Limit",
+ align: "left",
+ showarrow: False,
+ xref: "paper",
+ yref: "paper",
+ xanchor: "left",
+ yanchor: "top",
+ x: 0,
+ y: -0.15,
+ font: {
+ family: "Consolas, Courier New",
+ size: 13
+ },
+ },
+ ]
diff --git a/resources/tools/dash/app/pal/report/report.py b/resources/tools/dash/app/pal/report/report.py
index 769a6dd63e..8330f8721e 100644
--- a/resources/tools/dash/app/pal/report/report.py
+++ b/resources/tools/dash/app/pal/report/report.py
@@ -11,21 +11,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-"""Instantiate the Report Dash application.
+"""Instantiate the Report Dash applocation.
"""
-
import dash
-from dash import dcc
-from dash import html
-from dash import dash_table
+import dash_bootstrap_components as dbc
-from .data import read_stats
-from .data import read_trending_mrr, read_trending_ndrpdr
-from .data import read_iterative_mrr, read_iterative_ndrpdr
-from .layout import html_layout
+from .layout import Layout
-def init_report(server):
+def init_report(server, releases):
"""Create a Plotly Dash dashboard.
:param server: Flask server.
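+    :param releases: List of releases to be displayed in the report.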
@@ -37,72 +31,19 @@ def init_report(server):
dash_app = dash.Dash(
server=server,
routes_pathname_prefix=u"/report/",
- external_stylesheets=[
- u"/static/dist/css/styles.css",
- u"https://fonts.googleapis.com/css?family=Lato",
- ],
+ external_stylesheets=[dbc.themes.LUX],
)
# Custom HTML layout
- dash_app.index_string = html_layout
-
- # Create Layout
- dash_app.layout = html.Div(
- children=[
- html.Div(
- children=create_data_table(
- read_stats().dropna(),
- u"database-table-stats"
- )
- ),
- html.Div(
- children=create_data_table(
- read_trending_mrr().dropna(),
- u"database-table-mrr"
- )
- ),
- html.Div(
- children=create_data_table(
- read_trending_ndrpdr().dropna(),
- u"database-table-ndrpdr"
- )
- ),
- html.Div(
- children=create_data_table(
- read_iterative_mrr().dropna(),
- u"database-table-iterative-mrr"
- )
- ),
- html.Div(
- children=create_data_table(
- read_iterative_ndrpdr().dropna(),
- u"database-table-iterative-ndrpdr"
- )
- )
- ],
- id=u"dash-container",
+ layout = Layout(
+ app=dash_app,
+ releases=releases,
+ html_layout_file="pal/templates/report_layout.jinja2",
+ graph_layout_file="pal/report/layout.yaml",
+ data_spec_file="pal/data/data.yaml",
+ tooltip_file="pal/data/tooltips.yaml"
)
- return dash_app.server
-
-
-def create_data_table(df, id):
- """Create Dash datatable from Pandas DataFrame.
+ dash_app.index_string = layout.html_layout
+ dash_app.layout = layout.add_content()
- DEMO
- """
-
- table = dash_table.DataTable(
- id=id,
- columns=[{u"name": i, u"id": i} for i in df.columns],
- data=df.to_dict(u"records"),
- fixed_rows={'headers': True},
- sort_action=u"native",
- sort_mode=u"native",
- page_size=5,
- style_header={
- 'overflow': 'hidden',
- 'textOverflow': 'ellipsis',
- 'minWidth': 95, 'maxWidth': 95, 'width': 95,
- }
- )
- return table
+ return dash_app.server
diff --git a/resources/tools/dash/app/pal/stats/layout.py b/resources/tools/dash/app/pal/stats/layout.py
index 2f43308f7b..5c3758ba76 100644
--- a/resources/tools/dash/app/pal/stats/layout.py
+++ b/resources/tools/dash/app/pal/stats/layout.py
@@ -45,7 +45,7 @@ class Layout:
"color": "#135d7c"
}
- def __init__(self, app: Flask, html_layout_file: str, spec_file: str,
+ def __init__(self, app: Flask, html_layout_file: str,
graph_layout_file: str, data_spec_file: str, tooltip_file: str,
time_period: int=None) -> None:
"""
@@ -54,7 +54,6 @@ class Layout:
# Inputs
self._app = app
self._html_layout_file = html_layout_file
- self._spec_file = spec_file
self._graph_layout_file = graph_layout_file
self._data_spec_file = data_spec_file
self._tooltip_file = tooltip_file
diff --git a/resources/tools/dash/app/pal/stats/stats.py b/resources/tools/dash/app/pal/stats/stats.py
index 56fe27f4f7..37a0875d24 100644
--- a/resources/tools/dash/app/pal/stats/stats.py
+++ b/resources/tools/dash/app/pal/stats/stats.py
@@ -38,7 +38,6 @@ def init_stats(server, time_period=None):
layout = Layout(
app=dash_app,
html_layout_file="pal/templates/stats_layout.jinja2",
- spec_file="pal/stats/spec_job_selection.yaml",
graph_layout_file="pal/stats/layout.yaml",
data_spec_file="pal/data/data.yaml",
tooltip_file="pal/data/tooltips.yaml",
diff --git a/resources/tools/dash/app/pal/templates/report_layout.jinja2 b/resources/tools/dash/app/pal/templates/report_layout.jinja2
new file mode 100644
index 0000000000..c535d37b03
--- /dev/null
+++ b/resources/tools/dash/app/pal/templates/report_layout.jinja2
@@ -0,0 +1,17 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+ <title>Iterative Test Runs</title>
+ {%metas%}
+ {%favicon%}
+ {%css%}
+</head>
+<body>
+ {%app_entry%}
+ <footer>
+ {%config%}
+ {%scripts%}
+ {%renderer%}
+ </footer>
+</body>
+</html> \ No newline at end of file
diff --git a/resources/tools/dash/app/pal/trending/layout.py b/resources/tools/dash/app/pal/trending/layout.py
index 6d9eca66f8..55d0d08e83 100644
--- a/resources/tools/dash/app/pal/trending/layout.py
+++ b/resources/tools/dash/app/pal/trending/layout.py
@@ -31,15 +31,19 @@ from json import loads, JSONDecodeError
from ast import literal_eval
from ..data.data import Data
+from ..data.url_processing import url_decode, url_encode
from .graphs import graph_trending, graph_hdrh_latency, \
select_trending_data
-from ..data.url_processing import url_decode, url_encode
class Layout:
"""
"""
+ # If True, clear all inputs in control panel when button "ADD SELECTED" is
+ # pressed.
+ CLEAR_ALL_INPUTS = False
+
STYLE_DISABLED = {"display": "none"}
STYLE_ENABLED = {"display": "inherit"}
@@ -81,7 +85,7 @@ class Layout:
"color": "#135d7c"
}
- def __init__(self, app: Flask, html_layout_file: str, spec_file: str,
+ def __init__(self, app: Flask, html_layout_file: str,
graph_layout_file: str, data_spec_file: str, tooltip_file: str,
time_period: str=None) -> None:
"""
@@ -90,7 +94,6 @@ class Layout:
# Inputs
self._app = app
self._html_layout_file = html_layout_file
- self._spec_file = spec_file
self._graph_layout_file = graph_layout_file
self._data_spec_file = data_spec_file
self._tooltip_file = tooltip_file
@@ -914,11 +917,9 @@ class Layout:
if trigger_id == "dd-ctrl-dut":
try:
+ dut = self.spec_tbs[dd_dut]
options = sorted(
- [
- {"label": v, "value": v}
- for v in self.spec_tbs[dd_dut].keys()
- ],
+                    [{"label": v, "value": v} for v in dut.keys()],
key=lambda d: d["label"]
)
disabled = False
@@ -933,6 +934,7 @@ class Layout:
"dd-ctrl-area-value": str(),
"dd-ctrl-area-options": list(),
"dd-ctrl-area-disabled": True,
+ "dd-ctrl-test-value": str(),
"dd-ctrl-test-options": list(),
"dd-ctrl-test-disabled": True,
"cl-ctrl-core-options": list(),
@@ -951,11 +953,10 @@ class Layout:
elif trigger_id == "dd-ctrl-phy":
try:
dut = ctrl_panel.get("dd-ctrl-dut-value")
+ phy = self.spec_tbs[dut][dd_phy]
options = sorted(
- [
- {"label": self.label(v), "value": v}
- for v in self.spec_tbs[dut][dd_phy].keys()
- ],
+ [{"label": self.label(v), "value": v}
+ for v in phy.keys()],
key=lambda d: d["label"]
)
disabled = False
@@ -967,6 +968,7 @@ class Layout:
"dd-ctrl-area-value": str(),
"dd-ctrl-area-options": options,
"dd-ctrl-area-disabled": disabled,
+ "dd-ctrl-test-value": str(),
"dd-ctrl-test-options": list(),
"dd-ctrl-test-disabled": True,
"cl-ctrl-core-options": list(),
@@ -986,11 +988,9 @@ class Layout:
try:
dut = ctrl_panel.get("dd-ctrl-dut-value")
phy = ctrl_panel.get("dd-ctrl-phy-value")
+ area = self.spec_tbs[dut][phy][dd_area]
options = sorted(
- [
- {"label": v, "value": v}
- for v in self.spec_tbs[dut][phy][dd_area].keys()
- ],
+ [{"label": v, "value": v} for v in area.keys()],
key=lambda d: d["label"]
)
disabled = False
@@ -1022,19 +1022,17 @@ class Layout:
dut = ctrl_panel.get("dd-ctrl-dut-value")
phy = ctrl_panel.get("dd-ctrl-phy-value")
area = ctrl_panel.get("dd-ctrl-area-value")
- cores = self.spec_tbs[dut][phy][area][dd_test]["core"]
- fsizes = self.spec_tbs[dut][phy][area][dd_test]["frame-size"]
- ttypes = self.spec_tbs[dut][phy][area][dd_test]["test-type"]
+ test = self.spec_tbs[dut][phy][area][dd_test]
+ cores = test["core"]
+ fsizes = test["frame-size"]
+ ttypes = test["test-type"]
if dut and phy and area and dd_test:
- core_opts = [
- {"label": v, "value": v} for v in sorted(cores)
- ]
- framesize_opts = [
- {"label": v, "value": v} for v in sorted(fsizes)
- ]
- testtype_opts = [
- {"label": v, "value": v}for v in sorted(ttypes)
- ]
+ core_opts = [{"label": v, "value": v}
+ for v in sorted(cores)]
+ framesize_opts = [{"label": v, "value": v}
+ for v in sorted(fsizes)]
+ testtype_opts = [{"label": v, "value": v}
+ for v in sorted(ttypes)]
ctrl_panel.set({
"dd-ctrl-test-value": dd_test,
"cl-ctrl-core-options": core_opts,
@@ -1153,24 +1151,22 @@ class Layout:
store_sel = sorted(store_sel, key=lambda d: d["id"])
row_card_sel_tests = self.STYLE_ENABLED
row_btns_sel_tests = self.STYLE_ENABLED
- ctrl_panel.set(ctrl_panel.defaults)
+ if self.CLEAR_ALL_INPUTS:
+ ctrl_panel.set(ctrl_panel.defaults)
ctrl_panel.set({
"cl-selected-options": self._list_tests(store_sel)
})
row_fig_tput, row_fig_lat, row_btn_dwnld = \
_generate_plotting_area(
- graph_trending(
- self.data, store_sel, self.layout, d_start,
- d_end
- ),
+ graph_trending(self.data, store_sel, self.layout,
+ d_start, d_end),
_gen_new_url(parsed_url, store_sel, d_start, d_end)
)
elif trigger_id == "dpr-period":
row_fig_tput, row_fig_lat, row_btn_dwnld = \
_generate_plotting_area(
- graph_trending(
- self.data, store_sel, self.layout, d_start, d_end
- ),
+ graph_trending(self.data, store_sel, self.layout,
+ d_start, d_end),
_gen_new_url(parsed_url, store_sel, d_start, d_end)
)
elif trigger_id == "btn-sel-remove-all":
@@ -1181,9 +1177,7 @@ class Layout:
row_card_sel_tests = self.STYLE_DISABLED
row_btns_sel_tests = self.STYLE_DISABLED
store_sel = list()
- ctrl_panel.set({
- "cl-selected-options": list()
- })
+ ctrl_panel.set({"cl-selected-options": list()})
elif trigger_id == "btn-sel-remove":
_ = btn_remove
if list_sel:
@@ -1195,10 +1189,8 @@ class Layout:
if store_sel:
row_fig_tput, row_fig_lat, row_btn_dwnld = \
_generate_plotting_area(
- graph_trending(
- self.data, store_sel, self.layout, d_start,
- d_end
- ),
+ graph_trending(self.data, store_sel, self.layout,
+ d_start, d_end),
_gen_new_url(parsed_url, store_sel, d_start, d_end)
)
ctrl_panel.set({
@@ -1211,9 +1203,7 @@ class Layout:
row_card_sel_tests = self.STYLE_DISABLED
row_btns_sel_tests = self.STYLE_DISABLED
store_sel = list()
- ctrl_panel.set({
- "cl-selected-options": list()
- })
+ ctrl_panel.set({"cl-selected-options": list()})
elif trigger_id == "url":
# TODO: Add verification
url_params = parsed_url["params"]
@@ -1255,9 +1245,7 @@ class Layout:
disabled = False
else:
disabled = True
- ctrl_panel.set({
- "btn-ctrl-add-disabled": disabled
- })
+ ctrl_panel.set({"btn-ctrl-add-disabled": disabled})
ret_val = [
ctrl_panel.panel, store_sel,
diff --git a/resources/tools/dash/app/pal/trending/trending.py b/resources/tools/dash/app/pal/trending/trending.py
index 68dc420556..88b0815584 100644
--- a/resources/tools/dash/app/pal/trending/trending.py
+++ b/resources/tools/dash/app/pal/trending/trending.py
@@ -38,7 +38,6 @@ def init_trending(server, time_period=None):
layout = Layout(
app=dash_app,
html_layout_file="pal/templates/trending_layout.jinja2",
- spec_file="pal/trending/spec_test_selection.yaml",
graph_layout_file="pal/trending/layout.yaml",
data_spec_file="pal/data/data.yaml",
tooltip_file="pal/data/tooltips.yaml",