about summary refs log tree commit diff stats
path: root/csit.infra.dash/app/cdash/data
diff options
context:
space:
mode:
Diffstat (limited to 'csit.infra.dash/app/cdash/data')
-rw-r--r--csit.infra.dash/app/cdash/data/__init__.py12
-rw-r--r--csit.infra.dash/app/cdash/data/_metadata/coverage_rls2306_devicebin0 -> 5373 bytes
-rw-r--r--csit.infra.dash/app/cdash/data/_metadata/coverage_rls2306_ndrpdrbin0 -> 11868 bytes
-rw-r--r--csit.infra.dash/app/cdash/data/_metadata/coverage_rls2310_devicebin0 -> 5373 bytes
-rw-r--r--csit.infra.dash/app/cdash/data/_metadata/coverage_rls2310_ndrpdrbin0 -> 11868 bytes
-rw-r--r--csit.infra.dash/app/cdash/data/_metadata/coverage_rls2402_devicebin0 -> 5373 bytes
-rw-r--r--csit.infra.dash/app/cdash/data/_metadata/coverage_rls2402_ndrpdrbin0 -> 11868 bytes
-rw-r--r--csit.infra.dash/app/cdash/data/_metadata/iterative_rls2306_hoststackbin0 -> 7882 bytes
-rw-r--r--csit.infra.dash/app/cdash/data/_metadata/iterative_rls2306_mrrbin0 -> 7919 bytes
-rw-r--r--csit.infra.dash/app/cdash/data/_metadata/iterative_rls2306_ndrpdrbin0 -> 13081 bytes
-rw-r--r--csit.infra.dash/app/cdash/data/_metadata/iterative_rls2310_hoststackbin0 -> 7882 bytes
-rw-r--r--csit.infra.dash/app/cdash/data/_metadata/iterative_rls2310_mrrbin0 -> 7919 bytes
-rw-r--r--csit.infra.dash/app/cdash/data/_metadata/iterative_rls2310_ndrpdrbin0 -> 15173 bytes
-rw-r--r--csit.infra.dash/app/cdash/data/_metadata/iterative_rls2402_hoststackbin0 -> 8303 bytes
-rw-r--r--csit.infra.dash/app/cdash/data/_metadata/iterative_rls2402_mrrbin0 -> 10179 bytes
-rw-r--r--csit.infra.dash/app/cdash/data/_metadata/iterative_rls2402_ndrpdrbin0 -> 15173 bytes
-rw-r--r--csit.infra.dash/app/cdash/data/_metadata/statisticsbin0 -> 4398 bytes
-rw-r--r--csit.infra.dash/app/cdash/data/_metadata/trending_hoststackbin0 -> 9628 bytes
-rw-r--r--csit.infra.dash/app/cdash/data/_metadata/trending_mrrbin0 -> 9832 bytes
-rw-r--r--csit.infra.dash/app/cdash/data/_metadata/trending_ndrpdrbin0 -> 16091 bytes
-rw-r--r--csit.infra.dash/app/cdash/data/_metadata/trending_soakbin0 -> 9328 bytes
-rw-r--r--csit.infra.dash/app/cdash/data/data.py421
-rw-r--r--csit.infra.dash/app/cdash/data/data.yaml488
23 files changed, 921 insertions, 0 deletions
diff --git a/csit.infra.dash/app/cdash/data/__init__.py b/csit.infra.dash/app/cdash/data/__init__.py
new file mode 100644
index 0000000000..c6a5f639fe
--- /dev/null
+++ b/csit.infra.dash/app/cdash/data/__init__.py
@@ -0,0 +1,12 @@
+# Copyright (c) 2024 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/csit.infra.dash/app/cdash/data/_metadata/coverage_rls2306_device b/csit.infra.dash/app/cdash/data/_metadata/coverage_rls2306_device
new file mode 100644
index 0000000000..f619ce8a8e
--- /dev/null
+++ b/csit.infra.dash/app/cdash/data/_metadata/coverage_rls2306_device
Binary files differ
diff --git a/csit.infra.dash/app/cdash/data/_metadata/coverage_rls2306_ndrpdr b/csit.infra.dash/app/cdash/data/_metadata/coverage_rls2306_ndrpdr
new file mode 100644
index 0000000000..06bc618bea
--- /dev/null
+++ b/csit.infra.dash/app/cdash/data/_metadata/coverage_rls2306_ndrpdr
Binary files differ
diff --git a/csit.infra.dash/app/cdash/data/_metadata/coverage_rls2310_device b/csit.infra.dash/app/cdash/data/_metadata/coverage_rls2310_device
new file mode 100644
index 0000000000..f619ce8a8e
--- /dev/null
+++ b/csit.infra.dash/app/cdash/data/_metadata/coverage_rls2310_device
Binary files differ
diff --git a/csit.infra.dash/app/cdash/data/_metadata/coverage_rls2310_ndrpdr b/csit.infra.dash/app/cdash/data/_metadata/coverage_rls2310_ndrpdr
new file mode 100644
index 0000000000..06bc618bea
--- /dev/null
+++ b/csit.infra.dash/app/cdash/data/_metadata/coverage_rls2310_ndrpdr
Binary files differ
diff --git a/csit.infra.dash/app/cdash/data/_metadata/coverage_rls2402_device b/csit.infra.dash/app/cdash/data/_metadata/coverage_rls2402_device
new file mode 100644
index 0000000000..f619ce8a8e
--- /dev/null
+++ b/csit.infra.dash/app/cdash/data/_metadata/coverage_rls2402_device
Binary files differ
diff --git a/csit.infra.dash/app/cdash/data/_metadata/coverage_rls2402_ndrpdr b/csit.infra.dash/app/cdash/data/_metadata/coverage_rls2402_ndrpdr
new file mode 100644
index 0000000000..06bc618bea
--- /dev/null
+++ b/csit.infra.dash/app/cdash/data/_metadata/coverage_rls2402_ndrpdr
Binary files differ
diff --git a/csit.infra.dash/app/cdash/data/_metadata/iterative_rls2306_hoststack b/csit.infra.dash/app/cdash/data/_metadata/iterative_rls2306_hoststack
new file mode 100644
index 0000000000..993d16c18c
--- /dev/null
+++ b/csit.infra.dash/app/cdash/data/_metadata/iterative_rls2306_hoststack
Binary files differ
diff --git a/csit.infra.dash/app/cdash/data/_metadata/iterative_rls2306_mrr b/csit.infra.dash/app/cdash/data/_metadata/iterative_rls2306_mrr
new file mode 100644
index 0000000000..96832850b1
--- /dev/null
+++ b/csit.infra.dash/app/cdash/data/_metadata/iterative_rls2306_mrr
Binary files differ
diff --git a/csit.infra.dash/app/cdash/data/_metadata/iterative_rls2306_ndrpdr b/csit.infra.dash/app/cdash/data/_metadata/iterative_rls2306_ndrpdr
new file mode 100644
index 0000000000..2291bb8349
--- /dev/null
+++ b/csit.infra.dash/app/cdash/data/_metadata/iterative_rls2306_ndrpdr
Binary files differ
diff --git a/csit.infra.dash/app/cdash/data/_metadata/iterative_rls2310_hoststack b/csit.infra.dash/app/cdash/data/_metadata/iterative_rls2310_hoststack
new file mode 100644
index 0000000000..993d16c18c
--- /dev/null
+++ b/csit.infra.dash/app/cdash/data/_metadata/iterative_rls2310_hoststack
Binary files differ
diff --git a/csit.infra.dash/app/cdash/data/_metadata/iterative_rls2310_mrr b/csit.infra.dash/app/cdash/data/_metadata/iterative_rls2310_mrr
new file mode 100644
index 0000000000..96832850b1
--- /dev/null
+++ b/csit.infra.dash/app/cdash/data/_metadata/iterative_rls2310_mrr
Binary files differ
diff --git a/csit.infra.dash/app/cdash/data/_metadata/iterative_rls2310_ndrpdr b/csit.infra.dash/app/cdash/data/_metadata/iterative_rls2310_ndrpdr
new file mode 100644
index 0000000000..e76e6ab8e5
--- /dev/null
+++ b/csit.infra.dash/app/cdash/data/_metadata/iterative_rls2310_ndrpdr
Binary files differ
diff --git a/csit.infra.dash/app/cdash/data/_metadata/iterative_rls2402_hoststack b/csit.infra.dash/app/cdash/data/_metadata/iterative_rls2402_hoststack
new file mode 100644
index 0000000000..1e9c708253
--- /dev/null
+++ b/csit.infra.dash/app/cdash/data/_metadata/iterative_rls2402_hoststack
Binary files differ
diff --git a/csit.infra.dash/app/cdash/data/_metadata/iterative_rls2402_mrr b/csit.infra.dash/app/cdash/data/_metadata/iterative_rls2402_mrr
new file mode 100644
index 0000000000..416679acdb
--- /dev/null
+++ b/csit.infra.dash/app/cdash/data/_metadata/iterative_rls2402_mrr
Binary files differ
diff --git a/csit.infra.dash/app/cdash/data/_metadata/iterative_rls2402_ndrpdr b/csit.infra.dash/app/cdash/data/_metadata/iterative_rls2402_ndrpdr
new file mode 100644
index 0000000000..e76e6ab8e5
--- /dev/null
+++ b/csit.infra.dash/app/cdash/data/_metadata/iterative_rls2402_ndrpdr
Binary files differ
diff --git a/csit.infra.dash/app/cdash/data/_metadata/statistics b/csit.infra.dash/app/cdash/data/_metadata/statistics
new file mode 100644
index 0000000000..208e119735
--- /dev/null
+++ b/csit.infra.dash/app/cdash/data/_metadata/statistics
Binary files differ
diff --git a/csit.infra.dash/app/cdash/data/_metadata/trending_hoststack b/csit.infra.dash/app/cdash/data/_metadata/trending_hoststack
new file mode 100644
index 0000000000..f6ab72be9a
--- /dev/null
+++ b/csit.infra.dash/app/cdash/data/_metadata/trending_hoststack
Binary files differ
diff --git a/csit.infra.dash/app/cdash/data/_metadata/trending_mrr b/csit.infra.dash/app/cdash/data/_metadata/trending_mrr
new file mode 100644
index 0000000000..9d4e126e59
--- /dev/null
+++ b/csit.infra.dash/app/cdash/data/_metadata/trending_mrr
Binary files differ
diff --git a/csit.infra.dash/app/cdash/data/_metadata/trending_ndrpdr b/csit.infra.dash/app/cdash/data/_metadata/trending_ndrpdr
new file mode 100644
index 0000000000..3f8b85c66e
--- /dev/null
+++ b/csit.infra.dash/app/cdash/data/_metadata/trending_ndrpdr
Binary files differ
diff --git a/csit.infra.dash/app/cdash/data/_metadata/trending_soak b/csit.infra.dash/app/cdash/data/_metadata/trending_soak
new file mode 100644
index 0000000000..4502ca4f59
--- /dev/null
+++ b/csit.infra.dash/app/cdash/data/_metadata/trending_soak
Binary files differ
diff --git a/csit.infra.dash/app/cdash/data/data.py b/csit.infra.dash/app/cdash/data/data.py
new file mode 100644
index 0000000000..2c49992bf8
--- /dev/null
+++ b/csit.infra.dash/app/cdash/data/data.py
@@ -0,0 +1,421 @@
+# Copyright (c) 2024 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Prepare data for Plotly Dash applications.
+"""
+
+import logging
+import resource
+import awswrangler as wr
+import pandas as pd
+import pyarrow as pa
+
+from yaml import load, FullLoader, YAMLError
+from datetime import datetime, timedelta
+from time import time
+from pytz import UTC
+from awswrangler.exceptions import EmptyDataFrame, NoFilesFound
+from pyarrow.lib import ArrowInvalid, ArrowNotImplementedError
+
+from ..utils.constants import Constants as C
+
+
# If True, a pyarrow.Schema is generated instead of reading the data (see the
# dtype condition inside the method _write_parquet_schema).
# To generate a schema, select only one data set in the data.yaml file.
GENERATE_SCHEMA = False
+
+
class Data:
    """Gets the data from parquets and stores it for further use by dash
    applications.
    """

    def __init__(self, data_spec_file: str) -> None:
        """Initialize the Data object.

        :param data_spec_file: Path to file specifying the data to be read
            from parquets.
        :type data_spec_file: str
        :raises RuntimeError: If it is not possible to open data_spec_file or
            it is not a valid yaml file.
        """

        # Inputs:
        self._data_spec_file = data_spec_file

        # Specification of data to be read from parquets:
        self._data_spec = list()

        # Data frames to keep the data, one per application data type:
        self._data = {
            "statistics": pd.DataFrame(),
            "trending": pd.DataFrame(),
            "iterative": pd.DataFrame(),
            "coverage": pd.DataFrame()
        }

        # Read the specification from file:
        try:
            with open(self._data_spec_file, "r") as file_read:
                self._data_spec = load(file_read, Loader=FullLoader)
        except IOError as err:
            # Fix: the original f-string contained a stray trailing comma
            # ("{self._data_spec_file,}") which rendered the path as a tuple.
            raise RuntimeError(
                f"Not possible to open the file {self._data_spec_file}\n{err}"
            ) from err
        except YAMLError as err:
            raise RuntimeError(
                f"An error occurred while parsing the specification file "
                f"{self._data_spec_file}\n"
                f"{err}"
            ) from err

    @property
    def data(self):
        """Dictionary of dataframes, keyed by data type."""
        return self._data

    @staticmethod
    def _get_list_of_files(
            path,
            last_modified_begin=None,
            last_modified_end=None,
            days=None
        ) -> list:
        """Get list of interested files stored in S3 compatible storage and
        returns it.

        :param path: S3 prefix (accepts Unix shell-style wildcards)
            (e.g. s3://bucket/prefix) or list of S3 objects paths
            (e.g. [s3://bucket/key0, s3://bucket/key1]).
        :param last_modified_begin: Filter the s3 files by the Last modified
            date of the object. The filter is applied only after list all s3
            files.
        :param last_modified_end: Filter the s3 files by the Last modified
            date of the object. The filter is applied only after list all s3
            files.
        :param days: Number of days to filter.
        :type path: Union[str, List[str]]
        :type last_modified_begin: datetime, optional
        :type last_modified_end: datetime, optional
        :type days: integer, optional
        :returns: List of file names.
        :rtype: list
        """
        file_list = list()
        # "days" takes precedence over an explicit last_modified_begin.
        if days:
            last_modified_begin = datetime.now(tz=UTC) - timedelta(days=days)
        try:
            file_list = wr.s3.list_objects(
                path=path,
                suffix="parquet",
                last_modified_begin=last_modified_begin,
                last_modified_end=last_modified_end
            )
            logging.debug("\n".join(file_list))
        except NoFilesFound as err:
            logging.error(f"No parquets found.\n{err}")
        except EmptyDataFrame as err:
            logging.error(f"No data.\n{err}")

        return file_list

    def _validate_columns(self, data_type: str) -> str:
        """Check if all columns defined in the specification are present in
        the dataframe.

        :param data_type: The data type defined in data.yaml.
        :type data_type: str
        :returns: Error message if validation fails, otherwise empty string.
        :rtype: str
        """
        # Union of columns over all data sets of this data type:
        defined_columns = set()
        for data_set in self._data_spec:
            if data_set.get("data_type", str()) == data_type:
                defined_columns.update(data_set.get("columns", set()))

        if not defined_columns:
            return "No columns defined in the data set(s)."

        if self.data[data_type].empty:
            return "No data."

        ret_msg = str()
        for col in defined_columns:
            if col not in self.data[data_type].columns:
                if not ret_msg:
                    ret_msg = "Missing columns: "
                else:
                    ret_msg += ", "
                ret_msg += f"{col}"
        return ret_msg

    @staticmethod
    def _write_parquet_schema(
            path,
            partition_filter=None,
            columns=None,
            validate_schema=False,
            last_modified_begin=None,
            last_modified_end=None,
            days=None
        ) -> None:
        """Auxiliary function to write parquet schemas. Use it instead of
        "_create_dataframe_from_parquet" in "read_all_data".

        :param path: S3 prefix (accepts Unix shell-style wildcards)
            (e.g. s3://bucket/prefix) or list of S3 objects paths
            (e.g. [s3://bucket/key0, s3://bucket/key1]).
        :param partition_filter: Callback Function filters to apply on
            PARTITION columns (PUSH-DOWN filter). This function MUST receive
            a single argument (Dict[str, str]) where keys are partitions
            names and values are partitions values. Partitions values will be
            always strings extracted from S3. This function MUST return a
            bool, True to read the partition or False to ignore it. Ignored
            if dataset=False.
        :param columns: Names of columns to read from the file(s).
        :param validate_schema: Check that individual file schemas are all
            the same / compatible. Schemas within a folder prefix should all
            be the same. Disable if you have schemas that are different and
            want to disable this check.
        :param last_modified_begin: Filter the s3 files by the Last modified
            date of the object. The filter is applied only after list all s3
            files.
        :param last_modified_end: Filter the s3 files by the Last modified
            date of the object. The filter is applied only after list all s3
            files.
        :param days: Number of days to filter.
        :type path: Union[str, List[str]]
        :type partition_filter: Callable[[Dict[str, str]], bool], optional
        :type columns: List[str], optional
        :type validate_schema: bool, optional
        :type last_modified_begin: datetime, optional
        :type last_modified_end: datetime, optional
        :type days: integer, optional
        """
        if days:
            last_modified_begin = datetime.now(tz=UTC) - timedelta(days=days)

        # chunked=1 yields one-row dataframes so only a minimal amount of
        # data is fetched before a suitable row is found.
        df = wr.s3.read_parquet(
            path=path,
            path_suffix="parquet",
            ignore_empty=True,
            validate_schema=validate_schema,
            use_threads=True,
            dataset=True,
            columns=columns,
            partition_filter=partition_filter,
            last_modified_begin=last_modified_begin,
            last_modified_end=last_modified_end,
            chunked=1
        )

        for itm in df:
            try:
                # Specify the condition or remove it:
                if all((
                        pd.api.types.is_string_dtype(itm["column_name"]),
                        pd.api.types.is_string_dtype(itm["telemetry"][0])
                    )):
                    schema = pa.Schema.from_pandas(itm)
                    pa.parquet.write_metadata(
                        schema, f"{C.PATH_TO_SCHEMAS}_tmp_schema"
                    )
                    logging.info(schema.to_string(
                        truncate_metadata=False,
                        show_field_metadata=True,
                        show_schema_metadata=True
                    ))
                    break
            except KeyError:
                # The chunk does not have the inspected columns, try the next
                # one.
                pass

    @staticmethod
    def _create_dataframe_from_parquet(
            path,
            partition_filter=None,
            columns=None,
            validate_schema=False,
            last_modified_begin=None,
            last_modified_end=None,
            days=None,
            schema=None
        ) -> pd.DataFrame:
        """Read parquet stored in S3 compatible storage and returns Pandas
        Dataframe.

        :param path: S3 prefix (accepts Unix shell-style wildcards)
            (e.g. s3://bucket/prefix) or list of S3 objects paths
            (e.g. [s3://bucket/key0, s3://bucket/key1]).
        :param partition_filter: Callback Function filters to apply on
            PARTITION columns (PUSH-DOWN filter). This function MUST receive
            a single argument (Dict[str, str]) where keys are partitions
            names and values are partitions values. Partitions values will be
            always strings extracted from S3. This function MUST return a
            bool, True to read the partition or False to ignore it. Ignored
            if dataset=False.
        :param columns: Names of columns to read from the file(s).
        :param validate_schema: Check that individual file schemas are all
            the same / compatible. Schemas within a folder prefix should all
            be the same. Disable if you have schemas that are different and
            want to disable this check.
        :param last_modified_begin: Filter the s3 files by the Last modified
            date of the object. The filter is applied only after list all s3
            files.
        :param last_modified_end: Filter the s3 files by the Last modified
            date of the object. The filter is applied only after list all s3
            files.
        :param days: Number of days to filter.
        :param schema: Path to schema to use when reading data from the
            parquet.
        :type path: Union[str, List[str]]
        :type partition_filter: Callable[[Dict[str, str]], bool], optional
        :type columns: List[str], optional
        :type validate_schema: bool, optional
        :type last_modified_begin: datetime, optional
        :type last_modified_end: datetime, optional
        :type days: integer, optional
        :type schema: string
        :returns: Pandas DataFrame or an empty DataFrame if the data cannot
            be fetched.
        :rtype: DataFrame
        """
        df = pd.DataFrame()
        start = time()
        if days:
            last_modified_begin = datetime.now(tz=UTC) - timedelta(days=days)
        try:
            df = wr.s3.read_parquet(
                path=path,
                path_suffix="parquet",
                ignore_empty=True,
                schema=schema,
                validate_schema=validate_schema,
                use_threads=True,
                dataset=True,
                columns=columns,
                partition_filter=partition_filter,
                last_modified_begin=last_modified_begin,
                last_modified_end=last_modified_end,
                dtype_backend="pyarrow"
            )

            df.info(verbose=True, memory_usage="deep")
            logging.debug(
                f"\nCreation of dataframe {path} took: {time() - start}\n"
            )
        except (ArrowInvalid, ArrowNotImplementedError) as err:
            logging.error(f"Reading of data from parquets FAILED.\n{repr(err)}")
        except NoFilesFound as err:
            logging.error(
                f"Reading of data from parquets FAILED.\n"
                f"No parquets found in specified time period.\n"
                f"Nr of days: {days}\n"
                f"last_modified_begin: {last_modified_begin}\n"
                f"{repr(err)}"
            )
        except EmptyDataFrame as err:
            logging.error(
                f"Reading of data from parquets FAILED.\n"
                f"No data in parquets in specified time period.\n"
                f"Nr of days: {days}\n"
                f"last_modified_begin: {last_modified_begin}\n"
                f"{repr(err)}"
            )

        return df

    def read_all_data(self, days: int = None) -> dict:
        """Read all data necessary for all applications.

        :param days: Number of days to filter. If None, all data will be
            downloaded.
        :type days: int
        :returns: A dictionary where keys are names of parquets and values
            are the pandas dataframes with fetched data.
        :rtype: dict(str: pandas.DataFrame)
        """

        data_lists = {
            "statistics": list(),
            "trending": list(),
            "iterative": list(),
            "coverage": list()
        }

        logging.info("\n\nReading data:\n" + "-" * 13 + "\n")
        for data_set in self._data_spec:
            logging.info(
                f"\n\nReading data for {data_set['data_type']} "
                f"{data_set['partition_name']} {data_set.get('release', '')}\n"
            )
            schema_file = data_set.get("schema", None)
            if schema_file:
                try:
                    schema = pa.parquet.read_schema(
                        f"{C.PATH_TO_SCHEMAS}{schema_file}"
                    )
                except FileNotFoundError as err:
                    logging.error(repr(err))
                    logging.error("Proceeding without schema.")
                    schema = None
            else:
                schema = None

            # Bind the current data set as a default argument so the filter
            # does not rely on the loop variable (late-binding hazard).
            def partition_filter(part, ds=data_set):
                """Push-down filter selecting only the wanted partition."""
                return part[ds["partition"]] == ds["partition_name"]

            # Only continuously growing data sets are time-limited:
            if data_set["data_type"] in ("trending", "statistics"):
                time_period = days
            else:
                time_period = None

            if GENERATE_SCHEMA:
                # Generate schema (development mode) instead of reading data:
                Data._write_parquet_schema(
                    path=data_set["path"],
                    partition_filter=partition_filter,
                    columns=data_set.get("columns", None),
                    days=time_period
                )
                # Fix: return the declared dict type instead of None.
                return self._data

            # Read data:
            data = Data._create_dataframe_from_parquet(
                path=data_set["path"],
                partition_filter=partition_filter,
                columns=data_set.get("columns", None),
                days=time_period,
                schema=schema
            )
            if data_set["data_type"] in ("iterative", "coverage"):
                data["release"] = data_set["release"]
                data["release"] = data["release"].astype("category")

            data_lists[data_set["data_type"]].append(data)

        logging.info(
            "\n\nData post-processing, validation and summary:\n" +
            "-" * 45 + "\n"
        )
        for key in self._data.keys():
            logging.info(f"\n\nDataframe {key}:\n")
            # Fix: pd.concat raises ValueError on an empty list; keep the
            # pre-initialized empty dataframe when nothing was read.
            if data_lists[key]:
                self._data[key] = pd.concat(
                    data_lists[key],
                    ignore_index=True,
                    copy=False
                )
            self._data[key].info(verbose=True, memory_usage="deep")
            err_msg = self._validate_columns(key)
            if err_msg:
                self._data[key] = pd.DataFrame()
                logging.error(
                    f"Data validation FAILED.\n"
                    f"{err_msg}\n"
                    "Generated dataframe replaced by an empty dataframe."
                )

        mem_alloc = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
        logging.info(f"\n\nMemory allocation: {mem_alloc:.0f}MB\n")

        return self._data
diff --git a/csit.infra.dash/app/cdash/data/data.yaml b/csit.infra.dash/app/cdash/data/data.yaml
new file mode 100644
index 0000000000..ed5fc0b269
--- /dev/null
+++ b/csit.infra.dash/app/cdash/data/data.yaml
@@ -0,0 +1,488 @@
+- data_type: statistics
+ partition: stats_type
+ partition_name: sra
+ path: s3://fdio-docs-s3-cloudfront-index/csit/parquet/stats
+ schema: statistics
+ columns:
+ - job
+ - build
+ - start_time
+ - duration
+- data_type: trending
+ partition: test_type
+ partition_name: mrr
+ path: s3://fdio-docs-s3-cloudfront-index/csit/parquet/trending
+ schema: trending_mrr
+ columns:
+ - job
+ - build
+ - dut_type
+ - dut_version
+ - hosts
+ - start_time
+ - passed
+ - test_id
+ - version
+ - result_receive_rate_rate_avg
+ - result_receive_rate_rate_stdev
+ - result_receive_rate_rate_unit
+ - result_receive_rate_bandwidth_avg
+ - result_receive_rate_bandwidth_stdev
+ - result_receive_rate_bandwidth_unit
+ - telemetry
+- data_type: trending
+ partition: test_type
+ partition_name: ndrpdr
+ path: s3://fdio-docs-s3-cloudfront-index/csit/parquet/trending
+ schema: trending_ndrpdr
+ columns:
+ - job
+ - build
+ - dut_type
+ - dut_version
+ - hosts
+ - start_time
+ - passed
+ - test_id
+ - version
+ - result_pdr_lower_rate_unit
+ - result_pdr_lower_rate_value
+ - result_ndr_lower_rate_unit
+ - result_ndr_lower_rate_value
+ - result_pdr_lower_bandwidth_unit
+ - result_pdr_lower_bandwidth_value
+ - result_ndr_lower_bandwidth_unit
+ - result_ndr_lower_bandwidth_value
+ - result_latency_reverse_pdr_90_hdrh
+ - result_latency_reverse_pdr_50_hdrh
+ - result_latency_reverse_pdr_10_hdrh
+ - result_latency_reverse_pdr_0_hdrh
+ - result_latency_forward_pdr_90_hdrh
+ - result_latency_forward_pdr_50_avg
+ - result_latency_forward_pdr_50_hdrh
+ - result_latency_forward_pdr_50_unit
+ - result_latency_forward_pdr_10_hdrh
+ - result_latency_forward_pdr_0_hdrh
+ - telemetry
+- data_type: trending
+ partition: test_type
+ partition_name: hoststack
+ path: s3://fdio-docs-s3-cloudfront-index/csit/parquet/trending
+ schema: trending_hoststack
+ columns:
+ - job
+ - build
+ - dut_type
+ - dut_version
+ - hosts
+ - tg_type
+ - result_bandwidth_unit
+ - result_bandwidth_value
+ - result_rate_unit
+ - result_rate_value
+ - result_latency_unit
+ - result_latency_value
+ - start_time
+ - passed
+ - telemetry
+ - test_id
+ - version
+- data_type: trending
+ partition: test_type
+ partition_name: soak
+ path: s3://fdio-docs-s3-cloudfront-index/csit/parquet/trending
+ schema: trending_soak
+ columns:
+ - job
+ - build
+ - dut_type
+ - dut_version
+ - hosts
+ - tg_type
+ - result_critical_rate_lower_bandwidth_unit
+ - result_critical_rate_lower_bandwidth_value
+ - result_critical_rate_lower_rate_unit
+ - result_critical_rate_lower_rate_value
+ - start_time
+ - passed
+ - telemetry
+ - test_id
+ - version
+- data_type: iterative
+ partition: test_type
+ partition_name: mrr
+ release: rls2306
+ path: s3://fdio-docs-s3-cloudfront-index/csit/parquet/iterative_rls2306
+ schema: iterative_rls2306_mrr
+ columns:
+ - job
+ - build
+ - dut_type
+ - dut_version
+ - start_time
+ - passed
+ - test_id
+ - version
+ - result_receive_rate_rate_avg
+ - result_receive_rate_rate_stdev
+ - result_receive_rate_rate_unit
+ - result_receive_rate_rate_values
+- data_type: iterative
+ partition: test_type
+ partition_name: ndrpdr
+ release: rls2306
+ path: s3://fdio-docs-s3-cloudfront-index/csit/parquet/iterative_rls2306
+ schema: iterative_rls2306_ndrpdr
+ columns:
+ - job
+ - build
+ - dut_type
+ - dut_version
+ - start_time
+ - passed
+ - test_id
+ - version
+ - result_pdr_lower_rate_unit
+ - result_pdr_lower_rate_value
+ - result_ndr_lower_rate_unit
+ - result_ndr_lower_rate_value
+ - result_latency_reverse_pdr_90_hdrh
+ - result_latency_reverse_pdr_50_hdrh
+ - result_latency_reverse_pdr_10_hdrh
+ - result_latency_reverse_pdr_0_hdrh
+ - result_latency_forward_pdr_90_hdrh
+ - result_latency_forward_pdr_50_avg
+ - result_latency_forward_pdr_50_hdrh
+ - result_latency_forward_pdr_50_unit
+ - result_latency_forward_pdr_10_hdrh
+ - result_latency_forward_pdr_0_hdrh
+- data_type: iterative
+ partition: test_type
+ partition_name: hoststack
+ release: rls2306
+ path: s3://fdio-docs-s3-cloudfront-index/csit/parquet/iterative_rls2306
+ schema: iterative_rls2306_hoststack
+ columns:
+ - job
+ - build
+ - dut_type
+ - dut_version
+ - tg_type
+ - result_bandwidth_unit
+ - result_bandwidth_value
+ - result_rate_unit
+ - result_rate_value
+ - start_time
+ - passed
+ - test_id
+ - version
+- data_type: coverage
+ partition: test_type
+ partition_name: ndrpdr
+ release: rls2306
+ path: s3://fdio-docs-s3-cloudfront-index/csit/parquet/coverage_rls2306
+ schema: coverage_rls2306_ndrpdr
+ columns:
+ - job
+ - build
+ - dut_type
+ - dut_version
+ - tg_type
+ - start_time
+ - passed
+ - test_id
+ - version
+ - result_pdr_lower_rate_unit
+ - result_pdr_lower_rate_value
+ - result_ndr_lower_rate_value
+ - result_pdr_lower_bandwidth_value
+ - result_ndr_lower_bandwidth_value
+ - result_latency_reverse_pdr_90_hdrh
+ - result_latency_reverse_pdr_50_hdrh
+ - result_latency_reverse_pdr_10_hdrh
+ - result_latency_forward_pdr_90_hdrh
+ - result_latency_forward_pdr_50_hdrh
+ - result_latency_forward_pdr_10_hdrh
+- data_type: coverage
+ partition: test_type
+ partition_name: device
+ release: rls2306
+ path: s3://fdio-docs-s3-cloudfront-index/csit/parquet/coverage_rls2306
+ schema: coverage_rls2306_device
+ columns:
+ - job
+ - build
+ - dut_type
+ - dut_version
+ - passed
+ - test_id
+ - version
+- data_type: iterative
+ partition: test_type
+ partition_name: mrr
+ release: rls2310
+ path: s3://fdio-docs-s3-cloudfront-index/csit/parquet/iterative_rls2310
+ schema: iterative_rls2310_mrr
+ columns:
+ - job
+ - build
+ - dut_type
+ - dut_version
+ - start_time
+ - passed
+ - test_id
+ - version
+ - result_receive_rate_rate_avg
+ - result_receive_rate_rate_stdev
+ - result_receive_rate_rate_unit
+ - result_receive_rate_rate_values
+- data_type: iterative
+ partition: test_type
+ partition_name: ndrpdr
+ release: rls2310
+ path: s3://fdio-docs-s3-cloudfront-index/csit/parquet/iterative_rls2310
+ schema: iterative_rls2310_ndrpdr
+ columns:
+ - job
+ - build
+ - dut_type
+ - dut_version
+ - start_time
+ - passed
+ - test_id
+ - version
+ - result_pdr_lower_rate_unit
+ - result_pdr_lower_rate_value
+ - result_ndr_lower_rate_unit
+ - result_ndr_lower_rate_value
+ - result_pdr_lower_bandwidth_unit
+ - result_pdr_lower_bandwidth_value
+ - result_ndr_lower_bandwidth_unit
+ - result_ndr_lower_bandwidth_value
+ - result_latency_reverse_pdr_90_hdrh
+ - result_latency_reverse_pdr_50_hdrh
+ - result_latency_reverse_pdr_10_hdrh
+ - result_latency_reverse_pdr_0_hdrh
+ - result_latency_forward_pdr_90_hdrh
+ - result_latency_forward_pdr_50_avg
+ - result_latency_forward_pdr_50_hdrh
+ - result_latency_forward_pdr_50_unit
+ - result_latency_forward_pdr_10_hdrh
+ - result_latency_forward_pdr_0_hdrh
+- data_type: iterative
+ partition: test_type
+ partition_name: hoststack
+ release: rls2310
+ path: s3://fdio-docs-s3-cloudfront-index/csit/parquet/iterative_rls2310
+ schema: iterative_rls2310_hoststack
+ columns:
+ - job
+ - build
+ - dut_type
+ - dut_version
+ - tg_type
+ - result_bandwidth_unit
+ - result_bandwidth_value
+ - result_rate_unit
+ - result_rate_value
+ - start_time
+ - passed
+ - test_id
+ - version
+- data_type: coverage
+ partition: test_type
+ partition_name: ndrpdr
+ release: rls2310
+ path: s3://fdio-docs-s3-cloudfront-index/csit/parquet/coverage_rls2310
+ schema: coverage_rls2310_ndrpdr
+ columns:
+ - job
+ - build
+ - dut_type
+ - dut_version
+ - tg_type
+ - start_time
+ - passed
+ - test_id
+ - version
+ - result_pdr_lower_rate_unit
+ - result_pdr_lower_rate_value
+ - result_ndr_lower_rate_value
+ - result_pdr_lower_bandwidth_value
+ - result_ndr_lower_bandwidth_value
+ - result_latency_reverse_pdr_90_hdrh
+ - result_latency_reverse_pdr_50_hdrh
+ - result_latency_reverse_pdr_10_hdrh
+ - result_latency_forward_pdr_90_hdrh
+ - result_latency_forward_pdr_50_hdrh
+ - result_latency_forward_pdr_10_hdrh
+- data_type: coverage
+ partition: test_type
+ partition_name: mrr
+ release: rls2310
+ path: s3://fdio-docs-s3-cloudfront-index/csit/parquet/coverage_rls2310
+ schema: iterative_rls2310_mrr  # NOTE(review): reuses the iterative mrr schema; no dedicated coverage mrr schema exists in _metadata — confirm intentional
+ columns:
+ - job
+ - build
+ - dut_type
+ - dut_version
+ - start_time
+ - passed
+ - test_id
+ - version
+ - result_receive_rate_rate_avg
+ - result_receive_rate_rate_stdev
+ - result_receive_rate_rate_unit
+- data_type: coverage
+ partition: test_type
+ partition_name: device
+ release: rls2310
+ path: s3://fdio-docs-s3-cloudfront-index/csit/parquet/coverage_rls2310
+ schema: coverage_rls2310_device
+ columns:
+ - job
+ - build
+ - dut_type
+ - dut_version
+ - passed
+ - test_id
+ - version
+- data_type: iterative
+ partition: test_type
+ partition_name: mrr
+ release: rls2402
+ path: s3://fdio-docs-s3-cloudfront-index/csit/parquet/iterative_rls2402
+ schema: iterative_rls2402_mrr
+ columns:
+ - job
+ - build
+ - dut_type
+ - dut_version
+ # - hosts
+ - start_time
+ - passed
+ - test_id
+ - version
+ - result_receive_rate_rate_avg
+ - result_receive_rate_rate_stdev
+ - result_receive_rate_rate_unit
+ - result_receive_rate_rate_values
+ - result_receive_rate_bandwidth_avg
+ - result_receive_rate_bandwidth_stdev
+ - result_receive_rate_bandwidth_unit
+ - result_receive_rate_bandwidth_values
+- data_type: iterative
+ partition: test_type
+ partition_name: ndrpdr
+ release: rls2402
+ path: s3://fdio-docs-s3-cloudfront-index/csit/parquet/iterative_rls2402
+ schema: iterative_rls2402_ndrpdr
+ columns:
+ - job
+ - build
+ - dut_type
+ - dut_version
+ # - hosts
+ - start_time
+ - passed
+ - test_id
+ - version
+ - result_pdr_lower_rate_unit
+ - result_pdr_lower_rate_value
+ - result_ndr_lower_rate_unit
+ - result_ndr_lower_rate_value
+ - result_pdr_lower_bandwidth_unit
+ - result_pdr_lower_bandwidth_value
+ - result_ndr_lower_bandwidth_unit
+ - result_ndr_lower_bandwidth_value
+ - result_latency_reverse_pdr_90_hdrh
+ - result_latency_reverse_pdr_50_hdrh
+ - result_latency_reverse_pdr_10_hdrh
+ - result_latency_reverse_pdr_0_hdrh
+ - result_latency_forward_pdr_90_hdrh
+ - result_latency_forward_pdr_50_avg
+ - result_latency_forward_pdr_50_hdrh
+ - result_latency_forward_pdr_50_unit
+ - result_latency_forward_pdr_10_hdrh
+ - result_latency_forward_pdr_0_hdrh
+- data_type: iterative
+ partition: test_type
+ partition_name: hoststack
+ release: rls2402
+ path: s3://fdio-docs-s3-cloudfront-index/csit/parquet/iterative_rls2402
+ schema: iterative_rls2402_hoststack
+ columns:
+ - job
+ - build
+ - dut_type
+ - dut_version
+ - hosts
+ - tg_type
+ - result_bandwidth_unit
+ - result_bandwidth_value
+ - result_rate_unit
+ - result_rate_value
+ - start_time
+ - passed
+ - test_id
+ - version
+- data_type: coverage
+ partition: test_type
+ partition_name: ndrpdr
+ release: rls2402
+ path: s3://fdio-docs-s3-cloudfront-index/csit/parquet/coverage_rls2402
+ schema: coverage_rls2402_ndrpdr
+ columns:
+ - job
+ - build
+ - dut_type
+ - dut_version
+ - tg_type
+ - start_time
+ - passed
+ - test_id
+ - version
+ - result_pdr_lower_rate_unit
+ - result_pdr_lower_rate_value
+ - result_ndr_lower_rate_value
+ - result_pdr_lower_bandwidth_value
+ - result_ndr_lower_bandwidth_value
+ - result_latency_reverse_pdr_90_hdrh
+ - result_latency_reverse_pdr_50_hdrh
+ - result_latency_reverse_pdr_10_hdrh
+ - result_latency_forward_pdr_90_hdrh
+ - result_latency_forward_pdr_50_hdrh
+ - result_latency_forward_pdr_10_hdrh
+- data_type: coverage
+ partition: test_type
+ partition_name: mrr
+ release: rls2402
+ path: s3://fdio-docs-s3-cloudfront-index/csit/parquet/coverage_rls2402
+ schema: iterative_rls2402_mrr  # NOTE(review): reuses the iterative mrr schema; no dedicated coverage mrr schema exists in _metadata — confirm intentional
+ columns:
+ - job
+ - build
+ - dut_type
+ - dut_version
+ - start_time
+ - passed
+ - test_id
+ - version
+ - result_receive_rate_rate_avg
+ - result_receive_rate_rate_stdev
+ - result_receive_rate_rate_unit
+- data_type: coverage
+ partition: test_type
+ partition_name: device
+ release: rls2402
+ path: s3://fdio-docs-s3-cloudfront-index/csit/parquet/coverage_rls2402
+ schema: coverage_rls2402_device
+ columns:
+ - job
+ - build
+ - dut_type
+ - dut_version
+ - passed
+ - test_id
+ - version