authorPeter Mikus <pmikus@cisco.com>2022-04-13 12:29:45 +0200
committerTibor Frank <tifrank@cisco.com>2022-04-14 08:18:21 +0200
commit1832b3e0127df9fefc4421502b5cd8da288ff576 (patch)
tree6c96eb8df8495abc69572383c21afc4736514332 /resources/tools/dash/app/pal/data/data.py
parent45615cddd926540756f19328cad7078cfc9a1219 (diff)
feat(uti): Refactor grid layout
Signed-off-by: Peter Mikus <pmikus@cisco.com>
Change-Id: I992ac4f2a4ecfa779c9fbf393a2f4dfa8cea704f
Diffstat (limited to 'resources/tools/dash/app/pal/data/data.py')
-rw-r--r-- resources/tools/dash/app/pal/data/data.py | 31
1 file changed, 20 insertions(+), 11 deletions(-)
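
The change below threads an optional days argument through each public read method into _create_dataframe_from_parquet(), which converts it into a last_modified_begin timestamp so that awswrangler only reads parquet files modified within the last given number of days. A minimal usage sketch, assuming a Data instance is already constructed (the constructor arguments are not part of this diff and are hypothetical):

    # Hypothetical caller; how Data is constructed is outside this diff.
    data = Data(data_spec_file="data.yaml")  # assumed constructor signature

    # Read only statistics files modified in the last 30 days.
    df_stats = data.read_stats(days=30)

    # Omitting days keeps the old behavior: no time filter is applied.
    df_mrr = data.read_trending_mrr()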
diff --git a/resources/tools/dash/app/pal/data/data.py b/resources/tools/dash/app/pal/data/data.py
index 859c7d3458..a3b6c2a478 100644
--- a/resources/tools/dash/app/pal/data/data.py
+++ b/resources/tools/dash/app/pal/data/data.py
@@ -13,10 +13,12 @@
"""Prepare data for Plotly Dash."""
+from datetime import datetime, timedelta
import logging
from time import time
import awswrangler as wr
+from pytz import UTC
from yaml import load, FullLoader, YAMLError
from awswrangler.exceptions import EmptyDataFrame, NoFilesFound
@@ -82,7 +84,7 @@ class Data:
def _create_dataframe_from_parquet(self,
path, partition_filter=None, columns=None,
validate_schema=False, last_modified_begin=None,
- last_modified_end=None):
+ last_modified_end=None, days=None):
"""Read parquet stored in S3 compatible storage and returns Pandas
Dataframe.
@@ -116,6 +118,8 @@ class Data:
"""
df = None
start = time()
+ if days:
+ last_modified_begin = datetime.now(tz=UTC) - timedelta(days=days)
try:
df = wr.s3.read_parquet(
path=path,
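
When days is given, the helper derives the lower bound on the S3 object modification time as a timezone-aware UTC datetime; awswrangler expects an aware value for last_modified_begin, which is why pytz.UTC is imported above. The computation in isolation, as a minimal sketch:

    from datetime import datetime, timedelta
    from pytz import UTC

    days = 30  # example value; in the diff this arrives from the caller
    last_modified_begin = datetime.now(tz=UTC) - timedelta(days=days)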
@@ -144,7 +148,7 @@ class Data:
self._data = df
return df
- def read_stats(self):
+ def read_stats(self, days=None):
"""Read Suite Result Analysis data partition from parquet.
"""
lambda_f = lambda part: True if part["stats_type"] == "sra" else False
@@ -152,10 +156,11 @@ class Data:
return self._create_dataframe_from_parquet(
path=self._get_path("statistics"),
partition_filter=lambda_f,
- columns=None # Get all columns.
+ columns=None, # Get all columns.
+ days=days
)
- def read_trending_mrr(self):
+ def read_trending_mrr(self, days=None):
"""Read MRR data partition from parquet.
"""
lambda_f = lambda part: True if part["test_type"] == "mrr" else False
@@ -163,10 +168,11 @@ class Data:
return self._create_dataframe_from_parquet(
path=self._get_path("trending-mrr"),
partition_filter=lambda_f,
- columns=self._get_columns("trending-mrr")
+ columns=self._get_columns("trending-mrr"),
+ days=days
)
- def read_trending_ndrpdr(self):
+ def read_trending_ndrpdr(self, days=None):
"""Read NDRPDR data partition from iterative parquet.
"""
lambda_f = lambda part: True if part["test_type"] == "ndrpdr" else False
@@ -174,10 +180,11 @@ class Data:
return self._create_dataframe_from_parquet(
path=self._get_path("trending-ndrpdr"),
partition_filter=lambda_f,
- columns=self._get_columns("trending-ndrpdr")
+ columns=self._get_columns("trending-ndrpdr"),
+ days=days
)
- def read_iterative_mrr(self):
+ def read_iterative_mrr(self, days=None):
"""Read MRR data partition from iterative parquet.
"""
lambda_f = lambda part: True if part["test_type"] == "mrr" else False
@@ -185,10 +192,11 @@ class Data:
return self._create_dataframe_from_parquet(
path=self._get_path("iterative-mrr"),
partition_filter=lambda_f,
- columns=self._get_columns("iterative-mrr")
+ columns=self._get_columns("iterative-mrr"),
+ days=days
)
- def read_iterative_ndrpdr(self):
+ def read_iterative_ndrpdr(self, days=None):
"""Read NDRPDR data partition from parquet.
"""
lambda_f = lambda part: True if part["test_type"] == "ndrpdr" else False
@@ -196,5 +204,6 @@ class Data:
return self._create_dataframe_from_parquet(
path=self._get_path("iterative-ndrpdr"),
partition_filter=lambda_f,
- columns=self._get_columns("iterative-ndrpdr")
+ columns=self._get_columns("iterative-ndrpdr"),
+ days=days
)
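
All five readers now share the same shape: a partition_filter lambda selects the partition, columns trims the schema, and days bounds the file modification time. For reference, a self-contained sketch of how the two filters combine in wr.s3.read_parquet (the bucket path, partition layout, and dataset flag are assumptions, not taken from this diff):

    from datetime import datetime, timedelta

    import awswrangler as wr
    from pytz import UTC

    df = wr.s3.read_parquet(
        path="s3://example-bucket/trending-mrr/",  # hypothetical path
        dataset=True,  # required for partition_filter; assumed set elsewhere
        partition_filter=lambda part: part["test_type"] == "mrr",
        last_modified_begin=datetime.now(tz=UTC) - timedelta(days=30),
    )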