Diffstat (limited to 'resources')
-rw-r--r--  resources/tools/presentation/generator_tables.py  61
-rw-r--r--  resources/tools/presentation/utils.py              29
2 files changed, 58 insertions, 32 deletions
diff --git a/resources/tools/presentation/generator_tables.py b/resources/tools/presentation/generator_tables.py
index a5a573ad94..724519f2d1 100644
--- a/resources/tools/presentation/generator_tables.py
+++ b/resources/tools/presentation/generator_tables.py
@@ -796,11 +796,14 @@ def table_performance_trending_dashboard(table, input_data):
else:
tmp_classification = "outlier" if classification == "failure" \
else classification
+ index = None
for idx in range(first_idx, len(classification_lst)):
if classification_lst[idx] == tmp_classification:
if rel_change_lst[idx]:
index = idx
break
+ if index is None:
+ continue
for idx in range(index+1, len(classification_lst)):
if classification_lst[idx] == tmp_classification:
if rel_change_lst[idx]:
@@ -808,31 +811,43 @@ def table_performance_trending_dashboard(table, input_data):
abs(rel_change_lst[index])):
index = idx
- trend = round(float(median_lst[-1]) / 1000000, 2) \
- if not isnan(median_lst[-1]) else '-'
- sample = round(float(sample_lst[index]) / 1000000, 2) \
- if not isnan(sample_lst[index]) else '-'
- rel_change = rel_change_lst[index] \
- if rel_change_lst[index] is not None else '-'
- if not isnan(max_median):
- if not isnan(sample_lst[index]):
- long_trend_threshold = max_median * \
- (table["long-trend-threshold"] / 100)
- if sample_lst[index] < long_trend_threshold:
- long_trend_classification = "failure"
+ logging.info("{}".format(name))
+ logging.info("sample_lst: {} - {}".format(len(sample_lst), sample_lst))
+ logging.info("median_lst: {} - {}".format(len(median_lst), median_lst))
+ logging.info("rel_change: {} - {}".format(len(rel_change_lst), rel_change_lst))
+ logging.info("classn_lst: {} - {}".format(len(classification_lst), classification_lst))
+ logging.info("index: {}".format(index))
+ logging.info("classifica: {}".format(classification))
+
+ try:
+ trend = round(float(median_lst[-1]) / 1000000, 2) \
+ if not isnan(median_lst[-1]) else '-'
+ sample = round(float(sample_lst[index]) / 1000000, 2) \
+ if not isnan(sample_lst[index]) else '-'
+ rel_change = rel_change_lst[index] \
+ if rel_change_lst[index] is not None else '-'
+ if not isnan(max_median):
+ if not isnan(sample_lst[index]):
+ long_trend_threshold = max_median * \
+ (table["long-trend-threshold"] / 100)
+ if sample_lst[index] < long_trend_threshold:
+ long_trend_classification = "failure"
+ else:
+ long_trend_classification = '-'
else:
- long_trend_classification = '-'
+ long_trend_classification = "failure"
else:
- long_trend_classification = "failure"
- else:
- long_trend_classification = '-'
- tbl_lst.append([name,
- trend,
- long_trend_classification,
- classification,
- '-' if classification == "normal" else sample,
- '-' if classification == "normal" else rel_change,
- nr_outliers])
+ long_trend_classification = '-'
+ tbl_lst.append([name,
+ trend,
+ long_trend_classification,
+ classification,
+ '-' if classification == "normal" else sample,
+ '-' if classification == "normal" else rel_change,
+ nr_outliers])
+ except IndexError as err:
+ logging.error("{}".format(err))
+ continue
# Sort the table according to the classification
tbl_sorted = list()
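The generator_tables.py hunks above add three defensive steps: index starts as None, a test with no matching sample is skipped with continue, and the row construction is wrapped in try/except so a short data series logs an error instead of aborting the whole dashboard table. The following is a minimal, self-contained sketch of that pattern, not the CSIT code itself: the data is invented and the two index-search loops of the original are collapsed into one for brevity.

# Sketch of the defensive pattern introduced above: index starts as None,
# a test with no matching sample is skipped, and IndexError is logged
# instead of aborting the table.  All data below is invented.
import logging
from math import isnan

classification_lst = ["normal", "failure", "failure", "normal"]
rel_change_lst = [0.01, -0.35, -0.12, 0.02]
median_lst = [4.1e6, 4.0e6, 2.6e6, 4.0e6]

index = None
for idx in range(len(classification_lst)):
    if classification_lst[idx] == "failure" and rel_change_lst[idx]:
        # keep the sample with the smallest absolute relative change
        if index is None or \
                abs(rel_change_lst[idx]) < abs(rel_change_lst[index]):
            index = idx

if index is None:
    print("no matching sample, test skipped")
else:
    try:
        trend = round(float(median_lst[-1]) / 1000000, 2) \
            if not isnan(median_lst[-1]) else '-'
        print("trend: {}, rel change: {}".format(trend,
                                                 rel_change_lst[index]))
    except IndexError as err:
        logging.error("{}".format(err))

Catching IndexError per test and continuing, as the patch does, keeps one malformed data series from dropping the remaining rows of the table.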
diff --git a/resources/tools/presentation/utils.py b/resources/tools/presentation/utils.py
index 2fbf70cadc..a15742a21f 100644
--- a/resources/tools/presentation/utils.py
+++ b/resources/tools/presentation/utils.py
@@ -81,15 +81,26 @@ def remove_outliers(input_list, outlier_const=1.5, window=14):
:rtype: list of floats
"""
- input_series = pd.Series()
- for index, value in enumerate(input_list):
- item_pd = pd.Series([value, ], index=[index, ])
- input_series.append(item_pd)
- output_series, _ = split_outliers(input_series, outlier_const=outlier_const,
- window=window)
- output_list = [y for x, y in output_series.items() if not np.isnan(y)]
-
- return output_list
+ data = np.array(input_list)
+ upper_quartile = np.percentile(data, 75)
+ lower_quartile = np.percentile(data, 25)
+ iqr = (upper_quartile - lower_quartile) * outlier_const
+ quartile_set = (lower_quartile - iqr, upper_quartile + iqr)
+ result_lst = list()
+ for y in data.tolist():
+ if quartile_set[0] <= y <= quartile_set[1]:
+ result_lst.append(y)
+ return result_lst
+
+ # input_series = pd.Series()
+ # for index, value in enumerate(input_list):
+ # item_pd = pd.Series([value, ], index=[index, ])
+ # input_series.append(item_pd)
+ # output_series, _ = split_outliers(input_series, outlier_const=outlier_const,
+ # window=window)
+ # output_list = [y for x, y in output_series.items() if not np.isnan(y)]
+ #
+ # return output_list
def split_outliers(input_series, outlier_const=1.5, window=14):
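The rewritten remove_outliers() replaces the pandas-based split_outliers() call with a plain interquartile-range (Tukey fence) filter: values outside [Q1 - c*IQR, Q3 + c*IQR] are dropped. Below is a self-contained sketch with a small usage example; the sample data is invented and the signature omits the window parameter, which the patched function keeps but no longer uses.

# Stand-alone sketch of the IQR filter used by the new remove_outliers();
# only the quartile/fence arithmetic mirrors the patch, the data is invented.
import numpy as np

def remove_outliers(input_list, outlier_const=1.5):
    """Keep only values inside the Tukey fences
    [Q1 - outlier_const * IQR, Q3 + outlier_const * IQR]."""
    data = np.array(input_list)
    upper_quartile = np.percentile(data, 75)
    lower_quartile = np.percentile(data, 25)
    iqr = (upper_quartile - lower_quartile) * outlier_const
    low, high = lower_quartile - iqr, upper_quartile + iqr
    return [y for y in data.tolist() if low <= y <= high]

if __name__ == "__main__":
    samples = [4.1, 4.0, 3.9, 4.2, 0.3, 4.1, 9.7]  # 0.3 and 9.7 are outliers
    print(remove_outliers(samples))                # [4.1, 4.0, 3.9, 4.2, 4.1]

Unlike the pandas path left commented out in the hunk, the filter works on plain lists and preserves the input order.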