author    Vratko Polak <vrpolak@cisco.com>    2018-04-20 14:23:11 +0200
committer Tibor Frank <tifrank@cisco.com>     2018-04-23 05:25:08 +0000
commit    4f5872c1bb23873b3a93cb471aae8700d5ca029d (patch)
tree      579500c580dd8fa90bc77a1aba76c7c895691246 /resources/tools/presentation
parent    f3d66d9ca20e7fb2b153d83d809f005a93b76a8f (diff)
FIX: Use rolling window for outlier detection
+ Rename find_outliers to split_outliers.
+ Make remove_outliers call split_outliers internally.
+ Add "window" argument to both functions.
+ Add TODOs to call sites not setting window size explicitly.
+ Improve docstrings.

Change-Id: I24961e2859ddbfa62b543031284517c7389a2abb
Signed-off-by: Vratko Polak <vrpolak@cisco.com>
Diffstat (limited to 'resources/tools/presentation')
-rw-r--r--  resources/tools/presentation/generator_CPTA.py   |  5
-rw-r--r--  resources/tools/presentation/generator_tables.py | 17
-rw-r--r--  resources/tools/presentation/utils.py            | 73
3 files changed, 55 insertions(+), 40 deletions(-)
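For orientation, here is a minimal usage sketch of the renamed API described above (not part of the patch; the sample series, window size and import path are assumptions):

# Illustrative only: assumes utils.py from this directory is importable.
import pandas as pd
from utils import split_outliers

# Hypothetical per-build measurements; the index stands in for build numbers.
data = pd.Series([10.1, 10.2, 10.0, 10.3, 3.5, 10.1, 9.9, 10.2],
                 index=range(1, 9))

# After this change the caller passes the rolling window size explicitly.
trimmed, outliers = split_outliers(data, outlier_const=1.5, window=4)

# "trimmed" keeps the original index with outliers replaced by NaN;
# "outliers" contains only the rejected samples.
print(trimmed)
print(outliers)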
diff --git a/resources/tools/presentation/generator_CPTA.py b/resources/tools/presentation/generator_CPTA.py
index 25be09f959..3817ea9043 100644
--- a/resources/tools/presentation/generator_CPTA.py
+++ b/resources/tools/presentation/generator_CPTA.py
@@ -25,7 +25,7 @@ import numpy as np
import pandas as pd
from collections import OrderedDict
-from utils import find_outliers, archive_input_data, execute_command
+from utils import split_outliers, archive_input_data, execute_command
# Command to build the html format of the report
@@ -247,7 +247,8 @@ def _generate_trending_traces(in_data, build_info, period, moving_win_size=10,
data_pd = pd.Series(data_y, index=data_x)
- t_data, outliers = find_outliers(data_pd, outlier_const=1.5)
+ t_data, outliers = split_outliers(data_pd, outlier_const=1.5,
+ window=moving_win_size)
results = _evaluate_results(data_pd, t_data, window=moving_win_size)
anomalies = pd.Series()
diff --git a/resources/tools/presentation/generator_tables.py b/resources/tools/presentation/generator_tables.py
index 74579b0a9d..9b9f09f4be 100644
--- a/resources/tools/presentation/generator_tables.py
+++ b/resources/tools/presentation/generator_tables.py
@@ -25,7 +25,7 @@ from math import isnan
from xml.etree import ElementTree as ET
from errors import PresentationError
-from utils import mean, stdev, relative_change, remove_outliers, find_outliers
+from utils import mean, stdev, relative_change, remove_outliers, split_outliers
def generate_tables(spec, data):
@@ -405,14 +405,16 @@ def table_performance_comparison(table, input_data):
item = [tbl_dict[tst_name]["name"], ]
if tbl_dict[tst_name]["ref-data"]:
data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
- table["outlier-const"])
+ outlier_const=table["outlier-const"])
+ # TODO: Specify window size.
item.append(round(mean(data_t) / 1000000, 2))
item.append(round(stdev(data_t) / 1000000, 2))
else:
item.extend([None, None])
if tbl_dict[tst_name]["cmp-data"]:
data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
- table["outlier-const"])
+ outlier_const=table["outlier-const"])
+ # TODO: Specify window size.
item.append(round(mean(data_t) / 1000000, 2))
item.append(round(stdev(data_t) / 1000000, 2))
else:
@@ -594,14 +596,16 @@ def table_performance_comparison_mrr(table, input_data):
item = [tbl_dict[tst_name]["name"], ]
if tbl_dict[tst_name]["ref-data"]:
data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
- table["outlier-const"])
+ outlier_const=table["outlier-const"])
+ # TODO: Specify window size.
item.append(round(mean(data_t) / 1000000, 2))
item.append(round(stdev(data_t) / 1000000, 2))
else:
item.extend([None, None])
if tbl_dict[tst_name]["cmp-data"]:
data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
- table["outlier-const"])
+ outlier_const=table["outlier-const"])
+ # TODO: Specify window size.
item.append(round(mean(data_t) / 1000000, 2))
item.append(round(stdev(data_t) / 1000000, 2))
else:
@@ -708,7 +712,8 @@ def table_performance_trending_dashboard(table, input_data):
name = tbl_dict[tst_name]["name"]
median = pd_data.rolling(window=win_size, min_periods=2).median()
- trimmed_data, _ = find_outliers(pd_data, outlier_const=1.5)
+ trimmed_data, _ = split_outliers(pd_data, outlier_const=1.5,
+ window=win_size)
stdev_t = pd_data.rolling(window=win_size, min_periods=2).std()
rel_change_lst = [None, ]
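As an aside, a self-contained illustration (not from the patch) of the pandas rolling statistics this dashboard hunk combines with split_outliers; the sample values and window size are invented:

import pandas as pd

# Hypothetical series of measurements and window size.
win_size = 4
pd_data = pd.Series([10.1, 10.2, 10.0, 10.3, 3.5, 10.1])

# min_periods=2 lets the first windows yield a value as soon as two
# samples exist, instead of NaN until the window is full.
median = pd_data.rolling(window=win_size, min_periods=2).median()
stdev_t = pd_data.rolling(window=win_size, min_periods=2).std()
print(median)
print(stdev_t)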
diff --git a/resources/tools/presentation/utils.py b/resources/tools/presentation/utils.py
index 8365bfad5c..bc62268937 100644
--- a/resources/tools/presentation/utils.py
+++ b/resources/tools/presentation/utils.py
@@ -67,59 +67,68 @@ def relative_change(nr1, nr2):
return float(((nr2 - nr1) / nr1) * 100)
+def remove_outliers(input_list, outlier_const=1.5, window=14):
+ """Return list with outliers removed, using split_outliers.
-def remove_outliers(input_data, outlier_const):
- """
-
- :param input_data: Data from which the outliers will be removed.
+ :param input_list: Data from which the outliers will be removed.
:param outlier_const: Outlier constant.
- :type input_data: list
+ :param window: How many preceding values to take into account.
+ :type input_list: list of floats
:type outlier_const: float
+ :type window: int
:returns: The input list without outliers.
- :rtype: list
+ :rtype: list of floats
"""
- data = np.array(input_data)
- upper_quartile = np.percentile(data, 75)
- lower_quartile = np.percentile(data, 25)
- iqr = (upper_quartile - lower_quartile) * outlier_const
- quartile_set = (lower_quartile - iqr, upper_quartile + iqr)
- result_lst = list()
- for y in data.tolist():
- if quartile_set[0] <= y <= quartile_set[1]:
- result_lst.append(y)
- return result_lst
+ input_series = pd.Series()
+ for index, value in enumerate(input_list):
+ item_pd = pd.Series([value, ], index=[index, ])
+ input_series = input_series.append(item_pd)
+ output_series, _ = split_outliers(input_series, outlier_const=outlier_const,
+ window=window)
+ output_list = [y for x, y in output_series.items() if not np.isnan(y)]
+
+ return output_list
-def find_outliers(input_data, outlier_const=1.5):
+def split_outliers(input_series, outlier_const=1.5, window=14):
"""Go through the input data and generate two pandas series:
- - input data without outliers
+ - input data with outliers replaced by NaN
- outliers.
The function uses IQR to detect outliers.
- :param input_data: Data to be examined for outliers.
+ :param input_series: Data to be examined for outliers.
:param outlier_const: Outlier constant.
- :type input_data: pandas.Series
+ :param window: How many preceding values to take into account.
+ :type input_series: pandas.Series
:type outlier_const: float
- :returns: Tuple: input data with outliers removed; Outliers.
- :rtype: tuple (trimmed_data, outliers)
+ :type window: int
+ :returns: Input series with outliers replaced by NaN; the outliers.
+ :rtype: (pandas.Series, pandas.Series)
"""
- upper_quartile = input_data.quantile(q=0.75)
- lower_quartile = input_data.quantile(q=0.25)
- iqr = (upper_quartile - lower_quartile) * outlier_const
- low = lower_quartile - iqr
- high = upper_quartile + iqr
+ list_data = list(input_series.items())
+ head_size = min(window, len(list_data))
+ head_list = list_data[:head_size]
trimmed_data = pd.Series()
outliers = pd.Series()
- for item in input_data.items():
- item_pd = pd.Series([item[1], ], index=[item[0], ])
- if low <= item[1] <= high:
+ for item_x, item_y in head_list:
+ item_pd = pd.Series([item_y, ], index=[item_x, ])
+ trimmed_data = trimmed_data.append(item_pd)
+ for index, (item_x, item_y) in list(enumerate(list_data))[head_size:]:
+ y_rolling_list = [y for (x, y) in list_data[index - head_size:index]]
+ y_rolling_array = np.array(y_rolling_list)
+ q1 = np.percentile(y_rolling_array, 25)
+ q3 = np.percentile(y_rolling_array, 75)
+ iqr = (q3 - q1) * outlier_const
+ low, high = q1 - iqr, q3 + iqr
+ item_pd = pd.Series([item_y, ], index=[item_x, ])
+ if low <= item_y <= high:
trimmed_data = trimmed_data.append(item_pd)
else:
- trimmed_data = trimmed_data.append(pd.Series([np.nan, ],
- index=[item[0], ]))
outliers = outliers.append(item_pd)
+ nan_pd = pd.Series([np.nan, ], index=[item_x, ])
+ trimmed_data = trimmed_data.append(nan_pd)
return trimmed_data, outliers
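For reference, a self-contained sketch (not from the patch) of the IQR fence that split_outliers now applies to each rolling window of preceding values; the numbers are invented:

import numpy as np

# Hypothetical rolling window of the 4 preceding samples.
window_values = np.array([10.0, 10.1, 10.2, 10.3])
outlier_const = 1.5

q1 = np.percentile(window_values, 25)   # lower quartile
q3 = np.percentile(window_values, 75)   # upper quartile
iqr = (q3 - q1) * outlier_const
low, high = q1 - iqr, q3 + iqr

# A new sample is kept if low <= sample <= high; otherwise it is
# recorded as an outlier and replaced by NaN in the trimmed series.
sample = 3.5
print(low, high, low <= sample <= high)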