author      Tibor Frank <tifrank@cisco.com>    2018-04-17 07:28:54 +0200
committer   Tibor Frank <tifrank@cisco.com>    2018-04-17 07:28:54 +0200
commit      23731f392ad8705b17cf37f9c2d397b20305f924 (patch)
tree        b8052d28bc21c8b4dc07dfd6b5ae6a15c491962b /resources/tools/presentation
parent      9821b058c2f4901a9b4d66667018da214513ab28 (diff)
CSIT-1041: Trending dashboard
Change-Id: I983c5cccd165fb32742d395cf7e8aa02c7f9394a
Signed-off-by: Tibor Frank <tifrank@cisco.com>
Diffstat (limited to 'resources/tools/presentation')
-rw-r--r--   resources/tools/presentation/generator_CPTA.py        | 16
-rw-r--r--   resources/tools/presentation/generator_tables.py      | 87
-rw-r--r--   resources/tools/presentation/pal.py                   | 82
-rw-r--r--   resources/tools/presentation/specification_CPTA.yaml  |  3
4 files changed, 115 insertions, 73 deletions
diff --git a/resources/tools/presentation/generator_CPTA.py b/resources/tools/presentation/generator_CPTA.py
index 3a8ea93e0a..066bfbddc8 100644
--- a/resources/tools/presentation/generator_CPTA.py
+++ b/resources/tools/presentation/generator_CPTA.py
@@ -164,19 +164,21 @@ def _evaluate_results(in_data, trimmed_data, window=10):
if len(in_data) > 2:
win_size = in_data.size if in_data.size < window else window
- results = [0.0, ] * win_size
+ results = [0.0, ]
median = in_data.rolling(window=win_size).median()
stdev_t = trimmed_data.rolling(window=win_size, min_periods=2).std()
m_vals = median.values
s_vals = stdev_t.values
d_vals = in_data.values
- for day in range(win_size, in_data.size):
- if np.isnan(m_vals[day - 1]) or np.isnan(s_vals[day - 1]):
+ for day in range(1, in_data.size):
+ if np.isnan(m_vals[day]) \
+ or np.isnan(s_vals[day]) \
+ or np.isnan(d_vals[day]):
results.append(0.0)
- elif d_vals[day] < (m_vals[day - 1] - 3 * s_vals[day - 1]):
+ elif d_vals[day] < (m_vals[day] - 3 * s_vals[day]):
results.append(0.33)
- elif (m_vals[day - 1] - 3 * s_vals[day - 1]) <= d_vals[day] <= \
- (m_vals[day - 1] + 3 * s_vals[day - 1]):
+ elif (m_vals[day] - 3 * s_vals[day]) <= d_vals[day] <= \
+ (m_vals[day] + 3 * s_vals[day]):
results.append(0.66)
else:
results.append(1.0)
@@ -244,7 +246,7 @@ def _generate_trending_traces(in_data, build_info, period, moving_win_size=10,
data_y = [val for val in in_data.values()]
data_pd = pd.Series(data_y, index=data_x)
- t_data, outliers = find_outliers(data_pd)
+ t_data, outliers = find_outliers(data_pd, outlier_const=1.5)
results = _evaluate_results(data_pd, t_data, window=moving_win_size)
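For reference, a minimal standalone sketch (not part of the patch) of the per-sample scoring that the updated _evaluate_results() performs: each sample is compared against the rolling median +/- 3 * stdev taken at the same index, yielding 0.0 (no verdict possible), 0.33 (below the band), 0.66 (inside the band) or 1.0 (above the band). In the patched code the stdev comes from the outlier-trimmed series returned by find_outliers(); this sketch uses the raw series for brevity, and score_samples() is an illustrative name.

import numpy as np
import pandas as pd

def score_samples(samples, window=10):
    # Rolling median and stdev over the same window as _evaluate_results();
    # min_periods=2 lets the statistics start after the second sample.
    win = samples.size if samples.size < window else window
    m_vals = samples.rolling(window=win, min_periods=2).median().values
    s_vals = samples.rolling(window=win, min_periods=2).std().values
    d_vals = samples.values
    results = [0.0, ]
    for day in range(1, samples.size):
        if np.isnan(m_vals[day]) or np.isnan(s_vals[day]) or np.isnan(d_vals[day]):
            results.append(0.0)   # not enough data for a verdict
        elif d_vals[day] < m_vals[day] - 3 * s_vals[day]:
            results.append(0.33)  # below the band: possible regression
        elif d_vals[day] <= m_vals[day] + 3 * s_vals[day]:
            results.append(0.66)  # inside the band: normal
        else:
            results.append(1.0)   # above the band: possible progression
    return results

print(score_samples(pd.Series([10.0, 10.1, 9.9, 10.2, 9.8, 10.1, 10.0])))

Because the trimming step is skipped here, a single large dip inflates its own stdev and tends to stay inside the band; computing the stdev on the trimmed data, as the patch does, is what makes isolated regressions detectable.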
diff --git a/resources/tools/presentation/generator_tables.py b/resources/tools/presentation/generator_tables.py
index 13c8efffdb..12cbee2dae 100644
--- a/resources/tools/presentation/generator_tables.py
+++ b/resources/tools/presentation/generator_tables.py
@@ -574,38 +574,75 @@ def table_performance_trending_dashboard(table, input_data):
tbl_lst = list()
for tst_name in tbl_dict.keys():
if len(tbl_dict[tst_name]["data"]) > 2:
- pd_data = pd.Series(tbl_dict[tst_name]["data"])
+ sample_lst = tbl_dict[tst_name]["data"]
+ pd_data = pd.Series(sample_lst)
win_size = pd_data.size \
if pd_data.size < table["window"] else table["window"]
# Test name:
name = tbl_dict[tst_name]["name"]
- # Throughput trend:
- trend = list(pd_data.rolling(window=win_size, min_periods=2).
- median())[-2]
- # Anomaly:
+
+ # Trend list:
+ trend_lst = list(pd_data.rolling(window=win_size, min_periods=2).
+ median())
+ # Stdevs list:
t_data, _ = find_outliers(pd_data)
- last = list(t_data)[-1]
- t_stdev = list(t_data.rolling(window=win_size, min_periods=2).
- std())[-2]
- if isnan(last):
- classification = "outlier"
- last = list(pd_data)[-1]
- elif last < (trend - 3 * t_stdev):
+ t_data_lst = list(t_data)
+ stdev_lst = list(t_data.rolling(window=win_size, min_periods=2).
+ std())
+
+ rel_change_lst = [None, ]
+ classification_lst = [None, ]
+ for idx in range(1, len(trend_lst)):
+ # Relative changes list:
+ if not isnan(sample_lst[idx]) \
+ and not isnan(trend_lst[idx])\
+ and trend_lst[idx] != 0:
+ rel_change_lst.append(
+ int(relative_change(float(trend_lst[idx]),
+ float(sample_lst[idx]))))
+ else:
+ rel_change_lst.append(None)
+ # Classification list:
+ if isnan(t_data_lst[idx]) or isnan(stdev_lst[idx]):
+ classification_lst.append("outlier")
+ elif sample_lst[idx] < (trend_lst[idx] - 3*stdev_lst[idx]):
+ classification_lst.append("regression")
+ elif sample_lst[idx] > (trend_lst[idx] + 3*stdev_lst[idx]):
+ classification_lst.append("progression")
+ else:
+ classification_lst.append("normal")
+
+ last_idx = len(sample_lst) - 1
+ first_idx = last_idx - int(table["evaluated-window"])
+ if first_idx < 0:
+ first_idx = 0
+
+ if "regression" in classification_lst[first_idx:]:
classification = "regression"
- elif last > (trend + 3 * t_stdev):
+ elif "outlier" in classification_lst[first_idx:]:
+ classification = "outlier"
+ elif "progression" in classification_lst[first_idx:]:
classification = "progression"
else:
classification = "normal"
- if not isnan(last) and not isnan(trend) and trend != 0:
- # Relative change:
- rel_change = int(relative_change(float(trend), float(last)))
-
- tbl_lst.append([name,
- round(float(trend) / 1000000, 2),
- round(float(last) / 1000000, 2),
- rel_change,
- classification])
+ idx = len(classification_lst) - 1
+ while idx:
+ if classification_lst[idx] == classification:
+ break
+ idx -= 1
+
+ trend = round(float(trend_lst[-2]) / 1000000, 2) \
+ if not isnan(trend_lst[-2]) else ''
+ sample = round(float(sample_lst[idx]) / 1000000, 2) \
+ if not isnan(sample_lst[idx]) else ''
+ rel_change = rel_change_lst[idx] \
+ if rel_change_lst[idx] is not None else ''
+ tbl_lst.append([name,
+ trend,
+ sample,
+ rel_change,
+ classification])
# Sort the table according to the classification
tbl_sorted = list()
@@ -684,11 +721,11 @@ def table_performance_trending_dashboard_html(table, input_data):
td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
if c_idx == 4:
if item == "regression":
- td.set("bgcolor", "#FF0000")
+ td.set("bgcolor", "#eca1a6")
elif item == "outlier":
- td.set("bgcolor", "#818181")
+ td.set("bgcolor", "#d6cbd3")
elif item == "progression":
- td.set("bgcolor", "#008000")
+ td.set("bgcolor", "#bdcebe")
td.text = item
try:
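For reference, a minimal standalone sketch (not part of the patch) of the dashboard classification added above: every sample is classified against the rolling median (trend) +/- 3 * stdev, and the verdict reported for a test is the most severe classification (regression > outlier > progression > normal) seen within the last evaluated-window samples. In the patched code the stdev and the outlier flag come from the series trimmed by find_outliers(); this sketch derives everything from the raw samples, so its "outlier" branch only fires while the rolling statistics are still undefined, and classify_test() is an illustrative name.

import pandas as pd
from math import isnan

def classify_test(samples, window=10, evaluated_window=14):
    pd_data = pd.Series(samples)
    win = pd_data.size if pd_data.size < window else window
    trend_lst = list(pd_data.rolling(window=win, min_periods=2).median())
    stdev_lst = list(pd_data.rolling(window=win, min_periods=2).std())

    # Classify each sample against the trend at the same index.
    classification_lst = [None, ]
    for idx in range(1, len(samples)):
        if isnan(trend_lst[idx]) or isnan(stdev_lst[idx]):
            classification_lst.append("outlier")
        elif samples[idx] < trend_lst[idx] - 3 * stdev_lst[idx]:
            classification_lst.append("regression")
        elif samples[idx] > trend_lst[idx] + 3 * stdev_lst[idx]:
            classification_lst.append("progression")
        else:
            classification_lst.append("normal")

    # Only the last `evaluated_window` samples decide the reported verdict,
    # with regression taking precedence over outlier and progression.
    first_idx = max(len(samples) - 1 - evaluated_window, 0)
    for verdict in ("regression", "outlier", "progression"):
        if verdict in classification_lst[first_idx:]:
            return verdict
    return "normal"

print(classify_test([10.0, 10.1, 9.9, 10.2, 10.1, 10.0, 10.2, 10.1]))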
diff --git a/resources/tools/presentation/pal.py b/resources/tools/presentation/pal.py
index 98642c898c..aaeacaac15 100644
--- a/resources/tools/presentation/pal.py
+++ b/resources/tools/presentation/pal.py
@@ -87,48 +87,48 @@ def main():
return 1
ret_code = 0
- try:
- env = Environment(spec.environment, args.force)
- env.set_environment()
-
- if spec.is_debug:
- if spec.debug["input-format"] == "zip":
- unzip_files(spec)
- else:
- download_data_files(spec)
-
- prepare_static_content(spec)
-
- data = InputData(spec)
- data.read_data()
-
- generate_tables(spec, data)
- generate_plots(spec, data)
- generate_files(spec, data)
-
- if spec.output["output"] == "report":
- generate_report(args.release, spec)
- logging.info("Successfully finished.")
- elif spec.output["output"] == "CPTA":
- sys.stdout.write(generate_cpta(spec, data))
- logging.info("Successfully finished.")
- else:
- logging.critical("The output '{0}' is not supported.".
- format(spec.output["output"]))
- ret_code = 1
-
- except (KeyError, ValueError, PresentationError) as err:
- logging.info("Finished with an error.")
- logging.critical(str(err))
- ret_code = 1
- except Exception as err:
- logging.info("Finished with an unexpected error.")
- logging.critical(str(err))
+ # try:
+ env = Environment(spec.environment, args.force)
+ env.set_environment()
+
+ if spec.is_debug:
+ if spec.debug["input-format"] == "zip":
+ unzip_files(spec)
+ else:
+ download_data_files(spec)
+
+ prepare_static_content(spec)
+
+ data = InputData(spec)
+ data.read_data()
+
+ generate_tables(spec, data)
+ generate_plots(spec, data)
+ generate_files(spec, data)
+
+ if spec.output["output"] == "report":
+ generate_report(args.release, spec)
+ logging.info("Successfully finished.")
+ elif spec.output["output"] == "CPTA":
+ sys.stdout.write(generate_cpta(spec, data))
+ logging.info("Successfully finished.")
+ else:
+ logging.critical("The output '{0}' is not supported.".
+ format(spec.output["output"]))
ret_code = 1
- finally:
- if spec is not None and not spec.is_debug:
- clean_environment(spec.environment)
- return ret_code
+
+ # except (KeyError, ValueError, PresentationError) as err:
+ # logging.info("Finished with an error.")
+ # logging.critical(str(err))
+ # ret_code = 1
+ # except Exception as err:
+ # logging.info("Finished with an unexpected error.")
+ # logging.critical(str(err))
+ # ret_code = 1
+ # finally:
+ # if spec is not None and not spec.is_debug:
+ # clean_environment(spec.environment)
+ # return ret_code
if __name__ == '__main__':
diff --git a/resources/tools/presentation/specification_CPTA.yaml b/resources/tools/presentation/specification_CPTA.yaml
index 0544acd040..f6d07e8063 100644
--- a/resources/tools/presentation/specification_CPTA.yaml
+++ b/resources/tools/presentation/specification_CPTA.yaml
@@ -227,6 +227,7 @@
nr-of-tests-shown: 20
outlier-const: 1.5
window: 10
+ evaluated-window: 14
-
type: "table"
@@ -245,6 +246,7 @@
nr-of-tests-shown: 20
outlier-const: 1.5
window: 10
+ evaluated-window: 14
-
type: "table"
@@ -263,6 +265,7 @@
nr-of-tests-shown: 20
outlier-const: 1.5
window: 10
+ evaluated-window: 14
-
type: "table"