author    Viliam Luc <vluc@cisco.com>        2021-12-03 14:54:25 +0100
committer Tibor Frank <tifrank@cisco.com>    2021-12-13 10:59:18 +0000
commit    575b935029aa496629f138d0e5f756921b64d1e6 (patch)
tree      eadc1e51c97b754f968bcc9c0fcdf85e7c750bb7 /resources/tools/presentation
parent    e82ebbd96e2ba73276e2f1b6d7f9c2d8a9442c3f (diff)
trending: add regression and progression info to email

Add the trend in Mpps, the number of runs the trend is based on, and the
trend change in % to the regression/progression e-mail. In the dashboard,
remove the Short-Term Change column and add the number of runs for the
trend.

Signed-off-by: Viliam Luc <vluc@cisco.com>
Change-Id: Ib02d2a2224fc52b79832560241b0530aa2eaaf77
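For orientation, a minimal sketch (not the committed code) of the entry each
regressions/progressions e-mail file gains with this change; the test name and
values below are made up for illustration:

    def format_entry(tst_name, trend_mpps, runs, change_pct):
        """One e-mail line: '<test> [ <trend>M | #<runs> | <change>% ]'."""
        return f"{tst_name} [ {trend_mpps}M | #{runs} | {change_pct}% ]"

    print(format_entry("x710-eth-l2xcbase-ndrpdr", 23.45, 7, -3.1))
    # -> x710-eth-l2xcbase-ndrpdr [ 23.45M | #7 | -3.1% ]

Each file is then closed with a legend line explaining the three bracketed
fields, as the diff below shows.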
Diffstat (limited to 'resources/tools/presentation')
-rw-r--r--  resources/tools/presentation/generator_cpta.py    44
-rw-r--r--  resources/tools/presentation/generator_tables.py  26
2 files changed, 55 insertions, 15 deletions
diff --git a/resources/tools/presentation/generator_cpta.py b/resources/tools/presentation/generator_cpta.py
index 0320b9eec1..0bef38d82d 100644
--- a/resources/tools/presentation/generator_cpta.py
+++ b/resources/tools/presentation/generator_cpta.py
@@ -13,7 +13,6 @@
"""Generation of Continuous Performance Trending and Analysis.
"""
-
import re
import logging
import csv
@@ -21,6 +20,7 @@ import csv
from collections import OrderedDict
from datetime import datetime
from copy import deepcopy
+from os import listdir
import prettytable
import plotly.offline as ploff
@@ -838,22 +838,60 @@ def _generate_all_charts(spec, input_data):
# Evaluate result:
if anomaly_classifications:
+ legend_str = (f"Legend:\n[ Last trend in Mpps/Mcps | number of runs for"
+ f" last trend | ")
result = u"PASS"
for job_name, job_data in anomaly_classifications.items():
+ data = []
+ tb = u"-".join(job_name.split(u"-")[-2:])
+ for file in listdir(f"{spec.cpta[u'output-file']}"):
+ if tb in file and u"performance-trending-dashboard" in \
+ file and u"txt" in file:
+ file_to_read = f"{spec.cpta[u'output-file']}/{file}"
+ with open(f"{file_to_read}", u"rt") as input:
+ data = data + input.readlines()
file_name = \
f"{spec.cpta[u'output-file']}/regressions-{job_name}.txt"
with open(file_name, u'w') as txt_file:
for test_name, classification in job_data.items():
if classification == u"regression":
- txt_file.write(test_name + u'\n')
+ tst = test_name.split(" ")[1].split(".")[1:]
+ nic = tst[0].split("-")[0]
+ tst_name = f"{nic}-{tst[1]}"
+
+ for line in data:
+ if tst_name in line:
+ line = line.replace(" ", "")
+ trend = line.split("|")[2]
+ number = line.split("|")[3]
+ ltc = line.split("|")[4]
+ txt_file.write(f"{tst_name} [ {trend}M | "
+ f"#{number} | {ltc}% ]\n")
+
if classification in (u"regression", u"outlier"):
result = u"FAIL"
+
+ txt_file.write(f"{legend_str}regression in percentage ]")
+
file_name = \
f"{spec.cpta[u'output-file']}/progressions-{job_name}.txt"
with open(file_name, u'w') as txt_file:
for test_name, classification in job_data.items():
if classification == u"progression":
- txt_file.write(test_name + u'\n')
+ tst = test_name.split(" ")[1].split(".")[1:]
+ nic = tst[0].split("-")[0]
+ tst_name = f"{nic}-{tst[1]}"
+
+ for line in data:
+ if tst_name in line:
+ line = line.replace(" ", "")
+ trend = line.split("|")[2]
+ number = line.split("|")[3]
+ ltc = line.split("|")[4]
+ txt_file.write(f"{tst_name} [ {trend}M | "
+ f"#{number} | {ltc}% ]\n")
+
+ txt_file.write(f"{legend_str}progression in percentage ]")
else:
result = u"FAIL"
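The hunk above pulls the trend values for each anomalous test from the
per-testbed performance-trending-dashboard txt files sitting next to the
e-mail output. A standalone sketch of that lookup, assuming the dashboard
files are pipe-delimited text (function names and the directory layout here
are illustrative, not the committed code):

    from os import listdir

    def collect_dashboard_lines(output_dir, testbed):
        """Gather lines from '<testbed>*performance-trending-dashboard*.txt' files."""
        lines = []
        for name in listdir(output_dir):
            if testbed in name and "performance-trending-dashboard" in name \
                    and "txt" in name:
                with open(f"{output_dir}/{name}", "rt") as dashboard:
                    lines.extend(dashboard.readlines())
        return lines

    def lookup(lines, tst_name):
        """Return (trend, runs, change) from the first row matching tst_name."""
        for line in lines:
            if tst_name in line:
                fields = line.replace(" ", "").split("|")
                # Columns 2, 3, 4 match the indices used in the diff above.
                return fields[2], fields[3], fields[4]
        return None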
diff --git a/resources/tools/presentation/generator_tables.py b/resources/tools/presentation/generator_tables.py
index 917f0412f5..0b063b1067 100644
--- a/resources/tools/presentation/generator_tables.py
+++ b/resources/tools/presentation/generator_tables.py
@@ -972,8 +972,8 @@ def table_perf_trending_dash(table, input_data):
header = [
u"Test Case",
u"Trend [Mpps]",
- u"Short-Term Change [%]",
- u"Long-Term Change [%]",
+ u"Number of runs [#]",
+ u"Trend Change [%]",
u"Regressions [#]",
u"Progressions [#]"
]
@@ -1034,6 +1034,13 @@ def table_perf_trending_dash(table, input_data):
last_avg = avgs[-1]
avg_week_ago = avgs[max(-win_size, -len(avgs))]
+            nr_of_last_avgs = 0
+ for x in reversed(avgs):
+ if x == last_avg:
+ nr_of_last_avgs += 1
+ else:
+ break
+
if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
rel_change_last = nan
else:
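The new "Number of runs [#]" column counts how many of the most recent runs
share the current trend value. A minimal sketch of that count, assuming avgs
is the list of per-run trend averages with the newest value last (it keeps the
diff's exact-equality comparison):

    from itertools import takewhile

    def runs_in_last_trend(avgs):
        """Count trailing entries of avgs equal to the newest average."""
        if not avgs:
            return 0
        last_avg = avgs[-1]
        return sum(1 for _ in takewhile(lambda avg: avg == last_avg,
                                        reversed(avgs)))

    print(runs_in_last_trend([10.1, 10.1, 12.3, 12.3, 12.3]))  # -> 3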
@@ -1055,28 +1062,23 @@ def table_perf_trending_dash(table, input_data):
tbl_lst.append(
[tbl_dict[tst_name][u"name"],
round(last_avg / 1e6, 2),
- rel_change_last,
+ nr_of_last_avgs,
rel_change_long,
classification_lst[-win_size+1:].count(u"regression"),
classification_lst[-win_size+1:].count(u"progression")])
tbl_lst.sort(key=lambda rel: rel[0])
- tbl_lst.sort(key=lambda rel: rel[3])
tbl_lst.sort(key=lambda rel: rel[2])
-
- tbl_sorted = list()
- for nrr in range(table[u"window"], -1, -1):
- tbl_reg = [item for item in tbl_lst if item[4] == nrr]
- for nrp in range(table[u"window"], -1, -1):
- tbl_out = [item for item in tbl_reg if item[5] == nrp]
- tbl_sorted.extend(tbl_out)
+ tbl_lst.sort(key=lambda rel: rel[3])
+ tbl_lst.sort(key=lambda rel: rel[5], reverse=True)
+ tbl_lst.sort(key=lambda rel: rel[4], reverse=True)
file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
logging.info(f" Writing file: {file_name}")
with open(file_name, u"wt") as file_handler:
file_handler.write(header_str)
- for test in tbl_sorted:
+ for test in tbl_lst:
file_handler.write(u",".join([str(item) for item in test]) + u'\n')
logging.info(f" Writing file: {table[u'output-file']}.txt")
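With the window-based bucketing removed, the ordering relies on Python's
stable sort: the later sorts take precedence, so the table ends up ordered by
regressions (descending), then progressions (descending), then trend change,
then number of runs, then test name. A sketch of the same ordering done with
one composite key on made-up rows (each row is [name, trend_mpps, runs,
trend_change, regressions, progressions]):

    rows = [
        ["test-b", 12.3, 4, -1.5, 2, 0],
        ["test-a", 23.4, 7,  0.0, 2, 1],
        ["test-c",  9.8, 2,  3.2, 0, 3],
    ]
    # Negate the counts to sort them descending while the rest sorts ascending.
    rows.sort(key=lambda r: (-r[4], -r[5], r[3], r[2], r[0]))
    for row in rows:
        print(row)
    # test-a first (2 regressions, 1 progression), then test-b, then test-c.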