path: root/resources/tools/presentation/generator_tables.py
Diffstat (limited to 'resources/tools/presentation/generator_tables.py')
-rw-r--r--  resources/tools/presentation/generator_tables.py | 175
1 file changed, 141 insertions, 34 deletions
diff --git a/resources/tools/presentation/generator_tables.py b/resources/tools/presentation/generator_tables.py
index 449b2357a8..1a3e46ab8e 100644
--- a/resources/tools/presentation/generator_tables.py
+++ b/resources/tools/presentation/generator_tables.py
@@ -17,6 +17,7 @@
import logging
import csv
+import math
import re
from collections import OrderedDict
@@ -24,11 +25,11 @@ from xml.etree import ElementTree as ET
from datetime import datetime as dt
from datetime import timedelta
from copy import deepcopy
-from json import loads
import plotly.graph_objects as go
import plotly.offline as ploff
import pandas as pd
+import prettytable
from numpy import nan, isnan
from yaml import load, FullLoader, YAMLError
@@ -59,7 +60,8 @@ def generate_tables(spec, data):
u"table_failed_tests_html": table_failed_tests_html,
u"table_oper_data_html": table_oper_data_html,
u"table_comparison": table_comparison,
- u"table_weekly_comparison": table_weekly_comparison
+ u"table_weekly_comparison": table_weekly_comparison,
+ u"table_job_spec_duration": table_job_spec_duration
}
logging.info(u"Generating the tables ...")
@@ -76,6 +78,96 @@ def generate_tables(spec, data):
logging.info(u"Done.")
+def table_job_spec_duration(table, input_data):
+ """Generate the table(s) with algorithm: table_job_spec_duration
+ specified in the specification file.
+
+ :param table: Table to generate.
+ :param input_data: Data to process.
+ :type table: pandas.Series
+ :type input_data: InputData
+ """
+
+ _ = input_data
+
+ logging.info(f" Generating the table {table.get(u'title', u'')} ...")
+
+ jb_type = table.get(u"jb-type", None)
+
+ tbl_lst = list()
+ if jb_type == u"iterative":
+ for line in table.get(u"lines", tuple()):
+ tbl_itm = {
+ u"name": line.get(u"job-spec", u""),
+ u"data": list()
+ }
+ for job, builds in line.get(u"data-set", dict()).items():
+ for build_nr in builds:
+ try:
+ minutes = input_data.metadata(
+ job, str(build_nr)
+ )[u"elapsedtime"] // 60000
+ except (KeyError, IndexError, ValueError, AttributeError):
+ continue
+ tbl_itm[u"data"].append(minutes)
+ tbl_itm[u"mean"] = mean(tbl_itm[u"data"])
+ tbl_itm[u"stdev"] = stdev(tbl_itm[u"data"])
+ tbl_lst.append(tbl_itm)
+ elif jb_type == u"coverage":
+ job = table.get(u"data", None)
+ if not job:
+ return
+ for line in table.get(u"lines", tuple()):
+ try:
+ tbl_itm = {
+ u"name": line.get(u"job-spec", u""),
+ u"mean": input_data.metadata(
+ list(job.keys())[0], str(line[u"build"])
+ )[u"elapsedtime"] // 60000,
+ u"stdev": float(u"nan")
+ }
+ tbl_itm[u"data"] = [tbl_itm[u"mean"], ]
+ except (KeyError, IndexError, ValueError, AttributeError):
+ continue
+ tbl_lst.append(tbl_itm)
+ else:
+ logging.warning(f"Wrong type of job-spec: {jb_type}. Skipping.")
+ return
+
+ for line in tbl_lst:
+ line[u"mean"] = \
+ f"{int(line[u'mean'] // 60):02d}:{int(line[u'mean'] % 60):02d}"
+ if math.isnan(line[u"stdev"]):
+ line[u"stdev"] = u""
+ else:
+ line[u"stdev"] = \
+ f"{int(line[u'stdev'] //60):02d}:{int(line[u'stdev'] % 60):02d}"
+
+ if not tbl_lst:
+ return
+
+ rows = list()
+ for itm in tbl_lst:
+ rows.append([
+ itm[u"name"],
+ f"{len(itm[u'data'])}",
+ f"{itm[u'mean']} +- {itm[u'stdev']}"
+ if itm[u"stdev"] != u"" else f"{itm[u'mean']}"
+ ])
+
+ txt_table = prettytable.PrettyTable(
+ [u"Job Specification", u"Nr of Runs", u"Duration [HH:MM]"]
+ )
+ for row in rows:
+ txt_table.add_row(row)
+ txt_table.align = u"r"
+ txt_table.align[u"Job Specification"] = u"l"
+
+ file_name = f"{table.get(u'output-file', u'')}.txt"
+ with open(file_name, u"wt", encoding=u"utf-8") as txt_file:
+ txt_file.write(str(txt_table))
+
+
def table_oper_data_html(table, input_data):
"""Generate the table(s) with algorithm: html_table_oper_data
specified in the specification file.
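To illustrate what the new table_job_spec_duration writes into the Duration column, here is a small self-contained sketch of just the formatting path: elapsed time in milliseconds is floored to minutes, and the per-spec mean and standard deviation are rendered as HH:MM. The helper name and the sample values are invented; statistics.mean/stdev stand in for the helpers the module already imports.

import math
from statistics import mean, stdev   # stand-ins for the module's own mean/stdev helpers

def as_hh_mm(minutes):
    # Render a duration given in minutes as HH:MM, empty string for NaN.
    if math.isnan(minutes):
        return u""
    return f"{int(minutes // 60):02d}:{int(minutes % 60):02d}"

elapsed_ms = [5_520_000, 5_880_000, 6_060_000]    # three invented runs
minutes = [ms // 60000 for ms in elapsed_ms]      # -> [92, 98, 101]
print(f"{as_hh_mm(mean(minutes))} +- {as_hh_mm(stdev(minutes))}")   # 01:37 +- 00:04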
@@ -205,29 +297,15 @@ def table_oper_data_html(table, input_data):
threads = dict({idx: list() for idx in range(len(runtime))})
for idx, run_data in runtime.items():
for gnode, gdata in run_data.items():
- if gdata[u"vectors"] > 0:
- clocks = gdata[u"clocks"] / gdata[u"vectors"]
- elif gdata[u"calls"] > 0:
- clocks = gdata[u"clocks"] / gdata[u"calls"]
- elif gdata[u"suspends"] > 0:
- clocks = gdata[u"clocks"] / gdata[u"suspends"]
- else:
- clocks = 0.0
- if gdata[u"calls"] > 0:
- vectors_call = gdata[u"vectors"] / gdata[u"calls"]
- else:
- vectors_call = 0.0
- if int(gdata[u"calls"]) + int(gdata[u"vectors"]) + \
- int(gdata[u"suspends"]):
- threads[idx].append([
- gnode,
- int(gdata[u"calls"]),
- int(gdata[u"vectors"]),
- int(gdata[u"suspends"]),
- clocks,
- vectors_call
- ])
-
+ threads[idx].append([
+ gnode,
+ int(gdata[u"calls"]),
+ int(gdata[u"vectors"]),
+ int(gdata[u"suspends"]),
+ float(gdata[u"clocks"]),
+ float(gdata[u"vectors"] / gdata[u"calls"]) \
+ if gdata[u"calls"] else 0.0
+ ])
bold = ET.SubElement(tcol, u"b")
bold.text = (
f"Host IP: {dut_data.get(u'host', '')}, "
@@ -477,6 +555,13 @@ def _tpc_insert_data(target, src, include_tests):
target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
elif include_tests == u"NDR":
target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
+ elif u"latency" in include_tests:
+ keys = include_tests.split(u"-")
+ if len(keys) == 4:
+ lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
+ target[u"data"].append(
+ float(u"nan") if lat == -1 else lat * 1e6
+ )
except (KeyError, TypeError):
pass
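The new latency branch expects include-tests to encode a four-level, dash-separated path into the test result. A hedged sketch of the lookup; the value "latency-forward-PDR-50" and the nested dict are invented, only the split into four keys, the -1 to NaN mapping and the 1e6 scaling come from the patch.

import math

include_tests = u"latency-forward-PDR-50"                     # invented spec value
src = {u"latency": {u"forward": {u"PDR": {u"50": 2.5e-5}}}}   # invented test result

keys = include_tests.split(u"-")     # ["latency", "forward", "PDR", "50"]
if len(keys) == 4:
    lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
    value = float(u"nan") if lat == -1 else lat * 1e6   # -1 marks a missing sample
    print(value)                                        # 25.0 after the 1e6 scaling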
@@ -1367,7 +1452,11 @@ def table_last_failed_tests(table, input_data):
if not groups:
continue
nic = groups.group(0)
- failed_tests.append(f"{nic}-{tst_data[u'name']}")
+ msg = tst_data[u'msg'].replace(u"\n", u"")
+ msg = re.sub(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',
+ 'xxx.xxx.xxx.xxx', msg)
+ msg = msg.split(u'Also teardown failed')[0]
+ failed_tests.append(f"{nic}-{tst_data[u'name']}###{msg}")
tbl_list.append(passed)
tbl_list.append(failed)
tbl_list.append(duration)
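The failure message is now attached to each failed test after collapsing newlines, anonymizing IPv4 addresses and trimming any trailing teardown note. A small illustration with an invented message:

import re

msg = u"Validation failed on 10.0.0.1\nAlso teardown failed: link down"
msg = msg.replace(u"\n", u"")
msg = re.sub(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})', 'xxx.xxx.xxx.xxx', msg)
msg = msg.split(u'Also teardown failed')[0]
print(msg)    # Validation failed on xxx.xxx.xxx.xxx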
@@ -1623,7 +1712,14 @@ def table_comparison(table, input_data):
tag = col.get(u"tag", None)
data = input_data.filter_data(
table,
- params=[u"throughput", u"result", u"name", u"parent", u"tags"],
+ params=[
+ u"throughput",
+ u"result",
+ u"latency",
+ u"name",
+ u"parent",
+ u"tags"
+ ],
data=col[u"data-set"],
continue_on_error=True
)
@@ -1661,7 +1757,14 @@ def table_comparison(table, input_data):
if replacement:
rpl_data = input_data.filter_data(
table,
- params=[u"throughput", u"result", u"name", u"parent", u"tags"],
+ params=[
+ u"throughput",
+ u"result",
+ u"latency",
+ u"name",
+ u"parent",
+ u"tags"
+ ],
data=replacement,
continue_on_error=True
)
@@ -1695,7 +1798,8 @@ def table_comparison(table, input_data):
include_tests=table[u"include-tests"]
)
- if table[u"include-tests"] in (u"NDR", u"PDR"):
+ if table[u"include-tests"] in (u"NDR", u"PDR") or \
+ u"latency" in table[u"include-tests"]:
for tst_name, tst_data in col_data[u"data"].items():
if tst_data[u"data"]:
tst_data[u"mean"] = mean(tst_data[u"data"])
@@ -1780,11 +1884,14 @@ def table_comparison(table, input_data):
cmp_itm[u"mean"] is not None and \
ref_itm[u"stdev"] is not None and \
cmp_itm[u"stdev"] is not None:
- delta, d_stdev = relative_change_stdev(
- ref_itm[u"mean"], cmp_itm[u"mean"],
- ref_itm[u"stdev"], cmp_itm[u"stdev"]
- )
- if delta is None:
+ try:
+ delta, d_stdev = relative_change_stdev(
+ ref_itm[u"mean"], cmp_itm[u"mean"],
+ ref_itm[u"stdev"], cmp_itm[u"stdev"]
+ )
+ except ZeroDivisionError:
+ break
+ if delta is None or math.isnan(delta):
break
new_row.append({
u"mean": delta * 1e6,