author     Tibor Frank <tifrank@cisco.com>    2020-04-09 10:46:54 +0200
committer  Tibor Frank <tifrank@cisco.com>    2020-04-09 14:48:19 +0200
commit     bf31370e5e5fa3b597229a631ca1d58788aae794 (patch)
tree       5b9c1cd10f5e54b261d18c168d59511df44424e1 /resources/tools
parent     8fcb84c0495e364daf65745a9b23b0d6f0a1d51e (diff)
PAL: Integrate new comp tables
Change-Id: Ibadf4fabb69bdd9d2412ed8f33c97ab12303800f
Signed-off-by: Tibor Frank <tifrank@cisco.com>
Diffstat (limited to 'resources/tools')
-rw-r--r--  resources/tools/presentation/generator_tables.py            562
-rw-r--r--  resources/tools/presentation/pal_utils.py                     29
-rw-r--r--  resources/tools/presentation/rca/rca-2n-skx-2t1c-pdr.yaml      2
-rw-r--r--  resources/tools/presentation/rca/rca-3n-skx-2t1c-pdr.yaml      2
-rw-r--r--  resources/tools/presentation/specification_parser.py          13
5 files changed, 517 insertions(+), 91 deletions(-)
diff --git a/resources/tools/presentation/generator_tables.py b/resources/tools/presentation/generator_tables.py
index e5309429bf..c9205642c6 100644
--- a/resources/tools/presentation/generator_tables.py
+++ b/resources/tools/presentation/generator_tables.py
@@ -23,6 +23,7 @@ from collections import OrderedDict
from xml.etree import ElementTree as ET
from datetime import datetime as dt
from datetime import timedelta
+from copy import deepcopy
import plotly.graph_objects as go
import plotly.offline as ploff
@@ -58,7 +59,8 @@ def generate_tables(spec, data):
u"table_last_failed_tests": table_last_failed_tests,
u"table_failed_tests": table_failed_tests,
u"table_failed_tests_html": table_failed_tests_html,
- u"table_oper_data_html": table_oper_data_html
+ u"table_oper_data_html": table_oper_data_html,
+ u"table_comparison": table_comparison
}
logging.info(u"Generating the tables ...")
@@ -373,11 +375,13 @@ def table_merged_details(table, input_data):
logging.info(u" Done.")
-def _tpc_modify_test_name(test_name):
+def _tpc_modify_test_name(test_name, ignore_nic=False):
"""Modify a test name by replacing its parts.
:param test_name: Test name to be modified.
+ :param ignore_nic: If True, NIC is removed from TC name.
:type test_name: str
+ :type ignore_nic: bool
:returns: Modified test name.
:rtype: str
"""
@@ -395,7 +399,9 @@ def _tpc_modify_test_name(test_name):
replace(u"4t4c", u"4c").\
replace(u"8t4c", u"4c")
- return re.sub(REGEX_NIC, u"", test_name_mod)
+ if ignore_nic:
+ return re.sub(REGEX_NIC, u"", test_name_mod)
+ return test_name_mod
def _tpc_modify_displayed_test_name(test_name):
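
A minimal sketch of the renaming idea behind the new ignore_nic switch, using a hypothetical stand-in pattern for REGEX_NIC (the real regex and the full replace chain live elsewhere in this module):

    import re

    # Hypothetical stand-in for REGEX_NIC; the real pattern is defined
    # elsewhere in generator_tables.py.
    NIC_RE = re.compile(r"\d+ge\dp\d\S+?-")

    def modify_test_name_sketch(test_name, ignore_nic=False):
        """Collapse thread/core tags and optionally drop the NIC token."""
        name = test_name.replace(u"2t1c", u"1c").replace(u"4t4c", u"4c")
        return NIC_RE.sub(u"", name) if ignore_nic else name

    # modify_test_name_sketch(u"10ge2p1x710-64b-2t1c-ip4base", ignore_nic=True)
    #   -> u"64b-1c-ip4base"
    # modify_test_name_sketch(u"10ge2p1x710-64b-2t1c-ip4base")
    #   -> u"10ge2p1x710-64b-1c-ip4base"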
@@ -484,7 +490,7 @@ def _tpc_sort_table(table):
def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
- footnote=u""):
+ footnote=u"", sort_data=True):
"""Generate html table from input data with simple sorting possibility.
:param header: Table header.
@@ -496,11 +502,13 @@ def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
generated html table is written.
:param legend: The legend to display below the table.
:param footnote: The footnote to display below the table (and legend).
+ :param sort_data: If True the data sorting is enabled.
:type header: list
:type data: list of lists
:type out_file_name: str
:type legend: str
:type footnote: str
+ :type sort_data: bool
"""
try:
@@ -508,75 +516,99 @@ def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
except ValueError:
idx = 0
params = {
- u"align-hdr": ([u"left", u"center"], [u"left", u"left", u"center"]),
- u"align-itm": ([u"left", u"right"], [u"left", u"left", u"right"]),
- u"width": ([28, 9], [4, 24, 10])
+ u"align-hdr": (
+ [u"left", u"center"],
+ [u"left", u"left", u"center"],
+ [u"left", u"left", u"left", u"center"]
+ ),
+ u"align-itm": (
+ [u"left", u"right"],
+ [u"left", u"left", u"right"],
+ [u"left", u"left", u"left", u"right"]
+ ),
+ u"width": ([28, 9], [4, 24, 10], [4, 4, 32, 10])
}
df_data = pd.DataFrame(data, columns=header)
- df_sorted = [df_data.sort_values(
- by=[key, header[idx]], ascending=[True, True]
- if key != header[idx] else [False, True]) for key in header]
- df_sorted_rev = [df_data.sort_values(
- by=[key, header[idx]], ascending=[False, True]
- if key != header[idx] else [True, True]) for key in header]
- df_sorted.extend(df_sorted_rev)
+ if sort_data:
+ df_sorted = [df_data.sort_values(
+ by=[key, header[idx]], ascending=[True, True]
+ if key != header[idx] else [False, True]) for key in header]
+ df_sorted_rev = [df_data.sort_values(
+ by=[key, header[idx]], ascending=[False, True]
+ if key != header[idx] else [True, True]) for key in header]
+ df_sorted.extend(df_sorted_rev)
+ else:
+ df_sorted = df_data
fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
for idx in range(len(df_data))]]
table_header = dict(
- values=[f"<b>{item}</b>" for item in header],
+ values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
fill_color=u"#7eade7",
align=params[u"align-hdr"][idx]
)
fig = go.Figure()
- for table in df_sorted:
- columns = [table.get(col) for col in header]
+ if sort_data:
+ for table in df_sorted:
+ columns = [table.get(col) for col in header]
+ fig.add_trace(
+ go.Table(
+ columnwidth=params[u"width"][idx],
+ header=table_header,
+ cells=dict(
+ values=columns,
+ fill_color=fill_color,
+ align=params[u"align-itm"][idx]
+ )
+ )
+ )
+
+ buttons = list()
+ menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
+ menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
+ menu_items.extend(menu_items_rev)
+ for idx, hdr in enumerate(menu_items):
+ visible = [False, ] * len(menu_items)
+ visible[idx] = True
+ buttons.append(
+ dict(
+ label=hdr.replace(u" [Mpps]", u""),
+ method=u"update",
+ args=[{u"visible": visible}],
+ )
+ )
+
+ fig.update_layout(
+ updatemenus=[
+ go.layout.Updatemenu(
+ type=u"dropdown",
+ direction=u"down",
+ x=0.0,
+ xanchor=u"left",
+ y=1.045,
+ yanchor=u"top",
+ active=len(menu_items) - 1,
+ buttons=list(buttons)
+ )
+ ],
+ )
+ else:
fig.add_trace(
go.Table(
columnwidth=params[u"width"][idx],
header=table_header,
cells=dict(
- values=columns,
+ values=[df_sorted.get(col) for col in header],
fill_color=fill_color,
align=params[u"align-itm"][idx]
)
)
)
- buttons = list()
- menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
- menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
- menu_items.extend(menu_items_rev)
- for idx, hdr in enumerate(menu_items):
- visible = [False, ] * len(menu_items)
- visible[idx] = True
- buttons.append(
- dict(
- label=hdr.replace(u" [Mpps]", u""),
- method=u"update",
- args=[{u"visible": visible}],
- )
- )
-
- fig.update_layout(
- updatemenus=[
- go.layout.Updatemenu(
- type=u"dropdown",
- direction=u"down",
- x=0.0,
- xanchor=u"left",
- y=1.045,
- yanchor=u"top",
- active=len(menu_items) - 1,
- buttons=list(buttons)
- )
- ]
- )
-
ploff.plot(
fig,
show_link=False,
@@ -599,7 +631,7 @@ def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
rst_file.write(
u".. raw:: html\n\n"
f' <iframe frameborder="0" scrolling="no" '
- f'width="1600" height="1000" '
+ f'width="1600" height="1200" '
f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
f'</iframe>\n\n'
)
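
The sort_data branch keeps the existing behaviour: one go.Table trace per sort order plus a dropdown that toggles trace visibility; with sort_data=False a single, unsorted table is emitted. A self-contained sketch of the dropdown technique with made-up data (the real function feeds it from the report data frames and writes the iframe wrapper shown above):

    import pandas as pd
    import plotly.graph_objects as go
    import plotly.offline as ploff

    # Made-up data; the real tables carry test names and throughput columns.
    df = pd.DataFrame({u"Test Case": [u"a", u"b", u"c"], u"Mpps": [3.0, 1.0, 2.0]})
    header = list(df.columns)

    fig = go.Figure()
    for key in header:  # one pre-sorted table trace per column
        df_sorted = df.sort_values(by=key)
        fig.add_trace(
            go.Table(
                header=dict(values=[f"<b>{h}</b>" for h in header]),
                cells=dict(values=[df_sorted[col] for col in header])
            )
        )

    buttons = list()
    for idx, key in enumerate(header):  # each button shows exactly one trace
        visible = [False] * len(header)
        visible[idx] = True
        buttons.append(
            dict(label=key, method=u"update", args=[{u"visible": visible}])
        )

    fig.update_layout(
        updatemenus=[go.layout.Updatemenu(type=u"dropdown", buttons=buttons)]
    )
    ploff.plot(fig, show_link=False, filename=u"sorted_table_sketch.html",
               auto_open=False)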
@@ -637,9 +669,9 @@ def table_perf_comparison(table, input_data):
rca = table.get(u"rca", None)
if rca:
try:
- with open(rca.get(u"data-file", ""), u"r") as rca_file:
+ with open(rca.get(u"data-file", u""), u"r") as rca_file:
rca_data = load(rca_file, Loader=FullLoader)
- header.insert(0, rca.get(u"title", "RCA"))
+ header.insert(0, rca.get(u"title", u"RCA"))
legend += (
u"RCA: Reference to the Root Cause Analysis, see below.\n"
)
@@ -724,15 +756,14 @@ def table_perf_comparison(table, input_data):
u" 2n-" in table[u"title"].lower())):
tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
if tbl_dict.get(tst_name_mod, None) is None:
- groups = re.search(REGEX_NIC, tst_data[u"parent"])
- nic = groups.group(0) if groups else u""
- name = \
- f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
+ name = tst_data[u'name'].rsplit(u'-', 1)[0]
if u"across testbeds" in table[u"title"].lower() or \
u"across topologies" in table[u"title"].lower():
name = _tpc_modify_displayed_test_name(name)
tbl_dict[tst_name_mod] = {
u"name": name,
+ u"replace-ref": True,
+ u"replace-cmp": True,
u"ref-data": list(),
u"cmp-data": list()
}
@@ -742,7 +773,6 @@ def table_perf_comparison(table, input_data):
replacement = table[u"reference"].get(u"data-replacement", None)
if replacement:
- create_new_list = True
rpl_data = input_data.filter_data(
table, data=replacement, continue_on_error=True)
for job, builds in replacement.items():
@@ -754,18 +784,19 @@ def table_perf_comparison(table, input_data):
u" 2n-" in table[u"title"].lower())):
tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
if tbl_dict.get(tst_name_mod, None) is None:
- name = \
- f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
+ name = tst_data[u'name'].rsplit(u'-', 1)[0]
if u"across testbeds" in table[u"title"].lower() or \
u"across topologies" in table[u"title"].lower():
name = _tpc_modify_displayed_test_name(name)
tbl_dict[tst_name_mod] = {
u"name": name,
+ u"replace-ref": False,
+ u"replace-cmp": True,
u"ref-data": list(),
u"cmp-data": list()
}
- if create_new_list:
- create_new_list = False
+ if tbl_dict[tst_name_mod][u"replace-ref"]:
+ tbl_dict[tst_name_mod][u"replace-ref"] = False
tbl_dict[tst_name_mod][u"ref-data"] = list()
_tpc_insert_data(
@@ -783,15 +814,14 @@ def table_perf_comparison(table, input_data):
u" 2n-" in table[u"title"].lower())):
tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
if tbl_dict.get(tst_name_mod, None) is None:
- groups = re.search(REGEX_NIC, tst_data[u"parent"])
- nic = groups.group(0) if groups else u""
- name = \
- f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
+ name = tst_data[u'name'].rsplit(u'-', 1)[0]
if u"across testbeds" in table[u"title"].lower() or \
u"across topologies" in table[u"title"].lower():
name = _tpc_modify_displayed_test_name(name)
tbl_dict[tst_name_mod] = {
u"name": name,
+ u"replace-ref": False,
+ u"replace-cmp": True,
u"ref-data": list(),
u"cmp-data": list()
}
@@ -803,7 +833,6 @@ def table_perf_comparison(table, input_data):
replacement = table[u"compare"].get(u"data-replacement", None)
if replacement:
- create_new_list = True
rpl_data = input_data.filter_data(
table, data=replacement, continue_on_error=True)
for job, builds in replacement.items():
@@ -815,18 +844,19 @@ def table_perf_comparison(table, input_data):
u" 2n-" in table[u"title"].lower())):
tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
if tbl_dict.get(tst_name_mod, None) is None:
- name = \
- f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
+ name = tst_data[u'name'].rsplit(u'-', 1)[0]
if u"across testbeds" in table[u"title"].lower() or \
u"across topologies" in table[u"title"].lower():
name = _tpc_modify_displayed_test_name(name)
tbl_dict[tst_name_mod] = {
u"name": name,
+ u"replace-ref": False,
+ u"replace-cmp": False,
u"ref-data": list(),
u"cmp-data": list()
}
- if create_new_list:
- create_new_list = False
+ if tbl_dict[tst_name_mod][u"replace-cmp"]:
+ tbl_dict[tst_name_mod][u"replace-cmp"] = False
tbl_dict[tst_name_mod][u"cmp-data"] = list()
_tpc_insert_data(
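
The per-test replace-ref / replace-cmp flags take over from the old create_new_list variable: a test's original samples are dropped exactly once, when its first replacement sample arrives, and tests first seen in the replacement data are never wiped. A small sketch of that merge pattern with hypothetical sample lists:

    def merge_with_replacement(original, replacement):
        """Sketch: replacement samples win over original ones, per test."""
        table = dict()
        for test, samples in original.items():
            table[test] = {u"replace": True, u"data": list(samples)}
        for test, samples in replacement.items():
            entry = table.setdefault(test, {u"replace": False, u"data": list()})
            if entry[u"replace"]:
                # First replacement sample for this test: drop the originals.
                entry[u"replace"] = False
                entry[u"data"] = list()
            entry[u"data"].extend(samples)
        return table

    # merge_with_replacement({u"t1": [1, 2]}, {u"t1": [9], u"t2": [5]})
    #   -> {u"t1": {..., u"data": [9]}, u"t2": {..., u"data": [5]}}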
@@ -1083,12 +1113,14 @@ def table_perf_comparison_nic(table, input_data):
u" 2n-" in table[u"title"].lower())):
tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
if tbl_dict.get(tst_name_mod, None) is None:
- name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
+ name = tst_data[u'name'].rsplit(u'-', 1)[0]
if u"across testbeds" in table[u"title"].lower() or \
u"across topologies" in table[u"title"].lower():
name = _tpc_modify_displayed_test_name(name)
tbl_dict[tst_name_mod] = {
u"name": name,
+ u"replace-ref": True,
+ u"replace-cmp": True,
u"ref-data": list(),
u"cmp-data": list()
}
@@ -1100,7 +1132,6 @@ def table_perf_comparison_nic(table, input_data):
replacement = table[u"reference"].get(u"data-replacement", None)
if replacement:
- create_new_list = True
rpl_data = input_data.filter_data(
table, data=replacement, continue_on_error=True)
for job, builds in replacement.items():
@@ -1114,18 +1145,19 @@ def table_perf_comparison_nic(table, input_data):
u" 2n-" in table[u"title"].lower())):
tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
if tbl_dict.get(tst_name_mod, None) is None:
- name = \
- f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
+ name = tst_data[u'name'].rsplit(u'-', 1)[0]
if u"across testbeds" in table[u"title"].lower() or \
u"across topologies" in table[u"title"].lower():
name = _tpc_modify_displayed_test_name(name)
tbl_dict[tst_name_mod] = {
u"name": name,
+ u"replace-ref": False,
+ u"replace-cmp": True,
u"ref-data": list(),
u"cmp-data": list()
}
- if create_new_list:
- create_new_list = False
+ if tbl_dict[tst_name_mod][u"replace-ref"]:
+ tbl_dict[tst_name_mod][u"replace-ref"] = False
tbl_dict[tst_name_mod][u"ref-data"] = list()
_tpc_insert_data(
@@ -1145,12 +1177,14 @@ def table_perf_comparison_nic(table, input_data):
u" 2n-" in table[u"title"].lower())):
tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
if tbl_dict.get(tst_name_mod, None) is None:
- name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
+ name = tst_data[u'name'].rsplit(u'-', 1)[0]
if u"across testbeds" in table[u"title"].lower() or \
u"across topologies" in table[u"title"].lower():
name = _tpc_modify_displayed_test_name(name)
tbl_dict[tst_name_mod] = {
u"name": name,
+ u"replace-ref": False,
+ u"replace-cmp": True,
u"ref-data": list(),
u"cmp-data": list()
}
@@ -1162,7 +1196,6 @@ def table_perf_comparison_nic(table, input_data):
replacement = table[u"compare"].get(u"data-replacement", None)
if replacement:
- create_new_list = True
rpl_data = input_data.filter_data(
table, data=replacement, continue_on_error=True)
for job, builds in replacement.items():
@@ -1176,18 +1209,19 @@ def table_perf_comparison_nic(table, input_data):
u" 2n-" in table[u"title"].lower())):
tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
if tbl_dict.get(tst_name_mod, None) is None:
- name = \
- f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
+ name = tst_data[u'name'].rsplit(u'-', 1)[0]
if u"across testbeds" in table[u"title"].lower() or \
u"across topologies" in table[u"title"].lower():
name = _tpc_modify_displayed_test_name(name)
tbl_dict[tst_name_mod] = {
u"name": name,
+ u"replace-ref": False,
+ u"replace-cmp": False,
u"ref-data": list(),
u"cmp-data": list()
}
- if create_new_list:
- create_new_list = False
+ if tbl_dict[tst_name_mod][u"replace-cmp"]:
+ tbl_dict[tst_name_mod][u"replace-cmp"] = False
tbl_dict[tst_name_mod][u"cmp-data"] = list()
_tpc_insert_data(
@@ -1404,9 +1438,9 @@ def table_nics_comparison(table, input_data):
for job, builds in table[u"data"].items():
for build in builds:
for tst_name, tst_data in data[job][str(build)].items():
- tst_name_mod = _tpc_modify_test_name(tst_name)
+ tst_name_mod = _tpc_modify_test_name(tst_name, ignore_nic=True)
if tbl_dict.get(tst_name_mod, None) is None:
- name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
+ name = tst_data[u'name'].rsplit(u'-', 1)[0]
tbl_dict[tst_name_mod] = {
u"name": name,
u"ref-data": list(),
@@ -1491,7 +1525,7 @@ def table_nics_comparison(table, input_data):
f"{table[u'output-file']}.txt",
delimiter=u";")
- with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
+ with open(table[u'output-file'], u'a') as txt_file:
txt_file.write(legend)
# Generate html table:
@@ -1760,13 +1794,13 @@ def table_perf_trending_dash(table, input_data):
rel_change_last = nan
else:
rel_change_last = round(
- ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
+ ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
rel_change_long = nan
else:
rel_change_long = round(
- ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
+ ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)
if classification_lst:
if isnan(rel_change_last) and isnan(rel_change_long):
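
Both the short-term and long-term deltas remain plain relative changes expressed in percent (the 100 literal is merely rewritten as 1e2); for example:

    last_avg, avg_week_ago = 11.0, 10.0   # hypothetical averages in Mpps
    rel_change_last = round(((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
    # rel_change_last == 10.0, i.e. +10 % against last week's average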
@@ -2306,3 +2340,359 @@ def table_failed_tests_html(table, input_data):
except KeyError:
logging.warning(u"The output file is not defined.")
return
+
+
+def table_comparison(table, input_data):
+ """Generate the table(s) with algorithm: table_comparison
+ specified in the specification file.
+
+ :param table: Table to generate.
+ :param input_data: Data to process.
+ :type table: pandas.Series
+ :type input_data: InputData
+ """
+ logging.info(f" Generating the table {table.get(u'title', u'')} ...")
+
+ # Transform the data
+ logging.info(
+ f" Creating the data set for the {table.get(u'type', u'')} "
+ f"{table.get(u'title', u'')}."
+ )
+
+ columns = table.get(u"columns", None)
+ if not columns:
+ logging.error(
+ f"No columns specified for {table.get(u'title', u'')}. Skipping."
+ )
+ return
+
+ cols = list()
+ for idx, col in enumerate(columns):
+ if col.get(u"data", None) is None:
+ logging.warning(f"No data for column {col.get(u'title', u'')}")
+ continue
+ data = input_data.filter_data(
+ table,
+ params=[u"throughput", u"result", u"name", u"parent", u"tags"],
+ data=col[u"data"],
+ continue_on_error=True
+ )
+ col_data = {
+ u"title": col.get(u"title", f"Column{idx}"),
+ u"data": dict()
+ }
+ for builds in data.values:
+ for build in builds:
+ for tst_name, tst_data in build.items():
+ tst_name_mod = \
+ _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
+ if col_data[u"data"].get(tst_name_mod, None) is None:
+ name = tst_data[u'name'].rsplit(u'-', 1)[0]
+ if u"across testbeds" in table[u"title"].lower() or \
+ u"across topologies" in table[u"title"].lower():
+ name = _tpc_modify_displayed_test_name(name)
+ col_data[u"data"][tst_name_mod] = {
+ u"name": name,
+ u"replace": True,
+ u"data": list(),
+ u"mean": None,
+ u"stdev": None
+ }
+ _tpc_insert_data(
+ target=col_data[u"data"][tst_name_mod][u"data"],
+ src=tst_data,
+ include_tests=table[u"include-tests"]
+ )
+
+ replacement = col.get(u"data-replacement", None)
+ if replacement:
+ rpl_data = input_data.filter_data(
+ table,
+ params=[u"throughput", u"result", u"name", u"parent", u"tags"],
+ data=replacement,
+ continue_on_error=True
+ )
+ for builds in rpl_data.values:
+ for build in builds:
+ for tst_name, tst_data in build.items():
+ tst_name_mod = \
+ _tpc_modify_test_name(tst_name).\
+ replace(u"2n1l-", u"")
+ if col_data[u"data"].get(tst_name_mod, None) is None:
+ name = tst_data[u'name'].rsplit(u'-', 1)[0]
+ if u"across testbeds" in table[u"title"].lower() \
+ or u"across topologies" in \
+ table[u"title"].lower():
+ name = _tpc_modify_displayed_test_name(name)
+ col_data[u"data"][tst_name_mod] = {
+ u"name": name,
+ u"replace": False,
+ u"data": list(),
+ u"mean": None,
+ u"stdev": None
+ }
+ if col_data[u"data"][tst_name_mod][u"replace"]:
+ col_data[u"data"][tst_name_mod][u"replace"] = False
+ col_data[u"data"][tst_name_mod][u"data"] = list()
+ _tpc_insert_data(
+ target=col_data[u"data"][tst_name_mod][u"data"],
+ src=tst_data,
+ include_tests=table[u"include-tests"]
+ )
+
+ if table[u"include-tests"] in (u"NDR", u"PDR"):
+ for tst_name, tst_data in col_data[u"data"].items():
+ if tst_data[u"data"]:
+ tst_data[u"mean"] = mean(tst_data[u"data"])
+ tst_data[u"stdev"] = stdev(tst_data[u"data"])
+ elif table[u"include-tests"] in (u"MRR", ):
+ for tst_name, tst_data in col_data[u"data"].items():
+ if tst_data[u"data"]:
+ tst_data[u"mean"] = tst_data[u"data"][0]
+ tst_data[u"stdev"] = tst_data[u"data"][0]
+
+ cols.append(col_data)
+
+ tbl_dict = dict()
+ for col in cols:
+ for tst_name, tst_data in col[u"data"].items():
+ if tbl_dict.get(tst_name, None) is None:
+ tbl_dict[tst_name] = {
+ "name": tst_data[u"name"]
+ }
+ tbl_dict[tst_name][col[u"title"]] = {
+ u"mean": tst_data[u"mean"],
+ u"stdev": tst_data[u"stdev"]
+ }
+
+ tbl_lst = list()
+ for tst_data in tbl_dict.values():
+ row = [tst_data[u"name"], ]
+ for col in cols:
+ row.append(tst_data.get(col[u"title"], None))
+ tbl_lst.append(row)
+
+ comparisons = table.get(u"comparisons", None)
+ if comparisons and isinstance(comparisons, list):
+ for idx, comp in enumerate(comparisons):
+ try:
+ col_ref = int(comp[u"reference"])
+ col_cmp = int(comp[u"compare"])
+ except KeyError:
+ logging.warning(u"Comparison: No references defined! Skipping.")
+ comparisons.pop(idx)
+ continue
+ if not (0 < col_ref <= len(cols) and
+ 0 < col_cmp <= len(cols)) or \
+ col_ref == col_cmp:
+ logging.warning(f"Wrong values of reference={col_ref} "
+ f"and/or compare={col_cmp}. Skipping.")
+ comparisons.pop(idx)
+ continue
+
+ tbl_cmp_lst = list()
+ if comparisons:
+ for row in tbl_lst:
+ new_row = deepcopy(row)
+ add_to_tbl = False
+ for comp in comparisons:
+ ref_itm = row[int(comp[u"reference"])]
+ if ref_itm is None and \
+ comp.get(u"reference-alt", None) is not None:
+ ref_itm = row[int(comp[u"reference-alt"])]
+ cmp_itm = row[int(comp[u"compare"])]
+ if ref_itm is not None and cmp_itm is not None and \
+ ref_itm[u"mean"] is not None and \
+ cmp_itm[u"mean"] is not None and \
+ ref_itm[u"stdev"] is not None and \
+ cmp_itm[u"stdev"] is not None:
+ delta, d_stdev = relative_change_stdev(
+ ref_itm[u"mean"], cmp_itm[u"mean"],
+ ref_itm[u"stdev"], cmp_itm[u"stdev"]
+ )
+ new_row.append(
+ {
+ u"mean": delta * 1e6,
+ u"stdev": d_stdev * 1e6
+ }
+ )
+ add_to_tbl = True
+ else:
+ new_row.append(None)
+ if add_to_tbl:
+ tbl_cmp_lst.append(new_row)
+
+ tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
+ tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
+
+ rcas = list()
+ rca_in = table.get(u"rca", None)
+ if rca_in and isinstance(rca_in, list):
+ for idx, itm in enumerate(rca_in):
+ try:
+ with open(itm.get(u"data", u""), u"r") as rca_file:
+ rcas.append(
+ {
+ u"title": itm.get(u"title", f"RCA{idx}"),
+ u"data": load(rca_file, Loader=FullLoader)
+ }
+ )
+ except (YAMLError, IOError) as err:
+ logging.warning(
+ f"The RCA file {itm.get(u'data', u'')} does not exist or "
+ f"it is corrupted!"
+ )
+ logging.debug(repr(err))
+
+ tbl_for_csv = list()
+ for line in tbl_cmp_lst:
+
+ row = [line[0], ]
+
+ for idx, rca in enumerate(rcas):
+ rca_nr = rca[u"data"].get(row[0 + idx], u"-")
+ row.insert(idx, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
+
+ for idx, itm in enumerate(line[1:]):
+ if itm is None:
+ row.append(u"NT")
+ row.append(u"NT")
+ else:
+ row.append(round(float(itm[u'mean']) / 1e6, 3))
+ row.append(round(float(itm[u'stdev']) / 1e6, 3))
+ tbl_for_csv.append(row)
+
+ header_csv = [rca[u"title"] for rca in rcas]
+ header_csv.append(u"Test Case")
+ for col in cols:
+ header_csv.append(f"Avg({col[u'title']})")
+ header_csv.append(f"Stdev({col[u'title']})")
+ for comp in comparisons:
+ header_csv.append(
+ f"Avg({cols[comp[u'reference'] - 1][u'title']},"
+ f"{cols[comp[u'compare'] - 1][u'title']})"
+ )
+ header_csv.append(
+ f"Stdev({cols[comp[u'reference'] - 1][u'title']},"
+ f"{cols[comp[u'compare'] - 1][u'title']})"
+ )
+
+ csv_file = f"{table[u'output-file']}-csv.csv"
+ with open(csv_file, u"wt", encoding='utf-8') as file_handler:
+ file_handler.write(u";".join(header_csv) + u"\n")
+ for test in tbl_for_csv:
+ file_handler.write(u";".join([str(item) for item in test]) + u"\n")
+
+ tbl_final = list()
+ for line in tbl_cmp_lst:
+ row = [line[0], ]
+ for idx, rca in enumerate(rcas):
+ rca_nr = rca[u"data"].get(row[0 + idx], u"-")
+ row.insert(idx, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
+ for idx, itm in enumerate(line[1:]):
+ if itm is None:
+ row.append(u"NT")
+ else:
+ if idx < len(cols):
+ row.append(
+ f"{round(float(itm[u'mean']) / 1e6, 1)} "
+ f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
+ replace(u"nan", u"NaN")
+ )
+ else:
+ row.append(
+ f"{round(float(itm[u'mean']) / 1e6, 1):+} "
+ f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
+ replace(u"nan", u"NaN")
+ )
+ tbl_final.append(row)
+
+ header = [rca[u"title"] for rca in rcas]
+ header.append(u"Test Case")
+ header.extend([col[u"title"] for col in cols])
+ header.extend(
+ [f"Diff({cols[comp[u'reference'] - 1][u'title']},"
+ f"{cols[comp[u'compare'] - 1][u'title']})"
+ for comp in comparisons]
+ )
+
+ # Generate csv tables:
+ csv_file = f"{table[u'output-file']}.csv"
+ with open(csv_file, u"wt", encoding='utf-8') as file_handler:
+ file_handler.write(u";".join(header) + u"\n")
+ for test in tbl_final:
+ file_handler.write(u";".join([str(item) for item in test]) + u"\n")
+
+ # Generate txt table:
+ txt_file_name = f"{table[u'output-file']}.txt"
+ convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
+
+ # Generate rst table:
+ file_name = table[u'output-file'].split(u"/")[-1]
+ if u"vpp" in table[u'output-file']:
+ path = u"_tmp/src/vpp_performance_tests/comparisons/"
+ else:
+ path = u"_tmp/src/dpdk_performance_tests/comparisons/"
+ rst_file_name = f"{path}{file_name}-txt.rst"
+ csv_file_name = f"{path}{file_name}.csv"
+ with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
+ file_handler.write(
+ u",".join(
+ [f'"{itm}"' for itm in header]
+ ) + u"\n"
+ )
+ for test in tbl_final:
+ file_handler.write(
+ u",".join(
+ [f'"{itm}"' for itm in test]
+ ) + u"\n"
+ )
+
+ convert_csv_to_pretty_txt(csv_file_name, rst_file_name, delimiter=u",")
+
+ legend = u"\nLegend:\n"
+ for idx, rca in enumerate(rcas):
+ try:
+ desc = (
+ f"Diff({cols[comparisons[idx][u'reference'] - 1][u'title']},"
+ f"{cols[comparisons[idx][u'compare'] - 1][u'title']})\n"
+ )
+ except (KeyError, IndexError):
+ desc = u"\n"
+ legend += f"{rca[u'title']}: Root Cause Analysis for {desc}"
+ legend += (
+ u"First part of the result is a mean value [Mpps].\n"
+ f"Second part of the result following '\u00B1' is a standard "
+ u"deviation [Mpps].\n"
+ u"First part of Diff is a relative change of mean values [%].\n"
+ f"Second part of Diff following '\u00B1' is a standard deviation "
+ u"of the Diff [percentual points].\n"
+ u"NT: Not tested.\n"
+ )
+
+ footnote = u""
+ for rca in rcas:
+ footnote += f"\n{rca[u'title']}:\n"
+ footnote += rca[u"data"].get(u"footnote", u"")
+
+ with open(txt_file_name, u'a', encoding='utf-8') as txt_file:
+ txt_file.write(legend)
+ if footnote:
+ txt_file.write(footnote)
+ txt_file.write(u":END")
+
+ with open(rst_file_name, u'a', encoding='utf-8') as txt_file:
+ txt_file.write(legend.replace(u"\n", u" |br| "))
+ if footnote:
+ txt_file.write(footnote.replace(u"\n", u" |br| "))
+ txt_file.write(u":END")
+
+ # Generate html table:
+ _tpc_generate_html_table(
+ header,
+ tbl_final,
+ table[u'output-file'],
+ legend=legend,
+ footnote=footnote,
+ sort_data=False
+ )
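
table_comparison is driven entirely by its entry in the specification file. A hypothetical, trimmed-down spec item written as the dict the code above consumes (the key names "type" and "algorithm" are assumed from the surrounding PAL spec handling; titles, jobs and file names are made up; "reference"/"compare" are 1-based column indices and "reference-alt" is the optional fallback reference):

    table_comparison_spec_sketch = {
        u"type": u"table",
        u"algorithm": u"table_comparison",
        u"title": u"Example comparison across two releases",
        u"output-file": u"_build/example-comparison",
        u"include-tests": u"PDR",           # NDR, PDR or MRR
        u"columns": [
            {u"title": u"rls2001", u"data": {u"job-a": [1, 2, 3]}},
            {u"title": u"rls2005", u"data": {u"job-b": [4, 5, 6]},
             u"data-replacement": {u"job-b-fix": [7]}},
        ],
        u"comparisons": [{u"reference": 1, u"compare": 2}],
        u"rca": [{u"title": u"RCA1", u"data": u"rca/rca-example.yaml"}],
    }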
diff --git a/resources/tools/presentation/pal_utils.py b/resources/tools/presentation/pal_utils.py
index f296c6b16c..3efa00af2c 100644
--- a/resources/tools/presentation/pal_utils.py
+++ b/resources/tools/presentation/pal_utils.py
@@ -309,7 +309,7 @@ def convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u","):
"""
txt_table = None
- with open(csv_file_name, u"rt") as csv_file:
+ with open(csv_file_name, u"rt", encoding='utf-8') as csv_file:
csv_content = csv.reader(csv_file, delimiter=delimiter, quotechar=u'"')
for row in csv_content:
if txt_table is None:
@@ -319,6 +319,29 @@ def convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u","):
txt_table.align = u"r"
txt_table.align[u"Test Case"] = u"l"
txt_table.align[u"RCA"] = u"l"
- if txt_table:
- with open(txt_file_name, u"wt") as txt_file:
+ txt_table.align[u"RCA1"] = u"l"
+ txt_table.align[u"RCA2"] = u"l"
+ txt_table.align[u"RCA3"] = u"l"
+
+ if not txt_table:
+ return
+
+ if txt_file_name.endswith(u".txt"):
+ with open(txt_file_name, u"wt", encoding='utf-8') as txt_file:
txt_file.write(str(txt_table))
+ elif txt_file_name.endswith(u".rst"):
+ with open(txt_file_name, u"wt") as txt_file:
+ txt_file.write(
+ u"\n"
+ u".. |br| raw:: html\n\n <br />\n\n\n"
+ u".. |prein| raw:: html\n\n <pre>\n\n\n"
+ u".. |preout| raw:: html\n\n </pre>\n\n"
+ )
+ txt_file.write(
+ u"\n.. only:: html\n\n"
+ u" .. csv-table::\n"
+ u" :header-rows: 1\n"
+ u" :widths: auto\n"
+ u" :align: center\n"
+ f" :file: {csv_file_name.split(u'/')[-1]}\n"
+ )
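
Illustrative use of the updated helper (file names made up, import path assumed): a .txt target still gets the pretty-printed ASCII table, while a .rst target now gets only the |br|/|prein|/|preout| substitutions and a csv-table directive that pulls in the CSV by its base name:

    from pal_utils import convert_csv_to_pretty_txt  # assumed import path

    convert_csv_to_pretty_txt(u"comparison.csv", u"comparison.txt", delimiter=u";")
    convert_csv_to_pretty_txt(u"comparison.csv", u"comparison.rst", delimiter=u",")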
diff --git a/resources/tools/presentation/rca/rca-2n-skx-2t1c-pdr.yaml b/resources/tools/presentation/rca/rca-2n-skx-2t1c-pdr.yaml
index 44c5bd5a1b..ab8ac178cc 100644
--- a/resources/tools/presentation/rca/rca-2n-skx-2t1c-pdr.yaml
+++ b/resources/tools/presentation/rca/rca-2n-skx-2t1c-pdr.yaml
@@ -27,4 +27,4 @@
64b-2t1c-avf-ethip4-ip4scale20k: 1
64b-2t1c-avf-ethip4-ip4scale2m: 1
64b-2t1c-ethip4udp-ip4base-nat44: 1
-footnote: "\nRoot Cause Analysis:\n[1] Impact of Skx ucode upgrade from 0x2000043 to 0x2000065 in combination with SuperMicro motherboards/firmware and kernel updates, subjecto to the ongoing detailed RCA investigation with Intel NPG.\n[2] Applied fix of FVL NIC firmware 6.0.1 for increasing TRex Mpps rate from 27 Mpps to 37 Mpps, [CSIT-1503], [TRex-519].\n[3] Applied VPP PAPI fix to enable memif zero-copy, [CSIT-1592], [VPP-1764].\n[4] To-Be-Investigated, vhost-user avg PDR throughput rate has higher stdev than before.\n[5] To-Be-Investigated, dot1q-l2xc with DPDK FVL.\n"
+footnote: "[1] Impact of Skx ucode upgrade from 0x2000043 to 0x2000065 in combination with SuperMicro motherboards/firmware and kernel updates, subjecto to the ongoing detailed RCA investigation with Intel NPG.\n[2] Applied fix of FVL NIC firmware 6.0.1 for increasing TRex Mpps rate from 27 Mpps to 37 Mpps, [CSIT-1503], [TRex-519].\n[3] Applied VPP PAPI fix to enable memif zero-copy, [CSIT-1592], [VPP-1764].\n[4] To-Be-Investigated, vhost-user avg PDR throughput rate has higher stdev than before.\n[5] To-Be-Investigated, dot1q-l2xc with DPDK FVL.\n"
diff --git a/resources/tools/presentation/rca/rca-3n-skx-2t1c-pdr.yaml b/resources/tools/presentation/rca/rca-3n-skx-2t1c-pdr.yaml
index f562927cca..2ae4edc12f 100644
--- a/resources/tools/presentation/rca/rca-3n-skx-2t1c-pdr.yaml
+++ b/resources/tools/presentation/rca/rca-3n-skx-2t1c-pdr.yaml
@@ -36,4 +36,4 @@ imix-2t1c-ethip4ipsec4tnlsw-ip4base-int-aes256gcm: 1
64b-2t1c-avf-ethip4-ip4scale20k: 1
64b-2t1c-avf-ethip4-ip4scale2m: 1
64b-2t1c-ethip4udp-ip4base-nat44: 1
-footnote: "\nRoot Cause Analysis:\n[1] Impact of Skx ucode upgrade from 0x2000043 to 0x2000065 in combination with SuperMicro motherboards/firmware and kernel updates, subjecto to the ongoing detailed RCA investigation with Intel NPG.\n[2] Applied fix of FVL NIC firmware 6.0.1 for increasing TRex Mpps rate from 27 Mpps to 37 Mpps, [CSIT-1503], [TRex-519].\n[3] Applied VPP PAPI fix to enable memif zero-copy, [CSIT-1592], [VPP-1764].\n[4] To-Be-Investigated, vhost-user avg PDR throughput rate has higher stdev than before.\n[5] To-Be-Investigated, dot1q-l2xc with DPDK FVL.\n"
+footnote: "[1] Impact of Skx ucode upgrade from 0x2000043 to 0x2000065 in combination with SuperMicro motherboards/firmware and kernel updates, subjecto to the ongoing detailed RCA investigation with Intel NPG.\n[2] Applied fix of FVL NIC firmware 6.0.1 for increasing TRex Mpps rate from 27 Mpps to 37 Mpps, [CSIT-1503], [TRex-519].\n[3] Applied VPP PAPI fix to enable memif zero-copy, [CSIT-1592], [VPP-1764].\n[4] To-Be-Investigated, vhost-user avg PDR throughput rate has higher stdev than before.\n[5] To-Be-Investigated, dot1q-l2xc with DPDK FVL.\n"
diff --git a/resources/tools/presentation/specification_parser.py b/resources/tools/presentation/specification_parser.py
index e3ad4f5387..62e9442481 100644
--- a/resources/tools/presentation/specification_parser.py
+++ b/resources/tools/presentation/specification_parser.py
@@ -694,6 +694,19 @@ class Specification:
if isinstance(data_set, str):
table[u"history"][i][u"data-replacement"] = \
self.configuration[u"data-sets"][data_set]
+
+ if table.get(u"columns", None):
+ for i in range(len(table[u"columns"])):
+ data_set = table[u"columns"][i].get(u"data", None)
+ if isinstance(data_set, str):
+ table[u"columns"][i][u"data"] = \
+ self.configuration[u"data-sets"][data_set]
+ data_set = table[u"columns"][i].get(
+ u"data-replacement", None)
+ if isinstance(data_set, str):
+ table[u"columns"][i][u"data-replacement"] = \
+ self.configuration[u"data-sets"][data_set]
+
except KeyError:
raise PresentationError(
f"Wrong data set used in {table.get(u'title', u'')}."