Diffstat (limited to 'resources')
-rw-r--r--  resources/tools/presentation/generator_tables.py  219
-rw-r--r--  resources/tools/presentation/pal_utils.py            2
2 files changed, 185 insertions, 36 deletions
diff --git a/resources/tools/presentation/generator_tables.py b/resources/tools/presentation/generator_tables.py
index e865d3a57b..afc154a024 100644
--- a/resources/tools/presentation/generator_tables.py
+++ b/resources/tools/presentation/generator_tables.py
@@ -499,7 +499,7 @@ def _tpc_generate_html_table(header, data, output_file_name):
"""
try:
- idx = header.index(u"Test case")
+ idx = header.index(u"Test Case")
except ValueError:
idx = 0
params = {
@@ -607,7 +607,8 @@ def table_perf_comparison(table, input_data):
# Prepare the header of the tables
try:
- header = [u"Test case", ]
+ header = [u"Test Case", ]
+ legend = u"\nLegend:\n"
rca_data = None
rca = table.get(u"rca", None)
@@ -616,6 +617,9 @@ def table_perf_comparison(table, input_data):
with open(rca.get(u"data-file", ""), u"r") as rca_file:
rca_data = load(rca_file, Loader=FullLoader)
header.insert(0, rca.get(u"title", "RCA"))
+ legend += (
+ u"RCA: Reference to the Root Cause Analysis, see below.\n"
+ )
except (YAMLError, IOError) as err:
logging.warning(repr(err))
@@ -627,6 +631,16 @@ def table_perf_comparison(table, input_data):
f"{item[u'title']} Stdev({table[u'include-tests']})"
]
)
+ legend += (
+ f"{item[u'title']} Avg({table[u'include-tests']}): "
+ f"Mean value of {table[u'include-tests']} [Mpps] computed from "
+ f"a series of runs of the listed tests executed against "
+ f"rls{item[u'title']}.\n"
+ f"{item[u'title']} Stdev({table[u'include-tests']}): "
+ f"Standard deviation value of {table[u'include-tests']} [Mpps] "
+ f"computed from a series of runs of the listed tests executed "
+ f"against rls{item[u'title']}.\n"
+ )
header.extend(
[
f"{table[u'reference'][u'title']} "
@@ -643,6 +657,35 @@ def table_perf_comparison(table, input_data):
]
)
header_str = u";".join(header) + u"\n"
+ legend += (
+ f"{table[u'reference'][u'title']} "
+ f"Avg({table[u'include-tests']}): "
+ f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
+ f"series of runs of the listed tests executed against "
+ f"rls{table[u'reference'][u'title']}.\n"
+ f"{table[u'reference'][u'title']} "
+ f"Stdev({table[u'include-tests']}): "
+ f"Standard deviation value of {table[u'include-tests']} [Mpps] "
+ f"computed from a series of runs of the listed tests executed "
+ f"against rls{table[u'reference'][u'title']}.\n"
+ f"{table[u'compare'][u'title']} "
+ f"Avg({table[u'include-tests']}): "
+ f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
+ f"series of runs of the listed tests executed against "
+ f"rls{table[u'compare'][u'title']}.\n"
+ f"{table[u'compare'][u'title']} "
+ f"Stdev({table[u'include-tests']}): "
+ f"Standard deviation value of {table[u'include-tests']} [Mpps] "
+ f"computed from a series of runs of the listed tests executed "
+ f"against rls{table[u'compare'][u'title']}.\n"
+ f"Diff({table[u'reference'][u'title']},"
+ f"{table[u'compare'][u'title']}): "
+ f"Percentage change calculated for mean values.\n"
+ u"Stdev(Diff): "
+ u"Standard deviation of percentage change calculated for mean "
+ u"values.\n"
+ u"NT: Not Tested\n"
+ )
except (AttributeError, KeyError) as err:
logging.error(f"The model is invalid, missing parameter: {repr(err)}")
return
@@ -815,9 +858,9 @@ def table_perf_comparison(table, input_data):
item.append(round(mean(hist_data) / 1e6, 1))
item.append(round(stdev(hist_data) / 1e6, 1))
else:
- item.extend([u"Not tested", u"Not tested"])
+ item.extend([u"NT", u"NT"])
else:
- item.extend([u"Not tested", u"Not tested"])
+ item.extend([u"NT", u"NT"])
data_r = tbl_dict[tst_name][u"ref-data"]
if data_r:
if table[u"include-tests"] == u"MRR":
@@ -831,7 +874,7 @@ def table_perf_comparison(table, input_data):
else:
data_r_mean = None
data_r_stdev = None
- item.extend([u"Not tested", u"Not tested"])
+ item.extend([u"NT", u"NT"])
data_c = tbl_dict[tst_name][u"cmp-data"]
if data_c:
if table[u"include-tests"] == u"MRR":
@@ -845,10 +888,10 @@ def table_perf_comparison(table, input_data):
else:
data_c_mean = None
data_c_stdev = None
- item.extend([u"Not tested", u"Not tested"])
- if item[-2] == u"Not tested":
+ item.extend([u"NT", u"NT"])
+ if item[-2] == u"NT":
pass
- elif item[-4] == u"Not tested":
+ elif item[-4] == u"NT":
item.append(u"New in CSIT-2001")
item.append(u"New in CSIT-2001")
elif data_r_mean is not None and data_c_mean is not None:
@@ -866,7 +909,7 @@ def table_perf_comparison(table, input_data):
if rca_data:
rca_nr = rca_data.get(item[0], u"-")
item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
- if (len(item) == len(header)) and (item[-4] != u"Not tested"):
+ if (len(item) == len(header)) and (item[-4] != u"NT"):
tbl_lst.append(item)
tbl_lst = _tpc_sort_table(tbl_lst)
@@ -881,11 +924,14 @@ def table_perf_comparison(table, input_data):
txt_file_name = f"{table[u'output-file']}.txt"
convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
- if rca_data:
- footnote = rca_data.get(u"footnote", "")
- if footnote:
- with open(txt_file_name, u'a') as txt_file:
- txt_file.writelines(footnote)
+ with open(txt_file_name, u'a') as txt_file:
+ txt_file.write(legend)
+ if rca_data:
+ footnote = rca_data.get(u"footnote", u"")
+ if footnote:
+ txt_file.write(u"\n")
+ txt_file.write(footnote)
+ txt_file.write(u":END")
# Generate html table:
_tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
@@ -912,7 +958,8 @@ def table_perf_comparison_nic(table, input_data):
# Prepare the header of the tables
try:
- header = [u"Test case", ]
+ header = [u"Test Case", ]
+ legend = u"\nLegend:\n"
rca_data = None
rca = table.get(u"rca", None)
@@ -921,6 +968,9 @@ def table_perf_comparison_nic(table, input_data):
with open(rca.get(u"data-file", ""), u"r") as rca_file:
rca_data = load(rca_file, Loader=FullLoader)
header.insert(0, rca.get(u"title", "RCA"))
+ legend += (
+ u"RCA: Reference to the Root Cause Analysis, see below.\n"
+ )
except (YAMLError, IOError) as err:
logging.warning(repr(err))
@@ -932,6 +982,16 @@ def table_perf_comparison_nic(table, input_data):
f"{item[u'title']} Stdev({table[u'include-tests']})"
]
)
+ legend += (
+ f"{item[u'title']} Avg({table[u'include-tests']}): "
+ f"Mean value of {table[u'include-tests']} [Mpps] computed from "
+ f"a series of runs of the listed tests executed against "
+ f"rls{item[u'title']}.\n"
+ f"{item[u'title']} Stdev({table[u'include-tests']}): "
+ f"Standard deviation value of {table[u'include-tests']} [Mpps] "
+ f"computed from a series of runs of the listed tests executed "
+ f"against rls{item[u'title']}.\n"
+ )
header.extend(
[
f"{table[u'reference'][u'title']} "
@@ -948,6 +1008,35 @@ def table_perf_comparison_nic(table, input_data):
]
)
header_str = u";".join(header) + u"\n"
+ legend += (
+ f"{table[u'reference'][u'title']} "
+ f"Avg({table[u'include-tests']}): "
+ f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
+ f"series of runs of the listed tests executed against "
+ f"rls{table[u'reference'][u'title']}.\n"
+ f"{table[u'reference'][u'title']} "
+ f"Stdev({table[u'include-tests']}): "
+ f"Standard deviation value of {table[u'include-tests']} [Mpps] "
+ f"computed from a series of runs of the listed tests executed "
+ f"against rls{table[u'reference'][u'title']}.\n"
+ f"{table[u'compare'][u'title']} "
+ f"Avg({table[u'include-tests']}): "
+ f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
+ f"series of runs of the listed tests executed against "
+ f"rls{table[u'compare'][u'title']}.\n"
+ f"{table[u'compare'][u'title']} "
+ f"Stdev({table[u'include-tests']}): "
+ f"Standard deviation value of {table[u'include-tests']} [Mpps] "
+ f"computed from a series of runs of the listed tests executed "
+ f"against rls{table[u'compare'][u'title']}.\n"
+ f"Diff({table[u'reference'][u'title']},"
+ f"{table[u'compare'][u'title']}): "
+ f"Percentage change calculated for mean values.\n"
+ u"Stdev(Diff): "
+ u"Standard deviation of percentage change calculated for mean "
+ u"values.\n"
+ u"NT: Not Tested\n"
+ )
except (AttributeError, KeyError) as err:
logging.error(f"The model is invalid, missing parameter: {repr(err)}")
return
@@ -1126,9 +1215,9 @@ def table_perf_comparison_nic(table, input_data):
item.append(round(mean(hist_data) / 1e6, 1))
item.append(round(stdev(hist_data) / 1e6, 1))
else:
- item.extend([u"Not tested", u"Not tested"])
+ item.extend([u"NT", u"NT"])
else:
- item.extend([u"Not tested", u"Not tested"])
+ item.extend([u"NT", u"NT"])
data_r = tbl_dict[tst_name][u"ref-data"]
if data_r:
if table[u"include-tests"] == u"MRR":
@@ -1142,7 +1231,7 @@ def table_perf_comparison_nic(table, input_data):
else:
data_r_mean = None
data_r_stdev = None
- item.extend([u"Not tested", u"Not tested"])
+ item.extend([u"NT", u"NT"])
data_c = tbl_dict[tst_name][u"cmp-data"]
if data_c:
if table[u"include-tests"] == u"MRR":
@@ -1156,10 +1245,10 @@ def table_perf_comparison_nic(table, input_data):
else:
data_c_mean = None
data_c_stdev = None
- item.extend([u"Not tested", u"Not tested"])
- if item[-2] == u"Not tested":
+ item.extend([u"NT", u"NT"])
+ if item[-2] == u"NT":
pass
- elif item[-4] == u"Not tested":
+ elif item[-4] == u"NT":
item.append(u"New in CSIT-2001")
item.append(u"New in CSIT-2001")
elif data_r_mean is not None and data_c_mean is not None:
@@ -1177,7 +1266,7 @@ def table_perf_comparison_nic(table, input_data):
if rca_data:
rca_nr = rca_data.get(item[0], u"-")
item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
- if (len(item) == len(header)) and (item[-4] != u"Not tested"):
+ if (len(item) == len(header)) and (item[-4] != u"NT"):
tbl_lst.append(item)
tbl_lst = _tpc_sort_table(tbl_lst)
@@ -1192,11 +1281,14 @@ def table_perf_comparison_nic(table, input_data):
txt_file_name = f"{table[u'output-file']}.txt"
convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
- if rca_data:
- footnote = rca_data.get(u"footnote", "")
- if footnote:
- with open(txt_file_name, u'a') as txt_file:
- txt_file.writelines(footnote)
+ with open(txt_file_name, u'a') as txt_file:
+ txt_file.write(legend)
+ if rca_data:
+ footnote = rca_data.get(u"footnote", u"")
+ if footnote:
+ txt_file.write(u"\n")
+ txt_file.write(footnote)
+ txt_file.write(u":END")
# Generate html table:
_tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
@@ -1224,7 +1316,7 @@ def table_nics_comparison(table, input_data):
# Prepare the header of the tables
try:
header = [
- u"Test case",
+ u"Test Case",
f"{table[u'reference'][u'title']} "
f"Avg({table[u'include-tests']})",
f"{table[u'reference'][u'title']} "
@@ -1237,6 +1329,36 @@ def table_nics_comparison(table, input_data):
f"{table[u'compare'][u'title']})",
u"Stdev(Diff)"
]
+ legend = (
+ u"\nLegend:\n"
+ f"{table[u'reference'][u'title']} "
+ f"Avg({table[u'include-tests']}): "
+ f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
+ f"series of runs of the listed tests executed using "
+ f"{table[u'reference'][u'title']} NIC.\n"
+ f"{table[u'reference'][u'title']} "
+ f"Stdev({table[u'include-tests']}): "
+ f"Standard deviation value of {table[u'include-tests']} [Mpps] "
+ f"computed from a series of runs of the listed tests executed "
+ f"using {table[u'reference'][u'title']} NIC.\n"
+ f"{table[u'compare'][u'title']} "
+ f"Avg({table[u'include-tests']}): "
+ f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
+ f"series of runs of the listed tests executed using "
+ f"{table[u'compare'][u'title']} NIC.\n"
+ f"{table[u'compare'][u'title']} "
+ f"Stdev({table[u'include-tests']}): "
+ f"Standard deviation value of {table[u'include-tests']} [Mpps] "
+ f"computed from a series of runs of the listed tests executed "
+ f"using {table[u'compare'][u'title']} NIC.\n"
+ f"Diff({table[u'reference'][u'title']},"
+ f"{table[u'compare'][u'title']}): "
+ f"Percentage change calculated for mean values.\n"
+ u"Stdev(Diff): "
+ u"Standard deviation of percentage change calculated for mean "
+ u"values.\n"
+ u":END"
+ )
except (AttributeError, KeyError) as err:
logging.error(f"The model is invalid, missing parameter: {repr(err)}")
@@ -1334,6 +1456,9 @@ def table_nics_comparison(table, input_data):
f"{table[u'output-file']}.txt",
delimiter=u";")
+ with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
+ txt_file.write(legend)
+
# Generate html table:
_tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
@@ -1360,15 +1485,37 @@ def table_soak_vs_ndr(table, input_data):
# Prepare the header of the table
try:
header = [
- u"Test case",
- f"{table[u'reference'][u'title']} Thput [Mpps]",
- f"{table[u'reference'][u'title']} Stdev [Mpps]",
- f"{table[u'compare'][u'title']} Thput [Mpps]",
- f"{table[u'compare'][u'title']} Stdev [Mpps]",
- u"Delta [%]",
- u"Stdev of delta [%]"
+ u"Test Case",
+ f"Avg({table[u'reference'][u'title']})",
+ f"Stdev({table[u'reference'][u'title']})",
+ f"Avg({table[u'compare'][u'title']})",
+ f"Stdev{table[u'compare'][u'title']})",
+ u"Diff",
+ u"Stdev(Diff)"
]
header_str = u";".join(header) + u"\n"
+ legend = (
+ u"\nLegend:\n"
+ f"Avg({table[u'reference'][u'title']}): "
+ f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
+ f"from a series of runs of the listed tests.\n"
+ f"Stdev({table[u'reference'][u'title']}): "
+ f"Standard deviation value of {table[u'reference'][u'title']} "
+ f"[Mpps] computed from a series of runs of the listed tests.\n"
+ f"Avg({table[u'compare'][u'title']}): "
+ f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
+ f"a series of runs of the listed tests.\n"
+ f"Stdev({table[u'compare'][u'title']}): "
+ f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
+ f"computed from a series of runs of the listed tests.\n"
+ f"Diff({table[u'reference'][u'title']},"
+ f"{table[u'compare'][u'title']}): "
+ f"Percentage change calculated for mean values.\n"
+ u"Stdev(Diff): "
+ u"Standard deviation of percentage change calculated for mean "
+ u"values.\n"
+ u":END"
+ )
except (AttributeError, KeyError) as err:
logging.error(f"The model is invalid, missing parameter: {repr(err)}")
return
@@ -1484,6 +1631,8 @@ def table_soak_vs_ndr(table, input_data):
convert_csv_to_pretty_txt(
csv_file, f"{table[u'output-file']}.txt", delimiter=u";"
)
+ with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
+ txt_file.write(legend)
# Generate html table:
_tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
diff --git a/resources/tools/presentation/pal_utils.py b/resources/tools/presentation/pal_utils.py
index 0c51bb2981..f296c6b16c 100644
--- a/resources/tools/presentation/pal_utils.py
+++ b/resources/tools/presentation/pal_utils.py
@@ -317,7 +317,7 @@ def convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u","):
else:
txt_table.add_row(row)
txt_table.align = u"r"
- txt_table.align[u"Test case"] = u"l"
+ txt_table.align[u"Test Case"] = u"l"
txt_table.align[u"RCA"] = u"l"
if txt_table:
with open(txt_file_name, u"wt") as txt_file: