aboutsummaryrefslogtreecommitdiffstats
path: root/resources/tools/presentation
diff options
context:
space:
mode:
Diffstat (limited to 'resources/tools/presentation')
-rw-r--r--resources/tools/presentation/generator_tables.py348
-rw-r--r--resources/tools/presentation/input_data_parser.py4
-rw-r--r--resources/tools/presentation/new/generator_tables.py349
-rw-r--r--resources/tools/presentation/new/input_data_parser.py4
-rw-r--r--resources/tools/presentation/new/specification_CPTA.yaml35
-rw-r--r--resources/tools/presentation/new/utils.py26
-rw-r--r--resources/tools/presentation/specification_CPTA.yaml35
-rw-r--r--resources/tools/presentation/utils.py25
8 files changed, 603 insertions, 223 deletions
diff --git a/resources/tools/presentation/generator_tables.py b/resources/tools/presentation/generator_tables.py
index abece8590b..6aa57db796 100644
--- a/resources/tools/presentation/generator_tables.py
+++ b/resources/tools/presentation/generator_tables.py
@@ -17,7 +17,6 @@
import logging
import csv
-import prettytable
import pandas as pd
from string import replace
@@ -27,7 +26,7 @@ from xml.etree import ElementTree as ET
from errors import PresentationError
from utils import mean, stdev, relative_change, remove_outliers,\
- split_outliers, classify_anomalies
+ split_outliers, classify_anomalies, convert_csv_to_pretty_txt
def generate_tables(spec, data):
@@ -522,18 +521,8 @@ def table_performance_comparison(table, input_data):
]
for i, txt_name in enumerate(tbl_names_txt):
- txt_table = None
logging.info(" Writing file: '{0}'".format(txt_name))
- with open(tbl_names[i], 'rb') as csv_file:
- csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
- for row in csv_content:
- if txt_table is None:
- txt_table = prettytable.PrettyTable(row)
- else:
- txt_table.add_row(row)
- txt_table.align["Test case"] = "l"
- with open(txt_name, "w") as txt_file:
- txt_file.write(str(txt_table))
+ convert_csv_to_pretty_txt(tbl_names[i], txt_name)
# Selected tests in csv:
input_file = "{0}-ndr-1t1c-full{1}".format(table["output-file"],
@@ -711,18 +700,8 @@ def table_performance_comparison_mrr(table, input_data):
]
for i, txt_name in enumerate(tbl_names_txt):
- txt_table = None
logging.info(" Writing file: '{0}'".format(txt_name))
- with open(tbl_names[i], 'rb') as csv_file:
- csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
- for row in csv_content:
- if txt_table is None:
- txt_table = prettytable.PrettyTable(row)
- else:
- txt_table.add_row(row)
- txt_table.align["Test case"] = "l"
- with open(txt_name, "w") as txt_file:
- txt_file.write(str(txt_table))
+ convert_csv_to_pretty_txt(tbl_names[i], txt_name)
def table_performance_trending_dashboard(table, input_data):
@@ -852,18 +831,92 @@ def table_performance_trending_dashboard(table, input_data):
file_handler.write(",".join([str(item) for item in test]) + '\n')
txt_file_name = "{0}.txt".format(table["output-file"])
- txt_table = None
logging.info(" Writing file: '{0}'".format(txt_file_name))
- with open(file_name, 'rb') as csv_file:
- csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
- for row in csv_content:
- if txt_table is None:
- txt_table = prettytable.PrettyTable(row)
- else:
- txt_table.add_row(row)
- txt_table.align["Test case"] = "l"
- with open(txt_file_name, "w") as txt_file:
- txt_file.write(str(txt_table))
+ convert_csv_to_pretty_txt(file_name, txt_file_name)
+
+
+def _generate_url(base, test_name):
+ """Generate URL to a trending plot from the name of the test case.
+
+ :param base: The base part of URL common to all test cases.
+ :param test_name: The name of the test case.
+ :type base: str
+ :type test_name: str
+ :returns: The URL to the plot with the trending data for the given test
+ case.
+    :rtype: str
+ """
+
+ url = base
+ file_name = ""
+ anchor = "#"
+ feature = ""
+
+ if "lbdpdk" in test_name or "lbvpp" in test_name:
+ file_name = "link_bonding.html"
+
+ elif "testpmd" in test_name or "l3fwd" in test_name:
+ file_name = "dpdk.html"
+
+ elif "memif" in test_name:
+ file_name = "container_memif.html"
+
+ elif "srv6" in test_name:
+ file_name = "srv6.html"
+
+ elif "vhost" in test_name:
+ if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
+ file_name = "vm_vhost_l2.html"
+ elif "ip4base" in test_name:
+ file_name = "vm_vhost_ip4.html"
+
+ elif "ipsec" in test_name:
+ file_name = "ipsec.html"
+
+ elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
+ file_name = "ip4_tunnels.html"
+
+ elif "ip4base" in test_name or "ip4scale" in test_name:
+ file_name = "ip4.html"
+ if "iacl" in test_name or "snat" in test_name or "cop" in test_name:
+ feature = "-features"
+
+ elif "ip6base" in test_name or "ip6scale" in test_name:
+ file_name = "ip6.html"
+
+ elif "l2xcbase" in test_name or "l2xcscale" in test_name \
+ or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
+ or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
+ file_name = "l2.html"
+ if "iacl" in test_name:
+ feature = "-features"
+
+ if "x520" in test_name:
+ anchor += "x520-"
+ elif "x710" in test_name:
+ anchor += "x710-"
+ elif "xl710" in test_name:
+ anchor += "xl710-"
+
+ if "64b" in test_name:
+ anchor += "64b-"
+ elif "78b" in test_name:
+ anchor += "78b-"
+ elif "imix" in test_name:
+ anchor += "imix-"
+ elif "9000b" in test_name:
+ anchor += "9000b-"
+ elif "1518" in test_name:
+ anchor += "1518b-"
+
+ if "1t1c" in test_name:
+ anchor += "1t1c"
+ elif "2t2c" in test_name:
+ anchor += "2t2c"
+ elif "4t4c" in test_name:
+ anchor += "4t4c"
+
+ return url + file_name + anchor + feature
def table_performance_trending_dashboard_html(table, input_data):
@@ -924,83 +977,12 @@ def table_performance_trending_dashboard_html(table, input_data):
alignment = "left" if c_idx == 0 else "center"
td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# Name:
- url = "../trending/"
- file_name = ""
- anchor = "#"
- feature = ""
if c_idx == 0:
- if "lbdpdk" in item or "lbvpp" in item:
- file_name = "link_bonding.html"
-
- elif "testpmd" in item or "l3fwd" in item:
- file_name = "dpdk.html"
-
- elif "memif" in item:
- file_name = "container_memif.html"
-
- elif "srv6" in item:
- file_name = "srv6.html"
-
- elif "vhost" in item:
- if "l2xcbase" in item or "l2bdbasemaclrn" in item:
- file_name = "vm_vhost_l2.html"
- elif "ip4base" in item:
- file_name = "vm_vhost_ip4.html"
-
- elif "ipsec" in item:
- file_name = "ipsec.html"
-
- elif "ethip4lispip" in item or "ethip4vxlan" in item:
- file_name = "ip4_tunnels.html"
-
- elif "ip4base" in item or "ip4scale" in item:
- file_name = "ip4.html"
- if "iacl" in item or "snat" in item or "cop" in item:
- feature = "-features"
-
- elif "ip6base" in item or "ip6scale" in item:
- file_name = "ip6.html"
-
- elif "l2xcbase" in item or "l2xcscale" in item \
- or "l2bdbasemaclrn" in item or "l2bdscale" in item \
- or "l2dbbasemaclrn" in item or "l2dbscale" in item:
- file_name = "l2.html"
- if "iacl" in item:
- feature = "-features"
-
- if "x520" in item:
- anchor += "x520-"
- elif "x710" in item:
- anchor += "x710-"
- elif "xl710" in item:
- anchor += "xl710-"
-
- if "64b" in item:
- anchor += "64b-"
- elif "78b" in item:
- anchor += "78b-"
- elif "imix" in item:
- anchor += "imix-"
- elif "9000b" in item:
- anchor += "9000b-"
- elif "1518" in item:
- anchor += "1518b-"
-
- if "1t1c" in item:
- anchor += "1t1c"
- elif "2t2c" in item:
- anchor += "2t2c"
- elif "4t4c" in item:
- anchor += "4t4c"
-
- url = url + file_name + anchor + feature
-
+ url = _generate_url("../trending/", item)
ref = ET.SubElement(td, "a", attrib=dict(href=url))
ref.text = item
-
else:
td.text = item
-
try:
with open(table["output-file"], 'w') as html_file:
logging.info(" Writing file: '{0}'".format(table["output-file"]))
@@ -1010,3 +992,155 @@ def table_performance_trending_dashboard_html(table, input_data):
except KeyError:
logging.warning("The output file is not defined.")
return
+
+
+def table_failed_tests(table, input_data):
+ """Generate the table(s) with algorithm: table_failed_tests
+ specified in the specification file.
+
+ :param table: Table to generate.
+ :param input_data: Data to process.
+ :type table: pandas.Series
+ :type input_data: InputData
+ """
+
+ logging.info(" Generating the table {0} ...".
+ format(table.get("title", "")))
+
+ # Transform the data
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(table.get("type", ""), table.get("title", "")))
+ data = input_data.filter_data(table, continue_on_error=True)
+
+ # Prepare the header of the tables
+ header = ["Test Case",
+ "Fails [#]",
+ "Last Fail [Timestamp]",
+ "Last Fail [VPP Build]",
+ "Last Fail [CSIT Build]"]
+
+ # Generate the data for the table according to the model in the table
+ # specification
+ tbl_dict = dict()
+ for job, builds in table["data"].items():
+ for build in builds:
+ build = str(build)
+ for tst_name, tst_data in data[job][build].iteritems():
+ if tst_name.lower() in table["ignore-list"]:
+ continue
+ if tbl_dict.get(tst_name, None) is None:
+ name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
+ "-".join(tst_data["name"].
+ split("-")[1:]))
+ tbl_dict[tst_name] = {"name": name,
+ "data": OrderedDict()}
+ try:
+ tbl_dict[tst_name]["data"][build] = (
+ tst_data["status"],
+ input_data.metadata(job, build).get("generated", ""),
+ input_data.metadata(job, build).get("version", ""),
+ build)
+ except (TypeError, KeyError):
+ pass # No data in output.xml for this test
+
+ tbl_lst = list()
+ for tst_data in tbl_dict.values():
+ win_size = min(len(tst_data["data"]), table["window"])
+ fails_nr = 0
+ for val in tst_data["data"].values()[-win_size:]:
+ if val[0] == "FAIL":
+ fails_nr += 1
+ fails_last_date = val[1]
+ fails_last_vpp = val[2]
+ fails_last_csit = val[3]
+ if fails_nr:
+ tbl_lst.append([tst_data["name"],
+ fails_nr,
+ fails_last_date,
+ fails_last_vpp,
+ "mrr-daily-build-{0}".format(fails_last_csit)])
+
+ tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
+ tbl_sorted = list()
+ for nrf in range(table["window"], -1, -1):
+ tbl_fails = [item for item in tbl_lst if item[1] == nrf]
+ tbl_sorted.extend(tbl_fails)
+ file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
+
+ logging.info(" Writing file: '{0}'".format(file_name))
+ with open(file_name, "w") as file_handler:
+ file_handler.write(",".join(header) + "\n")
+ for test in tbl_sorted:
+ file_handler.write(",".join([str(item) for item in test]) + '\n')
+
+ txt_file_name = "{0}.txt".format(table["output-file"])
+ logging.info(" Writing file: '{0}'".format(txt_file_name))
+ convert_csv_to_pretty_txt(file_name, txt_file_name)
+
+
+def table_failed_tests_html(table, input_data):
+ """Generate the table(s) with algorithm: table_failed_tests_html
+ specified in the specification file.
+
+ :param table: Table to generate.
+ :param input_data: Data to process.
+ :type table: pandas.Series
+ :type input_data: InputData
+ """
+
+ logging.info(" Generating the table {0} ...".
+ format(table.get("title", "")))
+
+ try:
+ with open(table["input-file"], 'rb') as csv_file:
+ csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
+ csv_lst = [item for item in csv_content]
+ except KeyError:
+ logging.warning("The input file is not defined.")
+ return
+ except csv.Error as err:
+ logging.warning("Not possible to process the file '{0}'.\n{1}".
+ format(table["input-file"], err))
+ return
+
+ # Table:
+ failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))
+
+ # Table header:
+ tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
+ for idx, item in enumerate(csv_lst[0]):
+ alignment = "left" if idx == 0 else "center"
+ th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
+ th.text = item
+
+ # Rows:
+ colors = {"very-bad": ("#ffcccc", "#ff9999"),
+ "bad": ("#e9f1fb", "#d4e4f7")}
+ for r_idx, row in enumerate(csv_lst[1:]):
+ if int(row[1]) > 7:
+ color = "very-bad"
+ else:
+ color = "bad"
+ background = colors[color][r_idx % 2]
+ tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))
+
+ # Columns:
+ for c_idx, item in enumerate(row):
+ alignment = "left" if c_idx == 0 else "center"
+ td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
+ # Name:
+ if c_idx == 0:
+ url = _generate_url("../trending/", item)
+ ref = ET.SubElement(td, "a", attrib=dict(href=url))
+ ref.text = item
+ else:
+ td.text = item
+ try:
+ with open(table["output-file"], 'w') as html_file:
+ logging.info(" Writing file: '{0}'".format(table["output-file"]))
+ html_file.write(".. raw:: html\n\n\t")
+ html_file.write(ET.tostring(failed_tests))
+ html_file.write("\n\t<p><br><br></p>\n")
+ except KeyError:
+ logging.warning("The output file is not defined.")
+ return
diff --git a/resources/tools/presentation/input_data_parser.py b/resources/tools/presentation/input_data_parser.py
index 0bb2b6ce71..cf13237774 100644
--- a/resources/tools/presentation/input_data_parser.py
+++ b/resources/tools/presentation/input_data_parser.py
@@ -464,6 +464,7 @@ class ExecutionChecker(ResultVisitor):
test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
test_result["msg"] = test.message.replace('\n', ' |br| '). \
replace('\r', '').replace('"', "'")
+ test_result["status"] = test.status
if test.status == "PASS" and ("NDRPDRDISC" in tags or
"TCP" in tags or
"MRR" in tags):
@@ -507,6 +508,7 @@ class ExecutionChecker(ResultVisitor):
test_result["result"] = dict()
test_result["result"]["value"] = int(groups.group(2))
test_result["result"]["unit"] = groups.group(1)
+
elif test_type in ("MRR", ):
groups = re.search(self.REGEX_MRR, test.message)
test_result["result"] = dict()
@@ -516,8 +518,6 @@ class ExecutionChecker(ResultVisitor):
test_result["result"]["throughput"] = int(
test_result["result"]["rx"] /
test_result["result"]["duration"])
- else:
- test_result["status"] = test.status
self._test_ID = test.longname.lower()
self._data["tests"][self._test_ID] = test_result
diff --git a/resources/tools/presentation/new/generator_tables.py b/resources/tools/presentation/new/generator_tables.py
index 735fd2185f..43117cc4ed 100644
--- a/resources/tools/presentation/new/generator_tables.py
+++ b/resources/tools/presentation/new/generator_tables.py
@@ -17,7 +17,6 @@
import logging
import csv
-import prettytable
import pandas as pd
from string import replace
@@ -26,7 +25,8 @@ from numpy import nan, isnan
from xml.etree import ElementTree as ET
from errors import PresentationError
-from utils import mean, stdev, relative_change, classify_anomalies
+from utils import mean, stdev, relative_change, classify_anomalies, \
+ convert_csv_to_pretty_txt
def generate_tables(spec, data):
@@ -506,18 +506,8 @@ def table_performance_comparison(table, input_data):
]
for i, txt_name in enumerate(tbl_names_txt):
- txt_table = None
logging.info(" Writing file: '{0}'".format(txt_name))
- with open(tbl_names[i], 'rb') as csv_file:
- csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
- for row in csv_content:
- if txt_table is None:
- txt_table = prettytable.PrettyTable(row)
- else:
- txt_table.add_row(row)
- txt_table.align["Test case"] = "l"
- with open(txt_name, "w") as txt_file:
- txt_file.write(str(txt_table))
+ convert_csv_to_pretty_txt(tbl_names[i], txt_name)
# Selected tests in csv:
input_file = "{0}-ndr-1t1c-full{1}".format(table["output-file"],
@@ -685,18 +675,8 @@ def table_performance_comparison_mrr(table, input_data):
]
for i, txt_name in enumerate(tbl_names_txt):
- txt_table = None
logging.info(" Writing file: '{0}'".format(txt_name))
- with open(tbl_names[i], 'rb') as csv_file:
- csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
- for row in csv_content:
- if txt_table is None:
- txt_table = prettytable.PrettyTable(row)
- else:
- txt_table.add_row(row)
- txt_table.align["Test case"] = "l"
- with open(txt_name, "w") as txt_file:
- txt_file.write(str(txt_table))
+ convert_csv_to_pretty_txt(tbl_names[i], txt_name)
def table_performance_trending_dashboard(table, input_data):
@@ -810,18 +790,92 @@ def table_performance_trending_dashboard(table, input_data):
file_handler.write(",".join([str(item) for item in test]) + '\n')
txt_file_name = "{0}.txt".format(table["output-file"])
- txt_table = None
logging.info(" Writing file: '{0}'".format(txt_file_name))
- with open(file_name, 'rb') as csv_file:
- csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
- for row in csv_content:
- if txt_table is None:
- txt_table = prettytable.PrettyTable(row)
- else:
- txt_table.add_row(row)
- txt_table.align["Test case"] = "l"
- with open(txt_file_name, "w") as txt_file:
- txt_file.write(str(txt_table))
+ convert_csv_to_pretty_txt(file_name, txt_file_name)
+
+
+def _generate_url(base, test_name):
+ """Generate URL to a trending plot from the name of the test case.
+
+ :param base: The base part of URL common to all test cases.
+ :param test_name: The name of the test case.
+ :type base: str
+ :type test_name: str
+ :returns: The URL to the plot with the trending data for the given test
+ case.
+    :rtype: str
+ """
+
+ url = base
+ file_name = ""
+ anchor = "#"
+ feature = ""
+
+ if "lbdpdk" in test_name or "lbvpp" in test_name:
+ file_name = "link_bonding.html"
+
+ elif "testpmd" in test_name or "l3fwd" in test_name:
+ file_name = "dpdk.html"
+
+ elif "memif" in test_name:
+ file_name = "container_memif.html"
+
+ elif "srv6" in test_name:
+ file_name = "srv6.html"
+
+ elif "vhost" in test_name:
+ if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
+ file_name = "vm_vhost_l2.html"
+ elif "ip4base" in test_name:
+ file_name = "vm_vhost_ip4.html"
+
+ elif "ipsec" in test_name:
+ file_name = "ipsec.html"
+
+ elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
+ file_name = "ip4_tunnels.html"
+
+ elif "ip4base" in test_name or "ip4scale" in test_name:
+ file_name = "ip4.html"
+ if "iacl" in test_name or "snat" in test_name or "cop" in test_name:
+ feature = "-features"
+
+ elif "ip6base" in test_name or "ip6scale" in test_name:
+ file_name = "ip6.html"
+
+ elif "l2xcbase" in test_name or "l2xcscale" in test_name \
+ or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
+ or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
+ file_name = "l2.html"
+ if "iacl" in test_name:
+ feature = "-features"
+
+ if "x520" in test_name:
+ anchor += "x520-"
+ elif "x710" in test_name:
+ anchor += "x710-"
+ elif "xl710" in test_name:
+ anchor += "xl710-"
+
+ if "64b" in test_name:
+ anchor += "64b-"
+ elif "78b" in test_name:
+ anchor += "78b-"
+ elif "imix" in test_name:
+ anchor += "imix-"
+ elif "9000b" in test_name:
+ anchor += "9000b-"
+ elif "1518" in test_name:
+ anchor += "1518b-"
+
+ if "1t1c" in test_name:
+ anchor += "1t1c"
+ elif "2t2c" in test_name:
+ anchor += "2t2c"
+ elif "4t4c" in test_name:
+ anchor += "4t4c"
+
+ return url + file_name + anchor + feature
def table_performance_trending_dashboard_html(table, input_data):
@@ -879,83 +933,12 @@ def table_performance_trending_dashboard_html(table, input_data):
alignment = "left" if c_idx == 0 else "center"
td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# Name:
- url = "../trending/"
- file_name = ""
- anchor = "#"
- feature = ""
if c_idx == 0:
- if "lbdpdk" in item or "lbvpp" in item:
- file_name = "link_bonding.html"
-
- elif "testpmd" in item or "l3fwd" in item:
- file_name = "dpdk.html"
-
- elif "memif" in item:
- file_name = "container_memif.html"
-
- elif "srv6" in item:
- file_name = "srv6.html"
-
- elif "vhost" in item:
- if "l2xcbase" in item or "l2bdbasemaclrn" in item:
- file_name = "vm_vhost_l2.html"
- elif "ip4base" in item:
- file_name = "vm_vhost_ip4.html"
-
- elif "ipsec" in item:
- file_name = "ipsec.html"
-
- elif "ethip4lispip" in item or "ethip4vxlan" in item:
- file_name = "ip4_tunnels.html"
-
- elif "ip4base" in item or "ip4scale" in item:
- file_name = "ip4.html"
- if "iacl" in item or "snat" in item or "cop" in item:
- feature = "-features"
-
- elif "ip6base" in item or "ip6scale" in item:
- file_name = "ip6.html"
-
- elif "l2xcbase" in item or "l2xcscale" in item \
- or "l2bdbasemaclrn" in item or "l2bdscale" in item \
- or "l2dbbasemaclrn" in item or "l2dbscale" in item:
- file_name = "l2.html"
- if "iacl" in item:
- feature = "-features"
-
- if "x520" in item:
- anchor += "x520-"
- elif "x710" in item:
- anchor += "x710-"
- elif "xl710" in item:
- anchor += "xl710-"
-
- if "64b" in item:
- anchor += "64b-"
- elif "78b" in item:
- anchor += "78b-"
- elif "imix" in item:
- anchor += "imix-"
- elif "9000b" in item:
- anchor += "9000b-"
- elif "1518" in item:
- anchor += "1518b-"
-
- if "1t1c" in item:
- anchor += "1t1c"
- elif "2t2c" in item:
- anchor += "2t2c"
- elif "4t4c" in item:
- anchor += "4t4c"
-
- url = url + file_name + anchor + feature
-
+ url = _generate_url("../trending/", item)
ref = ET.SubElement(td, "a", attrib=dict(href=url))
ref.text = item
-
else:
td.text = item
-
try:
with open(table["output-file"], 'w') as html_file:
logging.info(" Writing file: '{0}'".format(table["output-file"]))
@@ -965,3 +948,155 @@ def table_performance_trending_dashboard_html(table, input_data):
except KeyError:
logging.warning("The output file is not defined.")
return
+
+
+def table_failed_tests(table, input_data):
+ """Generate the table(s) with algorithm: table_failed_tests
+ specified in the specification file.
+
+ :param table: Table to generate.
+ :param input_data: Data to process.
+ :type table: pandas.Series
+ :type input_data: InputData
+ """
+
+ logging.info(" Generating the table {0} ...".
+ format(table.get("title", "")))
+
+ # Transform the data
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(table.get("type", ""), table.get("title", "")))
+ data = input_data.filter_data(table, continue_on_error=True)
+
+ # Prepare the header of the tables
+ header = ["Test Case",
+ "Fails [#]",
+ "Last Fail [Timestamp]",
+ "Last Fail [VPP Build]",
+ "Last Fail [CSIT Build]"]
+
+ # Generate the data for the table according to the model in the table
+ # specification
+ tbl_dict = dict()
+ for job, builds in table["data"].items():
+ for build in builds:
+ build = str(build)
+ for tst_name, tst_data in data[job][build].iteritems():
+ if tst_name.lower() in table["ignore-list"]:
+ continue
+ if tbl_dict.get(tst_name, None) is None:
+ name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
+ "-".join(tst_data["name"].
+ split("-")[1:]))
+ tbl_dict[tst_name] = {"name": name,
+ "data": OrderedDict()}
+ try:
+ tbl_dict[tst_name]["data"][build] = (
+ tst_data["status"],
+ input_data.metadata(job, build).get("generated", ""),
+ input_data.metadata(job, build).get("version", ""),
+ build)
+ except (TypeError, KeyError):
+ pass # No data in output.xml for this test
+
+ tbl_lst = list()
+ for tst_data in tbl_dict.values():
+ win_size = min(len(tst_data["data"]), table["window"])
+ fails_nr = 0
+ for val in tst_data["data"].values()[-win_size:]:
+ if val[0] == "FAIL":
+ fails_nr += 1
+ fails_last_date = val[1]
+ fails_last_vpp = val[2]
+ fails_last_csit = val[3]
+ if fails_nr:
+ tbl_lst.append([tst_data["name"],
+ fails_nr,
+ fails_last_date,
+ fails_last_vpp,
+ "mrr-daily-build-{0}".format(fails_last_csit)])
+
+ tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
+ tbl_sorted = list()
+ for nrf in range(table["window"], -1, -1):
+ tbl_fails = [item for item in tbl_lst if item[1] == nrf]
+ tbl_sorted.extend(tbl_fails)
+ file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
+
+ logging.info(" Writing file: '{0}'".format(file_name))
+ with open(file_name, "w") as file_handler:
+ file_handler.write(",".join(header) + "\n")
+ for test in tbl_sorted:
+ file_handler.write(",".join([str(item) for item in test]) + '\n')
+
+ txt_file_name = "{0}.txt".format(table["output-file"])
+ logging.info(" Writing file: '{0}'".format(txt_file_name))
+ convert_csv_to_pretty_txt(file_name, txt_file_name)
+
+
+def table_failed_tests_html(table, input_data):
+ """Generate the table(s) with algorithm: table_failed_tests_html
+ specified in the specification file.
+
+ :param table: Table to generate.
+ :param input_data: Data to process.
+ :type table: pandas.Series
+ :type input_data: InputData
+ """
+
+ logging.info(" Generating the table {0} ...".
+ format(table.get("title", "")))
+
+ try:
+ with open(table["input-file"], 'rb') as csv_file:
+ csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
+ csv_lst = [item for item in csv_content]
+ except KeyError:
+ logging.warning("The input file is not defined.")
+ return
+ except csv.Error as err:
+ logging.warning("Not possible to process the file '{0}'.\n{1}".
+ format(table["input-file"], err))
+ return
+
+ # Table:
+ failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))
+
+ # Table header:
+ tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
+ for idx, item in enumerate(csv_lst[0]):
+ alignment = "left" if idx == 0 else "center"
+ th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
+ th.text = item
+
+ # Rows:
+ colors = {"very-bad": ("#ffcccc", "#ff9999"),
+ "bad": ("#e9f1fb", "#d4e4f7")}
+ for r_idx, row in enumerate(csv_lst[1:]):
+ if int(row[1]) > 7:
+ color = "very-bad"
+ else:
+ color = "bad"
+ background = colors[color][r_idx % 2]
+ tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))
+
+ # Columns:
+ for c_idx, item in enumerate(row):
+ alignment = "left" if c_idx == 0 else "center"
+ td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
+ # Name:
+ if c_idx == 0:
+ url = _generate_url("../trending/", item)
+ ref = ET.SubElement(td, "a", attrib=dict(href=url))
+ ref.text = item
+ else:
+ td.text = item
+ try:
+ with open(table["output-file"], 'w') as html_file:
+ logging.info(" Writing file: '{0}'".format(table["output-file"]))
+ html_file.write(".. raw:: html\n\n\t")
+ html_file.write(ET.tostring(failed_tests))
+ html_file.write("\n\t<p><br><br></p>\n")
+ except KeyError:
+ logging.warning("The output file is not defined.")
+ return
diff --git a/resources/tools/presentation/new/input_data_parser.py b/resources/tools/presentation/new/input_data_parser.py
index 0bb2b6ce71..cf13237774 100644
--- a/resources/tools/presentation/new/input_data_parser.py
+++ b/resources/tools/presentation/new/input_data_parser.py
@@ -464,6 +464,7 @@ class ExecutionChecker(ResultVisitor):
test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
test_result["msg"] = test.message.replace('\n', ' |br| '). \
replace('\r', '').replace('"', "'")
+ test_result["status"] = test.status
if test.status == "PASS" and ("NDRPDRDISC" in tags or
"TCP" in tags or
"MRR" in tags):
@@ -507,6 +508,7 @@ class ExecutionChecker(ResultVisitor):
test_result["result"] = dict()
test_result["result"]["value"] = int(groups.group(2))
test_result["result"]["unit"] = groups.group(1)
+
elif test_type in ("MRR", ):
groups = re.search(self.REGEX_MRR, test.message)
test_result["result"] = dict()
@@ -516,8 +518,6 @@ class ExecutionChecker(ResultVisitor):
test_result["result"]["throughput"] = int(
test_result["result"]["rx"] /
test_result["result"]["duration"])
- else:
- test_result["status"] = test.status
self._test_ID = test.longname.lower()
self._data["tests"][self._test_ID] = test_result
diff --git a/resources/tools/presentation/new/specification_CPTA.yaml b/resources/tools/presentation/new/specification_CPTA.yaml
index 555cfaee2c..2273eb65a1 100644
--- a/resources/tools/presentation/new/specification_CPTA.yaml
+++ b/resources/tools/presentation/new/specification_CPTA.yaml
@@ -91,7 +91,7 @@
height: 800
yaxis:
showticklabels: True
- tickformat: ".3s"
+ tickformat: ".4s"
title: "Throughput [pps]"
hoverformat: ".4s"
range: []
@@ -161,7 +161,7 @@
height: 800
yaxis:
showticklabels: True
- tickformat: ".3s"
+ tickformat: ".4s"
title: "Throughput [pps]"
hoverformat: ".4s"
range: []
@@ -269,6 +269,8 @@
- "parent"
- "result"
ignore-list:
+ # This test is "ndrdisc" test and was improperly tagged. It was fixed
+ # but it remains in the old output.xml files.
- "tests.vpp.perf.l2.10ge2p1x520-eth-l2bdscale1mmaclrn-mrr.tc01-64b-1t1c-eth-l2bdscale1mmaclrn-ndrdisc"
outlier-const: 1.5
window: 14
@@ -288,6 +290,8 @@
- "parent"
- "result"
ignore-list:
+ # This test is "ndrdisc" test and was improperly tagged. It was fixed
+ # but it remains in the old output.xml files.
- "tests.vpp.perf.l2.10ge2p1x520-eth-l2bdscale1mmaclrn-mrr.tc05-64b-2t2c-eth-l2bdscale1mmaclrn-ndrdisc"
outlier-const: 1.5
window: 14
@@ -307,6 +311,8 @@
- "parent"
- "result"
ignore-list:
+ # This test is "ndrdisc" test and was improperly tagged. It was fixed
+ # but it remains in the old output.xml files.
- "tests.vpp.perf.l2.10ge2p1x520-eth-l2bdscale1mmaclrn-mrr.tc09-64b-4t4c-eth-l2bdscale1mmaclrn-ndrdisc"
outlier-const: 1.5
window: 14
@@ -334,6 +340,31 @@
input-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-4t4c.csv"
output-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-4t4c.rst"
+-
+ type: "table"
+ title: "Failed MRR tests"
+ algorithm: "table_failed_tests"
+ output-file-ext: ".csv"
+ output-file: "{DIR[STATIC,VPP]}/failed-tests"
+ data: "plot-performance-trending-all"
+ filter: "'MRR'"
+ parameters:
+ - "name"
+ - "parent"
+ - "status"
+ ignore-list:
+ # This test is "ndrdisc" test and was improperly tagged. It was fixed
+ # but it remains in the old output.xml files.
+ - "tests.vpp.perf.l2.10ge2p1x520-eth-l2bdscale1mmaclrn-mrr.tc01-64b-1t1c-eth-l2bdscale1mmaclrn-ndrdisc"
+ window: 14
+
+-
+ type: "table"
+ title: "HTML Failed MRR tests"
+ algorithm: "table_failed_tests_html"
+ input-file: "{DIR[STATIC,VPP]}/failed-tests.csv"
+ output-file: "{DIR[STATIC,VPP]}/failed-tests.rst"
+
################################################################################
### C P T A ###
diff --git a/resources/tools/presentation/new/utils.py b/resources/tools/presentation/new/utils.py
index a4e24663b5..a2aa0dc071 100644
--- a/resources/tools/presentation/new/utils.py
+++ b/resources/tools/presentation/new/utils.py
@@ -17,8 +17,9 @@
import multiprocessing
import subprocess
import numpy as np
-import pandas as pd
import logging
+import csv
+import prettytable
from os import walk, makedirs, environ
from os.path import join, isdir
@@ -253,6 +254,29 @@ def classify_anomalies(data):
return classification, avgs
+def convert_csv_to_pretty_txt(csv_file, txt_file):
+ """Convert the given csv table to pretty text table.
+
+ :param csv_file: The path to the input csv file.
+ :param txt_file: The path to the output pretty text file.
+ :type csv_file: str
+ :type txt_file: str
+ """
+
+ txt_table = None
+ with open(csv_file, 'rb') as csv_file:
+ csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
+ for row in csv_content:
+ if txt_table is None:
+ txt_table = prettytable.PrettyTable(row)
+ else:
+ txt_table.add_row(row)
+ txt_table.align["Test case"] = "l"
+ if txt_table:
+ with open(txt_file, "w") as txt_file:
+ txt_file.write(str(txt_table))
+
+
class Worker(multiprocessing.Process):
"""Worker class used to process tasks in separate parallel processes.
"""
diff --git a/resources/tools/presentation/specification_CPTA.yaml b/resources/tools/presentation/specification_CPTA.yaml
index 19011658c6..7ee0c0c710 100644
--- a/resources/tools/presentation/specification_CPTA.yaml
+++ b/resources/tools/presentation/specification_CPTA.yaml
@@ -91,7 +91,7 @@
height: 800
yaxis:
showticklabels: True
- tickformat: ".3s"
+ tickformat: ".4s"
title: "Throughput [pps]"
hoverformat: ".4s"
range: []
@@ -161,7 +161,7 @@
height: 800
yaxis:
showticklabels: True
- tickformat: ".3s"
+ tickformat: ".4s"
title: "Throughput [pps]"
hoverformat: ".4s"
range: []
@@ -269,6 +269,8 @@
- "parent"
- "result"
ignore-list:
+ # This test is "ndrdisc" test and was improperly tagged. It was fixed
+ # but it remains in the old output.xml files.
- "tests.vpp.perf.l2.10ge2p1x520-eth-l2bdscale1mmaclrn-mrr.tc01-64b-1t1c-eth-l2bdscale1mmaclrn-ndrdisc"
outlier-const: 1.5
window: 14
@@ -288,6 +290,8 @@
- "parent"
- "result"
ignore-list:
+ # This test is "ndrdisc" test and was improperly tagged. It was fixed
+ # but it remains in the old output.xml files.
- "tests.vpp.perf.l2.10ge2p1x520-eth-l2bdscale1mmaclrn-mrr.tc05-64b-2t2c-eth-l2bdscale1mmaclrn-ndrdisc"
outlier-const: 1.5
window: 14
@@ -307,6 +311,8 @@
- "parent"
- "result"
ignore-list:
+ # This test is "ndrdisc" test and was improperly tagged. It was fixed
+ # but it remains in the old output.xml files.
- "tests.vpp.perf.l2.10ge2p1x520-eth-l2bdscale1mmaclrn-mrr.tc09-64b-4t4c-eth-l2bdscale1mmaclrn-ndrdisc"
outlier-const: 1.5
window: 14
@@ -334,6 +340,31 @@
input-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-4t4c.csv"
output-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-4t4c.rst"
+-
+ type: "table"
+ title: "Failed MRR tests"
+ algorithm: "table_failed_tests"
+ output-file-ext: ".csv"
+ output-file: "{DIR[STATIC,VPP]}/failed-tests"
+ data: "plot-performance-trending-all"
+ filter: "'MRR'"
+ parameters:
+ - "name"
+ - "parent"
+ - "status"
+ ignore-list:
+ # This test is "ndrdisc" test and was improperly tagged. It was fixed
+ # but it remains in the old output.xml files.
+ - "tests.vpp.perf.l2.10ge2p1x520-eth-l2bdscale1mmaclrn-mrr.tc01-64b-1t1c-eth-l2bdscale1mmaclrn-ndrdisc"
+ window: 14
+
+-
+ type: "table"
+ title: "HTML Failed MRR tests"
+ algorithm: "table_failed_tests_html"
+ input-file: "{DIR[STATIC,VPP]}/failed-tests.csv"
+ output-file: "{DIR[STATIC,VPP]}/failed-tests.rst"
+
################################################################################
### C P T A ###
diff --git a/resources/tools/presentation/utils.py b/resources/tools/presentation/utils.py
index 0a9d985a88..ba32932187 100644
--- a/resources/tools/presentation/utils.py
+++ b/resources/tools/presentation/utils.py
@@ -19,6 +19,8 @@ import subprocess
import numpy as np
import pandas as pd
import logging
+import csv
+import prettytable
from os import walk, makedirs, environ
from os.path import join, isdir
@@ -316,6 +318,29 @@ def classify_anomalies(data, window):
return classification
+def convert_csv_to_pretty_txt(csv_file, txt_file):
+ """Convert the given csv table to pretty text table.
+
+ :param csv_file: The path to the input csv file.
+ :param txt_file: The path to the output pretty text file.
+ :type csv_file: str
+ :type txt_file: str
+ """
+
+ txt_table = None
+ with open(csv_file, 'rb') as csv_file:
+ csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
+ for row in csv_content:
+ if txt_table is None:
+ txt_table = prettytable.PrettyTable(row)
+ else:
+ txt_table.add_row(row)
+ txt_table.align["Test case"] = "l"
+ if txt_table:
+ with open(txt_file, "w") as txt_file:
+ txt_file.write(str(txt_table))
+
+
class Worker(multiprocessing.Process):
"""Worker class used to process tasks in separate parallel processes.
"""