# Copyright (c) 2020 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Algorithms to generate tables.
"""


import logging
import csv
import re

from collections import OrderedDict
from xml.etree import ElementTree as ET
from datetime import datetime as dt
from datetime import timedelta
from copy import deepcopy

import plotly.graph_objects as go
import plotly.offline as ploff
import pandas as pd

from numpy import nan, isnan
from yaml import load, FullLoader, YAMLError

from pal_utils import mean, stdev, classify_anomalies, \
    convert_csv_to_pretty_txt, relative_change_stdev


REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')


def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    generator = {
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html,
        u"table_comparison": table_comparison
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            generator[table[u"algorithm"]](table, data)
        except NameError as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")


def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        for dut_data in tst_data[u"show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"
                continue

            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            for thread_nr, thread in dut_data[u"threads"].items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."
        return str(ET.tostring(tbl, encoding=u"unicode"))

    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f" Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t
\n\n\n" u".. |preout| raw:: html\n\n\n\n" ) if title: rst_file.write(f"{title}\n") rst_file.write(f"{u'`' * len(title)}\n\n") rst_file.write( u".. raw:: html\n\n" f' \n\n' ) if legend: rst_file.write(legend[1:].replace(u"\n", u" |br| ")) if footnote: rst_file.write(footnote.replace(u"\n", u" |br| ")[1:]) def table_perf_comparison(table, input_data): """Generate the table(s) with algorithm: table_perf_comparison specified in the specification file. :param table: Table to generate. :param input_data: Data to process. :type table: pandas.Series :type input_data: InputData """ logging.info(f" Generating the table {table.get(u'title', u'')} ...") # Transform the data logging.info( f" Creating the data set for the {table.get(u'type', u'')} " f"{table.get(u'title', u'')}." ) data = input_data.filter_data(table, continue_on_error=True) # Prepare the header of the tables try: header = [u"Test Case", ] legend = u"\nLegend:\n" rca_data = None rca = table.get(u"rca", None) if rca: try: with open(rca.get(u"data-file", u""), u"r") as rca_file: rca_data = load(rca_file, Loader=FullLoader) header.insert(0, rca.get(u"title", u"RCA")) legend += ( u"RCA: Reference to the Root Cause Analysis, see below.\n" ) except (YAMLError, IOError) as err: logging.warning(repr(err)) history = table.get(u"history", list()) for item in history: header.extend( [ f"{item[u'title']} Avg({table[u'include-tests']})", f"{item[u'title']} Stdev({table[u'include-tests']})" ] ) legend += ( f"{item[u'title']} Avg({table[u'include-tests']}): " f"Mean value of {table[u'include-tests']} [Mpps] computed from " f"a series of runs of the listed tests executed against " f"{item[u'title']}.\n" f"{item[u'title']} Stdev({table[u'include-tests']}): " f"Standard deviation value of {table[u'include-tests']} [Mpps] " f"computed from a series of runs of the listed tests executed " f"against {item[u'title']}.\n" ) header.extend( [ f"{table[u'reference'][u'title']} " f"Avg({table[u'include-tests']})", f"{table[u'reference'][u'title']} " f"Stdev({table[u'include-tests']})", f"{table[u'compare'][u'title']} " f"Avg({table[u'include-tests']})", f"{table[u'compare'][u'title']} " f"Stdev({table[u'include-tests']})", f"Diff({table[u'reference'][u'title']}," f"{table[u'compare'][u'title']})", u"Stdev(Diff)" ] ) header_str = u";".join(header) + u"\n" legend += ( f"{table[u'reference'][u'title']} " f"Avg({table[u'include-tests']}): " f"Mean value of {table[u'include-tests']} [Mpps] computed from a " f"series of runs of the listed tests executed against " f"{table[u'reference'][u'title']}.\n" f"{table[u'reference'][u'title']} " f"Stdev({table[u'include-tests']}): " f"Standard deviation value of {table[u'include-tests']} [Mpps] " f"computed from a series of runs of the listed tests executed " f"against {table[u'reference'][u'title']}.\n" f"{table[u'compare'][u'title']} " f"Avg({table[u'include-tests']}): " f"Mean value of {table[u'include-tests']} [Mpps] computed from a " f"series of runs of the listed tests executed against " f"{table[u'compare'][u'title']}.\n" f"{table[u'compare'][u'title']} " f"Stdev({table[u'include-tests']}): " f"Standard deviation value of {table[u'include-tests']} [Mpps] " f"computed from a series of runs of the listed tests executed " f"against {table[u'compare'][u'title']}.\n" f"Diff({table[u'reference'][u'title']}," f"{table[u'compare'][u'title']}): " f"Percentage change calculated for mean values.\n" u"Stdev(Diff): " u"Standard deviation of percentage change calculated for mean " u"values.\n" u"NT: Not Tested\n" ) except 
(AttributeError, KeyError) as err: logging.error(f"The model is invalid, missing parameter: {repr(err)}") return # Prepare data to the table: tbl_dict = dict() for job, builds in table[u"reference"][u"data"].items(): for build in builds: for tst_name, tst_data in data[job][str(build)].items(): tst_name_mod = _tpc_modify_test_name(tst_name) if (u"across topologies" in table[u"title"].lower() or (u" 3n-" in table[u"title"].lower() and u" 2n-" in table[u"title"].lower())): tst_name_mod = tst_name_mod.replace(u"2n1l-", u"") if tbl_dict.get(tst_name_mod, None) is None: name = tst_data[u'name'].rsplit(u'-', 1)[0] if u"across testbeds" in table[u"title"].lower() or \ u"across topologies" in table[u"title"].lower(): name = _tpc_modify_displayed_test_name(name) tbl_dict[tst_name_mod] = { u"name": name, u"replace-ref": True, u"replace-cmp": True, u"ref-data": list(), u"cmp-data": list() } _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"], src=tst_data, include_tests=table[u"include-tests"]) replacement = table[u"reference"].get(u"data-replacement", None) if replacement: rpl_data = input_data.filter_data( table, data=replacement, continue_on_error=True) for job, builds in replacement.items(): for build in builds: for tst_name, tst_data in rpl_data[job][str(build)].items(): tst_name_mod = _tpc_modify_test_name(tst_name) if (u"across topologies" in table[u"title"].lower() or (u" 3n-" in table[u"title"].lower() and u" 2n-" in table[u"title"].lower())): tst_name_mod = tst_name_mod.replace(u"2n1l-", u"") if tbl_dict.get(tst_name_mod, None) is None: name = tst_data[u'name'].rsplit(u'-', 1)[0] if u"across testbeds" in table[u"title"].lower() or \ u"across topologies" in table[u"title"].lower(): name = _tpc_modify_displayed_test_name(name) tbl_dict[tst_name_mod] = { u"name": name, u"replace-ref": False, u"replace-cmp": True, u"ref-data": list(), u"cmp-data": list() } if tbl_dict[tst_name_mod][u"replace-ref"]: tbl_dict[tst_name_mod][u"replace-ref"] = False tbl_dict[tst_name_mod][u"ref-data"] = list() _tpc_insert_data( target=tbl_dict[tst_name_mod][u"ref-data"], src=tst_data, include_tests=table[u"include-tests"] ) for job, builds in table[u"compare"][u"data"].items(): for build in builds: for tst_name, tst_data in data[job][str(build)].items(): tst_name_mod = _tpc_modify_test_name(tst_name) if (u"across topologies" in table[u"title"].lower() or (u" 3n-" in table[u"title"].lower() and u" 2n-" in table[u"title"].lower())): tst_name_mod = tst_name_mod.replace(u"2n1l-", u"") if tbl_dict.get(tst_name_mod, None) is None: name = tst_data[u'name'].rsplit(u'-', 1)[0] if u"across testbeds" in table[u"title"].lower() or \ u"across topologies" in table[u"title"].lower(): name = _tpc_modify_displayed_test_name(name) tbl_dict[tst_name_mod] = { u"name": name, u"replace-ref": False, u"replace-cmp": True, u"ref-data": list(), u"cmp-data": list() } _tpc_insert_data( target=tbl_dict[tst_name_mod][u"cmp-data"], src=tst_data, include_tests=table[u"include-tests"] ) replacement = table[u"compare"].get(u"data-replacement", None) if replacement: rpl_data = input_data.filter_data( table, data=replacement, continue_on_error=True) for job, builds in replacement.items(): for build in builds: for tst_name, tst_data in rpl_data[job][str(build)].items(): tst_name_mod = _tpc_modify_test_name(tst_name) if (u"across topologies" in table[u"title"].lower() or (u" 3n-" in table[u"title"].lower() and u" 2n-" in table[u"title"].lower())): tst_name_mod = tst_name_mod.replace(u"2n1l-", u"") if tbl_dict.get(tst_name_mod, None) is None: name 
= tst_data[u'name'].rsplit(u'-', 1)[0] if u"across testbeds" in table[u"title"].lower() or \ u"across topologies" in table[u"title"].lower(): name = _tpc_modify_displayed_test_name(name) tbl_dict[tst_name_mod] = { u"name": name, u"replace-ref": False, u"replace-cmp": False, u"ref-data": list(), u"cmp-data": list() } if tbl_dict[tst_name_mod][u"replace-cmp"]: tbl_dict[tst_name_mod][u"replace-cmp"] = False tbl_dict[tst_name_mod][u"cmp-data"] = list() _tpc_insert_data( target=tbl_dict[tst_name_mod][u"cmp-data"], src=tst_data, include_tests=table[u"include-tests"] ) for item in history: for job, builds in item[u"data"].items(): for build in builds: for tst_name, tst_data in data[job][str(build)].items(): tst_name_mod = _tpc_modify_test_name(tst_name) if (u"across topologies" in table[u"title"].lower() or (u" 3n-" in table[u"title"].lower() and u" 2n-" in table[u"title"].lower())): tst_name_mod = tst_name_mod.replace(u"2n1l-", u"") if tbl_dict.get(tst_name_mod, None) is None: continue if tbl_dict[tst_name_mod].get(u"history", None) is None: tbl_dict[tst_name_mod][u"history"] = OrderedDict() if tbl_dict[tst_name_mod][u"history"].\ get(item[u"title"], None) is None: tbl_dict[tst_name_mod][u"history"][item[ u"title"]] = list() try: if table[u"include-tests"] == u"MRR": res = (tst_data[u"result"][u"receive-rate"], tst_data[u"result"][u"receive-stdev"]) elif table[u"include-tests"] == u"PDR": res = tst_data[u"throughput"][u"PDR"][u"LOWER"] elif table[u"include-tests"] == u"NDR": res = tst_data[u"throughput"][u"NDR"][u"LOWER"] else: continue tbl_dict[tst_name_mod][u"history"][item[u"title"]].\ append(res) except (TypeError, KeyError): pass tbl_lst = list() for tst_name in tbl_dict: item = [tbl_dict[tst_name][u"name"], ] if history: if tbl_dict[tst_name].get(u"history", None) is not None: for hist_data in tbl_dict[tst_name][u"history"].values(): if hist_data: if table[u"include-tests"] == u"MRR": item.append(round(hist_data[0][0] / 1e6, 1)) item.append(round(hist_data[0][1] / 1e6, 1)) else: item.append(round(mean(hist_data) / 1e6, 1)) item.append(round(stdev(hist_data) / 1e6, 1)) else: item.extend([u"NT", u"NT"]) else: item.extend([u"NT", u"NT"]) data_r = tbl_dict[tst_name][u"ref-data"] if data_r: if table[u"include-tests"] == u"MRR": data_r_mean = data_r[0][0] data_r_stdev = data_r[0][1] else: data_r_mean = mean(data_r) data_r_stdev = stdev(data_r) item.append(round(data_r_mean / 1e6, 1)) item.append(round(data_r_stdev / 1e6, 1)) else: data_r_mean = None data_r_stdev = None item.extend([u"NT", u"NT"]) data_c = tbl_dict[tst_name][u"cmp-data"] if data_c: if table[u"include-tests"] == u"MRR": data_c_mean = data_c[0][0] data_c_stdev = data_c[0][1] else: data_c_mean = mean(data_c) data_c_stdev = stdev(data_c) item.append(round(data_c_mean / 1e6, 1)) item.append(round(data_c_stdev / 1e6, 1)) else: data_c_mean = None data_c_stdev = None item.extend([u"NT", u"NT"]) if item[-2] == u"NT": pass elif item[-4] == u"NT": item.append(u"New in CSIT-2001") item.append(u"New in CSIT-2001") elif data_r_mean is not None and data_c_mean is not None: delta, d_stdev = relative_change_stdev( data_r_mean, data_c_mean, data_r_stdev, data_c_stdev ) try: item.append(round(delta)) except ValueError: item.append(delta) try: item.append(round(d_stdev)) except ValueError: item.append(d_stdev) if rca_data: rca_nr = rca_data.get(item[0], u"-") item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-") if (len(item) == len(header)) and (item[-4] != u"NT"): tbl_lst.append(item) tbl_lst = _tpc_sort_table(tbl_lst) # Generate csv tables: 
csv_file = f"{table[u'output-file']}.csv" with open(csv_file, u"wt") as file_handler: file_handler.write(header_str) for test in tbl_lst: file_handler.write(u";".join([str(item) for item in test]) + u"\n") txt_file_name = f"{table[u'output-file']}.txt" convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";") footnote = u"" with open(txt_file_name, u'a') as txt_file: txt_file.write(legend) if rca_data: footnote = rca_data.get(u"footnote", u"") if footnote: txt_file.write(footnote) txt_file.write(u":END") # Generate html table: _tpc_generate_html_table( header, tbl_lst, table[u'output-file'], legend=legend, footnote=footnote, title=table.get(u"title", u"") ) def table_perf_comparison_nic(table, input_data): """Generate the table(s) with algorithm: table_perf_comparison specified in the specification file. :param table: Table to generate. :param input_data: Data to process. :type table: pandas.Series :type input_data: InputData """ logging.info(f" Generating the table {table.get(u'title', u'')} ...") # Transform the data logging.info( f" Creating the data set for the {table.get(u'type', u'')} " f"{table.get(u'title', u'')}." ) data = input_data.filter_data(table, continue_on_error=True) # Prepare the header of the tables try: header = [u"Test Case", ] legend = u"\nLegend:\n" rca_data = None rca = table.get(u"rca", None) if rca: try: with open(rca.get(u"data-file", ""), u"r") as rca_file: rca_data = load(rca_file, Loader=FullLoader) header.insert(0, rca.get(u"title", "RCA")) legend += ( u"RCA: Reference to the Root Cause Analysis, see below.\n" ) except (YAMLError, IOError) as err: logging.warning(repr(err)) history = table.get(u"history", list()) for item in history: header.extend( [ f"{item[u'title']} Avg({table[u'include-tests']})", f"{item[u'title']} Stdev({table[u'include-tests']})" ] ) legend += ( f"{item[u'title']} Avg({table[u'include-tests']}): " f"Mean value of {table[u'include-tests']} [Mpps] computed from " f"a series of runs of the listed tests executed against " f"{item[u'title']}.\n" f"{item[u'title']} Stdev({table[u'include-tests']}): " f"Standard deviation value of {table[u'include-tests']} [Mpps] " f"computed from a series of runs of the listed tests executed " f"against {item[u'title']}.\n" ) header.extend( [ f"{table[u'reference'][u'title']} " f"Avg({table[u'include-tests']})", f"{table[u'reference'][u'title']} " f"Stdev({table[u'include-tests']})", f"{table[u'compare'][u'title']} " f"Avg({table[u'include-tests']})", f"{table[u'compare'][u'title']} " f"Stdev({table[u'include-tests']})", f"Diff({table[u'reference'][u'title']}," f"{table[u'compare'][u'title']})", u"Stdev(Diff)" ] ) header_str = u";".join(header) + u"\n" legend += ( f"{table[u'reference'][u'title']} " f"Avg({table[u'include-tests']}): " f"Mean value of {table[u'include-tests']} [Mpps] computed from a " f"series of runs of the listed tests executed against " f"{table[u'reference'][u'title']}.\n" f"{table[u'reference'][u'title']} " f"Stdev({table[u'include-tests']}): " f"Standard deviation value of {table[u'include-tests']} [Mpps] " f"computed from a series of runs of the listed tests executed " f"against {table[u'reference'][u'title']}.\n" f"{table[u'compare'][u'title']} " f"Avg({table[u'include-tests']}): " f"Mean value of {table[u'include-tests']} [Mpps] computed from a " f"series of runs of the listed tests executed against " f"{table[u'compare'][u'title']}.\n" f"{table[u'compare'][u'title']} " f"Stdev({table[u'include-tests']}): " f"Standard deviation value of {table[u'include-tests']} [Mpps] " 
f"computed from a series of runs of the listed tests executed " f"against {table[u'compare'][u'title']}.\n" f"Diff({table[u'reference'][u'title']}," f"{table[u'compare'][u'title']}): " f"Percentage change calculated for mean values.\n" u"Stdev(Diff): " u"Standard deviation of percentage change calculated for mean " u"values.\n" u"NT: Not Tested\n" ) except (AttributeError, KeyError) as err: logging.error(f"The model is invalid, missing parameter: {repr(err)}") return # Prepare data to the table: tbl_dict = dict() for job, builds in table[u"reference"][u"data"].items(): for build in builds: for tst_name, tst_data in data[job][str(build)].items(): if table[u"reference"][u"nic"] not in tst_data[u"tags"]: continue tst_name_mod = _tpc_modify_test_name(tst_name, ignore_nic=True) if (u"across topologies" in table[u"title"].lower() or (u" 3n-" in table[u"title"].lower() and u" 2n-" in table[u"title"].lower())): tst_name_mod = tst_name_mod.replace(u"2n1l-", u"") if tbl_dict.get(tst_name_mod, None) is None: name = tst_data[u'name'].rsplit(u'-', 1)[0] if u"across testbeds" in table[u"title"].lower() or \ u"across topologies" in table[u"title"].lower(): name = _tpc_modify_displayed_test_name(name) tbl_dict[tst_name_mod] = { u"name": name, u"replace-ref": True, u"replace-cmp": True, u"ref-data": list(), u"cmp-data": list() } _tpc_insert_data( target=tbl_dict[tst_name_mod][u"ref-data"], src=tst_data, include_tests=table[u"include-tests"] ) replacement = table[u"reference"].get(u"data-replacement", None) if replacement: rpl_data = input_data.filter_data( table, data=replacement, continue_on_error=True) for job, builds in replacement.items(): for build in builds: for tst_name, tst_data in rpl_data[job][str(build)].items(): if table[u"reference"][u"nic"] not in tst_data[u"tags"]: continue tst_name_mod = \ _tpc_modify_test_name(tst_name, ignore_nic=True) if (u"across topologies" in table[u"title"].lower() or (u" 3n-" in table[u"title"].lower() and u" 2n-" in table[u"title"].lower())): tst_name_mod = tst_name_mod.replace(u"2n1l-", u"") if tbl_dict.get(tst_name_mod, None) is None: name = tst_data[u'name'].rsplit(u'-', 1)[0] if u"across testbeds" in table[u"title"].lower() or \ u"across topologies" in table[u"title"].lower(): name = _tpc_modify_displayed_test_name(name) tbl_dict[tst_name_mod] = { u"name": name, u"replace-ref": False, u"replace-cmp": True, u"ref-data": list(), u"cmp-data": list() } if tbl_dict[tst_name_mod][u"replace-ref"]: tbl_dict[tst_name_mod][u"replace-ref"] = False tbl_dict[tst_name_mod][u"ref-data"] = list() _tpc_insert_data( target=tbl_dict[tst_name_mod][u"ref-data"], src=tst_data, include_tests=table[u"include-tests"] ) for job, builds in table[u"compare"][u"data"].items(): for build in builds: for tst_name, tst_data in data[job][str(build)].items(): if table[u"compare"][u"nic"] not in tst_data[u"tags"]: continue tst_name_mod = _tpc_modify_test_name(tst_name, ignore_nic=True) if (u"across topologies" in table[u"title"].lower() or (u" 3n-" in table[u"title"].lower() and u" 2n-" in table[u"title"].lower())): tst_name_mod = tst_name_mod.replace(u"2n1l-", u"") if tbl_dict.get(tst_name_mod, None) is None: name = tst_data[u'name'].rsplit(u'-', 1)[0] if u"across testbeds" in table[u"title"].lower() or \ u"across topologies" in table[u"title"].lower(): name = _tpc_modify_displayed_test_name(name) tbl_dict[tst_name_mod] = { u"name": name, u"replace-ref": False, u"replace-cmp": True, u"ref-data": list(), u"cmp-data": list() } _tpc_insert_data( target=tbl_dict[tst_name_mod][u"cmp-data"], 
src=tst_data, include_tests=table[u"include-tests"] ) replacement = table[u"compare"].get(u"data-replacement", None) if replacement: rpl_data = input_data.filter_data( table, data=replacement, continue_on_error=True) for job, builds in replacement.items(): for build in builds: for tst_name, tst_data in rpl_data[job][str(build)].items(): if table[u"compare"][u"nic"] not in tst_data[u"tags"]: continue tst_name_mod = \ _tpc_modify_test_name(tst_name, ignore_nic=True) if (u"across topologies" in table[u"title"].lower() or (u" 3n-" in table[u"title"].lower() and u" 2n-" in table[u"title"].lower())): tst_name_mod = tst_name_mod.replace(u"2n1l-", u"") if tbl_dict.get(tst_name_mod, None) is None: name = tst_data[u'name'].rsplit(u'-', 1)[0] if u"across testbeds" in table[u"title"].lower() or \ u"across topologies" in table[u"title"].lower(): name = _tpc_modify_displayed_test_name(name) tbl_dict[tst_name_mod] = { u"name": name, u"replace-ref": False, u"replace-cmp": False, u"ref-data": list(), u"cmp-data": list() } if tbl_dict[tst_name_mod][u"replace-cmp"]: tbl_dict[tst_name_mod][u"replace-cmp"] = False tbl_dict[tst_name_mod][u"cmp-data"] = list() _tpc_insert_data( target=tbl_dict[tst_name_mod][u"cmp-data"], src=tst_data, include_tests=table[u"include-tests"] ) for item in history: for job, builds in item[u"data"].items(): for build in builds: for tst_name, tst_data in data[job][str(build)].items(): if item[u"nic"] not in tst_data[u"tags"]: continue tst_name_mod = \ _tpc_modify_test_name(tst_name, ignore_nic=True) if (u"across topologies" in table[u"title"].lower() or (u" 3n-" in table[u"title"].lower() and u" 2n-" in table[u"title"].lower())): tst_name_mod = tst_name_mod.replace(u"2n1l-", u"") if tbl_dict.get(tst_name_mod, None) is None: continue if tbl_dict[tst_name_mod].get(u"history", None) is None: tbl_dict[tst_name_mod][u"history"] = OrderedDict() if tbl_dict[tst_name_mod][u"history"].\ get(item[u"title"], None) is None: tbl_dict[tst_name_mod][u"history"][item[ u"title"]] = list() try: if table[u"include-tests"] == u"MRR": res = (tst_data[u"result"][u"receive-rate"], tst_data[u"result"][u"receive-stdev"]) elif table[u"include-tests"] == u"PDR": res = tst_data[u"throughput"][u"PDR"][u"LOWER"] elif table[u"include-tests"] == u"NDR": res = tst_data[u"throughput"][u"NDR"][u"LOWER"] else: continue tbl_dict[tst_name_mod][u"history"][item[u"title"]].\ append(res) except (TypeError, KeyError): pass tbl_lst = list() for tst_name in tbl_dict: item = [tbl_dict[tst_name][u"name"], ] if history: if tbl_dict[tst_name].get(u"history", None) is not None: for hist_data in tbl_dict[tst_name][u"history"].values(): if hist_data: if table[u"include-tests"] == u"MRR": item.append(round(hist_data[0][0] / 1e6, 1)) item.append(round(hist_data[0][1] / 1e6, 1)) else: item.append(round(mean(hist_data) / 1e6, 1)) item.append(round(stdev(hist_data) / 1e6, 1)) else: item.extend([u"NT", u"NT"]) else: item.extend([u"NT", u"NT"]) data_r = tbl_dict[tst_name][u"ref-data"] if data_r: if table[u"include-tests"] == u"MRR": data_r_mean = data_r[0][0] data_r_stdev = data_r[0][1] else: data_r_mean = mean(data_r) data_r_stdev = stdev(data_r) item.append(round(data_r_mean / 1e6, 1)) item.append(round(data_r_stdev / 1e6, 1)) else: data_r_mean = None data_r_stdev = None item.extend([u"NT", u"NT"]) data_c = tbl_dict[tst_name][u"cmp-data"] if data_c: if table[u"include-tests"] == u"MRR": data_c_mean = data_c[0][0] data_c_stdev = data_c[0][1] else: data_c_mean = mean(data_c) data_c_stdev = stdev(data_c) item.append(round(data_c_mean / 1e6, 
1)) item.append(round(data_c_stdev / 1e6, 1)) else: data_c_mean = None data_c_stdev = None item.extend([u"NT", u"NT"]) if item[-2] == u"NT": pass elif item[-4] == u"NT": item.append(u"New in CSIT-2001") item.append(u"New in CSIT-2001") elif data_r_mean is not None and data_c_mean is not None: delta, d_stdev = relative_change_stdev( data_r_mean, data_c_mean, data_r_stdev, data_c_stdev ) try: item.append(round(delta)) except ValueError: item.append(delta) try: item.append(round(d_stdev)) except ValueError: item.append(d_stdev) if rca_data: rca_nr = rca_data.get(item[0], u"-") item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-") if (len(item) == len(header)) and (item[-4] != u"NT"): tbl_lst.append(item) tbl_lst = _tpc_sort_table(tbl_lst) # Generate csv tables: csv_file = f"{table[u'output-file']}.csv" with open(csv_file, u"wt") as file_handler: file_handler.write(header_str) for test in tbl_lst: file_handler.write(u";".join([str(item) for item in test]) + u"\n") txt_file_name = f"{table[u'output-file']}.txt" convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";") footnote = u"" with open(txt_file_name, u'a') as txt_file: txt_file.write(legend) if rca_data: footnote = rca_data.get(u"footnote", u"") if footnote: txt_file.write(footnote) txt_file.write(u":END") # Generate html table: _tpc_generate_html_table( header, tbl_lst, table[u'output-file'], legend=legend, footnote=footnote, title=table.get(u"title", u"") ) def table_nics_comparison(table, input_data): """Generate the table(s) with algorithm: table_nics_comparison specified in the specification file. :param table: Table to generate. :param input_data: Data to process. :type table: pandas.Series :type input_data: InputData """ logging.info(f" Generating the table {table.get(u'title', u'')} ...") # Transform the data logging.info( f" Creating the data set for the {table.get(u'type', u'')} " f"{table.get(u'title', u'')}." 
) data = input_data.filter_data(table, continue_on_error=True) # Prepare the header of the tables try: header = [ u"Test Case", f"{table[u'reference'][u'title']} " f"Avg({table[u'include-tests']})", f"{table[u'reference'][u'title']} " f"Stdev({table[u'include-tests']})", f"{table[u'compare'][u'title']} " f"Avg({table[u'include-tests']})", f"{table[u'compare'][u'title']} " f"Stdev({table[u'include-tests']})", f"Diff({table[u'reference'][u'title']}," f"{table[u'compare'][u'title']})", u"Stdev(Diff)" ] legend = ( u"\nLegend:\n" f"{table[u'reference'][u'title']} " f"Avg({table[u'include-tests']}): " f"Mean value of {table[u'include-tests']} [Mpps] computed from a " f"series of runs of the listed tests executed using " f"{table[u'reference'][u'title']} NIC.\n" f"{table[u'reference'][u'title']} " f"Stdev({table[u'include-tests']}): " f"Standard deviation value of {table[u'include-tests']} [Mpps] " f"computed from a series of runs of the listed tests executed " f"using {table[u'reference'][u'title']} NIC.\n" f"{table[u'compare'][u'title']} " f"Avg({table[u'include-tests']}): " f"Mean value of {table[u'include-tests']} [Mpps] computed from a " f"series of runs of the listed tests executed using " f"{table[u'compare'][u'title']} NIC.\n" f"{table[u'compare'][u'title']} " f"Stdev({table[u'include-tests']}): " f"Standard deviation value of {table[u'include-tests']} [Mpps] " f"computed from a series of runs of the listed tests executed " f"using {table[u'compare'][u'title']} NIC.\n" f"Diff({table[u'reference'][u'title']}," f"{table[u'compare'][u'title']}): " f"Percentage change calculated for mean values.\n" u"Stdev(Diff): " u"Standard deviation of percentage change calculated for mean " u"values.\n" u":END" ) except (AttributeError, KeyError) as err: logging.error(f"The model is invalid, missing parameter: {repr(err)}") return # Prepare data to the table: tbl_dict = dict() for job, builds in table[u"data"].items(): for build in builds: for tst_name, tst_data in data[job][str(build)].items(): tst_name_mod = _tpc_modify_test_name(tst_name, ignore_nic=True) if tbl_dict.get(tst_name_mod, None) is None: name = tst_data[u'name'].rsplit(u'-', 1)[0] tbl_dict[tst_name_mod] = { u"name": name, u"ref-data": list(), u"cmp-data": list() } try: if table[u"include-tests"] == u"MRR": result = (tst_data[u"result"][u"receive-rate"], tst_data[u"result"][u"receive-stdev"]) elif table[u"include-tests"] == u"PDR": result = tst_data[u"throughput"][u"PDR"][u"LOWER"] elif table[u"include-tests"] == u"NDR": result = tst_data[u"throughput"][u"NDR"][u"LOWER"] else: continue if result and \ table[u"reference"][u"nic"] in tst_data[u"tags"]: tbl_dict[tst_name_mod][u"ref-data"].append(result) elif result and \ table[u"compare"][u"nic"] in tst_data[u"tags"]: tbl_dict[tst_name_mod][u"cmp-data"].append(result) except (TypeError, KeyError) as err: logging.debug(f"No data for {tst_name}\n{repr(err)}") # No data in output.xml for this test tbl_lst = list() for tst_name in tbl_dict: item = [tbl_dict[tst_name][u"name"], ] data_r = tbl_dict[tst_name][u"ref-data"] if data_r: if table[u"include-tests"] == u"MRR": data_r_mean = data_r[0][0] data_r_stdev = data_r[0][1] else: data_r_mean = mean(data_r) data_r_stdev = stdev(data_r) item.append(round(data_r_mean / 1e6, 1)) item.append(round(data_r_stdev / 1e6, 1)) else: data_r_mean = None data_r_stdev = None item.extend([None, None]) data_c = tbl_dict[tst_name][u"cmp-data"] if data_c: if table[u"include-tests"] == u"MRR": data_c_mean = data_c[0][0] data_c_stdev = data_c[0][1] else: data_c_mean = 
mean(data_c) data_c_stdev = stdev(data_c) item.append(round(data_c_mean / 1e6, 1)) item.append(round(data_c_stdev / 1e6, 1)) else: data_c_mean = None data_c_stdev = None item.extend([None, None]) if data_r_mean is not None and data_c_mean is not None: delta, d_stdev = relative_change_stdev( data_r_mean, data_c_mean, data_r_stdev, data_c_stdev ) try: item.append(round(delta)) except ValueError: item.append(delta) try: item.append(round(d_stdev)) except ValueError: item.append(d_stdev) tbl_lst.append(item) # Sort the table according to the relative change tbl_lst.sort(key=lambda rel: rel[-1], reverse=True) # Generate csv tables: with open(f"{table[u'output-file']}.csv", u"wt") as file_handler: file_handler.write(u";".join(header) + u"\n") for test in tbl_lst: file_handler.write(u";".join([str(item) for item in test]) + u"\n") convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv", f"{table[u'output-file']}.txt", delimiter=u";") with open(table[u'output-file'], u'a') as txt_file: txt_file.write(legend) # Generate html table: _tpc_generate_html_table( header, tbl_lst, table[u'output-file'], legend=legend, title=table.get(u"title", u"") ) def table_soak_vs_ndr(table, input_data): """Generate the table(s) with algorithm: table_soak_vs_ndr specified in the specification file. :param table: Table to generate. :param input_data: Data to process. :type table: pandas.Series :type input_data: InputData """ logging.info(f" Generating the table {table.get(u'title', u'')} ...") # Transform the data logging.info( f" Creating the data set for the {table.get(u'type', u'')} " f"{table.get(u'title', u'')}." ) data = input_data.filter_data(table, continue_on_error=True) # Prepare the header of the table try: header = [ u"Test Case", f"Avg({table[u'reference'][u'title']})", f"Stdev({table[u'reference'][u'title']})", f"Avg({table[u'compare'][u'title']})", f"Stdev{table[u'compare'][u'title']})", u"Diff", u"Stdev(Diff)" ] header_str = u";".join(header) + u"\n" legend = ( u"\nLegend:\n" f"Avg({table[u'reference'][u'title']}): " f"Mean value of {table[u'reference'][u'title']} [Mpps] computed " f"from a series of runs of the listed tests.\n" f"Stdev({table[u'reference'][u'title']}): " f"Standard deviation value of {table[u'reference'][u'title']} " f"[Mpps] computed from a series of runs of the listed tests.\n" f"Avg({table[u'compare'][u'title']}): " f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from " f"a series of runs of the listed tests.\n" f"Stdev({table[u'compare'][u'title']}): " f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] " f"computed from a series of runs of the listed tests.\n" f"Diff({table[u'reference'][u'title']}," f"{table[u'compare'][u'title']}): " f"Percentage change calculated for mean values.\n" u"Stdev(Diff): " u"Standard deviation of percentage change calculated for mean " u"values.\n" u":END" ) except (AttributeError, KeyError) as err: logging.error(f"The model is invalid, missing parameter: {repr(err)}") return # Create a list of available SOAK test results: tbl_dict = dict() for job, builds in table[u"compare"][u"data"].items(): for build in builds: for tst_name, tst_data in data[job][str(build)].items(): if tst_data[u"type"] == u"SOAK": tst_name_mod = tst_name.replace(u"-soak", u"") if tbl_dict.get(tst_name_mod, None) is None: groups = re.search(REGEX_NIC, tst_data[u"parent"]) nic = groups.group(0) if groups else u"" name = ( f"{nic}-" f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}" ) tbl_dict[tst_name_mod] = { u"name": name, u"ref-data": list(), 
u"cmp-data": list() } try: tbl_dict[tst_name_mod][u"cmp-data"].append( tst_data[u"throughput"][u"LOWER"]) except (KeyError, TypeError): pass tests_lst = tbl_dict.keys() # Add corresponding NDR test results: for job, builds in table[u"reference"][u"data"].items(): for build in builds: for tst_name, tst_data in data[job][str(build)].items(): tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\ replace(u"-mrr", u"") if tst_name_mod not in tests_lst: continue try: if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"): continue if table[u"include-tests"] == u"MRR": result = (tst_data[u"result"][u"receive-rate"], tst_data[u"result"][u"receive-stdev"]) elif table[u"include-tests"] == u"PDR": result = \ tst_data[u"throughput"][u"PDR"][u"LOWER"] elif table[u"include-tests"] == u"NDR": result = \ tst_data[u"throughput"][u"NDR"][u"LOWER"] else: result = None if result is not None: tbl_dict[tst_name_mod][u"ref-data"].append( result) except (KeyError, TypeError): continue tbl_lst = list() for tst_name in tbl_dict: item = [tbl_dict[tst_name][u"name"], ] data_r = tbl_dict[tst_name][u"ref-data"] if data_r: if table[u"include-tests"] == u"MRR": data_r_mean = data_r[0][0] data_r_stdev = data_r[0][1] else: data_r_mean = mean(data_r) data_r_stdev = stdev(data_r) item.append(round(data_r_mean / 1e6, 1)) item.append(round(data_r_stdev / 1e6, 1)) else: data_r_mean = None data_r_stdev = None item.extend([None, None]) data_c = tbl_dict[tst_name][u"cmp-data"] if data_c: if table[u"include-tests"] == u"MRR": data_c_mean = data_c[0][0] data_c_stdev = data_c[0][1] else: data_c_mean = mean(data_c) data_c_stdev = stdev(data_c) item.append(round(data_c_mean / 1e6, 1)) item.append(round(data_c_stdev / 1e6, 1)) else: data_c_mean = None data_c_stdev = None item.extend([None, None]) if data_r_mean is not None and data_c_mean is not None: delta, d_stdev = relative_change_stdev( data_r_mean, data_c_mean, data_r_stdev, data_c_stdev) try: item.append(round(delta)) except ValueError: item.append(delta) try: item.append(round(d_stdev)) except ValueError: item.append(d_stdev) tbl_lst.append(item) # Sort the table according to the relative change tbl_lst.sort(key=lambda rel: rel[-1], reverse=True) # Generate csv tables: csv_file = f"{table[u'output-file']}.csv" with open(csv_file, u"wt") as file_handler: file_handler.write(header_str) for test in tbl_lst: file_handler.write(u";".join([str(item) for item in test]) + u"\n") convert_csv_to_pretty_txt( csv_file, f"{table[u'output-file']}.txt", delimiter=u";" ) with open(f"{table[u'output-file']}.txt", u'a') as txt_file: txt_file.write(legend) # Generate html table: _tpc_generate_html_table( header, tbl_lst, table[u'output-file'], legend=legend, title=table.get(u"title", u"") ) def table_perf_trending_dash(table, input_data): """Generate the table(s) with algorithm: table_perf_trending_dash specified in the specification file. :param table: Table to generate. :param input_data: Data to process. :type table: pandas.Series :type input_data: InputData """ logging.info(f" Generating the table {table.get(u'title', u'')} ...") # Transform the data logging.info( f" Creating the data set for the {table.get(u'type', u'')} " f"{table.get(u'title', u'')}." 
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    tbl_dict[tst_name][u"data"][str(build)] = \
                        tst_data[u"result"][u"receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)

        if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
                continue
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1e6, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count(u"regression"),
                 classification_lst[-win_size:].count(u"progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f" Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f" Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")


def _generate_url(testbed, test_name):
    """Generate URL to a trending plot from the name of the test case.

    :param testbed: The testbed used for testing.
    :param test_name: The name of the test case.
    :type testbed: str
    :type test_name: str
    :returns: The URL to the plot with the trending data for the given test
        case.
    :rtype: str
    """

    if u"x520" in test_name:
        nic = u"x520"
    elif u"x710" in test_name:
        nic = u"x710"
    elif u"xl710" in test_name:
        nic = u"xl710"
    elif u"xxv710" in test_name:
        nic = u"xxv710"
    elif u"vic1227" in test_name:
        nic = u"vic1227"
    elif u"vic1385" in test_name:
        nic = u"vic1385"
    elif u"x553" in test_name:
        nic = u"x553"
    elif u"cx556" in test_name or u"cx556a" in test_name:
        nic = u"cx556a"
    else:
        nic = u""

    if u"64b" in test_name:
        frame_size = u"64b"
    elif u"78b" in test_name:
        frame_size = u"78b"
    elif u"imix" in test_name:
        frame_size = u"imix"
    elif u"9000b" in test_name:
        frame_size = u"9000b"
    elif u"1518b" in test_name:
        frame_size = u"1518b"
    elif u"114b" in test_name:
        frame_size = u"114b"
    else:
        frame_size = u""

    if u"1t1c" in test_name or \
        (u"-1c-" in test_name and
         testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
        cores = u"1t1c"
    elif u"2t2c" in test_name or \
         (u"-2c-" in test_name and
          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
        cores = u"2t2c"
    elif u"4t4c" in test_name or \
         (u"-4c-" in test_name and
          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
        cores = u"4t4c"
    elif u"2t1c" in test_name or \
         (u"-1c-" in test_name and
          testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
        cores = u"2t1c"
    elif u"4t2c" in test_name or \
         (u"-2c-" in test_name and
          testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
        cores = u"4t2c"
    elif u"8t4c" in test_name or \
         (u"-4c-" in test_name and
          testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
        cores = u"8t4c"
    else:
        cores = u""

    if u"testpmd" in test_name:
        driver = u"testpmd"
    elif u"l3fwd" in test_name:
        driver = u"l3fwd"
    elif u"avf" in test_name:
        driver = u"avf"
    elif u"rdma" in test_name:
        driver = u"rdma"
    elif u"dnv" in testbed or u"tsh" in testbed:
        driver = u"ixgbe"
    else:
        driver = u"dpdk"

    if u"acl" in test_name or \
            u"macip" in test_name or \
            u"nat" in test_name or \
            u"policer" in test_name or \
            u"cop" in test_name:
        bsf = u"features"
    elif u"scale" in test_name:
        bsf = u"scale"
    elif u"base" in test_name:
        bsf = u"base"
    else:
        bsf = u"base"

    if u"114b" in test_name and u"vhost" in test_name:
        domain = u"vts"
    elif u"testpmd" in test_name or u"l3fwd" in test_name:
        domain = u"dpdk"
    elif u"memif" in test_name:
        domain = u"container_memif"
    elif u"srv6" in test_name:
        domain = u"srv6"
    elif u"vhost" in test_name:
        domain = u"vhost"
        if u"vppl2xc" in test_name:
            driver += u"-vpp"
        else:
            driver += u"-testpmd"
        if u"lbvpplacp" in test_name:
            bsf += u"-link-bonding"
    elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
        domain = u"nf_service_density_vnfc"
    elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
        domain = u"nf_service_density_cnfc"
    elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
        domain = u"nf_service_density_cnfp"
    elif u"ipsec" in test_name:
        domain = u"ipsec"
        if u"sw" in test_name:
            bsf += u"-sw"
        elif u"hw" in test_name:
            bsf += u"-hw"
    elif u"ethip4vxlan" in test_name:
        domain = u"ip4_tunnels"
    elif u"ip4base" in test_name or u"ip4scale" in test_name:
        domain = u"ip4"
    elif u"ip6base" in test_name or u"ip6scale" in test_name:
        domain = u"ip6"
    elif u"l2xcbase" in test_name or \
            u"l2xcscale" in test_name or \
            u"l2bdbasemaclrn" in test_name or \
            u"l2bdscale" in test_name or \
            u"l2patch" in test_name:
        domain = u"l2"
    else:
        domain = u""

    file_name = u"-".join((domain, testbed, nic)) + u".html#"
    anchor_name = u"-".join((frame_size, cores, bsf, driver))

    return file_name + anchor_name


def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm: table_perf_trending_dash_html
    specified in the
    specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header:
    trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows:
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
    for r_idx, row in enumerate(csv_lst[1:]):
        if int(row[4]):
            color = u"regression"
        elif int(row[5]):
            color = u"progression"
        else:
            color = u"normal"
        trow = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow, u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # Name:
            if c_idx == 0:
                ref = ET.SubElement(
                    tdata, u"a", attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), item)}"
                    )
                )
                ref.text = item
            else:
                tdata.text = item

    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f" Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t