author    Tibor Frank <tifrank@cisco.com>  2020-02-04 09:49:54 +0100
committer Tibor Frank <tifrank@cisco.com>  2020-02-07 09:31:44 +0000
commit    e13113b1e38772d9513a22750122799367554eb8 (patch)
tree      251a0498db06ab8b0d81628e0736e81ad0c3e759 /resources/tools
parent    e1f2f97b23ec5ae55336d0735ea6fe6a5dd2ba24 (diff)
PAL: Operational data
Change-Id: I43eb12117ddc9b6df447f0303881bf7ec6153900
Signed-off-by: Tibor Frank <tifrank@cisco.com>
(cherry picked from commit 2c06090c7b919783a91d3ca9ca8c8319b5c35b07)
Diffstat (limited to 'resources/tools')
-rw-r--r--  resources/tools/presentation/generator_files.py   |  83
-rw-r--r--  resources/tools/presentation/generator_tables.py  | 197
-rw-r--r--  resources/tools/presentation/input_data_parser.py | 360
-rw-r--r--  resources/tools/presentation/pal.py                |   6
-rw-r--r--  resources/tools/presentation/specification.yaml    | 149
5 files changed, 532 insertions(+), 263 deletions(-)
diff --git a/resources/tools/presentation/generator_files.py b/resources/tools/presentation/generator_files.py
index fdd364fc12..4543a6ee2b 100644
--- a/resources/tools/presentation/generator_files.py
+++ b/resources/tools/presentation/generator_files.py
@@ -41,7 +41,8 @@ def generate_files(spec, data):
"""
generator = {
- u"file_test_results": file_test_results
+ u"file_test_results": file_test_results,
+ u"file_test_results_html": file_test_results_html
}
logging.info(u"Generating the files ...")
@@ -144,3 +145,83 @@ def file_test_results(file_spec, input_data):
file_html=tbl_file.split(u"/")[-1]))
logging.info(u" Done.")
+
+
+def file_test_results_html(file_spec, input_data):
+ """Generate the file(s) with algorithms
+ - file_test_results_html
+ specified in the specification file.
+
+ :param file_spec: File to generate.
+ :param input_data: Data to process.
+ :type file_spec: pandas.Series
+ :type input_data: InputData
+ """
+
+ file_name = f"{file_spec[u'output-file']}.rst"
+ rst_header = file_spec[u"file-header"]
+
+ logging.info(f" Generating the file {file_name} ...")
+
+ table_lst = get_files(file_spec[u"dir-tables"], u".rst", full_path=True)
+ if not table_lst:
+ logging.error(
+ f" No tables to include in {file_spec[u'dir-tables']}. Skipping."
+ )
+ return
+
+ logging.info(f" Writing file {file_name}")
+
+ logging.info(
+ f" Creating the tests data set for the "
+ f"{file_spec.get(u'type', u'')} {file_spec.get(u'title', u'')}."
+ )
+
+ tests = input_data.filter_data(
+ file_spec,
+ params=[u"name", u"parent", u"doc", u"type", u"level"],
+ continue_on_error=True
+ )
+ if tests.empty:
+ return
+ tests = input_data.merge_data(tests)
+ tests.sort_index(inplace=True)
+
+ suites = input_data.filter_data(
+ file_spec,
+ continue_on_error=True,
+ data_set=u"suites"
+ )
+ if suites.empty:
+ return
+ suites = input_data.merge_data(suites)
+
+ with open(file_name, u"wt") as file_handler:
+ file_handler.write(rst_header)
+ for suite_longname, suite in suites.items():
+ if len(suite_longname.split(u".")) <= \
+ file_spec[u"data-start-level"]:
+ continue
+
+ title_line = \
+ get_rst_title_char(
+ suite[u"level"] - file_spec[u"data-start-level"] - 1
+ ) * len(suite[u"name"])
+ if not (u"-ndrpdr" in suite[u"name"] or
+ u"-mrr" in suite[u"name"] or
+ u"-func" in suite[u"name"] or
+ u"-device" in suite[u"name"]):
+ file_handler.write(f"\n{suite[u'name']}\n{title_line}\n")
+
+ if _tests_in_suite(suite[u"name"], tests):
+ file_handler.write(f"\n{suite[u'name']}\n{title_line}\n")
+ file_handler.write(
+ f"\n{suite[u'doc']}\n".replace(u'|br|', u'\n\n -')
+ )
+ for tbl_file in table_lst:
+ if suite[u"name"] in tbl_file:
+ file_handler.write(
+ f".. include:: {tbl_file.split(u'/')[-1]}"
+ )
+
+ logging.info(u" Done.")
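The new generator walks the merged suite structure, writes one RST section per suite, and pulls each suite's pre-rendered HTML table in via an .. include:: directive. Below is a minimal, self-contained sketch of that layout logic; the suite record and table path are made up, and get_rst_title_char is a simplified stand-in for the helper the module imports:

import io

def get_rst_title_char(level):
    """Return an RST underline character for the given heading level."""
    return u"=-`'.~*+^"[level] if 0 <= level < 9 else u"^"

def write_suite_section(file_handler, suite, start_level, table_files):
    """Write one suite heading and include its table file, if any."""
    title_line = get_rst_title_char(
        suite[u"level"] - start_level - 1
    ) * len(suite[u"name"])
    file_handler.write(f"\n{suite[u'name']}\n{title_line}\n")
    for tbl_file in table_files:
        if suite[u"name"] in tbl_file:
            file_handler.write(f".. include:: {tbl_file.split(u'/')[-1]}\n")

buf = io.StringIO()
write_suite_section(
    buf,
    {u"name": u"64b-ip4routing", u"level": 5},
    3,
    [u"/tmp/tables/64b-ip4routing.rst"],
)
print(buf.getvalue())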
diff --git a/resources/tools/presentation/generator_tables.py b/resources/tools/presentation/generator_tables.py
index 83ae43251e..90a8c2c1c4 100644
--- a/resources/tools/presentation/generator_tables.py
+++ b/resources/tools/presentation/generator_tables.py
@@ -57,7 +57,8 @@ def generate_tables(spec, data):
u"table_perf_trending_dash_html": table_perf_trending_dash_html,
u"table_last_failed_tests": table_last_failed_tests,
u"table_failed_tests": table_failed_tests,
- u"table_failed_tests_html": table_failed_tests_html
+ u"table_failed_tests_html": table_failed_tests_html,
+ u"table_oper_data_html": table_oper_data_html
}
logging.info(u"Generating the tables ...")
@@ -72,6 +73,200 @@ def generate_tables(spec, data):
logging.info(u"Done.")
+def table_oper_data_html(table, input_data):
+ """Generate the table(s) with algorithm: html_table_oper_data
+ specified in the specification file.
+
+ :param table: Table to generate.
+ :param input_data: Data to process.
+ :type table: pandas.Series
+ :type input_data: InputData
+ """
+
+ logging.info(f" Generating the table {table.get(u'title', u'')} ...")
+ # Transform the data
+ logging.info(
+ f" Creating the data set for the {table.get(u'type', u'')} "
+ f"{table.get(u'title', u'')}."
+ )
+ data = input_data.filter_data(
+ table,
+ params=[u"name", u"parent", u"show-run", u"type"],
+ continue_on_error=True
+ )
+ if data.empty:
+ return
+ data = input_data.merge_data(data)
+ data.sort_index(inplace=True)
+
+ suites = input_data.filter_data(
+ table,
+ continue_on_error=True,
+ data_set=u"suites"
+ )
+ if suites.empty:
+ return
+ suites = input_data.merge_data(suites)
+
+ def _generate_html_table(tst_data):
+ """Generate an HTML table with operational data for the given test.
+
+ :param tst_data: Test data to be used to generate the table.
+ :type tst_data: pandas.Series
+ :returns: HTML table with operational data.
+ :rtype: str
+ """
+
+ colors = {
+ u"header": u"#7eade7",
+ u"empty": u"#ffffff",
+ u"body": (u"#e9f1fb", u"#d4e4f7")
+ }
+
+ tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
+
+ trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
+ thead = ET.SubElement(
+ trow, u"th", attrib=dict(align=u"left", colspan=u"6")
+ )
+ thead.text = tst_data[u"name"]
+
+ trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
+ thead = ET.SubElement(
+ trow, u"th", attrib=dict(align=u"left", colspan=u"6")
+ )
+ thead.text = u"\t"
+
+ if tst_data.get(u"show-run", u"No Data") == u"No Data":
+ trow = ET.SubElement(
+ tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
+ )
+ tcol = ET.SubElement(
+ trow, u"td", attrib=dict(align=u"left", colspan=u"6")
+ )
+ tcol.text = u"No Data"
+ return str(ET.tostring(tbl, encoding=u"unicode"))
+
+ tbl_hdr = (
+ u"Name",
+ u"Nr of Vectors",
+ u"Nr of Packets",
+ u"Suspends",
+ u"Cycles per Packet",
+ u"Average Vector Size"
+ )
+
+ for dut_name, dut_data in tst_data[u"show-run"].items():
+ trow = ET.SubElement(
+ tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
+ )
+ tcol = ET.SubElement(
+ trow, u"td", attrib=dict(align=u"left", colspan=u"6")
+ )
+ if dut_data.get(u"threads", None) is None:
+ tcol.text = u"No Data"
+ continue
+ bold = ET.SubElement(tcol, u"b")
+ bold.text = dut_name
+
+ trow = ET.SubElement(
+ tbl, u"tr", attrib=dict(bgcolor=colors[u"body"][0])
+ )
+ tcol = ET.SubElement(
+ trow, u"td", attrib=dict(align=u"left", colspan=u"6")
+ )
+ bold = ET.SubElement(tcol, u"b")
+ bold.text = (
+ f"Host IP: {dut_data.get(u'host', '')}, "
+ f"Socket: {dut_data.get(u'socket', '')}"
+ )
+ trow = ET.SubElement(
+ tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
+ )
+ thead = ET.SubElement(
+ trow, u"th", attrib=dict(align=u"left", colspan=u"6")
+ )
+ thead.text = u"\t"
+
+ for thread_nr, thread in dut_data[u"threads"].items():
+ trow = ET.SubElement(
+ tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
+ )
+ tcol = ET.SubElement(
+ trow, u"td", attrib=dict(align=u"left", colspan=u"6")
+ )
+ bold = ET.SubElement(tcol, u"b")
+ bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
+ trow = ET.SubElement(
+ tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
+ )
+ for idx, col in enumerate(tbl_hdr):
+ tcol = ET.SubElement(
+ trow, u"td",
+ attrib=dict(align=u"right" if idx else u"left")
+ )
+ font = ET.SubElement(
+ tcol, u"font", attrib=dict(size=u"2")
+ )
+ bold = ET.SubElement(font, u"b")
+ bold.text = col
+ for row_nr, row in enumerate(thread):
+ trow = ET.SubElement(
+ tbl, u"tr",
+ attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
+ )
+ for idx, col in enumerate(row):
+ tcol = ET.SubElement(
+ trow, u"td",
+ attrib=dict(align=u"right" if idx else u"left")
+ )
+ font = ET.SubElement(
+ tcol, u"font", attrib=dict(size=u"2")
+ )
+ if isinstance(col, float):
+ font.text = f"{col:.2f}"
+ else:
+ font.text = str(col)
+ trow = ET.SubElement(
+ tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
+ )
+ thead = ET.SubElement(
+ trow, u"th", attrib=dict(align=u"left", colspan=u"6")
+ )
+ thead.text = u"\t"
+
+ trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
+ thead = ET.SubElement(
+ trow, u"th", attrib=dict(align=u"left", colspan=u"6")
+ )
+ font = ET.SubElement(
+ thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
+ )
+ font.text = u"."
+
+ return str(ET.tostring(tbl, encoding=u"unicode"))
+
+ for suite in suites.values:
+ html_table = str()
+ for test_data in data.values:
+ if test_data[u"parent"] not in suite[u"name"]:
+ continue
+ html_table += _generate_html_table(test_data)
+ if not html_table:
+ continue
+ try:
+ file_name = f"{table[u'output-file']}_{suite[u'name']}.rst"
+ with open(f"{file_name}", u'w') as html_file:
+ logging.info(f" Writing file: {file_name}")
+ html_file.write(u".. raw:: html\n\n\t")
+ html_file.write(html_table)
+ html_file.write(u"\n\t<p><br><br></p>\n")
+ except KeyError:
+ logging.warning(u"The output file is not defined.")
+ return
+ logging.info(u" Done.")
+
+
def table_details(table, input_data):
"""Generate the table(s) with algorithm: table_detailed_test_results
specified in the specification file.
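table_oper_data_html assembles its output purely through xml.etree.ElementTree, so the markup is well formed before it is dumped into the .. raw:: html block. A toy reduction of the same pattern, using the header/body colors from _generate_html_table above and made-up data:

import xml.etree.ElementTree as ET

def small_oper_table(rows, header=(u"Name", u"Calls", u"Vectors/Calls")):
    """Build a tiny HTML table the same way _generate_html_table does."""
    tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
    trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, col in enumerate(header):
        tcol = ET.SubElement(
            trow, u"td", attrib=dict(align=u"right" if idx else u"left")
        )
        bold = ET.SubElement(tcol, u"b")
        bold.text = col
    for row_nr, row in enumerate(rows):
        # Alternate the two body colors, as the real function does.
        trow = ET.SubElement(
            tbl, u"tr",
            attrib=dict(bgcolor=(u"#e9f1fb", u"#d4e4f7")[row_nr % 2])
        )
        for idx, col in enumerate(row):
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"right" if idx else u"left")
            )
            tcol.text = f"{col:.2f}" if isinstance(col, float) else str(col)
    return ET.tostring(tbl, encoding=u"unicode")

print(small_oper_table([[u"ip4-lookup", 521, 2.55], [u"ip4-rewrite", 521, 2.31]]))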
diff --git a/resources/tools/presentation/input_data_parser.py b/resources/tools/presentation/input_data_parser.py
index 268224f0c6..ddfd96c77f 100644
--- a/resources/tools/presentation/input_data_parser.py
+++ b/resources/tools/presentation/input_data_parser.py
@@ -201,14 +201,24 @@ class ExecutionChecker(ResultVisitor):
.. note:: ID is the lowercase full path to the test.
"""
- REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
- r'PLRsearch upper bound::?\s(\d+.\d+)')
-
- REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
- r'NDR_UPPER:\s(\d+.\d+).*\n'
- r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
- r'PDR_UPPER:\s(\d+.\d+)')
-
+ REGEX_PLR_RATE = re.compile(
+ r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
+ r'PLRsearch upper bound::?\s(\d+.\d+)'
+ )
+ REGEX_NDRPDR_RATE = re.compile(
+ r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
+ r'NDR_UPPER:\s(\d+.\d+).*\n'
+ r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
+ r'PDR_UPPER:\s(\d+.\d+)'
+ )
+ REGEX_PERF_MSG_INFO = re.compile(
+ r'NDR_LOWER:\s(\d+.\d+)\s([a-zA-Z]*).*\s(\d+.\d+)\s([a-zA-Z]*).*\n'
+ r'LATENCY.*\[\'(.*)\', \'(.*)\'\].*\n'
+ r'NDR_UPPER:\s(\d+.\d+)\s([a-zA-Z]*).*\s(\d+.\d+)\s([a-zA-Z]*).*\n'
+ r'PDR_LOWER:\s(\d+.\d+)\s([a-zA-Z]*).*\s(\d+.\d+)\s([a-zA-Z]*).*\n'
+ r'LATENCY.*\[\'(.*)\', \'(.*)\'\].*\n'
+ r'PDR_UPPER:\s(\d+.\d+)\s([a-zA-Z]*).*\s(\d+.\d+)\s([a-zA-Z]*)'
+ )
# TODO: Remove when not needed
REGEX_NDRPDR_LAT_BASE = re.compile(
r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
@@ -234,26 +244,30 @@ class ExecutionChecker(ResultVisitor):
r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
r'Latency.*\[\'(.*)\', \'(.*)\'\]'
)
-
- REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
- r'[\D\d]*')
-
- REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
- r"VPP Version:\s*|VPP version:\s*)(.*)")
-
- REGEX_VERSION_DPDK = re.compile(r"(DPDK version:\s*|DPDK Version:\s*)(.*)")
-
- REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s(\d*).*$')
-
- REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
- r'tx\s(\d*),\srx\s(\d*)')
-
- REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
- r' in packets per second: \[(.*)\]')
-
- REGEX_RECONF_LOSS = re.compile(r'Packets lost due to reconfig: (\d*)')
- REGEX_RECONF_TIME = re.compile(r'Implied time lost: (\d*.[\de-]*)')
-
+ REGEX_VERSION_VPP = re.compile(
+ r"(return STDOUT Version:\s*|"
+ r"VPP Version:\s*|VPP version:\s*)(.*)"
+ )
+ REGEX_VERSION_DPDK = re.compile(
+ r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
+ )
+ REGEX_TCP = re.compile(
+ r'Total\s(rps|cps|throughput):\s(\d*).*$'
+ )
+ REGEX_MRR = re.compile(
+ r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
+ r'tx\s(\d*),\srx\s(\d*)'
+ )
+ REGEX_BMRR = re.compile(
+ r'Maximum Receive Rate trial results'
+ r' in packets per second: \[(.*)\]'
+ )
+ REGEX_RECONF_LOSS = re.compile(
+ r'Packets lost due to reconfig: (\d*)'
+ )
+ REGEX_RECONF_TIME = re.compile(
+ r'Implied time lost: (\d*.[\de-]*)'
+ )
REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
@@ -299,14 +313,8 @@ class ExecutionChecker(ResultVisitor):
# 0 - no message
# 1 - PAPI History of DUT1
# 2 - PAPI History of DUT2
- self._lookup_kw_nr = 0
self._conf_history_lookup_nr = 0
- # Number of Show Running messages found
- # 0 - no message
- # 1 - Show run message found
- self._show_run_lookup_nr = 0
-
# Test ID of the currently processed test - the lowercase full path to
# the test
self._test_id = None
@@ -344,6 +352,89 @@ class ExecutionChecker(ResultVisitor):
"""
return self._data
+ def _get_data_from_perf_test_msg(self, msg):
+ """Get
+ - NDR_LOWER
+ - LATENCY
+ - NDR_UPPER
+ - PDR_LOWER
+ - LATENCY
+ - PDR_UPPER
+    from the message of NDRPDR performance tests.
+
+ :param msg: Message to be processed.
+ :type msg: str
+ :returns: Processed message or original message if a problem occurs.
+ :rtype: str
+ """
+
+ groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
+ if not groups or groups.lastindex != 20:
+ return msg
+
+ try:
+ data = {
+ u"ndr_low": float(groups.group(1)),
+ u"ndr_low_unit": groups.group(2),
+ u"ndr_low_b": float(groups.group(3)),
+ u"ndr_low_b_unit": groups.group(4),
+ u"ndr_lat_1": groups.group(5),
+ u"ndr_lat_2": groups.group(6),
+ u"ndr_up": float(groups.group(7)),
+ u"ndr_up_unit": groups.group(8),
+ u"ndr_up_b": float(groups.group(9)),
+ u"ndr_up_b_unit": groups.group(10),
+ u"pdr_low": float(groups.group(11)),
+ u"pdr_low_unit": groups.group(12),
+ u"pdr_low_b": float(groups.group(13)),
+ u"pdr_low_b_unit": groups.group(14),
+ u"pdr_lat_1": groups.group(15),
+ u"pdr_lat_2": groups.group(16),
+ u"pdr_up": float(groups.group(17)),
+ u"pdr_up_unit": groups.group(18),
+ u"pdr_up_b": float(groups.group(19)),
+ u"pdr_up_b_unit": groups.group(20)
+ }
+ except (AttributeError, IndexError, ValueError, KeyError):
+ return msg
+
+ def _process_lat(in_str):
+ """Extract min, avg, max values from latency string.
+
+ :param in_str: Latency string produced by robot framework.
+ :type in_str: str
+ :returns: Processed latency string or original string if a problem
+ occurs.
+ :rtype: str
+ """
+ in_list = in_str.split('/', 3)
+ if len(in_list) < 3:
+ return in_str
+
+ return f"min={in_list[0]}, avg={in_list[1]}, max={in_list[2]}"
+
+ try:
+ return (
+ f"NDR Lower: {(data[u'ndr_low'] / 1e6):.2f}"
+ f"M{data[u'ndr_low_unit']}, "
+ f"{data[u'ndr_low_b']:.2f}{data[u'ndr_low_b_unit']}\n"
+ # f"NDR Upper: {(data[u'ndr_up'] / 1e6):.2f}"
+ # f"M{data[u'ndr_up_unit']}, "
+ # f"{data[u'ndr_up_b']:.2f}{data[u'ndr_up_b_unit']}\n"
+ f"NDR Latency W-E: {_process_lat(data[u'ndr_lat_1'])}\n"
+ f"NDR Latency E-W: {_process_lat(data[u'ndr_lat_2'])}\n"
+ f"PDR Lower: {(data[u'pdr_low'] / 1e6):.2f}"
+ f"M{data[u'pdr_low_unit']}, "
+ f"{data[u'pdr_low_b']:.2f}{data[u'pdr_low_b_unit']}\n"
+ # f"PDR Upper: {(data[u'pdr_up'] / 1e6):.2f}"
+ # f"M{data[u'pdr_up_unit']}, "
+ # f"{data[u'pdr_up_b']:.2f}{data[u'pdr_up_b_unit']}\n"
+ f"PDR Latency W-E: {_process_lat(data[u'pdr_lat_1'])}\n"
+ f"PDR Latency E-W: {_process_lat(data[u'pdr_lat_2'])}"
+ )
+ except (AttributeError, IndexError, ValueError, KeyError):
+ return msg
+
def _get_testbed(self, msg):
"""Called when extraction of testbed IP is required.
The testbed is identified by TG node IP address.
@@ -465,96 +556,70 @@ class ExecutionChecker(ResultVisitor):
:returns: Nothing.
"""
+ if not msg.message.count(u"stats runtime"):
+ return
+
if u"show-run" not in self._data[u"tests"][self._test_id].keys():
- self._data[u"tests"][self._test_id][u"show-run"] = str()
+ self._data[u"tests"][self._test_id][u"show-run"] = dict()
- if msg.message.count(u"stats runtime") or \
- msg.message.count(u"Runtime"):
- try:
- host = str(re.search(self.REGEX_TC_PAPI_CLI, msg.message).
- group(1))
- except (AttributeError, IndexError):
- host = self._data[u"tests"][self._test_id][u"show-run"].\
- count(u"DUT:") + 1
- try:
- socket = str(re.search(self.REGEX_TC_PAPI_CLI, msg.message).
- group(2))
- socket = f"/{socket}"
- except (AttributeError, IndexError):
- socket = u""
- runtime = loads(
- str(msg.message).
- replace(u' ', u'').
- replace(u'\n', u'').
- replace(u"'", u'"').
- replace(u'b"', u'"').
- replace(u'u"', u'"').
- split(u":", 1)[1]
- )
- try:
- threads_nr = len(runtime[0][u"clocks"])
- except (IndexError, KeyError):
- return
- tbl_hdr = [
- u"Name",
- u"Calls",
- u"Vectors",
- u"Suspends",
- u"Clocks",
- u"Vectors/Calls"
- ]
- table = [[tbl_hdr, ] for _ in range(threads_nr)]
- for item in runtime:
- for idx in range(threads_nr):
- name = format(item[u"name"])
- calls = format(item[u"calls"][idx])
- vectors = format(item[u"vectors"][idx])
- suspends = format(item[u"suspends"][idx])
- if item[u"vectors"][idx] > 0:
- clocks = format(
- item[u"clocks"][idx]/item[u"vectors"][idx], u".2e")
- elif item[u"calls"][idx] > 0:
- clocks = format(
- item[u"clocks"][idx]/item[u"calls"][idx], u".2e")
- elif item[u"suspends"][idx] > 0:
- clocks = format(
- item[u"clocks"][idx]/item[u"suspends"][idx], u".2e")
- else:
- clocks = 0
- if item[u"calls"][idx] > 0:
- vectors_call = format(
- item[u"vectors"][idx]/item[u"calls"][idx], u".2f")
- else:
- vectors_call = format(0, u".2f")
- if int(calls) + int(vectors) + int(suspends):
- table[idx].append([
- name, calls, vectors, suspends, clocks, vectors_call
- ])
- text = ""
+ groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
+ if not groups:
+ return
+ try:
+ host = groups.group(1)
+ except (AttributeError, IndexError):
+ host = u""
+ try:
+ sock = groups.group(2)
+ except (AttributeError, IndexError):
+ sock = u""
+
+ runtime = loads(str(msg.message).replace(u' ', u'').replace(u'\n', u'').
+ replace(u"'", u'"').replace(u'b"', u'"').
+ replace(u'u"', u'"').split(u":", 1)[1])
+
+ try:
+ threads_nr = len(runtime[0][u"clocks"])
+ except (IndexError, KeyError):
+ return
+
+ dut = u"DUT{nr}".format(
+ nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)
+
+ oper = {
+ u"host": host,
+ u"socket": sock,
+ u"threads": OrderedDict({idx: list() for idx in range(threads_nr)})
+ }
+
+ for item in runtime:
for idx in range(threads_nr):
- text += f"Thread {idx} "
- text += u"vpp_main\n" if idx == 0 else f"vpp_wk_{idx-1}\n"
- txt_table = None
- for row in table[idx]:
- if txt_table is None:
- txt_table = prettytable.PrettyTable(row)
- else:
- if any(row[1:]):
- txt_table.add_row(row)
- txt_table.set_style(prettytable.MSWORD_FRIENDLY)
- txt_table.align[u"Name"] = u"l"
- txt_table.align[u"Calls"] = u"r"
- txt_table.align[u"Vectors"] = u"r"
- txt_table.align[u"Suspends"] = u"r"
- txt_table.align[u"Clocks"] = u"r"
- txt_table.align[u"Vectors/Calls"] = u"r"
-
- text += txt_table.get_string(sortby=u"Name") + u'\n'
- text = f"\n**DUT: {host}{socket}**\n{text}".\
- replace(u'\n', u' |br| ').\
- replace(u'\r', u'').\
- replace(u'"', u"'")
- self._data[u"tests"][self._test_id][u"show-run"] += text
+ if item[u"vectors"][idx] > 0:
+ clocks = item[u"clocks"][idx] / item[u"vectors"][idx]
+ elif item[u"calls"][idx] > 0:
+ clocks = item[u"clocks"][idx] / item[u"calls"][idx]
+ elif item[u"suspends"][idx] > 0:
+ clocks = item[u"clocks"][idx] / item[u"suspends"][idx]
+ else:
+ clocks = 0.0
+
+ if item[u"calls"][idx] > 0:
+ vectors_call = item[u"vectors"][idx] / item[u"calls"][idx]
+ else:
+ vectors_call = 0.0
+
+ if int(item[u"calls"][idx]) + int(item[u"vectors"][idx]) + \
+ int(item[u"suspends"][idx]):
+ oper[u"threads"][idx].append([
+ item[u"name"],
+ item[u"calls"][idx],
+ item[u"vectors"][idx],
+ item[u"suspends"][idx],
+ clocks,
+ vectors_call
+ ])
+
+ self._data[u'tests'][self._test_id][u'show-run'][dut] = copy.copy(oper)
def _get_ndrpdr_throughput(self, msg):
"""Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
@@ -851,7 +916,7 @@ class ExecutionChecker(ResultVisitor):
replace(u'\r', u'').\
replace(u'[', u' |br| [').\
replace(u' |br| [', u'[', 1)
- test_result[u"msg"] = test.message.\
+ test_result[u"msg"] = self._get_data_from_perf_test_msg(test.message).\
replace(u'\n', u' |br| ').\
replace(u'\r', u'').\
replace(u'"', u"'")
@@ -973,10 +1038,8 @@ class ExecutionChecker(ResultVisitor):
if keyword.type == u"setup":
self.visit_setup_kw(keyword)
elif keyword.type == u"teardown":
- self._lookup_kw_nr = 0
self.visit_teardown_kw(keyword)
else:
- self._lookup_kw_nr = 0
self.visit_test_kw(keyword)
except AttributeError:
pass
@@ -1012,8 +1075,6 @@ class ExecutionChecker(ResultVisitor):
"""
if test_kw.name.count(u"Show Runtime On All Duts") or \
test_kw.name.count(u"Show Runtime Counters On All Duts"):
- self._lookup_kw_nr += 1
- self._show_run_lookup_nr = 0
self._msg_type = u"test-show-runtime"
elif test_kw.name.count(u"Install Dpdk Test") and not self._version:
self._msg_type = u"dpdk-version"
@@ -1477,7 +1538,9 @@ class InputData:
"""
try:
- if element[u"filter"] in (u"all", u"template"):
+            if data_set == u"suites":
+ cond = u"True"
+ elif element[u"filter"] in (u"all", u"template"):
cond = u"True"
else:
cond = InputData._condition(element[u"filter"])
@@ -1659,3 +1722,46 @@ class InputData:
merged_data[item_id] = item_data
return merged_data
+
+ def print_all_oper_data(self):
+ """Print all operational data to console.
+ """
+
+ tbl_hdr = (
+ u"Name",
+ u"Nr of Vectors",
+ u"Nr of Packets",
+ u"Suspends",
+ u"Cycles per Packet",
+ u"Average Vector Size"
+ )
+
+ for job in self._input_data.values:
+ for build in job.values:
+ for test_id, test_data in build[u"tests"].items():
+ print(f"{test_id}")
+ if test_data.get(u"show-run", None) is None:
+ continue
+ for dut_name, data in test_data[u"show-run"].items():
+ if data.get(u"threads", None) is None:
+ continue
+ print(f"Host IP: {data.get(u'host', '')}, "
+ f"Socket: {data.get(u'socket', '')}")
+ for thread_nr, thread in data[u"threads"].items():
+ txt_table = prettytable.PrettyTable(tbl_hdr)
+ avg = 0.0
+ for row in thread:
+ txt_table.add_row(row)
+ avg += row[-1]
+ if len(thread) == 0:
+ avg = u""
+ else:
+ avg = f", Average Vector Size per Node: " \
+ f"{(avg / len(thread)):.2f}"
+ th_name = u"main" if thread_nr == 0 \
+ else f"worker_{thread_nr}"
+ print(f"{dut_name}, {th_name}{avg}")
+ txt_table.float_format = u".2"
+ txt_table.align = u"r"
+ txt_table.align[u"Name"] = u"l"
+ print(f"{txt_table.get_string()}\n")
diff --git a/resources/tools/presentation/pal.py b/resources/tools/presentation/pal.py
index 01eb8f64ff..02aae66702 100644
--- a/resources/tools/presentation/pal.py
+++ b/resources/tools/presentation/pal.py
@@ -61,6 +61,10 @@ def parse_args():
parser.add_argument(u"-f", u"--force",
action=u"store_true",
help=u"Force removing the old build(s) if present.")
+ parser.add_argument(u"-o", u"--print-all-oper-data",
+ action=u"store_true",
+ help=u"Print all operational data to console. Be "
+ u"careful, the output can be really long.")
return parser.parse_args()
@@ -103,6 +107,8 @@ def main():
data = InputData(spec)
data.download_and_parse_data(repeat=1)
+ if args.print_all_oper_data:
+ data.print_all_oper_data()
generate_tables(spec, data)
generate_plots(spec, data)
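Since argparse derives the attribute name from the long option, the new --print-all-oper-data flag surfaces in main() as args.print_all_oper_data. A minimal check of just that flag, with the parser reduced to the one argument added here:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument(u"-o", u"--print-all-oper-data",
                    action=u"store_true",
                    help=u"Print all operational data to console.")
args = parser.parse_args([u"-o"])
assert args.print_all_oper_data  # hyphens become underscores in the dest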
diff --git a/resources/tools/presentation/specification.yaml b/resources/tools/presentation/specification.yaml
index dacd2bfa5d..7f4b1c135f 100644
--- a/resources/tools/presentation/specification.yaml
+++ b/resources/tools/presentation/specification.yaml
@@ -4202,135 +4202,58 @@
# Test Operational Data - VPP Performance Operational Data 3n-hsw
- type: "table"
title: "Test Operational Data - VPP Performance Operational Data 3n-hsw"
- algorithm: "table_merged_details"
- output-file-ext: ".csv"
+ algorithm: "table_oper_data_html"
output-file: "{DIR[DTO,PERF,VPP,3N,HSW]}/vpp_test_operational_3n_hsw"
- columns:
- - title: "Name"
- data: "data name"
- - title: "VPP Operational Data - Outputs of 'show runtime' at NDR packet rate"
- data: "data show-run"
- rows: "generated"
data: "vpp-perf-results-3n-hsw"
- filter: "not ('CFS_OPT' or ('NIC_Intel-X520-DA2' and 'IPSECHW') or ('NIC_Cisco-VIC-1385' and '9000B'))"
- parameters:
- - "parent"
- - "name"
- - "show-run"
+ filter: "'NDRPDR'"
# Test Operational Data - VPP Performance Operational Data 3n-skx
- type: "table"
title: "Test Operational Data - VPP Performance Operational Data 3n-skx"
- algorithm: "table_merged_details"
- output-file-ext: ".csv"
+ algorithm: "table_oper_data_html"
output-file: "{DIR[DTO,PERF,VPP,3N,SKX]}/vpp_test_operational_3n_skx"
- columns:
- - title: "Name"
- data: "data name"
- - title: "VPP Operational Data - Outputs of 'show runtime' at NDR packet rate"
- data: "data show-run"
- rows: "generated"
data: "vpp-perf-results-3n-skx"
filter: "not 'CFS_OPT'"
- parameters:
- - "parent"
- - "name"
- - "show-run"
# Test Operational Data - VPP Performance Operational Data 2n-skx
- type: "table"
title: "Test Operational Data - VPP Performance Operational Data 2n-skx"
- algorithm: "table_merged_details"
- output-file-ext: ".csv"
+ algorithm: "table_oper_data_html"
output-file: "{DIR[DTO,PERF,VPP,2N,SKX]}/vpp_test_operational_2n_skx"
- columns:
- - title: "Name"
- data: "data name"
- - title: "VPP Operational Data - Outputs of 'show runtime' at NDR packet rate"
- data: "data show-run"
- rows: "generated"
data: "vpp-perf-results-2n-skx"
filter: "not 'CFS_OPT'"
- parameters:
- - "parent"
- - "name"
- - "show-run"
# Test Operational Data - VPP Performance Operational Data 2n-clx
- type: "table"
title: "Test Operational Data - VPP Performance Operational Data 2n-clx"
- algorithm: "table_merged_details"
- output-file-ext: ".csv"
+ algorithm: "table_oper_data_html"
output-file: "{DIR[DTO,PERF,VPP,2N,CLX]}/vpp_test_operational_2n_clx"
- columns:
- - title: "Name"
- data: "data name"
- - title: "VPP Operational Data - Outputs of 'show runtime' at NDR packet rate"
- data: "data show-run"
- rows: "generated"
data: "vpp-perf-results-2n-clx"
filter: "not 'CFS_OPT'"
- parameters:
- - "parent"
- - "name"
- - "show-run"
# Test Operational Data - VPP Performance Operational Data 3n-tsh
- type: "table"
title: "Test Operational Data - VPP Performance Operational Data 3n-tsh"
- algorithm: "table_merged_details"
- output-file-ext: ".csv"
+ algorithm: "table_oper_data_html"
output-file: "{DIR[DTO,PERF,VPP,3N,TSH]}/vpp_test_operational_3n_tsh"
- columns:
- - title: "Name"
- data: "data name"
- - title: "VPP Operational Data - Outputs of 'show runtime' at NDR packet rate"
- data: "data show-run"
- rows: "generated"
data: "vpp-perf-results-3n-tsh"
filter: "not 'CFS_OPT'"
- parameters:
- - "parent"
- - "name"
- - "show-run"
# Test Operational Data - VPP Performance Operational Data 3n-dnv
- type: "table"
title: "Test Operational Data - VPP Performance Operational Data 3n-dnv"
- algorithm: "table_details"
- output-file-ext: ".csv"
+ algorithm: "table_oper_data_html"
output-file: "{DIR[DTO,PERF,VPP,3N,DNV]}/vpp_test_operational_3n_dnv"
- columns:
- - title: "Name"
- data: "data name"
- - title: "VPP Operational Data - Outputs of 'show runtime' at NDR packet rate"
- data: "data show-run"
- rows: "generated"
data: "vpp-perf-results-3n-dnv"
filter: "'NDRPDR'"
- parameters:
- - "parent"
- - "name"
- - "show-run"
# Test Operational Data - VPP Performance Operational Data 2n-dnv
- type: "table"
title: "Test Operational Data - VPP Performance Operational Data 2n-dnv"
- algorithm: "table_details"
- output-file-ext: ".csv"
+ algorithm: "table_oper_data_html"
output-file: "{DIR[DTO,PERF,VPP,2N,DNV]}/vpp_test_operational_2n_dnv"
- columns:
- - title: "Name"
- data: "data name"
- - title: "VPP Operational Data - Outputs of 'show runtime' at NDR packet rate"
- data: "data show-run"
- rows: "generated"
data: "vpp-perf-results-2n-dnv"
filter: "'NDRPDR'"
- parameters:
- - "parent"
- - "name"
- - "show-run"
# Detailed Test Results - VPP MRR Results 3n-hsw
- type: "table"
@@ -5340,120 +5263,78 @@
# VPP Performance Operational Data 3n-hsw
- type: "file"
title: "VPP Performance Operational Data 3n-hsw"
- algorithm: "file_test_results"
- output-file-ext: ".rst"
+ algorithm: "file_test_results_html"
output-file: "{DIR[DTO,PERF,VPP,3N,HSW]}/vpp_performance_operational_data_3n_hsw"
file-header: "\n.. |br| raw:: html\n\n <br />\n\n\n.. |prein| raw:: html\n\n <pre>\n\n\n.. |preout| raw:: html\n\n </pre>\n\n"
dir-tables: "{DIR[DTO,PERF,VPP,3N,HSW]}"
data: "vpp-perf-results-3n-hsw"
filter: "not ('CFS_OPT' or ('NIC_Intel-X520-DA2' and 'IPSECHW') or ('NIC_Cisco-VIC-1385' and '9000B'))"
- parameters:
- - "name"
- - "doc"
- - "level"
- - "parent"
data-start-level: 3
# VPP Performance Operational Data 3n-skx
- type: "file"
title: "VPP Performance Operational Data 3n-skx"
- algorithm: "file_test_results"
- output-file-ext: ".rst"
+ algorithm: "file_test_results_html"
output-file: "{DIR[DTO,PERF,VPP,3N,SKX]}/vpp_performance_operational_data_3n_skx"
file-header: "\n.. |br| raw:: html\n\n <br />\n\n\n.. |prein| raw:: html\n\n <pre>\n\n\n.. |preout| raw:: html\n\n </pre>\n\n"
dir-tables: "{DIR[DTO,PERF,VPP,3N,SKX]}"
data: "vpp-perf-results-3n-skx"
filter: "not 'CFS_OPT'"
- parameters:
- - "name"
- - "doc"
- - "level"
- - "parent"
data-start-level: 3
# VPP Performance Operational Data 2n-skx
- type: "file"
title: "VPP Performance Operational Data 2n-skx"
- algorithm: "file_test_results"
- output-file-ext: ".rst"
+ algorithm: "file_test_results_html"
output-file: "{DIR[DTO,PERF,VPP,2N,SKX]}/vpp_performance_operational_data_2n_skx"
file-header: "\n.. |br| raw:: html\n\n <br />\n\n\n.. |prein| raw:: html\n\n <pre>\n\n\n.. |preout| raw:: html\n\n </pre>\n\n"
dir-tables: "{DIR[DTO,PERF,VPP,2N,SKX]}"
data: "vpp-perf-results-2n-skx"
filter: "not 'CFS_OPT'"
- parameters:
- - "name"
- - "doc"
- - "level"
- - "parent"
data-start-level: 3
# VPP Performance Operational Data 2n-clx
- type: "file"
title: "VPP Performance Operational Data 2n-clx"
- algorithm: "file_test_results"
- output-file-ext: ".rst"
+ algorithm: "file_test_results_html"
output-file: "{DIR[DTO,PERF,VPP,2N,CLX]}/vpp_performance_operational_data_2n_clx"
file-header: "\n.. |br| raw:: html\n\n <br />\n\n\n.. |prein| raw:: html\n\n <pre>\n\n\n.. |preout| raw:: html\n\n </pre>\n\n"
dir-tables: "{DIR[DTO,PERF,VPP,2N,CLX]}"
data: "vpp-perf-results-2n-clx"
filter: "not 'CFS_OPT'"
- parameters:
- - "name"
- - "doc"
- - "level"
- - "parent"
data-start-level: 3
# VPP Performance Operational Data 3n-tsh
- type: "file"
title: "VPP Performance Operational Data 3n-tsh"
- algorithm: "file_test_results"
- output-file-ext: ".rst"
+ algorithm: "file_test_results_html"
output-file: "{DIR[DTO,PERF,VPP,3N,TSH]}/vpp_performance_operational_data_3n_tsh"
file-header: "\n.. |br| raw:: html\n\n <br />\n\n\n.. |prein| raw:: html\n\n <pre>\n\n\n.. |preout| raw:: html\n\n </pre>\n\n"
dir-tables: "{DIR[DTO,PERF,VPP,3N,TSH]}"
data: "vpp-perf-results-3n-tsh"
filter: "not 'CFS_OPT'"
- parameters:
- - "name"
- - "doc"
- - "level"
- - "parent"
data-start-level: 3
# VPP Performance Operational Data 3n-dnv
- type: "file"
title: "VPP Performance Operational Data 3n-dnv"
- algorithm: "file_test_results"
- output-file-ext: ".rst"
+ algorithm: "file_test_results_html"
output-file: "{DIR[DTO,PERF,VPP,3N,DNV]}/vpp_performance_operational_data_3n_dnv"
file-header: "\n.. |br| raw:: html\n\n <br />\n\n\n.. |prein| raw:: html\n\n <pre>\n\n\n.. |preout| raw:: html\n\n </pre>\n\n"
dir-tables: "{DIR[DTO,PERF,VPP,3N,DNV]}"
data: "vpp-perf-results-3n-dnv"
filter: "'NDRPDR'"
- parameters:
- - "name"
- - "doc"
- - "level"
- - "parent"
data-start-level: 3
# VPP Performance Operational Data 2n-dnv
- type: "file"
title: "VPP Performance Operational Data 2n-dnv"
- algorithm: "file_test_results"
- output-file-ext: ".rst"
+ algorithm: "file_test_results_html"
output-file: "{DIR[DTO,PERF,VPP,2N,DNV]}/vpp_performance_operational_data_2n_dnv"
file-header: "\n.. |br| raw:: html\n\n <br />\n\n\n.. |prein| raw:: html\n\n <pre>\n\n\n.. |preout| raw:: html\n\n </pre>\n\n"
dir-tables: "{DIR[DTO,PERF,VPP,2N,DNV]}"
data: "vpp-perf-results-2n-dnv"
filter: "'NDRPDR'"
- parameters:
- - "name"
- - "doc"
- - "level"
- - "parent"
data-start-level: 3
# VPP MRR Results 3n-hsw