author     Tibor Frank <tifrank@cisco.com>  2019-05-13 13:56:39 +0200
committer  Tibor Frank <tifrank@cisco.com>  2019-05-13 13:58:11 +0200
commit     db2fcb13bab0085abaaa704e87e345d43734ac86 (patch)
tree       62ec034c3beb18374835bb0029a891a2ed43713c /resources/tools
parent     80950e65bd14f6425127f7073b960a1fb2c3c4f7 (diff)
CSIT-1500: Add comparison table for SOAK vs NDRPDR
Change-Id: I890b28b7a8c1f60cce4c985ccd94fcdfd3c47d7d
Signed-off-by: Tibor Frank <tifrank@cisco.com>
Diffstat (limited to 'resources/tools')
-rw-r--r--  resources/tools/presentation/generator_tables.py  109
-rw-r--r--  resources/tools/presentation/pal.py                 84
-rw-r--r--  resources/tools/presentation/specification.yaml     68
3 files changed, 219 insertions(+), 42 deletions(-)
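
The new table reports, per test case, the mean NDR throughput (reference), the mean SOAK critical rate (compare), their standard deviations in Mpps, and a relative delta in percent. A minimal sketch, not part of this commit, of how that delta column is computed, assuming relative_change() follows the (compare - reference) / reference * 100 convention used by the other comparison tables in PAL; the throughput values below are made up:

def relative_change(reference, compare):
    """Percentage change of 'compare' relative to 'reference' (assumed convention)."""
    return float(compare - reference) / reference * 100

ndr_mpps = 12.46    # hypothetical NDR mean throughput [Mpps]
soak_mpps = 11.02   # hypothetical SOAK critical rate [Mpps]
print(int(relative_change(ndr_mpps, soak_mpps)))    # -> -11, i.e. SOAK roughly 11 % below NDR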
diff --git a/resources/tools/presentation/generator_tables.py b/resources/tools/presentation/generator_tables.py
index 0646db3ab8..1a15605618 100644
--- a/resources/tools/presentation/generator_tables.py
+++ b/resources/tools/presentation/generator_tables.py
@@ -525,6 +525,115 @@ def table_nics_comparison(table, input_data):
    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
+def table_soak_vs_ndr(table, input_data):
+    """Generate the table(s) with algorithm: table_soak_vs_ndr
+    specified in the specification file.
+
+    :param table: Table to generate.
+    :param input_data: Data to process.
+    :type table: pandas.Series
+    :type input_data: InputData
+    """
+
+    logging.info("  Generating the table {0} ...".
+                 format(table.get("title", "")))
+
+    # Transform the data
+    logging.info("    Creating the data set for the {0} '{1}'.".
+                 format(table.get("type", ""), table.get("title", "")))
+    data = input_data.filter_data(table, continue_on_error=True)
+
+    # Prepare the header of the table
+    try:
+        header = [
+            "Test case",
+            "{0} Throughput [Mpps]".format(table["reference"]["title"]),
+            "{0} Stdev [Mpps]".format(table["reference"]["title"]),
+            "{0} Throughput [Mpps]".format(table["compare"]["title"]),
+            "{0} Stdev [Mpps]".format(table["compare"]["title"]),
+            "Delta [%]"]
+        header_str = ",".join(header) + "\n"
+    except (AttributeError, KeyError) as err:
+        logging.error("The model is invalid, missing parameter: {0}".
+                      format(err))
+        return
+
+    # Create a list of available SOAK test results:
+    tbl_dict = dict()
+    for job, builds in table["compare"]["data"].items():
+        for build in builds:
+            for tst_name, tst_data in data[job][str(build)].iteritems():
+                if tst_data["type"] == "SOAK":
+                    tst_name_mod = tst_name.replace("-soak", "")
+                    if tbl_dict.get(tst_name_mod, None) is None:
+                        tbl_dict[tst_name_mod] = {
+                            "name": tst_name_mod,
+                            "ref-data": list(),
+                            "cmp-data": list()
+                        }
+                    try:
+                        tbl_dict[tst_name_mod]["cmp-data"].append(
+                            tst_data["throughput"]["LOWER"])
+                    except (KeyError, TypeError):
+                        pass
+    tests_lst = tbl_dict.keys()
+
+    # Add corresponding NDR test results:
+    for job, builds in table["reference"]["data"].items():
+        for build in builds:
+            for tst_name, tst_data in data[job][str(build)].iteritems():
+                tst_name_mod = tst_name.replace("-ndrpdr", "").\
+                    replace("-mrr", "")
+                if tst_name_mod in tests_lst:
+                    try:
+                        if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
+                            if table["include-tests"] == "MRR":
+                                result = tst_data["result"]["receive-rate"].avg
+                            elif table["include-tests"] == "PDR":
+                                result = tst_data["throughput"]["PDR"]["LOWER"]
+                            elif table["include-tests"] == "NDR":
+                                result = tst_data["throughput"]["NDR"]["LOWER"]
+                            else:
+                                result = None
+                            if result is not None:
+                                tbl_dict[tst_name_mod]["ref-data"].append(
+                                    result)
+                    except (KeyError, TypeError):
+                        continue
+
+    tbl_lst = list()
+    for tst_name in tbl_dict.keys():
+        item = [tbl_dict[tst_name]["name"], ]
+        data_t = tbl_dict[tst_name]["ref-data"]
+        if data_t:
+            item.append(round(mean(data_t) / 1000000, 2))
+            item.append(round(stdev(data_t) / 1000000, 2))
+        else:
+            item.extend([None, None])
+        data_t = tbl_dict[tst_name]["cmp-data"]
+        if data_t:
+            item.append(round(mean(data_t) / 1000000, 2))
+            item.append(round(stdev(data_t) / 1000000, 2))
+        else:
+            item.extend([None, None])
+        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
+            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
+        if len(item) == len(header):
+            tbl_lst.append(item)
+
+    # Sort the table according to the relative change
+    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
+
+    # Generate csv tables:
+    csv_file = "{0}.csv".format(table["output-file"])
+    with open(csv_file, "w") as file_handler:
+        file_handler.write(header_str)
+        for test in tbl_lst:
+            file_handler.write(",".join([str(item) for item in test]) + "\n")
+
+    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
+
+
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
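
The function above pairs SOAK and NDRPDR results by the test name with the "-soak"/"-ndrpdr" suffix stripped, then aggregates the per-build samples into the Throughput/Stdev columns. A standalone sketch of that per-row aggregation, with hypothetical sample values and test name; it uses the standard statistics module in place of PAL's own mean/stdev helpers:

from statistics import mean, stdev

ref_samples = [12.4e6, 12.5e6, 12.6e6]   # NDR lower bounds per build [pps]
cmp_samples = [11.0e6, 11.1e6]           # SOAK critical rates per build [pps]

row = ["2n1l-25ge2p1xxv710-ethip4-ip4base"]   # hypothetical test name
for samples in (ref_samples, cmp_samples):
    if samples:
        # scale to Mpps and round, as the generated CSV does
        row.append(round(mean(samples) / 1e6, 2))
        row.append(round(stdev(samples) / 1e6, 2))
    else:
        row.extend([None, None])
print(row)   # ['2n1l-25ge2p1xxv710-ethip4-ip4base', 12.5, 0.1, 11.05, 0.07]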
diff --git a/resources/tools/presentation/pal.py b/resources/tools/presentation/pal.py
index dfed6b3e62..238145b70f 100644
--- a/resources/tools/presentation/pal.py
+++ b/resources/tools/presentation/pal.py
@@ -94,48 +94,48 @@ def main():
        return 1
    ret_code = 1
-    try:
-        env = Environment(spec.environment, args.force)
-        env.set_environment()
-
-        prepare_static_content(spec)
-
-        data = InputData(spec)
-        data.download_and_parse_data(repeat=1)
-
-        generate_tables(spec, data)
-        generate_plots(spec, data)
-        generate_files(spec, data)
-
-        if spec.output["output"] == "report":
-            generate_report(args.release, spec, args.week)
-            logging.info("Successfully finished.")
-        elif spec.output["output"] == "CPTA":
-            sys.stdout.write(generate_cpta(spec, data))
-            try:
-                alert = Alerting(spec)
-                alert.generate_alerts()
-            except AlertingError as err:
-                logging.warning(repr(err))
-            logging.info("Successfully finished.")
-        ret_code = 0
-
-    except AlertingError as err:
-        logging.critical("Finished with an alerting error.")
-        logging.critical(repr(err))
-    except PresentationError as err:
-        logging.critical("Finished with an PAL error.")
-        logging.critical(repr(err))
-    except (KeyError, ValueError) as err:
-        logging.critical("Finished with an error.")
-        logging.critical(repr(err))
-    except Exception as err:
-        logging.critical("Finished with an unexpected error.")
-        logging.critical(repr(err))
-    finally:
-        if spec is not None:
-            clean_environment(spec.environment)
-    return ret_code
+    # try:
+    env = Environment(spec.environment, args.force)
+    env.set_environment()
+
+    prepare_static_content(spec)
+
+    data = InputData(spec)
+    data.download_and_parse_data(repeat=1)
+
+    generate_tables(spec, data)
+    generate_plots(spec, data)
+    generate_files(spec, data)
+
+    if spec.output["output"] == "report":
+        generate_report(args.release, spec, args.week)
+        logging.info("Successfully finished.")
+    elif spec.output["output"] == "CPTA":
+        sys.stdout.write(generate_cpta(spec, data))
+        try:
+            alert = Alerting(spec)
+            alert.generate_alerts()
+        except AlertingError as err:
+            logging.warning(repr(err))
+        logging.info("Successfully finished.")
+    ret_code = 0
+
+    # except AlertingError as err:
+    #     logging.critical("Finished with an alerting error.")
+    #     logging.critical(repr(err))
+    # except PresentationError as err:
+    #     logging.critical("Finished with an PAL error.")
+    #     logging.critical(repr(err))
+    # except (KeyError, ValueError) as err:
+    #     logging.critical("Finished with an error.")
+    #     logging.critical(repr(err))
+    # except Exception as err:
+    #     logging.critical("Finished with an unexpected error.")
+    #     logging.critical(repr(err))
+    # finally:
+    #     if spec is not None:
+    #         clean_environment(spec.environment)
+    # return ret_code
if __name__ == '__main__':
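
The pal.py hunk flattens main(): the pipeline now runs without the surrounding try/except/finally, and the handlers plus the final cleanup are kept only as comments. For reference, a self-contained sketch of the try/finally shape that wrapper provided (cleanup always runs, exit code stays non-zero unless the pipeline completes); run_pipeline() and clean_environment() below are stand-ins, not PAL functions:

import logging
import sys


def run_pipeline():
    """Stand-in for the env/parse/generate sequence in pal.py."""
    logging.info("pipeline ran")


def clean_environment():
    """Stand-in for the environment cleanup in pal.py."""
    logging.info("environment cleaned")


def main():
    ret_code = 1
    try:
        run_pipeline()
        logging.info("Successfully finished.")
        ret_code = 0
    finally:
        clean_environment()
    return ret_code


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    sys.exit(main())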
diff --git a/resources/tools/presentation/specification.yaml b/resources/tools/presentation/specification.yaml
index 9abdf08a1f..0875d9a045 100644
--- a/resources/tools/presentation/specification.yaml
+++ b/resources/tools/presentation/specification.yaml
@@ -587,6 +587,26 @@
- "vpp-performance-changes-2n-skx-ref"
- "vpp-performance-changes-2n-skx-cmp"
+ vpp-soak-vs-ndr-2n-skx-ref:
+ csit-vpp-perf-verify-1904-2n-skx:
+ - 12 # NDRPDR sel
+ - 14 # NDRPDR sel
+ - 16 # NDRPDR sel
+ - 17 # NDRPDR sel
+ - 18 # NDRPDR sel
+ - 21 # NDRPDR sel
+ - 27 # NDRPDR sel
+ - 30 # NDRPDR sel
+ - 31 # NDRPDR sel
+ - 32 # NDRPDR sel
+ vpp-soak-vs-ndr-2n-skx-cmp:
+ csit-vpp-perf-verify-1904-2n-skx:
+ - 70 # SOAK
+ - 71 # SOAK
+ vpp-soak-vs-ndr-2n-skx:
+ - "vpp-soak-vs-ndr-2n-skx-ref"
+ - "vpp-soak-vs-ndr-2n-skx-cmp"
+
vpp-performance-changes-3n-hsw-mrr-h1:
csit-vpp-perf-verify-1810-3n-hsw:
- 9 # MRR sel
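
The two data sets added above are what the table entry further below refers to as its reference and compare inputs. Illustration only, not from this commit: once the specification is read, such a data-set reference is expected to resolve to a job-to-builds mapping of the shape iterated by table_soak_vs_ndr(); the dictionaries here are written out by hand from the build numbers listed above:

reference_data = {
    "csit-vpp-perf-verify-1904-2n-skx": [12, 14, 16, 17, 18, 21, 27, 30, 31, 32],   # NDRPDR runs
}
compare_data = {
    "csit-vpp-perf-verify-1904-2n-skx": [70, 71],   # SOAK runs
}

for job, builds in compare_data.items():
    for build in builds:
        print(job, build)   # keys used to look up results in the filtered data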
@@ -1626,6 +1646,54 @@
###                               T A B L E S                                ###
################################################################################
+# VPP SOAK vs NDR
+- type: "table"
+  title: "VPP SOAK vs NDR 2n-skx"
+  algorithm: "table_soak_vs_ndr"
+  output-file: "{DIR[STATIC,VPP]}/soak-vs-ndr-2n-skx"
+  reference:
+    title: "NDR"
+    data: "vpp-soak-vs-ndr-2n-skx-ref"
+  compare:
+    title: "SOAK"
+    data: "vpp-soak-vs-ndr-2n-skx-cmp"
+  data: "vpp-soak-vs-ndr-2n-skx"
+  include-tests: "NDR"  # "PDR" | "NDR" | "MRR"
+  filter: "('NDRPDR' or 'SOAK')"
+  parameters:
+  - "throughput"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
# VPP Compare NICs 3n-hsw 1t1c ndr
- type: "table"
title: "VPP Compare NICs 3n-hsw 1t1c ndr"