Diffstat (limited to 'resources/tools/presentation')
-rw-r--r--   resources/tools/presentation/generator_files.py    | 111
-rw-r--r--   resources/tools/presentation/generator_tables.py   |  60
-rw-r--r--   resources/tools/presentation/input_data_parser.py  |  31
-rw-r--r--   resources/tools/presentation/pal.py                |  66
-rw-r--r--   resources/tools/presentation/specification.yaml    |  34
5 files changed, 228 insertions, 74 deletions
diff --git a/resources/tools/presentation/generator_files.py b/resources/tools/presentation/generator_files.py
index dd70c70ce7..2ee00d1112 100644
--- a/resources/tools/presentation/generator_files.py
+++ b/resources/tools/presentation/generator_files.py
@@ -19,6 +19,16 @@ import logging
 
 from utils import get_files, get_rst_title_char
 
+RST_INCLUDE_TABLE = ("\n.. only:: html\n\n"
+                     "    .. csv-table::\n"
+                     "        :header-rows: 1\n"
+                     "        :widths: auto\n"
+                     "        :align: center\n"
+                     "        :file: {file_html}\n"
+                     "\n.. only:: latex\n\n"
+                     "\n  .. raw:: latex\n\n"
+                     "      \csvautolongtable{{{file_latex}}}\n\n")
+
 
 def generate_files(spec, data):
     """Generate all files specified in the specification file.
@@ -39,6 +49,23 @@ def generate_files(spec, data):
     logging.info("Done.")
 
 
+def _tests_in_suite(suite_name, tests):
+    """Check if the suite includes tests.
+
+    :param suite_name: Name of the suite to be checked.
+    :param tests: Set of tests
+    :type suite_name: str
+    :type tests: pandas.Series
+    :returns: True if the suite includes tests.
+    :rtype: bool
+    """
+
+    for key in tests.keys():
+        if suite_name == tests[key]["parent"]:
+            return True
+    return False
+
+
 def file_test_results(file_spec, input_data):
     """Generate the file(s) with algorithm: file_test_results specified in
     the specification file.
@@ -49,36 +76,10 @@ def file_test_results(file_spec, input_data):
     :type input_data: InputData
     """
 
-    def tests_in_suite(suite_name, tests):
-        """Check if the suite includes tests.
-
-        :param suite_name: Name of the suite to be checked.
-        :param tests: Set of tests
-        :type suite_name: str
-        :type tests: pandas.Series
-        :returns: True if the suite includes tests.
-        :rtype: bool
-        """
-
-        for key in tests.keys():
-            if suite_name == tests[key]["parent"]:
-                return True
-        return False
-
     file_name = "{0}{1}".format(file_spec["output-file"],
                                 file_spec["output-file-ext"])
     rst_header = file_spec["file-header"]
 
-    rst_include_table = ("\n.. only:: html\n\n"
-                         "    .. csv-table::\n"
-                         "        :header-rows: 1\n"
-                         "        :widths: auto\n"
-                         "        :align: center\n"
-                         "        :file: {file_html}\n"
-                         "\n.. only:: latex\n\n"
-                         "\n  .. raw:: latex\n\n"
-                         "      \csvautolongtable{{{file_latex}}}\n\n")
-
     logging.info("  Generating the file {0} ...".format(file_name))
 
     table_lst = get_files(file_spec["dir-tables"], ".csv", full_path=True)
@@ -105,11 +106,65 @@ def file_test_results(file_spec, input_data):
                 len(suite_name)))
             file_handler.write("\n{0}\n".format(
                 suite["doc"].replace('|br|', '\n\n -')))
-            if tests_in_suite(suite_name, input_data.tests(job, build)):
+            if _tests_in_suite(suite_name, input_data.tests(job, build)):
+                for tbl_file in table_lst:
+                    if suite_name in tbl_file:
+                        file_handler.write(
+                            RST_INCLUDE_TABLE.format(
+                                file_latex=tbl_file,
+                                file_html=tbl_file.split("/")[-1]))
+
+    logging.info("  Done.")
+
+
+def file_merged_test_results(file_spec, input_data):
+    """Generate the file(s) with algorithm: file_merged_test_results specified
+    in the specification file.
+
+    :param file_spec: File to generate.
+    :param input_data: Data to process.
+    :type file_spec: pandas.Series
+    :type input_data: InputData
+    """
+
+    file_name = "{0}{1}".format(file_spec["output-file"],
+                                file_spec["output-file-ext"])
+    rst_header = file_spec["file-header"]
+
+    logging.info("  Generating the file {0} ...".format(file_name))
+
+    table_lst = get_files(file_spec["dir-tables"], ".csv", full_path=True)
+    if len(table_lst) == 0:
+        logging.error("  No tables to include in '{0}'. Skipping.".
+                      format(file_spec["dir-tables"]))
+        return None
+
+    logging.info("    Writing file '{0}'".format(file_name))
+
+    tests = input_data.filter_data(file_spec)
+    tests = input_data.merge_data(tests)
+
+    suites = input_data.filter_data(file_spec, data_set="suites")
+    suites = input_data.merge_data(suites)
+    suites.sort_index(inplace=True)
+
+    with open(file_name, "w") as file_handler:
+        file_handler.write(rst_header)
+        for suite_longname, suite in suites.iteritems():
+            if len(suite_longname.split(".")) <= file_spec["data-start-level"]:
+                continue
+            suite_name = suite["name"]
+            file_handler.write("\n{0}\n{1}\n".format(
+                suite_name, get_rst_title_char(
+                    suite["level"] - file_spec["data-start-level"] - 1) *
+                len(suite_name)))
+            file_handler.write("\n{0}\n".format(
+                suite["doc"].replace('|br|', '\n\n -')))
+            if _tests_in_suite(suite_name, tests):
                 for tbl_file in table_lst:
                     if suite_name in tbl_file:
                         file_handler.write(
-                            rst_include_table.format(
+                            RST_INCLUDE_TABLE.format(
                                 file_latex=tbl_file,
                                 file_html=tbl_file.split("/")[-1]))
 
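
Note: for illustration only, a small self-contained sketch of how the new module-level
RST_INCLUDE_TABLE template is rendered for one table file; the table path below is an
assumption made up for the example, not a value from this change.

    # Same template as the one added above. The backslash is escaped here for a clean
    # string literal; the module writes \csvautolongtable with a single backslash.
    RST_INCLUDE_TABLE = ("\n.. only:: html\n\n"
                         "    .. csv-table::\n"
                         "        :header-rows: 1\n"
                         "        :widths: auto\n"
                         "        :align: center\n"
                         "        :file: {file_html}\n"
                         "\n.. only:: latex\n\n"
                         "\n  .. raw:: latex\n\n"
                         "      \\csvautolongtable{{{file_latex}}}\n\n")

    # Hypothetical CSV table produced earlier by the table generators.
    tbl_file = "/tmp/tables/l2sw-performance-results.csv"

    # LaTeX gets the full path, HTML only the file name, as in the functions above.
    print(RST_INCLUDE_TABLE.format(file_latex=tbl_file,
                                   file_html=tbl_file.split("/")[-1]))
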
+ format(file_spec["dir-tables"])) + return None + + logging.info(" Writing file '{0}'".format(file_name)) + + tests = input_data.filter_data(file_spec) + tests = input_data.merge_data(tests) + + suites = input_data.filter_data(file_spec, data_set="suites") + suites = input_data.merge_data(suites) + suites.sort_index(inplace=True) + + with open(file_name, "w") as file_handler: + file_handler.write(rst_header) + for suite_longname, suite in suites.iteritems(): + if len(suite_longname.split(".")) <= file_spec["data-start-level"]: + continue + suite_name = suite["name"] + file_handler.write("\n{0}\n{1}\n".format( + suite_name, get_rst_title_char( + suite["level"] - file_spec["data-start-level"] - 1) * + len(suite_name))) + file_handler.write("\n{0}\n".format( + suite["doc"].replace('|br|', '\n\n -'))) + if _tests_in_suite(suite_name, tests): for tbl_file in table_lst: if suite_name in tbl_file: file_handler.write( - rst_include_table.format( + RST_INCLUDE_TABLE.format( file_latex=tbl_file, file_html=tbl_file.split("/")[-1])) diff --git a/resources/tools/presentation/generator_tables.py b/resources/tools/presentation/generator_tables.py index a07a989bce..920c30a4a8 100644 --- a/resources/tools/presentation/generator_tables.py +++ b/resources/tools/presentation/generator_tables.py @@ -108,6 +108,66 @@ def table_details(table, input_data): logging.info(" Done.") +def table_merged_details(table, input_data): + """Generate the table(s) with algorithm: table_merged_details + specified in the specification file. + + :param table: Table to generate. + :param input_data: Data to process. + :type table: pandas.Series + :type input_data: InputData + """ + + logging.info(" Generating the table {0} ...". + format(table.get("title", ""))) + + # Transform the data + data = input_data.filter_data(table) + data = input_data.merge_data(data) + + suites = input_data.filter_data(table, data_set="suites") + suites = input_data.merge_data(suites) + + # Prepare the header of the tables + header = list() + for column in table["columns"]: + header.append('"{0}"'.format(str(column["title"]).replace('"', '""'))) + + for _, suite in suites.iteritems(): + # Generate data + suite_name = suite["name"] + table_lst = list() + for test in data.keys(): + if data[test]["parent"] in suite_name: + row_lst = list() + for column in table["columns"]: + try: + col_data = str(data[test][column["data"]. + split(" ")[1]]).replace('"', '""') + if column["data"].split(" ")[1] in ("vat-history", + "show-run"): + col_data = replace(col_data, " |br| ", "", + maxreplace=1) + col_data = " |prein| {0} |preout| ".\ + format(col_data[:-5]) + row_lst.append('"{0}"'.format(col_data)) + except KeyError: + row_lst.append("No data") + table_lst.append(row_lst) + + # Write the data to file + if table_lst: + file_name = "{0}_{1}{2}".format(table["output-file"], suite_name, + table["output-file-ext"]) + logging.info(" Writing file: '{}'".format(file_name)) + with open(file_name, "w") as file_handler: + file_handler.write(",".join(header) + "\n") + for item in table_lst: + file_handler.write(",".join(item) + "\n") + + logging.info(" Done.") + + def table_performance_improvements(table, input_data): """Generate the table(s) with algorithm: table_performance_improvements specified in the specification file. 
diff --git a/resources/tools/presentation/input_data_parser.py b/resources/tools/presentation/input_data_parser.py
index a04f351cab..8c44dc45fa 100644
--- a/resources/tools/presentation/input_data_parser.py
+++ b/resources/tools/presentation/input_data_parser.py
@@ -858,3 +858,34 @@ class InputData(object):
             logging.error("   The filter '{0}' is not correct. Check if all "
                           "tags are enclosed by apostrophes.".format(cond))
             return None
+
+    @staticmethod
+    def merge_data(data):
+        """Merge data from more jobs and builds to a simple data structure.
+
+        The output data structure is:
+
+        - test (suite) 1 ID:
+          - param 1
+          - param 2
+          ...
+          - param n
+        ...
+        - test (suite) n ID:
+        ...
+
+        :param data: Data to merge.
+        :type data: pandas.Series
+        :returns: Merged data.
+        :rtype: pandas.Series
+        """
+
+        logging.info("    Merging data ...")
+
+        merged_data = pd.Series()
+        for _, builds in data.iteritems():
+            for _, item in builds.iteritems():
+                for ID, item_data in item.iteritems():
+                    merged_data[ID] = item_data
+
+        return merged_data
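
Note: for illustration only, a standalone sketch of what the new InputData.merge_data()
does with the job -> build -> test nesting. The job, build and test IDs are made up,
and the local merge_data() below simply mirrors the added method; it is written against
the older pandas API the module itself uses (Series.iteritems).

    import pandas as pd

    def merge_data(data):
        # Flatten {job: {build: {test_id: test_data}}} into {test_id: test_data}.
        merged_data = pd.Series()
        for _, builds in data.iteritems():
            for _, item in builds.iteritems():
                for ID, item_data in item.iteritems():
                    merged_data[ID] = item_data
        return merged_data

    # Hypothetical input: one job with two builds, one test per build.
    data = pd.Series({
        "csit-vpp-perf-1710-all": pd.Series({
            "20": pd.Series({"tests.vpp.perf.ip4.test-1": {"status": "PASS"}}),
            "23": pd.Series({"tests.vpp.perf.ip4.test-2": {"status": "PASS"}})})})

    # Result: a flat Series keyed by test ID, ready for the *_merged_* algorithms.
    print(merge_data(data))
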
diff --git a/resources/tools/presentation/pal.py b/resources/tools/presentation/pal.py
index fdd3e8a263..86b709c5be 100644
--- a/resources/tools/presentation/pal.py
+++ b/resources/tools/presentation/pal.py
@@ -83,39 +83,39 @@ def main():
         logging.critical("Finished with error.")
         sys.exit(1)
 
-    # try:
-    env = Environment(spec.environment, args.force)
-    env.set_environment()
-
-    if spec.is_debug:
-        if spec.debug["input-format"] == "zip":
-            unzip_files(spec)
-        else:
-            download_data_files(spec)
-
-    prepare_static_content(spec)
-
-    data = InputData(spec)
-    data.read_data()
-
-    generate_tables(spec, data)
-    generate_plots(spec, data)
-    generate_files(spec, data)
-    generate_report(args.release, spec)
-
-    logging.info("Successfully finished.")
-
-    # except (KeyError, ValueError, PresentationError) as err:
-    #     logging.info("Finished with an error.")
-    #     logging.critical(str(err))
-    # except Exception as err:
-    #     logging.info("Finished with an error.")
-    #     logging.critical(str(err))
-    #
-    # finally:
-    #     if spec is not None and not spec.is_debug:
-    #         clean_environment(spec.environment)
-    #         sys.exit(1)
+    try:
+        env = Environment(spec.environment, args.force)
+        env.set_environment()
+
+        if spec.is_debug:
+            if spec.debug["input-format"] == "zip":
+                unzip_files(spec)
+            else:
+                download_data_files(spec)
+
+        prepare_static_content(spec)
+
+        data = InputData(spec)
+        data.read_data()
+
+        generate_tables(spec, data)
+        generate_plots(spec, data)
+        generate_files(spec, data)
+        generate_report(args.release, spec)
+
+        logging.info("Successfully finished.")
+
+    except (KeyError, ValueError, PresentationError) as err:
+        logging.info("Finished with an error.")
+        logging.critical(str(err))
+    except Exception as err:
+        logging.info("Finished with an error.")
+        logging.critical(str(err))
+
+    finally:
+        if spec is not None and not spec.is_debug:
+            clean_environment(spec.environment)
+            sys.exit(1)
 
 
 if __name__ == '__main__':
diff --git a/resources/tools/presentation/specification.yaml b/resources/tools/presentation/specification.yaml
index 82a76f9eab..cc8de35825 100644
--- a/resources/tools/presentation/specification.yaml
+++ b/resources/tools/presentation/specification.yaml
@@ -153,7 +153,6 @@
       - 18
       - 19
       - 20
-      - 22
       csit-dpdk-perf-1710-all:
       - 2
       - 3
@@ -261,7 +260,7 @@
 -
   type: "table"
   title: "Detailed Test Results - VPP Performance Results"
-  algorithm: "table_details"
+  algorithm: "table_merged_details"
   output-file-ext: ".csv"
   output-file: "{DIR[DTR,PERF,VPP]}/vpp_performance_results"
   columns:
@@ -277,7 +276,8 @@
   rows: "generated"
   data:
     csit-vpp-perf-1710-all:
-    - 22
+    - 20
+#    - 23
   filter: "all"
   parameters:
   - "name"
@@ -288,7 +288,7 @@
 -
   type: "table"
   title: "Test configuration - VPP Performance Test Configs"
-  algorithm: "table_details"
+  algorithm: "table_merged_details"
   output-file-ext: ".csv"
   output-file: "{DIR[DTC,PERF,VPP]}/vpp_test_configuration"
   columns:
@@ -301,7 +301,8 @@
   rows: "generated"
   data:
     csit-vpp-perf-1710-all:
-    - 22
+    - 20
+#    - 23
   filter: "all"
   parameters:
   - "parent"
@@ -311,7 +312,7 @@
 -
   type: "table"
   title: "Test Operational Data - VPP Performance Operational Data"
-  algorithm: "table_details"
+  algorithm: "table_merged_details"
   output-file-ext: ".csv"
   output-file: "{DIR[DTO,PERF,VPP]}/vpp_test_operational"
   columns:
@@ -324,7 +325,8 @@
   rows: "generated"
   data:
     csit-vpp-perf-1710-all:
-    - 22
+    - 20
+#    - 23
   filter: "all"
   parameters:
   - "parent"
@@ -496,57 +498,63 @@
 -
   type: "file"
   title: "VPP Performance Results"
-  algorithm: "file_test_results"
+  algorithm: "file_merged_test_results"
   output-file-ext: ".rst"
   output-file: "{DIR[DTR,PERF,VPP]}/vpp_performance_results"
   file-header: "\n.. |br| raw:: html\n\n    <br />\n\n\n.. |prein| raw:: html\n\n    <pre>\n\n\n.. |preout| raw:: html\n\n    </pre>\n\n"
   dir-tables: "{DIR[DTR,PERF,VPP]}"
   data:
     csit-vpp-perf-1710-all:
-    - 22
+    - 20
+#    - 23
   filter: "all"
   parameters:
   - "name"
   - "doc"
   - "level"
+  - "parent"
   data-start-level: 2  # 0, 1, 2, ...
   chapters-start-level: 2  # 0, 1, 2, ...
 
 -
   type: "file"
   title: "VPP Performance Configuration"
-  algorithm: "file_test_results"
+  algorithm: "file_merged_test_results"
   output-file-ext: ".rst"
   output-file: "{DIR[DTC,PERF,VPP]}/vpp_performance_configuration"
   file-header: "\n.. |br| raw:: html\n\n    <br />\n\n\n.. |prein| raw:: html\n\n    <pre>\n\n\n.. |preout| raw:: html\n\n    </pre>\n\n"
   dir-tables: "{DIR[DTC,PERF,VPP]}"
   data:
     csit-vpp-perf-1710-all:
-    - 22
+    - 20
+#    - 23
   filter: "all"
   parameters:
   - "name"
   - "doc"
   - "level"
+  - "parent"
   data-start-level: 2  # 0, 1, 2, ...
   chapters-start-level: 2  # 0, 1, 2, ...
 
 -
   type: "file"
   title: "VPP Performance Operational Data"
-  algorithm: "file_test_results"
+  algorithm: "file_merged_test_results"
   output-file-ext: ".rst"
   output-file: "{DIR[DTO,PERF,VPP]}/vpp_performance_operational_data"
   file-header: "\n.. |br| raw:: html\n\n    <br />\n\n\n.. |prein| raw:: html\n\n    <pre>\n\n\n.. |preout| raw:: html\n\n    </pre>\n\n"
   dir-tables: "{DIR[DTO,PERF,VPP]}"
   data:
     csit-vpp-perf-1710-all:
-    - 22
+    - 20
+#    - 23
   filter: "all"
   parameters:
   - "name"
   - "doc"
   - "level"
+  - "parent"
   data-start-level: 2  # 0, 1, 2, ...
   chapters-start-level: 2  # 0, 1, 2, ...
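
Note: for orientation only, the "data" stanzas edited above parse with PyYAML's
standard safe_load into a plain mapping of job name to build list; with build 23
still commented out, only build 20 feeds the merged table and file algorithms.
A minimal check:

    import yaml

    snippet = "\n".join([
        "data:",
        "  csit-vpp-perf-1710-all:",
        "  - 20",
        "# - 23",
    ])
    print(yaml.safe_load(snippet))   # {'data': {'csit-vpp-perf-1710-all': [20]}}
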