path: root/resources/tools/presentation/generator_files.py
# Copyright (c) 2018 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Algorithms to generate files.
"""


import logging

from utils import get_files, get_rst_title_char

RST_INCLUDE_TABLE = ("\n.. only:: html\n\n"
                     "    .. csv-table::\n"
                     "        :header-rows: 1\n"
                     "        :widths: auto\n"
                     "        :align: center\n"
                     "        :file: {file_html}\n"
                     "\n.. only:: latex\n\n"
                     "\n  .. raw:: latex\n\n"
                     "      \csvautolongtable{{{file_latex}}}\n\n")


def generate_files(spec, data):
    """Generate all files specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the files ...")
    for file_spec in spec.files:
        try:
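            # Call the generator function named by the "algorithm" key, e.g.
            # file_test_results() below; eval() resolves the name at run time.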
            eval(file_spec["algorithm"])(file_spec, data)
        except NameError as err:
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=file_spec["algorithm"], err=repr(err)))
    logging.info("Done.")


def _tests_in_suite(suite_name, tests):
    """Check if the suite includes tests.

    :param suite_name: Name of the suite to be checked.
    :param tests: Tests to search through.
    :type suite_name: str
    :type tests: pandas.Series
    :returns: True if the suite includes at least one test, False otherwise.
    :rtype: bool
    """

    for key in tests.keys():
        if suite_name == tests[key]["parent"]:
            return True
    return False
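
# Illustrative example with hypothetical data: `tests` is a pandas.Series of
# test dictionaries, each carrying the name of its parent suite.
#
#     import pandas as pd
#     tests = pd.Series({
#         "tc01-64b-1t1c-eth-l2patch-ndrpdr": {"parent": "l2patch"},
#         "tc02-64b-1t1c-eth-l2xcbase-ndrpdr": {"parent": "l2xcbase"},
#     })
#     _tests_in_suite("l2patch", tests)    # True
#     _tests_in_suite("ip4base", tests)    # False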


def file_test_results(file_spec, input_data):
    """Generate the file(s) with algorithms
    - file_test_results
    specified in the specification file.

    :param file_spec: File to generate.
    :param input_data: Data to process.
    :type file_spec: pandas.Series
    :type input_data: InputData
    """

    file_name = "{0}{1}".format(file_spec["output-file"],
                                file_spec["output-file-ext"])
    rst_header = file_spec["file-header"]

    logging.info("  Generating the file {0} ...".format(file_name))

    table_lst = get_files(file_spec["dir-tables"], ".csv", full_path=True)
    if len(table_lst) == 0:
        logging.error("  No tables to include in '{0}'. Skipping.".
                      format(file_spec["dir-tables"]))
        return None

    logging.info("    Writing file '{0}'".format(file_name))

    logging.info("    Creating the 'tests' data set for the {0} '{1}'.".
                 format(file_spec.get("type", ""), file_spec.get("title", "")))
    tests = input_data.filter_data(file_spec)
    tests = input_data.merge_data(tests)

    logging.info("    Creating the 'suites' data set for the {0} '{1}'.".
                 format(file_spec.get("type", ""), file_spec.get("title", "")))
    file_spec["filter"] = "all"
    suites = input_data.filter_data(file_spec, data_set="suites")
    suites = input_data.merge_data(suites)
    suites.sort_index(inplace=True)

    with open(file_name, "w") as file_handler:
        file_handler.write(rst_header)
        for suite_longname, suite in suites.iteritems():
            # TODO: Remove when NDRPDRDISC tests are not used:
            if "ndrchk" in suite_longname or "pdrchk" in suite_longname:
                continue
            if len(suite_longname.split(".")) <= file_spec["data-start-level"]:
                continue
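            # Write the suite title; the underline character is derived from
            # the suite's depth relative to the configured data-start-level.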
            file_handler.write("\n{0}\n{1}\n".format(
                suite["name"], get_rst_title_char(
                    suite["level"] - file_spec["data-start-level"] - 1) *
                            len(suite["name"])))
            file_handler.write("\n{0}\n".format(
                suite["doc"].replace('|br|', '\n\n -')))
            if _tests_in_suite(suite["name"], tests):
                for tbl_file in table_lst:
                    if suite["name"] in tbl_file:
                        file_handler.write(
                            RST_INCLUDE_TABLE.format(
                                file_latex=tbl_file,
                                file_html=tbl_file.split("/")[-1]))
    logging.info("  Done.")