author     Vratko Polak <vrpolak@cisco.com>  2021-12-15 17:14:36 +0100
committer  Vratko Polak <vrpolak@cisco.com>  2021-12-15 17:14:36 +0100
commit     01d8f262afc567c3d49a23c3cb2cdeaced8a6887 (patch)
tree       0449c972d8201be16d648dd749e0a7d116aa8b71 /resources/libraries/python/model
parent     cca05a55f3434d8a031b98f4a496adb8df20c122 (diff)
UTI: Export results

+ Model version 1.0.0.
- Only some result types are exported.
+ MRR, NDRPDR and SOAK.
- Other result types to be added later.
+ In contrast, all test types are detected.
+ Convert custom classes to JSON-serializable equivalents.
+ Sort dict keys before converting to JSON.
+ Override the order for some known keys.
+ Export sets as sorted arrays.
+ Convert to info content from serialized raw content.
+ Also export outputs for suite setups and teardowns.
+ Info files for setup/teardown exist only temporarily.
+ The data is merged into suite.info.json file.
+ This simplifies presentation of total suite duration.
+ Define model via JSON schema:
- Just test case, suite setup/teardown/suite to be added later.
- Just info, raw to be added later.
+ Proper descriptions.
+ Json is generated from yaml.
+ This is a convenience for maintainers.
+ The officially used schema is the .json one.
+ TODOs written into a separate .txt file.
+ Validate exported instance against the schema.
+ Include format checking.
+ Update CSIT requirements for validation dependencies.
+ This needs python-dateutil==2.8.2, only a patch bump.
+ Compute bandwidth also for soak tests.
+ This unifies with NDRPDR to simplify schema definition.
- PAL may need an update for parsing soak test message.
+ Include SSH log items, raw output only.
+ Generate all outputs in a single filesystem tree.
+ Move raw outputs into test_output_raw.tar.xz.
+ Rename existing tar with suites to generated_robot_files.tar.xz.

Change-Id: I69ff7b330ed1a14dc435fd0ef008e753c0d7f78c
Signed-off-by: Vratko Polak <vrpolak@cisco.com>
Diffstat (limited to 'resources/libraries/python/model')
-rw-r--r--  resources/libraries/python/model/ExportLog.py      148
-rw-r--r--  resources/libraries/python/model/ExportResult.py   179
-rw-r--r--  resources/libraries/python/model/export_json.py    238
-rw-r--r--  resources/libraries/python/model/mem2raw.py         145
-rw-r--r--  resources/libraries/python/model/raw2info.py        294
-rw-r--r--  resources/libraries/python/model/util.py             69
-rw-r--r--  resources/libraries/python/model/validate.py         73
7 files changed, 1146 insertions(+), 0 deletions(-)
diff --git a/resources/libraries/python/model/ExportLog.py b/resources/libraries/python/model/ExportLog.py
new file mode 100644
index 0000000000..e02eef63c5
--- /dev/null
+++ b/resources/libraries/python/model/ExportLog.py
@@ -0,0 +1,148 @@
+# Copyright (c) 2021 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module with keywords that publish metric and other log events.
+"""
+
+import datetime
+
+from resources.libraries.python.model.util import get_export_data
+
+
+def export_ssh_command(host, port, command):
+ """Add a log item about SSH command execution starting.
+
+ The log item is present only in raw output.
+ Result arrives in a separate log item.
+ Log level is always DEBUG.
+
+ The command is stored as "data" (not "msg") as in some cases
+ the command can be too long to act as a message.
+
+ The host is added to the info set of hosts.
+
+ :param host: Node "host" attribute, usually its IPv4 address.
+ :param port: SSH port number to use when connecting to the host.
+ :param command: Serialized bash command to execute.
+ :type host: str
+ :type port: int
+ :type command: str
+ """
+ timestamp = datetime.datetime.utcnow().strftime(u"%Y-%m-%dT%H:%M:%S.%fZ")
+ data = get_export_data()
+ ssh_record = dict(
+ source_type=u"host,port",
+ source_id=dict(host=host, port=port),
+ msg_type=u"ssh_command",
+ log_level=u"DEBUG",
+ timestamp=timestamp,
+ msg="",
+ data=str(command),
+ )
+ data[u"hosts"].add(host)
+ data[u"log"].append(ssh_record)
+
+
+def export_ssh_result(host, port, code, stdout, stderr, duration):
+ """Add a log item about ssh execution result.
+
+ Only for raw output log.
+
+ There is no easy way to pair with the corresponding command,
+ but usually there is only one SSH session for given host and port.
+ The duration value may give a hint if that is not the case.
+
+ Message is empty, data has fields "rc", "stdout", "stderr" and "duration".
+ Log level is always DEBUG.
+
+ The host is NOT added to the info set of hosts, as each result
+ comes after a command.
+
+ TODO: Do not require duration, find preceding ssh command in log.
+ Reason: Pylint complains about too many arguments.
+ Alternative: Define a type for SSH endpoint (and use that instead of host+port).
+
+ :param host: Node "host" attribute, usually its IPv4 address.
+ :param port: SSH port number to use when connecting to the host.
+ :param code: Bash return code, e.g. 0 for success.
+ :param stdout: Captured standard output of the command execution.
+ :param stderr: Captured error output of the command execution.
+ :param duration: How long the command has been executing, in seconds.
+ :type host: str
+ :type port: int
+ :type code: int
+ :type stdout: str
+ :type stderr: str
+ :type duration: float
+ """
+ timestamp = datetime.datetime.utcnow().strftime(u"%Y-%m-%dT%H:%M:%S.%fZ")
+ data = get_export_data()
+ ssh_record = dict(
+ source_type=u"host,port",
+ source_id=dict(host=host, port=port),
+ msg_type=u"ssh_result",
+ log_level=u"DEBUG",
+ timestamp=timestamp,
+ msg=u"",
+ data=dict(
+ rc=int(code),
+ stdout=str(stdout),
+ stderr=str(stderr),
+ duration=float(duration),
+ ),
+ )
+ data[u"log"].append(ssh_record)
+
+
+def export_ssh_timeout(host, port, stdout, stderr, duration):
+ """Add a log item about ssh execution timing out.
+
+ Only for raw output log.
+
+ There is no easy way to pair with the corresponding command,
+ but usually there is only one SSH session for given host and port.
+ The duration value may give a hint if that is not the case.
+
+ Message is empty, data has fields "stdout", "stderr" and "duration".
+ Log level is always DEBUG.
+
+ The host is NOT added to the info set of hosts, as each timeout
+ comes after a command.
+
+ :param host: Node "host" attribute, usually its IPv4 address.
+ :param port: SSH port number to use when connecting to the host.
+ :param stdout: Captured standard output of the command execution so far.
+ :param stderr: Captured error output of the command execution so far.
+ :param duration: How long the command has been executing, in seconds.
+ :type host: str
+ :type port: int
+ :type stdout: str
+ :type stderr: str
+ :type duration: float
+ """
+ timestamp = datetime.datetime.utcnow().strftime(u"%Y-%m-%dT%H:%M:%S.%fZ")
+ data = get_export_data()
+ ssh_record = dict(
+ source_type=u"host,port",
+ source_id=dict(host=host, port=port),
+ msg_type=u"ssh_timeout",
+ log_level=u"DEBUG",
+ timestamp=timestamp,
+ msg=u"",
+ data=dict(
+ stdout=str(stdout),
+ stderr=str(stderr),
+ duration=float(duration),
+ ),
+ )
+ data[u"log"].append(ssh_record)
diff --git a/resources/libraries/python/model/ExportResult.py b/resources/libraries/python/model/ExportResult.py
new file mode 100644
index 0000000000..d74a6ab5df
--- /dev/null
+++ b/resources/libraries/python/model/ExportResult.py
@@ -0,0 +1,179 @@
+# Copyright (c) 2021 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module with keywords that publish parts of result structure."""
+
+from robot.libraries.BuiltIn import BuiltIn
+
+from resources.libraries.python.model.util import descend, get_export_data
+
+
+def export_dut_type_and_version(dut_type=u"unknown", dut_version=u"unknown"):
+ """Export the arguments as dut type and version.
+
+ Robot tends to convert "none" into None, hence the unusual default values.
+
+ If either argument is missing, the value from robot variable is used.
+ If an argument is present, the value is also stored to a robot suite variable.
+
+ :param dut_type: DUT type, e.g. VPP or DPDK.
+ :param dut_version: DUT version as determined by the caller.
+ :type dut_type: Optional[str]
+ :type dut_version: Optional[str]
+ :raises RuntimeError: If value is neither in argument nor robot variable.
+ """
+ if dut_type == u"unknown":
+ dut_type = BuiltIn().get_variable_value(u"\\${DUT_TYPE}", u"unknown")
+ if dut_type == u"unknown":
+ raise RuntimeError(u"Dut type not provided.")
+ else:
+ # We want to set a variable in higher level suite setup
+ # to be available to test setup several levels lower.
+ # Documentation [0] looks like "children" is a keyword argument,
+ # but code [1] lines 1458 and 1511-1512 show
+ # it is just last stringy argument.
+ # [0] http://robotframework.org/robotframework/
+ # 3.1.2/libraries/BuiltIn.html#Set%20Suite%20Variable
+ # [1] https://github.com/robotframework/robotframework/blob/
+ # v3.1.2/src/robot/libraries/BuiltIn.py
+ BuiltIn().set_suite_variable(
+ u"\\${DUT_TYPE}", dut_type, u"children=True"
+ )
+ if dut_version == u"unknown":
+ dut_version = BuiltIn().get_variable_value(u"\\${DUT_VERSION}", u"unknown")
+ if dut_version == u"unknown":
+ raise RuntimeError(u"Dut version not provided.")
+ else:
+ BuiltIn().set_suite_variable(
+ u"\\${DUT_VERSION}", dut_version, u"children=True"
+ )
+ data = get_export_data()
+ data[u"dut_type"] = dut_type
+ data[u"dut_version"] = dut_version
+
+
+def append_mrr_value(mrr_value, unit):
+ """Store mrr value to proper place so it is dumped into json.
+
+ The value is appended only when unit is not empty.
+
+ :param mrr_value: Forwarding rate from MRR trial.
+ :param unit: Unit of measurement for the rate.
+ :type mrr_value: float
+ :type unit: str
+ """
+ if not unit:
+ return
+ data = get_export_data()
+ data[u"result"][u"type"] = u"mrr"
+ rate_node = descend(descend(data[u"result"], u"receive_rate"), u"rate")
+ rate_node[u"unit"] = str(unit)
+ values_list = descend(rate_node, u"values", list)
+ values_list.append(float(mrr_value))
+ # TODO: Fill in the bandwidth part for pps?
+
+
+def export_search_bound(text, value, unit, bandwidth=None):
+ """Store bound value and unit.
+
+ This function works for both NDRPDR and SOAK, decided by text.
+
+ If a node does not exist, it is created.
+ If a previous value exists, it is overwritten silently.
+ Result type is set (overwritten) to ndrpdr (or soak).
+
+ Text is used to determine whether it is ndr or pdr, upper or lower bound,
+ as the Robot caller has the information only there.
+
+ :param text: Info from Robot caller to determine bound type.
+ :param value: The bound value in packets (or connections) per second.
+ :param unit: Rate unit the bound is measured (or estimated) in.
+ :param bandwidth: The same value recomputed into L1 bits per second.
+ :type text: str
+ :type value: float
+ :type unit: str
+ :type bandwidth: Optional[float]
+ """
+ value = float(value)
+ text = str(text).lower()
+ result_type = u"soak" if u"plrsearch" in text else u"ndrpdr"
+ upper_or_lower = u"upper" if u"upper" in text else u"lower"
+ ndr_or_pdr = u"ndr" if u"ndr" in text else u"pdr"
+
+ data = get_export_data()
+ result_node = data[u"result"]
+ result_node[u"type"] = result_type
+ rate_item = dict(rate=dict(value=value, unit=unit))
+ if bandwidth:
+ rate_item[u"bandwidth"] = dict(value=float(bandwidth), unit=u"bps")
+ if result_type == u"soak":
+ descend(result_node, u"critical_rate")[upper_or_lower] = rate_item
+ return
+ descend(result_node, ndr_or_pdr)[upper_or_lower] = rate_item
+
+
+def _add_latency(result_node, percent, whichward, latency_string):
+ """Descend to a corresponding node and add values from latency string.
+
+ This is an internal block, moved out from export_ndrpdr_latency,
+ as it can be called up to 4 times.
+
+ :param result_node: UTI tree node to descend from.
+ :param percent: Percent value to use in node key (90, 50, 10, 0).
+ :param whichward: "forward" or "reverse".
+ :param latency_string: Unidir output from TRex utility, min/avg/max/hdrh.
+ :type result_node: dict
+ :type percent: int
+ :type whichward: str
+ :type latency_string: str
+ """
+ l_min, l_avg, l_max, l_hdrh = latency_string.split(u"/", 3)
+ whichward_node = descend(result_node, f"latency_{whichward}")
+ percent_node = descend(whichward_node, f"pdr_{percent}")
+ percent_node[u"min"] = int(l_min)
+ percent_node[u"avg"] = int(l_avg)
+ percent_node[u"max"] = int(l_max)
+ percent_node[u"hdrh"] = l_hdrh
+ percent_node[u"unit"] = u"us"
+
+
+def export_ndrpdr_latency(text, latency):
+ """Store NDRPDR hdrh latency data.
+
+ If "latency" node does not exist, it is created.
+ If a previous value exists, it is overwritten silently.
+
+ Text is used to determine what percentage of PDR is the load,
+ as the Robot caller has the information only there.
+
+ Reverse data may be missing, we assume the test was unidirectional.
+
+ :param text: Info from Robot caller to determine load.
+ :param latency: Output from TRex utility, min/avg/max/hdrh.
+ :type text: str
+ :type latency: 1-tuple or 2-tuple of str
+ """
+ data = get_export_data()
+ result_node = data[u"result"]
+ percent = 0
+ if u"90" in text:
+ percent = 90
+ elif u"50" in text:
+ percent = 50
+ elif u"10" in text:
+ percent = 10
+ _add_latency(result_node, percent, u"forward", latency[0])
+ # If TRex does not support latency measurement for this traffic profile, reverse data is missing.
+ if len(latency) < 2:
+ return
+ _add_latency(result_node, percent, u"reverse", latency[1])
diff --git a/resources/libraries/python/model/export_json.py b/resources/libraries/python/model/export_json.py
new file mode 100644
index 0000000000..4f1b86dbf4
--- /dev/null
+++ b/resources/libraries/python/model/export_json.py
@@ -0,0 +1,238 @@
+# Copyright (c) 2021 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module tracking json in-memory data and saving it to files.
+
+The current implementation tracks data for raw output,
+and info output is created from raw output on disk (see raw2info module).
+Raw files contain all log items but no derived quantities;
+info files contain only important log items but also derived quantities.
+The overlap between the two file types is big.
+
+Each test case, suite setup (hierarchical) and teardown has its own file pair.
+
+Validation is performed for output files with available JSON schema.
+Validation is performed on data deserialized from disk,
+as serialization might have introduced subtle errors.
+"""
+
+import datetime
+import os.path
+
+from robot.api import logger
+from robot.libraries.BuiltIn import BuiltIn
+
+from resources.libraries.python.Constants import Constants
+from resources.libraries.python.model.ExportResult import (
+ export_dut_type_and_version
+)
+from resources.libraries.python.model.mem2raw import write_raw_output
+from resources.libraries.python.model.raw2info import convert_content_to_info
+from resources.libraries.python.model.validate import (get_validators, validate)
+
+
+class export_json():
+ """Class handling the json data setting and export."""
+
+ ROBOT_LIBRARY_SCOPE = u"GLOBAL"
+
+ def __init__(self):
+ """Declare required fields, cache output dir.
+
+ Also memorize schema validator instances.
+ """
+ self.output_dir = BuiltIn().get_variable_value(u"\\${OUTPUT_DIR}", ".")
+ self.raw_file_path = None
+ self.raw_data = None
+ self.validators = get_validators()
+
+ def export_pending_data(self):
+ """Write the accumulated data to disk.
+
+ Create missing directories.
+ Reset both file path and data to avoid writing multiple times.
+
+ Functions which finalize content for a given file call this,
+ so make sure each test and non-empty suite setup or teardown
+ calls one of them as its last keyword.
+
+ If no file path is set, nothing is written, as that is the
+ failsafe behavior when called from an unexpected place.
+ Also, nothing is written when the EXPORT_JSON constant is false.
+
+ Regardless of whether data was written, it is cleared.
+ """
+ if not Constants.EXPORT_JSON or not self.raw_file_path:
+ self.raw_data = None
+ self.raw_file_path = None
+ return
+ write_raw_output(self.raw_file_path, self.raw_data)
+ # Raw data is going to be cleared (as a sign that raw export succeeded),
+ # so this is the last chance to detect if it was for a test case.
+ is_testcase = u"result" in self.raw_data
+ self.raw_data = None
+ # Validation for raw output goes here when ready.
+ info_file_path = convert_content_to_info(self.raw_file_path)
+ self.raw_file_path = None
+ # If "result" is missing from info content,
+ # it could be a bug in conversion from raw test case content,
+ # so instead of that we use the flag detected earlier.
+ if is_testcase:
+ validate(info_file_path, self.validators[u"tc_info"])
+
+ def warn_on_bad_export(self):
+ """If bad state is detected, log a warning and clean up state."""
+ if self.raw_file_path is not None or self.raw_data is not None:
+ logger.warn(
+ f"Previous export not clean, path {self.raw_file_path}\n"
+ f"data: {self.raw_data}"
+ )
+ self.raw_data = None
+ self.raw_file_path = None
+
+ def start_suite_setup_export(self):
+ """Set new file path, initialize data for the suite setup.
+
+ This has to be called explicitly at start of suite setup,
+ otherwise Robot likes to postpone initialization
+ until first call by a data-adding keyword.
+
+ File path is set based on suite.
+ """
+ self.warn_on_bad_export()
+ start_time = datetime.datetime.utcnow().strftime(
+ u"%Y-%m-%dT%H:%M:%S.%fZ"
+ )
+ suite_name = BuiltIn().get_variable_value(u"\\${SUITE_NAME}")
+ suite_id = suite_name.lower().replace(u" ", u"_")
+ suite_path_part = os.path.join(*suite_id.split(u"."))
+ output_dir = self.output_dir
+ self.raw_file_path = os.path.join(
+ output_dir, suite_path_part, u"setup.raw.json"
+ )
+ self.raw_data = dict()
+ self.raw_data[u"version"] = Constants.MODEL_VERSION
+ self.raw_data[u"start_time"] = start_time
+ self.raw_data[u"suite_name"] = suite_name
+ self.raw_data[u"suite_documentation"] = BuiltIn().get_variable_value(
+ u"\\${SUITE_DOCUMENTATION}"
+ )
+ # "end_time" and "duration" is added on flush.
+ self.raw_data[u"hosts"] = set()
+ self.raw_data[u"log"] = list()
+
+ def start_test_export(self):
+ """Set new file path, initialize data to minimal tree for the test case.
+
+ It is assumed Robot variables DUT_TYPE and DUT_VERSION
+ are already set (in suite setup) to correct values.
+
+ This function has to be called explicitly at the start of test setup,
+ otherwise Robot likes to postpone initialization
+ until first call by a data-adding keyword.
+
+ File path is set based on suite and test.
+ """
+ self.warn_on_bad_export()
+ start_time = datetime.datetime.utcnow().strftime(
+ u"%Y-%m-%dT%H:%M:%S.%fZ"
+ )
+ suite_name = BuiltIn().get_variable_value(u"\\${SUITE_NAME}")
+ suite_id = suite_name.lower().replace(u" ", u"_")
+ suite_path_part = os.path.join(*suite_id.split(u"."))
+ test_name = BuiltIn().get_variable_value(u"\\${TEST_NAME}")
+ self.raw_file_path = os.path.join(
+ self.output_dir, suite_path_part,
+ test_name.lower().replace(u" ", u"_") + u".raw.json"
+ )
+ self.raw_data = dict()
+ self.raw_data[u"version"] = Constants.MODEL_VERSION
+ self.raw_data[u"start_time"] = start_time
+ self.raw_data[u"suite_name"] = suite_name
+ self.raw_data[u"test_name"] = test_name
+ test_doc = BuiltIn().get_variable_value(u"\\${TEST_DOCUMENTATION}", u"")
+ self.raw_data[u"test_documentation"] = test_doc
+ # "test_type" is added when converting to info.
+ # "tags" is detected and added on flush.
+ # "end_time" and "duration" is added on flush.
+ # Robot status and message are added on flush.
+ self.raw_data[u"result"] = dict(type=u"unknown")
+ self.raw_data[u"hosts"] = set()
+ self.raw_data[u"log"] = list()
+ export_dut_type_and_version()
+
+ def start_suite_teardown_export(self):
+ """Set new file path, initialize data for the suite teardown.
+
+ This has to be called explicitly at start of suite teardown,
+ otherwise Robot likes to postpone initialization
+ until first call by a data-adding keyword.
+
+ File path is set based on suite.
+ """
+ self.warn_on_bad_export()
+ start_time = datetime.datetime.utcnow().strftime(
+ u"%Y-%m-%dT%H:%M:%S.%fZ"
+ )
+ suite_name = BuiltIn().get_variable_value(u"\\${SUITE_NAME}")
+ suite_id = suite_name.lower().replace(u" ", u"_")
+ suite_path_part = os.path.join(*suite_id.split(u"."))
+ self.raw_file_path = os.path.join(
+ self.output_dir, suite_path_part, u"teardown.raw.json"
+ )
+ self.raw_data = dict()
+ self.raw_data[u"version"] = Constants.MODEL_VERSION
+ self.raw_data[u"start_time"] = start_time
+ self.raw_data[u"suite_name"] = suite_name
+ # "end_time" and "duration" is added on flush.
+ self.raw_data[u"hosts"] = set()
+ self.raw_data[u"log"] = list()
+
+ def finalize_suite_setup_export(self):
+ """Add the missing fields to data. Do not write yet.
+
+ Should be run at the end of suite setup.
+ The write is done at next start (or at the end of global teardown).
+ """
+ end_time = datetime.datetime.utcnow().strftime(u"%Y-%m-%dT%H:%M:%S.%fZ")
+ self.raw_data[u"end_time"] = end_time
+ self.export_pending_data()
+
+ def finalize_test_export(self):
+ """Add the missing fields to data. Do not write yet.
+
+ Should be at the end of test teardown, as the implementation
+ reads various Robot variables, some of them only available at teardown.
+
+ The write is done at next start (or at the end of global teardown).
+ """
+ end_time = datetime.datetime.utcnow().strftime(u"%Y-%m-%dT%H:%M:%S.%fZ")
+ message = BuiltIn().get_variable_value(u"\\${TEST_MESSAGE}")
+ status = BuiltIn().get_variable_value(u"\\${TEST_STATUS}")
+ test_tags = BuiltIn().get_variable_value(u"\\${TEST_TAGS}")
+ self.raw_data[u"end_time"] = end_time
+ self.raw_data[u"tags"] = list(test_tags)
+ self.raw_data[u"status"] = status
+ self.raw_data[u"message"] = message
+ self.export_pending_data()
+
+ def finalize_suite_teardown_export(self):
+ """Add the missing fields to data. Do not write yet.
+
+ Should be run at the end of suite teardown
+ (but before the explicit write in the global suite teardown).
+ The actual write to disk happens in export_pending_data.
+ """
+ end_time = datetime.datetime.utcnow().strftime(u"%Y-%m-%dT%H:%M:%S.%fZ")
+ self.raw_data[u"end_time"] = end_time
+ self.export_pending_data()
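The expected call order, mirroring the methods above (a sketch of how the Robot
suite/test setups and teardowns are supposed to drive the library, not literal
Robot code):

    lib = export_json()                  # Robot instantiates this as a library.
    lib.start_suite_setup_export()       # at the start of suite setup
    lib.finalize_suite_setup_export()    # writes <suite>/setup.raw.json + info
    lib.start_test_export()              # at the start of test setup
    lib.finalize_test_export()           # writes <suite>/<test>.raw.json + info
    lib.start_suite_teardown_export()    # at the start of suite teardown
    lib.finalize_suite_teardown_export() # writes teardown, then suite.info.json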
diff --git a/resources/libraries/python/model/mem2raw.py b/resources/libraries/python/model/mem2raw.py
new file mode 100644
index 0000000000..c3145b9f31
--- /dev/null
+++ b/resources/libraries/python/model/mem2raw.py
@@ -0,0 +1,145 @@
+# Copyright (c) 2021 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module for converting in-memory data into raw JSON output.
+
+CSIT and VPP PAPI are using custom data types
+that are not directly serializable into JSON.
+
+Thus, before writing the raw output onto disk,
+the data is recursively converted to equivalent serializable types,
+in extreme cases replaced by string representation.
+
+Validation is outside the scope of this module,
+as it should use the JSON data read from disk.
+"""
+
+import json
+import os
+
+from collections.abc import Iterable, Mapping, Set
+from enum import IntFlag
+
+
+def _pre_serialize_recursive(data):
+ """Recursively sort and convert to a more serializable form.
+
+ VPP PAPI code can give data with its own MACAddress type,
+ or various other enum and flag types.
+ The default json.JSONEncoder method raises TypeError on that.
+ First point of this function is to apply str() or repr()
+ to leaf values that need it.
+
+ Also, PAPI responses are namedtuples, which confuses
+ the json.JSONEncoder method (so it does not recurse).
+ Dictization (see PapiExecutor) helps somewhat, but it turns namedtuple
+ into a UserDict, which also confuses json.JSONEncoder.
+ Therefore, we recursively convert any Mapping into an ordinary dict.
+
+ We also convert iterables to list (sorted if the iterable was a set),
+ and prevent numbers from getting converted to strings.
+
+ As we are doing such low level operations,
+ we also convert mapping keys to strings
+ and sort the mapping items by keys alphabetically,
+ except "data" field moved to the end.
+
+ :param data: Object to make serializable, dictized when applicable.
+ :type data: object
+ :returns: Serializable equivalent of the argument.
+ :rtype: object
+ :raises ValueError: If the argument does not support string conversion.
+ """
+ # Recursion ends at scalar values, first handle irregular ones.
+ if isinstance(data, IntFlag):
+ return repr(data)
+ if isinstance(data, bytes):
+ return data.hex()
+ # The regular ones are good to go.
+ if isinstance(data, (str, int, float, bool)):
+ return data
+ # Recurse over, convert and sort mappings.
+ if isinstance(data, Mapping):
+ # Convert and sort alphabetically.
+ ret = {
+ str(key): _pre_serialize_recursive(data[key])
+ for key in sorted(data.keys())
+ }
+ # If exists, move "data" field to the end.
+ if u"data" in ret:
+ data_value = ret.pop(u"data")
+ ret[u"data"] = data_value
+ # If exists, move "type" field at the start.
+ if u"type" in ret:
+ type_value = ret.pop(u"type")
+ ret_old = ret
+ ret = dict(type=type_value)
+ ret.update(ret_old)
+ return ret
+ # Recurse over and convert iterables.
+ if isinstance(data, Iterable):
+ list_data = [_pre_serialize_recursive(item) for item in data]
+ # Additionally, sets are exported as sorted.
+ if isinstance(data, Set):
+ list_data = sorted(list_data)
+ return list_data
+ # Unknown structure, attempt str().
+ return str(data)
+
+
+def _pre_serialize_root(data):
+ """Recursively convert to a more serializable form, tweak order.
+
+ See _pre_serialize_recursive for most of changes this does.
+
+ The logic here (outside the recursive function) only affects
+ field ordering in the root mapping,
+ to make it more human friendly.
+ We are moving "version" to the top,
+ followed by start time and end time,
+ and various long fields (such as "log") to the bottom.
+
+ Some edits are done in-place; do not trust the argument value after calling.
+
+ :param data: Root data to make serializable, dictized when applicable.
+ :type data: dict
+ :returns: Order-tweaked version of the argument.
+ :rtype: dict
+ :raises KeyError: If the data does not contain required fields.
+ :raises RuntimeError: If the argument is not a dict.
+ :raises ValueError: If the argument does not support string conversion.
+ """
+ if not isinstance(data, dict):
+ raise RuntimeError(f"Root data object needs to be a dict: {data!r}")
+ data = _pre_serialize_recursive(data)
+ log = data.pop(u"log")
+ new_data = dict(version=data.pop(u"version"))
+ new_data[u"start_time"] = data.pop(u"start_time")
+ new_data[u"end_time"] = data.pop(u"end_time")
+ new_data.update(data)
+ new_data[u"log"] = log
+ return new_data
+
+
+def write_raw_output(raw_file_path, raw_data):
+ """Prepare data for serialization and dump into a file.
+
+ Ancestor directories are created if needed.
+
+ :param raw_file_path: Local filesystem path, including the file name.
+ :type raw_file_path: str
+ """
+ raw_data = _pre_serialize_root(raw_data)
+ os.makedirs(os.path.dirname(raw_file_path), exist_ok=True)
+ with open(raw_file_path, u"xt", encoding="utf-8") as file_out:
+ json.dump(raw_data, file_out, indent=1)
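A toy illustration of the pre-serialization rules (made-up input): bytes become
hex strings, sets become sorted lists, mapping keys are sorted with "type"
moved first and "data" moved last:

    before = {u"data": b"\x01\x02", u"hosts": {u"b", u"a"}, u"type": u"mrr"}
    after = _pre_serialize_recursive(before)
    # after == {"type": "mrr", "hosts": ["a", "b"], "data": "0102"}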
diff --git a/resources/libraries/python/model/raw2info.py b/resources/libraries/python/model/raw2info.py
new file mode 100644
index 0000000000..7a3647d857
--- /dev/null
+++ b/resources/libraries/python/model/raw2info.py
@@ -0,0 +1,294 @@
+# Copyright (c) 2021 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module facilitating conversion from raw outputs into info outputs."""
+
+import copy
+import json
+import os
+
+import dateutil.parser
+
+from resources.libraries.python.Constants import Constants
+from resources.libraries.python.jumpavg.AvgStdevStats import AvgStdevStats
+
+
+def _raw_to_info_path(raw_path):
+ """Compute path for info output corresponding to given raw output.
+
+ :param raw_path: Local filesystem path to read raw JSON data from.
+ :type raw_path: str
+ :returns: Local filesystem path to write info JSON content to.
+ :rtype: str
+ :raises RuntimeError: If the input path does not meet all expectations.
+ """
+ raw_extension = u".raw.json"
+ tmp_parts = raw_path.split(raw_extension)
+ if len(tmp_parts) != 2 or tmp_parts[1] != u"":
+ raise RuntimeError(f"Not good extension {raw_extension}: {raw_path}")
+ info_path = tmp_parts[0] + u".info.json"
+ return info_path
+
+
+def _process_test_name(data):
+ """Replace raw test name with short and long test name and set test_type.
+
+ Perform in-place edits on the data dictionary.
+ Remove raw suite_name and test_name, as they are not part of the info schema.
+ Return early if the data is not for a test case.
+ Insert test ID and long and short test names into the data.
+ Besides suite_name and test_name, also test tags are read.
+
+ Short test name is basically a suite tag, but with NIC driver prefix,
+ if the NIC driver used is not the default one (drv_vfio_pci for VPP tests).
+
+ Long test name has the following form:
+ {nic_short_name}-{frame_size}-{threads_and_cores}-{suite_part}
+ Lookup in test tags is needed to get the threads value.
+ The threads_and_cores part may be empty, e.g. for TRex tests.
+
+ Test ID has form {suite_name}.{test_name} where the two names come from
+ Robot variables, converted to lower case and spaces replaced by underscores.
+
+ Test type is set in an internal function.
+
+ :param data: Raw data, perhaps some fields converted into info data already.
+ :type data: dict
+ :raises RuntimeError: If the raw data does not contain expected values.
+ """
+ suite_part = data.pop(u"suite_name").lower().replace(u" ", u"_")
+ if u"test_name" not in data:
+ # There will be no test_id, provide suite_id instead.
+ data[u"suite_id"] = suite_part
+ return
+ test_part = data.pop(u"test_name").lower().replace(u" ", u"_")
+ data[u"test_id"] = f"{suite_part}.{test_part}"
+ tags = data[u"tags"]
+ # Test name does not contain thread count.
+ subparts = test_part.split(u"c-", 1)
+ if len(subparts) < 2 or subparts[0][-2:-1] != u"-":
+ # Physical core count not detected, assume it is a TRex test.
+ if u"--" not in test_part:
+ raise RuntimeError(f"Cores not found for {subparts}")
+ short_name = test_part.split(u"--", 1)[1]
+ else:
+ short_name = subparts[1]
+ # Add threads to test_part.
+ core_part = subparts[0][-1] + u"c"
+ for tag in tags:
+ tag = tag.lower()
+ if len(tag) == 4 and core_part == tag[2:] and tag[1] == u"t":
+ test_part = test_part.replace(f"-{core_part}-", f"-{tag}-")
+ break
+ else:
+ raise RuntimeError(f"Threads not found for {test_part} tags {tags}")
+ # For long name we need NIC model, which is only in suite name.
+ last_suite_part = suite_part.split(u".")[-1]
+ # Short name happens to be the suffix we want to ignore.
+ prefix_part = last_suite_part.split(short_name)[0]
+ # Also remove the trailing dash.
+ prefix_part = prefix_part[:-1]
+ # Throw away possible link prefix such as "1n1l-".
+ nic_code = prefix_part.split(u"-", 1)[-1]
+ nic_short = Constants.NIC_CODE_TO_SHORT_NAME[nic_code]
+ long_name = f"{nic_short}-{test_part}"
+ # Set test type.
+ test_type = _detect_test_type(data)
+ data[u"test_type"] = test_type
+ # Remove trailing test type from names (if present).
+ short_name = short_name.split(f"-{test_type}")[0]
+ long_name = long_name.split(f"-{test_type}")[0]
+ # Store names.
+ data[u"test_name_short"] = short_name
+ data[u"test_name_long"] = long_name
+
+
+def _detect_test_type(data):
+ """Return test_type, as inferred from robot test tags.
+
+ :param data: Raw data, perhaps some fields converted into info data already.
+ :type data: dict
+ :returns: The inferred test type value.
+ :rtype: str
+ :raises RuntimeError: If the test tags do not contain expected values.
+ """
+ tags = data[u"tags"]
+ # First 5 options are specific for VPP tests.
+ if u"DEVICETEST" in tags:
+ test_type = u"device"
+ elif u"LDP_NGINX" in tags:
+ test_type = u"vsap"
+ elif u"HOSTSTACK" in tags:
+ test_type = u"hoststack"
+ elif u"GSO_TRUE" in tags or u"GSO_FALSE" in tags:
+ test_type = u"gso"
+ elif u"RECONF" in tags:
+ test_type = u"reconf"
+ # The remaining 3 options could also apply to DPDK and TRex tests.
+ elif u"SOAK" in tags:
+ test_type = u"soak"
+ elif u"NDRPDR" in tags:
+ test_type = u"ndrpdr"
+ elif u"MRR" in tags:
+ test_type = u"mrr"
+ else:
+ raise RuntimeError(f"Unable to infer test type from tags: {tags}")
+ return test_type
+
+
+def _convert_to_info_in_memory(data):
+ """Perform all changes needed for processing of data, return new data.
+
+ Data is assumed to be valid for raw schema, so no exceptions are expected.
+ The original argument object is not edited,
+ a new copy is created for edits and returned,
+ because there is no easy way to sort keys in-place.
+
+ :param data: The whole composite object to filter and enhance.
+ :type data: dict
+ :returns: New object with the edited content.
+ :rtype: dict
+ """
+ data = copy.deepcopy(data)
+
+ # Drop any SSH log items.
+ data[u"log"] = list()
+
+ # Duration is computed for every file.
+ start_float = dateutil.parser.parse(data[u"start_time"]).timestamp()
+ end_float = dateutil.parser.parse(data[u"end_time"]).timestamp()
+ data[u"duration"] = end_float - start_float
+
+ # Reorder important fields to the top.
+ sorted_data = dict(version=data.pop(u"version"))
+ sorted_data[u"duration"] = data.pop(u"duration")
+ sorted_data[u"start_time"] = data.pop(u"start_time")
+ sorted_data[u"end_time"] = data.pop(u"end_time")
+ sorted_data.update(data)
+ data = sorted_data
+ # TODO: Do we care about the order of subsequently added fields?
+
+ # Convert status into a boolean.
+ status = data.pop(u"status", None)
+ if status is not None:
+ data[u"passed"] = (status == u"PASS")
+ if data[u"passed"]:
+ # Also truncate the message for passing tests.
+ data[u"message"] = u""
+
+ # Replace raw names with processed ones, set test_id and test_type.
+ _process_test_name(data)
+
+ # The rest is only relevant for test case outputs.
+ if u"result" not in data:
+ return data
+ result_node = data[u"result"]
+ result_type = result_node[u"type"]
+ if result_type == u"unknown":
+ # Device or something else not supported.
+ return data
+
+ # More processing depending on result type. TODO: Separate functions?
+
+ # Compute avg and stdev for mrr.
+ if result_type == u"mrr":
+ rate_node = result_node[u"receive_rate"][u"rate"]
+ stats = AvgStdevStats.for_runs(rate_node[u"values"])
+ rate_node[u"avg"] = stats.avg
+ rate_node[u"stdev"] = stats.stdev
+
+ # Multiple processing steps for ndrpdr.
+ if result_type != u"ndrpdr":
+ return data
+ # Filter out invalid latencies.
+ for which_key in (u"latency_forward", u"latency_reverse"):
+ if which_key not in result_node:
+ # Probably just a unidir test.
+ continue
+ for load in (u"pdr_0", u"pdr_10", u"pdr_50", u"pdr_90"):
+ if result_node[which_key][load][u"max"] <= 0:
+ # One invalid number is enough to remove all loads.
+ break
+ else:
+ # No break means all numbers are ok, nothing to do here.
+ continue
+ # Break happened, something is invalid, remove all loads.
+ result_node.pop(which_key)
+
+ return data
+
+
+def _merge_into_suite_info_file(teardown_info_path):
+ """Move setup and teardown data into a singe file, remove old files.
+
+ The caller has to confirm the argument is correct, e.g. ending in
+ "/teardown.info.json".
+
+ :param teardown_info_path: Local filesystem path to teardown info file.
+ :type teardown_info_path: str
+ :returns: Local filesystem path to newly created suite info file.
+ :rtype: str
+ """
+ # Manual right replace: https://stackoverflow.com/a/9943875
+ setup_info_path = u"setup".join(teardown_info_path.rsplit(u"teardown", 1))
+ with open(teardown_info_path, u"rt", encoding="utf-8") as file_in:
+ teardown_data = json.load(file_in)
+ # Transforming setup data into suite data.
+ with open(setup_info_path, u"rt", encoding="utf-8") as file_in:
+ suite_data = json.load(file_in)
+
+ end_time = teardown_data[u"end_time"]
+ suite_data[u"end_time"] = end_time
+ start_float = dateutil.parser.parse(suite_data[u"start_time"]).timestamp()
+ end_float = dateutil.parser.parse(suite_data[u"end_time"]).timestamp()
+ suite_data[u"duration"] = end_float - start_float
+ setup_log = suite_data.pop(u"log")
+ suite_data[u"setup_log"] = setup_log
+ suite_data[u"teardown_log"] = teardown_data[u"log"]
+
+ suite_info_path = u"suite".join(teardown_info_path.rsplit(u"teardown", 1))
+ with open(suite_info_path, u"xt", encoding="utf-8") as file_out:
+ json.dump(suite_data, file_out, indent=1)
+ # We moved everything useful from temporary setup/teardown info files.
+ os.remove(setup_info_path)
+ os.remove(teardown_info_path)
+
+ return suite_info_path
+
+
+def convert_content_to_info(from_raw_path):
+ """Read raw output, perform filtering, add derivatves, write info output.
+
+ Directory path is created if missing.
+
+ When processing teardown, also create suite output using setup info.
+
+ :param from_raw_path: Local filesystem path to read raw JSON data from.
+ :type from_raw_path: str
+ :returns: Local filesystem path to written info JSON file.
+ :rtype: str
+ :raises RuntimeError: If path or content do not match expectations.
+ """
+ to_info_path = _raw_to_info_path(from_raw_path)
+ with open(from_raw_path, u"rt", encoding="utf-8") as file_in:
+ data = json.load(file_in)
+
+ data = _convert_to_info_in_memory(data)
+
+ with open(to_info_path, u"xt", encoding="utf-8") as file_out:
+ json.dump(data, file_out, indent=1)
+ if to_info_path.endswith(u"/teardown.info.json"):
+ to_info_path = _merge_into_suite_info_file(to_info_path)
+ # TODO: Return both paths for validation?
+
+ return to_info_path
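The raw-to-info path convention handled by _raw_to_info_path, shown with
hypothetical paths:

    _raw_to_info_path(u"out/suites/setup.raw.json")
    # -> u"out/suites/setup.info.json"
    _raw_to_info_path(u"out/suites/setup.json")
    # -> RuntimeError, the ".raw.json" extension is required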
diff --git a/resources/libraries/python/model/util.py b/resources/libraries/python/model/util.py
new file mode 100644
index 0000000000..879f1f28b1
--- /dev/null
+++ b/resources/libraries/python/model/util.py
@@ -0,0 +1,69 @@
+# Copyright (c) 2021 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module hosting few utility functions useful when dealing with modelled data.
+
+This is for storing varied utility functions, which are too short and diverse
+to be put into more descriptive modules.
+"""
+
+
+from robot.libraries.BuiltIn import BuiltIn
+
+
+def descend(parent_node, key, default_factory=None):
+ """Return a sub-node, create and insert it when it does not exist.
+
+ Without this function:
+ child_node = parent_node.get(key, dict())
+ parent_node[key] = child_node
+
+ With this function:
+ child_node = descend(parent_node, key)
+
+ New code is shorter and avoids the need to type key and parent_node twice.
+
+ :param parent_node: Reference to inner node of a larger structure
+ we want to descend from.
+ :param key: Key of the maybe existing child node.
+ :param default_factory: If the key does not exist, call this
+ to create a new value to be inserted under the key.
+ None means dict. The other popular option is list.
+ :type parent_node: dict
+ :type key: str
+ :type default_factory: Optional[Callable[[], object]]
+ :returns: The reference to (maybe just created) child node.
+ :rtype: object
+ """
+ if key not in parent_node:
+ factory = dict if default_factory is None else default_factory
+ parent_node[key] = factory()
+ return parent_node[key]
+
+
+def get_export_data():
+ """Return raw_data member of export_json library instance.
+
+ This assumes the data has been initialized already.
+ Return None if Robot is not running.
+
+ :returns: Current library instance's raw data field.
+ :rtype: Optional[dict]
+ :raises AttributeError: If library is not imported yet.
+ """
+ instance = BuiltIn().get_library_instance(
+ u"resources.libraries.python.model.export_json"
+ )
+ if instance is None:
+ return None
+ return instance.raw_data
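A small example of how descend() builds nested structures on demand (made-up
keys, mirroring the usage in ExportResult.py):

    root = dict()
    rate = descend(descend(root, u"receive_rate"), u"rate")
    descend(rate, u"values", list).append(1.0)
    # root == {"receive_rate": {"rate": {"values": [1.0]}}}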
diff --git a/resources/libraries/python/model/validate.py b/resources/libraries/python/model/validate.py
new file mode 100644
index 0000000000..c441936ac8
--- /dev/null
+++ b/resources/libraries/python/model/validate.py
@@ -0,0 +1,73 @@
+# Copyright (c) 2021 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module for validating JSON instances against schemas.
+
+Short module currently, as we validate only testcase info outputs.
+Structure will probably change when we start validating more file types.
+"""
+
+import json
+import jsonschema
+
+
+def _get_validator(schema_path):
+ """Contruct validator with format checking enabled.
+
+ Load json schema from disk.
+ Perform validation against meta-schema before returning.
+
+ :param schema_path: Local filesystem path to .json file storing the schema.
+ :type schema_path: str
+ :returns: Instantiated validator class instance.
+ :rtype: jsonschema.validators.Validator
+ :raises RuntimeError: If the schema is not valid according to its meta-schema.
+ """
+ with open(schema_path, u"rt", encoding="utf-8") as file_in:
+ schema = json.load(file_in)
+ validator_class = jsonschema.validators.validator_for(schema)
+ validator_class.check_schema(schema)
+ fmt_checker = jsonschema.FormatChecker()
+ validator = validator_class(schema, format_checker=fmt_checker)
+ return validator
+
+
+def get_validators():
+ """Return mapping from file types to validator instances.
+
+ Uses hardcoded file types and paths to schemas on disk.
+
+ :returns: Validators, currently just for tc_info.
+ :rtype: Mapping[str, jsonschema.validators.Validator]
+ :raises RuntimeError: If schemas are not readable or not valid.
+ """
+ relative_path = u"docs/model/current/schema/test_case.info.schema.json"
+ # Robot is always started with CWD set to CSIT_DIR.
+ validator = _get_validator(relative_path)
+ return dict(tc_info=validator)
+
+
+def validate(file_path, validator):
+ """Load data from disk, use validator to validate it.
+
+ :param file_path: Local filesystem path including the file name to load.
+ :param validator: Validator instance to use for validation.
+ :type file_path: str
+ :type validator: jsonschema.validators.Validator
+ :raises ValidationError: If the instance does not conform to the schema.
+ """
+ with open(file_path, u"rt", encoding="utf-8") as file_in:
+ instance = json.load(file_in)
+ error = jsonschema.exceptions.best_match(validator.iter_errors(instance))
+ if error is not None:
+ raise error
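A stand-alone sketch of the same jsonschema flow with a toy schema (not the real
CSIT schema), showing how the validator instances above are constructed and used:

    import jsonschema

    schema = {"type": "object", "required": ["version"]}
    validator_class = jsonschema.validators.validator_for(schema)
    validator_class.check_schema(schema)
    validator = validator_class(schema, format_checker=jsonschema.FormatChecker())
    error = jsonschema.exceptions.best_match(
        validator.iter_errors({"version": "1.0.0"})
    )
    assert error is None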