path: root/resources/libraries/python/model
Diffstat (limited to 'resources/libraries/python/model')
-rw-r--r--  resources/libraries/python/model/ExportJson.py    393
-rw-r--r--  resources/libraries/python/model/ExportLog.py     148
-rw-r--r--  resources/libraries/python/model/ExportResult.py    1
-rw-r--r--  resources/libraries/python/model/MemDump.py (renamed from resources/libraries/python/model/mem2raw.py)    85
-rw-r--r--  resources/libraries/python/model/export_json.py   236
-rw-r--r--  resources/libraries/python/model/raw2info.py      294
-rw-r--r--  resources/libraries/python/model/util.py             8
7 files changed, 464 insertions(+), 701 deletions(-)
diff --git a/resources/libraries/python/model/ExportJson.py b/resources/libraries/python/model/ExportJson.py
new file mode 100644
index 0000000000..b0e0158295
--- /dev/null
+++ b/resources/libraries/python/model/ExportJson.py
@@ -0,0 +1,393 @@
+# Copyright (c) 2022 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module tracking json in-memory data and saving it to files.
+
+Each test case, suite setup (hierarchical) and teardown has its own file pair.
+
+Validation is performed for output files with available JSON schema.
+Validation is performed on data deserialized from disk,
+as serialization might have introduced subtle errors.
+"""
+
+import datetime
+import os.path
+
+from dateutil.parser import parse
+from robot.api import logger
+from robot.libraries.BuiltIn import BuiltIn
+
+from resources.libraries.python.Constants import Constants
+from resources.libraries.python.jumpavg.AvgStdevStats import AvgStdevStats
+from resources.libraries.python.model.ExportResult import (
+ export_dut_type_and_version, export_tg_type_and_version
+)
+from resources.libraries.python.model.MemDump import write_output
+from resources.libraries.python.model.validate import (
+ get_validators, validate
+)
+
+
+class ExportJson():
+ """Class handling the json data setting and export."""
+
+ ROBOT_LIBRARY_SCOPE = u"GLOBAL"
+
+ def __init__(self):
+ """Declare required fields, cache output dir.
+
+ Also memorize schema validator instances.
+ """
+ self.output_dir = BuiltIn().get_variable_value(u"\\${OUTPUT_DIR}", ".")
+ self.file_path = None
+ self.data = None
+ self.validators = get_validators()
+
+ def _detect_test_type(self):
+ """Return test_type, as inferred from robot test tags.
+
+ :returns: The inferred test type value.
+ :rtype: str
+ :raises RuntimeError: If the test tags do not contain expected values.
+ """
+ tags = self.data[u"tags"]
+ # First 5 options are specific for VPP tests.
+ if u"DEVICETEST" in tags:
+ test_type = u"device"
+ elif u"LDP_NGINX" in tags:
+ test_type = u"vsap"
+ elif u"HOSTSTACK" in tags:
+ test_type = u"hoststack"
+ elif u"GSO_TRUE" in tags or u"GSO_FALSE" in tags:
+ test_type = u"gso"
+ elif u"RECONF" in tags:
+ test_type = u"reconf"
+ # The remaining 3 options could also apply to DPDK and TRex tests.
+ elif u"SOAK" in tags:
+ test_type = u"soak"
+ elif u"NDRPDR" in tags:
+ test_type = u"ndrpdr"
+ elif u"MRR" in tags:
+ test_type = u"mrr"
+ else:
+ raise RuntimeError(f"Unable to infer test type from tags: {tags}")
+ return test_type
+
+ def export_pending_data(self):
+ """Write the accumulated data to disk.
+
+ Create missing directories.
+ Reset both file path and data to avoid writing multiple times.
+
+ Functions which finalize content for a given file call this,
+ so make sure each test and each non-empty suite setup or teardown
+ calls this as its last keyword.
+
+ If no file path is set, do not write anything,
+ as that is the failsafe behavior when called from an unexpected place.
+ Also do not write anything when the EXPORT_JSON constant is false.
+
+ Regardless of whether data was written, it is cleared.
+ """
+ if not Constants.EXPORT_JSON or not self.file_path:
+ self.data = None
+ self.file_path = None
+ return
+ new_file_path = write_output(self.file_path, self.data)
+ # Data is going to be cleared (as a sign that export succeeded),
+ # so this is the last chance to detect if it was for a test case.
+ is_testcase = u"result" in self.data
+ self.data = None
+ # Validation for output goes here when ready.
+ self.file_path = None
+ if is_testcase:
+ validate(new_file_path, self.validators[u"tc_info"])
+
+ def warn_on_bad_export(self):
+ """If bad state is detected, log a warning and clean up state."""
+ if self.file_path is not None or self.data is not None:
+ logger.warn(f"Previous export not clean, path {self.file_path}")
+ self.data = None
+ self.file_path = None
+
+ def start_suite_setup_export(self):
+ """Set new file path, initialize data for the suite setup.
+
+ This has to be called explicitly at start of suite setup,
+ otherwise Robot likes to postpone initialization
+ until first call by a data-adding keyword.
+
+ File path is set based on suite.
+ """
+ self.warn_on_bad_export()
+ start_time = datetime.datetime.utcnow().strftime(
+ u"%Y-%m-%dT%H:%M:%S.%fZ"
+ )
+ suite_name = BuiltIn().get_variable_value(u"\\${SUITE_NAME}")
+ suite_id = suite_name.lower().replace(u" ", u"_")
+ suite_path_part = os.path.join(*suite_id.split(u"."))
+ output_dir = self.output_dir
+ self.file_path = os.path.join(
+ output_dir, suite_path_part, u"setup.info.json"
+ )
+ self.data = dict()
+ self.data[u"version"] = Constants.MODEL_VERSION
+ self.data[u"start_time"] = start_time
+ self.data[u"suite_name"] = suite_name
+ self.data[u"suite_documentation"] = BuiltIn().get_variable_value(
+ u"\\${SUITE_DOCUMENTATION}"
+ )
+ # "end_time" and "duration" are added on flush.
+ self.data[u"hosts"] = set()
+ self.data[u"telemetry"] = list()
+
+ def start_test_export(self):
+ """Set new file path, initialize data to minimal tree for the test case.
+
+ It is assumed Robot variables DUT_TYPE and DUT_VERSION
+ are already set (in suite setup) to correct values.
+
+ This function has to be called explicitly at the start of test setup,
+ otherwise Robot likes to postpone initialization
+ until first call by a data-adding keyword.
+
+ File path is set based on suite and test.
+ """
+ self.warn_on_bad_export()
+ start_time = datetime.datetime.utcnow().strftime(
+ u"%Y-%m-%dT%H:%M:%S.%fZ"
+ )
+ suite_name = BuiltIn().get_variable_value(u"\\${SUITE_NAME}")
+ suite_id = suite_name.lower().replace(u" ", u"_")
+ suite_path_part = os.path.join(*suite_id.split(u"."))
+ test_name = BuiltIn().get_variable_value(u"\\${TEST_NAME}")
+ self.file_path = os.path.join(
+ self.output_dir, suite_path_part,
+ test_name.lower().replace(u" ", u"_") + u".info.json"
+ )
+ self.data = dict()
+ self.data[u"version"] = Constants.MODEL_VERSION
+ self.data[u"start_time"] = start_time
+ self.data[u"suite_name"] = suite_name
+ self.data[u"test_name"] = test_name
+ test_doc = BuiltIn().get_variable_value(u"\\${TEST_DOCUMENTATION}", u"")
+ self.data[u"test_documentation"] = test_doc
+ # "test_type" is added on flush.
+ # "tags" is detected and added on flush.
+ # "end_time" and "duration" is added on flush.
+ # Robot status and message are added on flush.
+ self.data[u"result"] = dict(type=u"unknown")
+ self.data[u"hosts"] = BuiltIn().get_variable_value(u"\\${hosts}")
+ self.data[u"telemetry"] = list()
+ export_dut_type_and_version()
+ export_tg_type_and_version()
+
+ def start_suite_teardown_export(self):
+ """Set new file path, initialize data for the suite teardown.
+
+ This has to be called explicitly at start of suite teardown,
+ otherwise Robot likes to postpone initialization
+ until first call by a data-adding keyword.
+
+ File path is set based on suite.
+ """
+ self.warn_on_bad_export()
+ start_time = datetime.datetime.utcnow().strftime(
+ u"%Y-%m-%dT%H:%M:%S.%fZ"
+ )
+ suite_name = BuiltIn().get_variable_value(u"\\${SUITE_NAME}")
+ suite_id = suite_name.lower().replace(u" ", u"_")
+ suite_path_part = os.path.join(*suite_id.split(u"."))
+ self.file_path = os.path.join(
+ self.output_dir, suite_path_part, u"teardown.info.json"
+ )
+ self.data = dict()
+ self.data[u"version"] = Constants.MODEL_VERSION
+ self.data[u"start_time"] = start_time
+ self.data[u"suite_name"] = suite_name
+ # "end_time" and "duration" is added on flush.
+ self.data[u"hosts"] = BuiltIn().get_variable_value(u"\\${hosts}")
+ self.data[u"telemetry"] = list()
+
+ def finalize_suite_setup_export(self):
+ """Add the missing fields to data. Do not write yet.
+
+ Should be run at the end of suite setup.
+ The write is done at next start (or at the end of global teardown).
+ """
+ end_time = datetime.datetime.utcnow().strftime(u"%Y-%m-%dT%H:%M:%S.%fZ")
+ self.data[u"hosts"] = BuiltIn().get_variable_value(u"\\${hosts}")
+ self.data[u"end_time"] = end_time
+ self.export_pending_data()
+
+ def finalize_test_export(self):
+ """Add the missing fields to data. Do not write yet.
+
+ Should be run at the end of test teardown, as the implementation
+ reads various Robot variables, some of them only available at teardown.
+
+ The write is done at next start (or at the end of global teardown).
+ """
+ end_time = datetime.datetime.utcnow().strftime(u"%Y-%m-%dT%H:%M:%S.%fZ")
+ message = BuiltIn().get_variable_value(u"\\${TEST_MESSAGE}")
+ test_tags = BuiltIn().get_variable_value(u"\\${TEST_TAGS}")
+ self.data[u"end_time"] = end_time
+ start_float = parse(self.data[u"start_time"]).timestamp()
+ end_float = parse(self.data[u"end_time"]).timestamp()
+ self.data[u"duration"] = end_float - start_float
+ self.data[u"tags"] = list(test_tags)
+ self.data[u"message"] = message
+ self.process_passed()
+ self.process_test_name()
+ self.process_results()
+ self.export_pending_data()
+
+ def finalize_suite_teardown_export(self):
+ """Add the missing fields to data. Do not write yet.
+
+ Should be run at the end of suite teardown
+ (but before the explicit write in the global suite teardown).
+ The write is done at next start (or explicitly for global teardown).
+ """
+ end_time = datetime.datetime.utcnow().strftime(u"%Y-%m-%dT%H:%M:%S.%fZ")
+ self.data[u"end_time"] = end_time
+ self.export_pending_data()
+
+ def process_test_name(self):
+ """Replace raw test name with short and long test name and set
+ test_type.
+
+ Perform in-place edits on the data dictionary.
+ Remove raw suite_name and test_name, they are not published.
+ Return early if the data is not for a test case.
+ Insert test ID and long and short test name into the data.
+ Besides suite_name and test_name, also test tags are read.
+
+ Short test name is basically a suite tag, but with NIC driver prefix,
+ if the NIC driver used is not the default one (drv_vfio_pci for VPP
+ tests).
+
+ Long test name has the following form:
+ {nic_short_name}-{frame_size}-{threads_and_cores}-{suite_part}
+ Lookup in test tags is needed to get the threads value.
+ The threads_and_cores part may be empty, e.g. for TRex tests.
+
+ Test ID has the form {suite_name}.{test_name} where the two names come
+ from Robot variables, converted to lower case, with spaces replaced by
+ underscores.
+
+ Test type is set in an internal function.
+
+ :raises RuntimeError: If the data does not contain expected values.
+ """
+ suite_part = self.data.pop(u"suite_name").lower().replace(u" ", u"_")
+ if u"test_name" not in self.data:
+ # There will be no test_id, provide suite_id instead.
+ self.data[u"suite_id"] = suite_part
+ return
+ test_part = self.data.pop(u"test_name").lower().replace(u" ", u"_")
+ self.data[u"test_id"] = f"{suite_part}.{test_part}"
+ tags = self.data[u"tags"]
+ # Test name does not contain thread count.
+ subparts = test_part.split(u"c-", 1)
+ if len(subparts) < 2 or subparts[0][-2:-1] != u"-":
+ # Physical core count not detected, assume it is a TRex test.
+ if u"--" not in test_part:
+ raise RuntimeError(f"Cores not found for {subparts}")
+ short_name = test_part.split(u"--", 1)[1]
+ else:
+ short_name = subparts[1]
+ # Add threads to test_part.
+ core_part = subparts[0][-1] + u"c"
+ for tag in tags:
+ tag = tag.lower()
+ if len(tag) == 4 and core_part == tag[2:] and tag[1] == u"t":
+ test_part = test_part.replace(f"-{core_part}-", f"-{tag}-")
+ break
+ else:
+ raise RuntimeError(
+ f"Threads not found for {test_part} tags {tags}"
+ )
+ # For long name we need NIC model, which is only in suite name.
+ last_suite_part = suite_part.split(u".")[-1]
+ # Short name happens to be the suffix we want to ignore.
+ prefix_part = last_suite_part.split(short_name)[0]
+ # Also remove the trailing dash.
+ prefix_part = prefix_part[:-1]
+ # Throw away possible link prefix such as "1n1l-".
+ nic_code = prefix_part.split(u"-", 1)[-1]
+ nic_short = Constants.NIC_CODE_TO_SHORT_NAME[nic_code]
+ long_name = f"{nic_short}-{test_part}"
+ # Set test type.
+ test_type = self._detect_test_type()
+ self.data[u"test_type"] = test_type
+ # Remove trailing test type from names (if present).
+ short_name = short_name.split(f"-{test_type}")[0]
+ long_name = long_name.split(f"-{test_type}")[0]
+ # Store names.
+ self.data[u"test_name_short"] = short_name
+ self.data[u"test_name_long"] = long_name
+
+ def process_passed(self):
+ """Process the test status information as boolean.
+
+ Boolean is used to make post processing more efficient.
+ In case the test status is PASS, we will truncate the test message.
+ """
+ status = BuiltIn().get_variable_value(u"\\${TEST_STATUS}")
+ if status is not None:
+ self.data[u"passed"] = (status == u"PASS")
+ if self.data[u"passed"]:
+ # Also truncate success test messages.
+ self.data[u"message"] = u""
+
+ def process_results(self):
+ """Process measured results.
+
+ Results are processed here so that consumers do not need to repeat
+ the post-processing, making them more efficient to consume.
+ """
+ if u"result" not in self.data:
+ return
+ result_node = self.data[u"result"]
+ result_type = result_node[u"type"]
+ if result_type == u"unknown":
+ # Device or something else not supported.
+ return
+
+ # Compute avg and stdev for mrr.
+ if result_type == u"mrr":
+ rate_node = result_node[u"receive_rate"][u"rate"]
+ stats = AvgStdevStats.for_runs(rate_node[u"values"])
+ rate_node[u"avg"] = stats.avg
+ rate_node[u"stdev"] = stats.stdev
+ return
+
+ # Multiple processing steps for ndrpdr.
+ if result_type != u"ndrpdr":
+ return
+ # Filter out invalid latencies.
+ for which_key in (u"latency_forward", u"latency_reverse"):
+ if which_key not in result_node:
+ # Probably just an unidir test.
+ continue
+ for load in (u"pdr_0", u"pdr_10", u"pdr_50", u"pdr_90"):
+ if result_node[which_key][load][u"max"] <= 0:
+ # One invalid number is enough to remove all loads.
+ break
+ else:
+ # No break means all numbers are ok, nothing to do here.
+ continue
+ # Break happened, something is invalid, remove all loads.
+ result_node.pop(which_key)
+ return
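For orientation, below is a minimal sketch of what the mrr branch of process_results() above does. The sample dictionary and its numbers are made up for illustration; only the AvgStdevStats call and the resulting "avg"/"stdev" fields mirror the code in this diff.

    from resources.libraries.python.jumpavg.AvgStdevStats import AvgStdevStats

    # Hypothetical "result" node, shaped like the one the export keywords build.
    result_node = {
        u"type": u"mrr",
        u"receive_rate": {u"rate": {u"unit": u"pps", u"values": [2.1e6, 2.0e6, 2.2e6]}},
    }
    rate_node = result_node[u"receive_rate"][u"rate"]
    stats = AvgStdevStats.for_runs(rate_node[u"values"])
    rate_node[u"avg"] = stats.avg
    rate_node[u"stdev"] = stats.stdev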
diff --git a/resources/libraries/python/model/ExportLog.py b/resources/libraries/python/model/ExportLog.py
deleted file mode 100644
index e02eef63c5..0000000000
--- a/resources/libraries/python/model/ExportLog.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module with keywords that publish metric and other log events.
-"""
-
-import datetime
-
-from resources.libraries.python.model.util import get_export_data
-
-
-def export_ssh_command(host, port, command):
- """Add a log item about SSH command execution starting.
-
- The log item is present only in raw output.
- Result arrives in a separate log item.
- Log level is always DEBUG.
-
- The command is stored as "data" (not "msg") as in some cases
- the command can be too long to act as a message.
-
- The host is added to the info set of hosts.
-
- :param host: Node "host" attribute, usually its IPv4 address.
- :param port: SSH port number to use when connecting to the host.
- :param command: Serialized bash command to execute.
- :type host: str
- :type port: int
- :type command: str
- """
- timestamp = datetime.datetime.utcnow().strftime(u"%Y-%m-%dT%H:%M:%S.%fZ")
- data = get_export_data()
- ssh_record = dict(
- source_type=u"host,port",
- source_id=dict(host=host, port=port),
- msg_type=u"ssh_command",
- log_level=u"DEBUG",
- timestamp=timestamp,
- msg="",
- data=str(command),
- )
- data[u"hosts"].add(host)
- data[u"log"].append(ssh_record)
-
-
-def export_ssh_result(host, port, code, stdout, stderr, duration):
- """Add a log item about ssh execution result.
-
- Only for raw output log.
-
- There is no easy way to pair with the corresponding command,
- but usually there is only one SSH session for given host and port.
- The duration value may give a hint if that is not the case.
-
- Message is empty, data has fields "rc", "stdout", "stderr" and "duration".
- Log level is always DEBUG.
-
- The host is NOT added to the info set of hosts, as each result
- comes after a command.
-
- TODO: Do not require duration, find preceding ssh command in log.
- Reason: Pylint complains about too many arguments.
- Alternative: Define type for SSH endopoint (and use that instead host+port).
-
- :param host: Node "host" attribute, usually its IPv4 address.
- :param port: SSH port number to use when connecting to the host.
- :param code: Bash return code, e.g. 0 for success.
- :param stdout: Captured standard output of the command execution.
- :param stderr: Captured error output of the command execution.
- :param duration: How long has the command been executing, in seconds.
- :type host: str
- :type port: int
- :type code: int
- :type stdout: str
- :type stderr: str
- :type duration: float
- """
- timestamp = datetime.datetime.utcnow().strftime(u"%Y-%m-%dT%H:%M:%S.%fZ")
- data = get_export_data()
- ssh_record = dict(
- source_type=u"host,port",
- source_id=dict(host=host, port=port),
- msg_type=u"ssh_result",
- log_level=u"DEBUG",
- timestamp=timestamp,
- msg=u"",
- data=dict(
- rc=int(code),
- stdout=str(stdout),
- stderr=str(stderr),
- duration=float(duration),
- ),
- )
- data[u"log"].append(ssh_record)
-
-
-def export_ssh_timeout(host, port, stdout, stderr, duration):
- """Add a log item about ssh execution timing out.
-
- Only for debug log.
-
- There is no easy way to pair with the corresponding command,
- but usually there is only one SSH session for given host and port.
-
- Message is empty, data has fields "stdout", "stderr" and "duration".
- The duration value may give a hint if that is not the case.
- Log level is always DEBUG.
-
- The host is NOT added to the info set of hosts, as each timeout
- comes after a command.
-
- :param host: Node "host" attribute, usually its IPv4 address.
- :param port: SSH port number to use when connecting to the host.
- :param stdout: Captured standard output of the command execution so far.
- :param stderr: Captured error output of the command execution so far.
- :param duration: How long has the command been executing, in seconds.
- :type host: str
- :type port: int
- :type stdout: str
- :type stderr: str
- :type duration: float
- """
- timestamp = datetime.datetime.utcnow().strftime(u"%Y-%m-%dT%H:%M:%S.%fZ")
- data = get_export_data()
- ssh_record = dict(
- source_type=u"host,port",
- source_id=dict(host=host, port=port),
- msg_type=u"ssh_timeout",
- log_level=u"DEBUG",
- timestamp=timestamp,
- msg=u"",
- data=dict(
- stdout=str(stdout),
- stderr=str(stderr),
- duration=float(duration),
- ),
- )
- data[u"log"].append(ssh_record)
diff --git a/resources/libraries/python/model/ExportResult.py b/resources/libraries/python/model/ExportResult.py
index 16c6b89fb3..dbe2914565 100644
--- a/resources/libraries/python/model/ExportResult.py
+++ b/resources/libraries/python/model/ExportResult.py
@@ -114,7 +114,6 @@ def append_mrr_value(mrr_value, unit):
rate_node[u"unit"] = str(unit)
values_list = descend(rate_node, u"values", list)
values_list.append(float(mrr_value))
- # TODO: Fill in the bandwidth part for pps?
def export_search_bound(text, value, unit, bandwidth=None):
diff --git a/resources/libraries/python/model/mem2raw.py b/resources/libraries/python/model/MemDump.py
index 543ee935e2..bf8835244b 100644
--- a/resources/libraries/python/model/mem2raw.py
+++ b/resources/libraries/python/model/MemDump.py
@@ -11,17 +11,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-"""Module for converting in-memory data into raw JSON output.
+"""Module for converting in-memory data into JSON output.
-CSIT and VPP PAPI are using custom data types
-that are not directly serializable into JSON.
+CSIT and VPP PAPI are using custom data types that are not directly serializable
+into JSON.
-Thus, before writing the raw outpt onto disk,
-the data is recursively converted to equivalent serializable types,
-in extreme cases replaced by string representation.
+Thus, before writing the output onto disk, the data is recursively converted to
+equivalent serializable types, in extreme cases replaced by string
+representation.
-Validation is outside the scope of this module,
-as it should use the JSON data read from disk.
+Validation is outside the scope of this module, as it should use the JSON data
+read from disk.
"""
import json
@@ -29,6 +29,7 @@ import os
from collections.abc import Iterable, Mapping, Set
from enum import IntFlag
+from dateutil.parser import parse
def _pre_serialize_recursive(data):
@@ -107,7 +108,7 @@ def _pre_serialize_root(data):
to make it more human friendly.
We are moving "version" to the top,
followed by start time and end time.
- and various long fields (such as "log") to the bottom.
+ and various long fields to the bottom.
Some edits are done in-place, do not trust the argument value after calling.
@@ -122,24 +123,72 @@ def _pre_serialize_root(data):
if not isinstance(data, dict):
raise RuntimeError(f"Root data object needs to be a dict: {data!r}")
data = _pre_serialize_recursive(data)
- log = data.pop(u"log")
new_data = dict(version=data.pop(u"version"))
new_data[u"start_time"] = data.pop(u"start_time")
new_data[u"end_time"] = data.pop(u"end_time")
new_data.update(data)
- new_data[u"log"] = log
return new_data
-def write_raw_output(raw_file_path, raw_data):
+def _merge_into_suite_info_file(teardown_path):
+ """Move setup and teardown data into a singe file, remove old files.
+
+ The caller has to confirm the argument is correct, e.g. ending in
+ "/teardown.info.json".
+
+ :param teardown_path: Local filesystem path to teardown file.
+ :type teardown_path: str
+ :returns: Local filesystem path to newly created suite file.
+ :rtype: str
+ """
+ # Manual right replace: https://stackoverflow.com/a/9943875
+ setup_path = u"setup".join(teardown_path.rsplit(u"teardown", 1))
+ with open(teardown_path, u"rt", encoding="utf-8") as file_in:
+ teardown_data = json.load(file_in)
+ # Transforming setup data into suite data.
+ with open(setup_path, u"rt", encoding="utf-8") as file_in:
+ suite_data = json.load(file_in)
+
+ end_time = teardown_data[u"end_time"]
+ suite_data[u"end_time"] = end_time
+ start_float = parse(suite_data[u"start_time"]).timestamp()
+ end_float = parse(suite_data[u"end_time"]).timestamp()
+ suite_data[u"duration"] = end_float - start_float
+ setup_telemetry = suite_data.pop(u"telemetry")
+ suite_data[u"setup_telemetry"] = setup_telemetry
+ suite_data[u"teardown_telemetry"] = teardown_data[u"telemetry"]
+
+ suite_path = u"suite".join(teardown_path.rsplit(u"teardown", 1))
+ with open(suite_path, u"wt", encoding="utf-8") as file_out:
+ json.dump(suite_data, file_out, indent=1)
+ # We moved everything useful from temporary setup/teardown info files.
+ os.remove(setup_path)
+ os.remove(teardown_path)
+
+ return suite_path
+
+
+def write_output(file_path, data):
"""Prepare data for serialization and dump into a file.
Ancestor directories are created if needed.
- :param to_raw_path: Local filesystem path, including the file name.
- :type to_raw_path: str
+ :param file_path: Local filesystem path, including the file name.
+ :param data: Root data to make serializable, dictized when applicable.
+ :type file_path: str
+ :type data: dict
"""
- raw_data = _pre_serialize_root(raw_data)
- os.makedirs(os.path.dirname(raw_file_path), exist_ok=True)
- with open(raw_file_path, u"wt", encoding="utf-8") as file_out:
- json.dump(raw_data, file_out, indent=1)
+ data = _pre_serialize_root(data)
+
+ # Let's move telemetry to the end.
+ telemetry = data.pop(u"telemetry")
+ data[u"telemetry"] = telemetry
+
+ os.makedirs(os.path.dirname(file_path), exist_ok=True)
+ with open(file_path, u"wt", encoding="utf-8") as file_out:
+ json.dump(data, file_out, indent=1)
+
+ if file_path.endswith(u"/teardown.info.json"):
+ file_path = _merge_into_suite_info_file(file_path)
+
+ return file_path
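A hedged usage sketch of write_output() defined above. The directory, the field values, and the presence of a matching setup.info.json next to the teardown file are assumptions for illustration; the required keys and the merge-on-teardown behaviour follow from the code in this diff.

    from resources.libraries.python.model.MemDump import write_output

    data = {
        u"version": u"1.0.0",  # assumed model version string
        u"start_time": u"2022-03-01T10:00:00.000000Z",
        u"end_time": u"2022-03-01T10:05:00.000000Z",
        u"suite_name": u"example suite",
        u"hosts": {u"10.0.0.1"},  # non-JSON types are converted before dumping
        u"telemetry": [],
    }
    # For a path ending in "/teardown.info.json", setup and teardown data are
    # merged into suite.info.json and the returned path points to that file.
    new_path = write_output(u"/tmp/output/example_suite/teardown.info.json", data)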
diff --git a/resources/libraries/python/model/export_json.py b/resources/libraries/python/model/export_json.py
deleted file mode 100644
index 840c49fa70..0000000000
--- a/resources/libraries/python/model/export_json.py
+++ /dev/null
@@ -1,236 +0,0 @@
-# Copyright (c) 2022 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module tracking json in-memory data and saving it to files.
-
-The current implementation tracks data for raw output,
-and info output is created from raw output on disk (see raw2info module).
-Raw file contains all log items but no derived quantities,
-info file contains only important log items but also derived quantities.
-The overlap between two files is big.
-
-Each test case, suite setup (hierarchical) and teardown has its own file pair.
-
-Validation is performed for output files with available JSON schema.
-Validation is performed in data deserialized from disk,
-as serialization might have introduced subtle errors.
-"""
-
-import datetime
-import os.path
-
-from robot.api import logger
-from robot.libraries.BuiltIn import BuiltIn
-
-from resources.libraries.python.Constants import Constants
-from resources.libraries.python.model.ExportResult import (
- export_dut_type_and_version, export_tg_type_and_version
-)
-from resources.libraries.python.model.mem2raw import write_raw_output
-from resources.libraries.python.model.raw2info import convert_content_to_info
-from resources.libraries.python.model.validate import (get_validators, validate)
-
-
-class export_json():
- """Class handling the json data setting and export."""
-
- ROBOT_LIBRARY_SCOPE = u"GLOBAL"
-
- def __init__(self):
- """Declare required fields, cache output dir.
-
- Also memorize schema validator instances.
- """
- self.output_dir = BuiltIn().get_variable_value(u"\\${OUTPUT_DIR}", ".")
- self.raw_file_path = None
- self.raw_data = None
- self.validators = get_validators()
-
- def export_pending_data(self):
- """Write the accumulated data to disk.
-
- Create missing directories.
- Reset both file path and data to avoid writing multiple times.
-
- Functions which finalize content for given file are calling this,
- so make sure each test and non-empty suite setup or teardown
- is calling this as their last keyword.
-
- If no file path is set, do not write anything,
- as that is the failsafe behavior when caller from unexpected place.
- Aso do not write anything when EXPORT_JSON constant is false.
-
- Regardless of whether data was written, it is cleared.
- """
- if not Constants.EXPORT_JSON or not self.raw_file_path:
- self.raw_data = None
- self.raw_file_path = None
- return
- write_raw_output(self.raw_file_path, self.raw_data)
- # Raw data is going to be cleared (as a sign that raw export succeeded),
- # so this is the last chance to detect if it was for a test case.
- is_testcase = u"result" in self.raw_data
- self.raw_data = None
- # Validation for raw output goes here when ready.
- info_file_path = convert_content_to_info(self.raw_file_path)
- self.raw_file_path = None
- # If "result" is missing from info content,
- # it could be a bug in conversion from raw test case content,
- # so instead of that we use the flag detected earlier.
- if is_testcase:
- validate(info_file_path, self.validators[u"tc_info"])
-
- def warn_on_bad_export(self):
- """If bad state is detected, log a warning and clean up state."""
- if self.raw_file_path is not None or self.raw_data is not None:
- logger.warn(f"Previous export not clean, path {self.raw_file_path}")
- self.raw_data = None
- self.raw_file_path = None
-
- def start_suite_setup_export(self):
- """Set new file path, initialize data for the suite setup.
-
- This has to be called explicitly at start of suite setup,
- otherwise Robot likes to postpone initialization
- until first call by a data-adding keyword.
-
- File path is set based on suite.
- """
- self.warn_on_bad_export()
- start_time = datetime.datetime.utcnow().strftime(
- u"%Y-%m-%dT%H:%M:%S.%fZ"
- )
- suite_name = BuiltIn().get_variable_value(u"\\${SUITE_NAME}")
- suite_id = suite_name.lower().replace(u" ", u"_")
- suite_path_part = os.path.join(*suite_id.split(u"."))
- output_dir = self.output_dir
- self.raw_file_path = os.path.join(
- output_dir, suite_path_part, u"setup.raw.json"
- )
- self.raw_data = dict()
- self.raw_data[u"version"] = Constants.MODEL_VERSION
- self.raw_data[u"start_time"] = start_time
- self.raw_data[u"suite_name"] = suite_name
- self.raw_data[u"suite_documentation"] = BuiltIn().get_variable_value(
- u"\\${SUITE_DOCUMENTATION}"
- )
- # "end_time" and "duration" is added on flush.
- self.raw_data[u"hosts"] = set()
- self.raw_data[u"log"] = list()
-
- def start_test_export(self):
- """Set new file path, initialize data to minimal tree for the test case.
-
- It is assumed Robot variables DUT_TYPE and DUT_VERSION
- are already set (in suite setup) to correct values.
-
- This function has to be called explicitly at the start of test setup,
- otherwise Robot likes to postpone initialization
- until first call by a data-adding keyword.
-
- File path is set based on suite and test.
- """
- self.warn_on_bad_export()
- start_time = datetime.datetime.utcnow().strftime(
- u"%Y-%m-%dT%H:%M:%S.%fZ"
- )
- suite_name = BuiltIn().get_variable_value(u"\\${SUITE_NAME}")
- suite_id = suite_name.lower().replace(u" ", u"_")
- suite_path_part = os.path.join(*suite_id.split(u"."))
- test_name = BuiltIn().get_variable_value(u"\\${TEST_NAME}")
- self.raw_file_path = os.path.join(
- self.output_dir, suite_path_part,
- test_name.lower().replace(u" ", u"_") + u".raw.json"
- )
- self.raw_data = dict()
- self.raw_data[u"version"] = Constants.MODEL_VERSION
- self.raw_data[u"start_time"] = start_time
- self.raw_data[u"suite_name"] = suite_name
- self.raw_data[u"test_name"] = test_name
- test_doc = BuiltIn().get_variable_value(u"\\${TEST_DOCUMENTATION}", u"")
- self.raw_data[u"test_documentation"] = test_doc
- # "test_type" is added when converting to info.
- # "tags" is detected and added on flush.
- # "end_time" and "duration" is added on flush.
- # Robot status and message are added on flush.
- self.raw_data[u"result"] = dict(type=u"unknown")
- self.raw_data[u"hosts"] = set()
- self.raw_data[u"log"] = list()
- export_dut_type_and_version()
- export_tg_type_and_version()
-
- def start_suite_teardown_export(self):
- """Set new file path, initialize data for the suite teardown.
-
- This has to be called explicitly at start of suite teardown,
- otherwise Robot likes to postpone initialization
- until first call by a data-adding keyword.
-
- File path is set based on suite.
- """
- self.warn_on_bad_export()
- start_time = datetime.datetime.utcnow().strftime(
- u"%Y-%m-%dT%H:%M:%S.%fZ"
- )
- suite_name = BuiltIn().get_variable_value(u"\\${SUITE_NAME}")
- suite_id = suite_name.lower().replace(u" ", u"_")
- suite_path_part = os.path.join(*suite_id.split(u"."))
- self.raw_file_path = os.path.join(
- self.output_dir, suite_path_part, u"teardown.raw.json"
- )
- self.raw_data = dict()
- self.raw_data[u"version"] = Constants.MODEL_VERSION
- self.raw_data[u"start_time"] = start_time
- self.raw_data[u"suite_name"] = suite_name
- # "end_time" and "duration" is added on flush.
- self.raw_data[u"hosts"] = set()
- self.raw_data[u"log"] = list()
-
- def finalize_suite_setup_export(self):
- """Add the missing fields to data. Do not write yet.
-
- Should be run at the end of suite setup.
- The write is done at next start (or at the end of global teardown).
- """
- end_time = datetime.datetime.utcnow().strftime(u"%Y-%m-%dT%H:%M:%S.%fZ")
- self.raw_data[u"end_time"] = end_time
- self.export_pending_data()
-
- def finalize_test_export(self):
- """Add the missing fields to data. Do not write yet.
-
- Should be at the end of test teardown, as the implementation
- reads various Robot variables, some of them only available at teardown.
-
- The write is done at next start (or at the end of global teardown).
- """
- end_time = datetime.datetime.utcnow().strftime(u"%Y-%m-%dT%H:%M:%S.%fZ")
- message = BuiltIn().get_variable_value(u"\\${TEST_MESSAGE}")
- status = BuiltIn().get_variable_value(u"\\${TEST_STATUS}")
- test_tags = BuiltIn().get_variable_value(u"\\${TEST_TAGS}")
- self.raw_data[u"end_time"] = end_time
- self.raw_data[u"tags"] = list(test_tags)
- self.raw_data[u"status"] = status
- self.raw_data[u"message"] = message
- self.export_pending_data()
-
- def finalize_suite_teardown_export(self):
- """Add the missing fields to data. Do not write yet.
-
- Should be run at the end of suite teardown
- (but before the explicit write in the global suite teardown).
- The write is done at next start (or explicitly for global teardown).
- """
- end_time = datetime.datetime.utcnow().strftime(u"%Y-%m-%dT%H:%M:%S.%fZ")
- self.raw_data[u"end_time"] = end_time
- self.export_pending_data()
diff --git a/resources/libraries/python/model/raw2info.py b/resources/libraries/python/model/raw2info.py
deleted file mode 100644
index bd7d0e3cf1..0000000000
--- a/resources/libraries/python/model/raw2info.py
+++ /dev/null
@@ -1,294 +0,0 @@
-# Copyright (c) 2022 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module facilitating conversion from raw outputs into info outputs."""
-
-import copy
-import json
-import os
-
-import dateutil.parser
-
-from resources.libraries.python.Constants import Constants
-from resources.libraries.python.jumpavg.AvgStdevStats import AvgStdevStats
-
-
-def _raw_to_info_path(raw_path):
- """Compute path for info output corresponding to given raw output.
-
- :param raw_path: Local filesystem path to read raw JSON data from.
- :type raw_path: str
- :returns: Local filesystem path to write info JSON content to.
- :rtype: str
- :raises RuntimeError: If the input path does not meet all expectations.
- """
- raw_extension = u".raw.json"
- tmp_parts = raw_path.split(raw_extension)
- if len(tmp_parts) != 2 or tmp_parts[1] != u"":
- raise RuntimeError(f"Not good extension {raw_extension}: {raw_path}")
- info_path = tmp_parts[0] + u".info.json"
- return info_path
-
-
-def _process_test_name(data):
- """Replace raw test name with short and long test name and set test_type.
-
- Perform in-place edits on the data dictionary.
- Remove raw suite_name and test_name, they are not part of info schema.
- Return early if the data is not for test case.
- Inserttest ID and long and short test name into the data.
- Besides suite_name and test_name, also test tags are read.
-
- Short test name is basically a suite tag, but with NIC driver prefix,
- if the NIC driver used is not the default one (drv_vfio_pci for VPP tests).
-
- Long test name has the following form:
- {nic_short_name}-{frame_size}-{threads_and_cores}-{suite_part}
- Lookup in test tags is needed to get the threads value.
- The threads_and_cores part may be empty, e.g. for TRex tests.
-
- Test ID has form {suite_name}.{test_name} where the two names come from
- Robot variables, converted to lower case and spaces replaces by undescores.
-
- Test type is set in an internal function.
-
- :param data: Raw data, perhaps some fields converted into info data already.
- :type data: dict
- :raises RuntimeError: If the raw data does not contain expected values.
- """
- suite_part = data.pop(u"suite_name").lower().replace(u" ", u"_")
- if u"test_name" not in data:
- # There will be no test_id, provide suite_id instead.
- data[u"suite_id"] = suite_part
- return
- test_part = data.pop(u"test_name").lower().replace(u" ", u"_")
- data[u"test_id"] = f"{suite_part}.{test_part}"
- tags = data[u"tags"]
- # Test name does not contain thread count.
- subparts = test_part.split(u"c-", 1)
- if len(subparts) < 2 or subparts[0][-2:-1] != u"-":
- # Physical core count not detected, assume it is a TRex test.
- if u"--" not in test_part:
- raise RuntimeError(f"Cores not found for {subparts}")
- short_name = test_part.split(u"--", 1)[1]
- else:
- short_name = subparts[1]
- # Add threads to test_part.
- core_part = subparts[0][-1] + u"c"
- for tag in tags:
- tag = tag.lower()
- if len(tag) == 4 and core_part == tag[2:] and tag[1] == u"t":
- test_part = test_part.replace(f"-{core_part}-", f"-{tag}-")
- break
- else:
- raise RuntimeError(f"Threads not found for {test_part} tags {tags}")
- # For long name we need NIC model, which is only in suite name.
- last_suite_part = suite_part.split(u".")[-1]
- # Short name happens to be the suffix we want to ignore.
- prefix_part = last_suite_part.split(short_name)[0]
- # Also remove the trailing dash.
- prefix_part = prefix_part[:-1]
- # Throw away possible link prefix such as "1n1l-".
- nic_code = prefix_part.split(u"-", 1)[-1]
- nic_short = Constants.NIC_CODE_TO_SHORT_NAME[nic_code]
- long_name = f"{nic_short}-{test_part}"
- # Set test type.
- test_type = _detect_test_type(data)
- data[u"test_type"] = test_type
- # Remove trailing test type from names (if present).
- short_name = short_name.split(f"-{test_type}")[0]
- long_name = long_name.split(f"-{test_type}")[0]
- # Store names.
- data[u"test_name_short"] = short_name
- data[u"test_name_long"] = long_name
-
-
-def _detect_test_type(data):
- """Return test_type, as inferred from robot test tags.
-
- :param data: Raw data, perhaps some fields converted into info data already.
- :type data: dict
- :returns: The inferred test type value.
- :rtype: str
- :raises RuntimeError: If the test tags does not contain expected values.
- """
- tags = data[u"tags"]
- # First 5 options are specific for VPP tests.
- if u"DEVICETEST" in tags:
- test_type = u"device"
- elif u"LDP_NGINX" in tags:
- test_type = u"vsap"
- elif u"HOSTSTACK" in tags:
- test_type = u"hoststack"
- elif u"GSO_TRUE" in tags or u"GSO_FALSE" in tags:
- test_type = u"gso"
- elif u"RECONF" in tags:
- test_type = u"reconf"
- # The remaining 3 options could also apply to DPDK and TRex tests.
- elif u"SOAK" in tags:
- test_type = u"soak"
- elif u"NDRPDR" in tags:
- test_type = u"ndrpdr"
- elif u"MRR" in tags:
- test_type = u"mrr"
- else:
- raise RuntimeError(f"Unable to infer test type from tags: {tags}")
- return test_type
-
-
-def _convert_to_info_in_memory(data):
- """Perform all changes needed for processing of data, return new data.
-
- Data is assumed to be valid for raw schema, so no exceptions are expected.
- The original argument object is not edited,
- a new copy is created for edits and returned,
- because there is no easy way to sort keys in-place.
-
- :param data: The whole composite object to filter and enhance.
- :type data: dict
- :returns: New object with the edited content.
- :rtype: dict
- """
- data = copy.deepcopy(data)
-
- # Drop any SSH log items.
- data[u"log"] = list()
-
- # Duration is computed for every file.
- start_float = dateutil.parser.parse(data[u"start_time"]).timestamp()
- end_float = dateutil.parser.parse(data[u"end_time"]).timestamp()
- data[u"duration"] = end_float - start_float
-
- # Reorder impotant fields to the top.
- sorted_data = dict(version=data.pop(u"version"))
- sorted_data[u"duration"] = data.pop(u"duration")
- sorted_data[u"start_time"] = data.pop(u"start_time")
- sorted_data[u"end_time"] = data.pop(u"end_time")
- sorted_data.update(data)
- data = sorted_data
- # TODO: Do we care about the order of subsequently added fields?
-
- # Convert status into a boolean.
- status = data.pop(u"status", None)
- if status is not None:
- data[u"passed"] = (status == u"PASS")
- if data[u"passed"]:
- # Also truncate success test messages.
- data[u"message"] = u""
-
- # Replace raw names with processed ones, set test_id and test_type.
- _process_test_name(data)
-
- # The rest is only relevant for test case outputs.
- if u"result" not in data:
- return data
- result_node = data[u"result"]
- result_type = result_node[u"type"]
- if result_type == u"unknown":
- # Device or something else not supported.
- return data
-
- # More processing depending on result type. TODO: Separate functions?
-
- # Compute avg and stdev for mrr.
- if result_type == u"mrr":
- rate_node = result_node[u"receive_rate"][u"rate"]
- stats = AvgStdevStats.for_runs(rate_node[u"values"])
- rate_node[u"avg"] = stats.avg
- rate_node[u"stdev"] = stats.stdev
-
- # Multiple processing steps for ndrpdr.
- if result_type != u"ndrpdr":
- return data
- # Filter out invalid latencies.
- for which_key in (u"latency_forward", u"latency_reverse"):
- if which_key not in result_node:
- # Probably just an unidir test.
- continue
- for load in (u"pdr_0", u"pdr_10", u"pdr_50", u"pdr_90"):
- if result_node[which_key][load][u"max"] <= 0:
- # One invalid number is enough to remove all loads.
- break
- else:
- # No break means all numbers are ok, nothing to do here.
- continue
- # Break happened, something is invalid, remove all loads.
- result_node.pop(which_key)
-
- return data
-
-
-def _merge_into_suite_info_file(teardown_info_path):
- """Move setup and teardown data into a singe file, remove old files.
-
- The caller has to confirm the argument is correct, e.g. ending in
- "/teardown.info.json".
-
- :param teardown_info_path: Local filesystem path to teardown info file.
- :type teardown_info_path: str
- :returns: Local filesystem path to newly created suite info file.
- :rtype: str
- """
- # Manual right replace: https://stackoverflow.com/a/9943875
- setup_info_path = u"setup".join(teardown_info_path.rsplit(u"teardown", 1))
- with open(teardown_info_path, u"rt", encoding="utf-8") as file_in:
- teardown_data = json.load(file_in)
- # Transforming setup data into suite data.
- with open(setup_info_path, u"rt", encoding="utf-8") as file_in:
- suite_data = json.load(file_in)
-
- end_time = teardown_data[u"end_time"]
- suite_data[u"end_time"] = end_time
- start_float = dateutil.parser.parse(suite_data[u"start_time"]).timestamp()
- end_float = dateutil.parser.parse(suite_data[u"end_time"]).timestamp()
- suite_data[u"duration"] = end_float - start_float
- setup_log = suite_data.pop(u"log")
- suite_data[u"setup_log"] = setup_log
- suite_data[u"teardown_log"] = teardown_data[u"log"]
-
- suite_info_path = u"suite".join(teardown_info_path.rsplit(u"teardown", 1))
- with open(suite_info_path, u"wt", encoding="utf-8") as file_out:
- json.dump(suite_data, file_out, indent=1)
- # We moved everything useful from temporary setup/teardown info files.
- os.remove(setup_info_path)
- os.remove(teardown_info_path)
-
- return suite_info_path
-
-
-def convert_content_to_info(from_raw_path):
- """Read raw output, perform filtering, add derivatves, write info output.
-
- Directory path is created if missing.
-
- When processing teardown, create also suite output using setup info.
-
- :param from_raw_path: Local filesystem path to read raw JSON data from.
- :type from_raw_path: str
- :returns: Local filesystem path to written info JSON file.
- :rtype: str
- :raises RuntimeError: If path or content do not match expectations.
- """
- to_info_path = _raw_to_info_path(from_raw_path)
- with open(from_raw_path, u"rt", encoding="utf-8") as file_in:
- data = json.load(file_in)
-
- data = _convert_to_info_in_memory(data)
-
- with open(to_info_path, u"wt", encoding="utf-8") as file_out:
- json.dump(data, file_out, indent=1)
- if to_info_path.endswith(u"/teardown.info.json"):
- to_info_path = _merge_into_suite_info_file(to_info_path)
- # TODO: Return both paths for validation?
-
- return to_info_path
diff --git a/resources/libraries/python/model/util.py b/resources/libraries/python/model/util.py
index 879f1f28b1..ff5042fa6c 100644
--- a/resources/libraries/python/model/util.py
+++ b/resources/libraries/python/model/util.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2022 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -52,7 +52,7 @@ def descend(parent_node, key, default_factory=None):
def get_export_data():
- """Return raw_data member of export_json library instance.
+ """Return data member of ExportJson library instance.
This assumes the data has been initialized already.
Return None if Robot is not running.
@@ -62,8 +62,8 @@ def get_export_data():
:raises AttributeError: If library is not imported yet.
"""
instance = BuiltIn().get_library_instance(
- u"resources.libraries.python.model.export_json"
+ u"resources.libraries.python.model.ExportJson"
)
if instance is None:
return None
- return instance.raw_data
+ return instance.data
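A hedged sketch of how a keyword library can reach the shared in-memory data through get_export_data(). It assumes the ExportJson library is imported by Robot and one of its start_* keywords has already initialized the data; the metric string is a made-up example.

    from resources.libraries.python.model.util import get_export_data

    data = get_export_data()
    if data is not None:
        # "telemetry" is initialized as a list by the start_* keywords in ExportJson.
        data[u"telemetry"].append(u'example_metric{node="dut1"} 1.0')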