-rw-r--r--  resources/tools/presentation/convert_xml_json.py                        | 475
-rw-r--r--  resources/tools/presentation/generator_files.py                         |   7
-rw-r--r--  resources/tools/presentation/generator_plots.py                         |  22
-rw-r--r--  resources/tools/presentation/generator_tables.py                        |  18
-rw-r--r--  resources/tools/presentation/input_data_files.py                        |  32
-rw-r--r--  resources/tools/presentation/input_data_parser.py                       | 184
-rw-r--r--  resources/tools/presentation/json/template_0.1.0.json                   |  25
-rw-r--r--  resources/tools/presentation/pal.py                                     |   6
-rwxr-xr-x  resources/tools/presentation/run_convert.sh                             |  35
-rwxr-xr-x  resources/tools/presentation/run_cpta.sh                                |   2
-rwxr-xr-x  resources/tools/presentation/run_report.sh                              |   2
-rw-r--r--  resources/tools/presentation/specification_parser.py                    |  32
-rw-r--r--  resources/tools/presentation/specifications/converter/environment.yaml  | 124
-rw-r--r--  resources/tools/presentation/specifications/converter/input.yaml        |  21
-rw-r--r--  resources/tools/presentation/specifications/report/environment.yaml     |   5
-rw-r--r--  resources/tools/presentation/specifications/trending/environment.yaml   |   2
16 files changed, 798 insertions(+), 194 deletions(-)
diff --git a/resources/tools/presentation/convert_xml_json.py b/resources/tools/presentation/convert_xml_json.py
new file mode 100644
index 0000000000..e9ccca0b63
--- /dev/null
+++ b/resources/tools/presentation/convert_xml_json.py
@@ -0,0 +1,475 @@
+# Copyright (c) 2021 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Convert output_info.xml files into JSON structures.
+
+Version: 0.1.0
+Date: 8th June 2021
+
+The JSON structure is defined in https://gerrit.fd.io/r/c/csit/+/28992
+"""
+
+import os
+import re
+import json
+import logging
+import gzip
+
+from os.path import join
+from shutil import rmtree
+from copy import deepcopy
+
+from pal_utils import get_files
+
+
+class JSONData:
+ """A Class storing and manipulating data from tests.
+ """
+
+ def __init__(self, template=None):
+ """Initialization.
+
+ :param template: JSON formatted template used to store data. It can
+ include default values.
+ :type template: dict
+ """
+
+ self._template = deepcopy(template)
+ self._data = self._template if self._template else dict()
+
+ def __str__(self):
+ """Return a string with human readable data.
+
+ :returns: Readable description.
+ :rtype: str
+ """
+ return str(self._data)
+
+ def __repr__(self):
+ """Return a string executable as Python constructor call.
+
+ :returns: Executable constructor call.
+ :rtype: str
+ """
+ return f"JSONData(template={self._template!r})"
+
+ @property
+ def data(self):
+ """Getter
+
+ :return: Data stored in the object.
+ :rtype: dict
+ """
+ return self._data
+
+ def add_element(self, value, path_to_value):
+ """Add an element to the json structure.
+
+ :param value: Element value.
+ :param path_to_value: List of tuples where the first item is the element
+ on the path and the second one is its type.
+ :type value: dict, list, str, int, float, bool
+ :type path_to_value: list
+ :raises: IndexError if the path is empty.
+        :raises: TypeError if the value is of an unsupported type.
+ """
+
+ def _add_element(val, path, structure):
+ """Add an element to the given path.
+
+ :param val: Element value.
+ :param path: List of tuples where the first item is the element
+ on the path and the second one is its type.
+ :param structure: The structure where the element is added.
+ :type val: dict, list, str, int, float, bool
+ :type path: list
+ :type structure: dict
+            :raises: TypeError if there is a wrong type in the path.
+ """
+ if len(path) == 1:
+ if isinstance(structure, dict):
+ if path[0][1] is dict:
+ if path[0][0] not in structure:
+ structure[path[0][0]] = dict()
+ structure[path[0][0]].update(val)
+ elif path[0][1] is list:
+ if path[0][0] not in structure:
+ structure[path[0][0]] = list()
+ if isinstance(val, list):
+ structure[path[0][0]].extend(val)
+ else:
+ structure[path[0][0]].append(val)
+ else:
+ structure[path[0][0]] = val
+ elif isinstance(structure, list):
+ if path[0][0] == -1 or path[0][0] >= len(structure):
+ if isinstance(val, list):
+ structure.extend(val)
+ else:
+ structure.append(val)
+ else:
+ structure[path[0][0]] = val
+ return
+
+ if isinstance(structure, dict):
+ if path[0][1] is dict:
+ if path[0][0] not in structure:
+ structure[path[0][0]] = dict()
+ elif path[0][1] is list:
+ if path[0][0] not in structure:
+ structure[path[0][0]] = list()
+ elif isinstance(structure, list):
+ if path[0][0] == -1 or path[0][0] >= len(structure):
+ if path[0][1] is list:
+ structure.append(list())
+ elif path[0][1] is dict:
+ structure.append(dict())
+ else:
+ structure.append(0)
+ path[0][0] = len(structure) - 1
+ else:
+                raise TypeError(
+                    u"Only the last item in the path can be of a type "
+                    u"other than list or dictionary."
+                )
+ _add_element(val, path[1:], structure[path[0][0]])
+
+ if not isinstance(value, (dict, list, str, int, float, bool)):
+ raise TypeError(
+ u"The value must be one of these types: dict, list, str, int, "
+ u"float, bool.\n"
+ f"Value: {value}\n"
+ f"Path: {path_to_value}"
+ )
+ _add_element(deepcopy(value), path_to_value, self._data)
+
+ def get_element(self, path):
+ """Get the element specified by the path.
+
+ :param path: List of keys and indices to the requested element or
+ sub-tree.
+ :type path: list
+ :returns: Element specified by the path.
+ :rtype: any
+ """
+ raise NotImplementedError
+
+ def dump(self, file_out, indent=None):
+ """Write JSON data to a file.
+
+ :param file_out: Path to the output JSON file.
+ :param indent: Indentation of items in JSON string. It is directly
+ passed to json.dump method.
+ :type file_out: str
+ :type indent: str
+ """
+ try:
+ with open(file_out, u"w") as file_handler:
+ json.dump(self._data, file_handler, indent=indent)
+ except OSError as err:
+ logging.warning(f"{repr(err)} Skipping")
+
+ def load(self, file_in):
+ """Load JSON data from a file.
+
+ :param file_in: Path to the input JSON file.
+ :type file_in: str
+ :raises: ValueError if the data being deserialized is not a valid
+ JSON document.
+ :raises: IOError if the file is not found or corrupted.
+ """
+ with open(file_in, u"r") as file_handler:
+ self._data = json.load(file_handler)
+
+
+def _export_test_from_xml_to_json(tid, in_data, out, template, metadata):
+ """Export data from a test to a json structure.
+
+ :param tid: Test ID.
+ :param in_data: Test data.
+ :param out: Path to output json file.
+ :param template: JSON template with optional default values.
+    :param metadata: Data which are not stored in the XML structure.
+ :type tid: str
+ :type in_data: dict
+ :type out: str
+ :type template: dict
+ :type metadata: dict
+ """
+
+ p_metadata = [(u"metadata", dict), ]
+ p_test = [(u"test", dict), ]
+ p_log = [(u"log", list), (-1, list)]
+
+ data = JSONData(template=template)
+
+ data.add_element({u"suite-id": metadata.pop(u"suite-id", u"")}, p_metadata)
+ data.add_element(
+ {u"suite-doc": metadata.pop(u"suite-doc", u"")}, p_metadata
+ )
+ data.add_element({u"testbed": metadata.pop(u"testbed", u"")}, p_metadata)
+ data.add_element(
+ {u"sut-version": metadata.pop(u"sut-version", u"")}, p_metadata
+ )
+
+ data.add_element({u"test-id": tid}, p_test)
+ t_type = in_data.get(u"type", u"")
+ t_type = u"NDRPDR" if t_type == u"CPS" else t_type # It is NDRPDR
+ data.add_element({u"test-type": t_type}, p_test)
+ tags = in_data.get(u"tags", list())
+ data.add_element({u"tags": tags}, p_test)
+ data.add_element(
+ {u"documentation": in_data.get(u"documentation", u"")}, p_test
+ )
+ data.add_element({u"message": in_data.get(u"msg", u"")}, p_test)
+ execution = {
+ u"start_time": in_data.get(u"starttime", u""),
+ u"end_time": in_data.get(u"endtime", u""),
+ u"status": in_data.get(u"status", u"FAILED"),
+ }
+ execution.update(metadata)
+ data.add_element({u"execution": execution}, p_test)
+
+ log_item = {
+ u"source": {
+ u"type": u"node",
+ u"id": ""
+ },
+ u"msg-type": u"",
+ u"log-level": u"INFO",
+ u"timestamp": in_data.get(u"starttime", u""), # replacement
+ u"msg": u"",
+ u"data": []
+ }
+
+ # Process configuration history:
+ in_papi = deepcopy(in_data.get(u"conf-history", None))
+ if in_papi:
+ regex_dut = re.compile(r'\*\*DUT(\d):\*\*')
+ node_id = u"dut1"
+ for line in in_papi.split(u"\n"):
+ if not line:
+ continue
+ groups = re.search(regex_dut, line)
+ if groups:
+ node_id = f"dut{groups.group(1)}"
+ else:
+ log_item[u"source"][u"id"] = node_id
+ log_item[u"msg-type"] = u"papi"
+ log_item[u"msg"] = line
+ data.add_element(log_item, p_log)
+
+ # Process show runtime:
+ in_sh_run = deepcopy(in_data.get(u"show-run", None))
+ if in_sh_run:
+        # Transform to the OpenMetrics format:
+ for key, val in in_sh_run.items():
+ log_item[u"source"][u"id"] = key
+ log_item[u"msg-type"] = u"metric"
+ log_item[u"msg"] = u"show-runtime"
+ log_item[u"data"] = list()
+ for item in val.get(u"runtime", list()):
+ for metric, m_data in item.items():
+ if metric == u"name":
+ continue
+ for idx, m_item in enumerate(m_data):
+ log_item[u"data"].append(
+ {
+ u"name": metric,
+ u"value": m_item,
+ u"labels": {
+ u"host": val.get(u"host", u""),
+ u"socket": val.get(u"socket", u""),
+ u"graph-node": item.get(u"name", u""),
+ u"thread-id": str(idx)
+ }
+ }
+ )
+ data.add_element(log_item, p_log)
+
+ # Process results:
+ results = dict()
+ if t_type == u"DEVICETEST":
+ pass # Nothing to add.
+ elif t_type == u"NDRPDR":
+ results = {
+ u"throughput": {
+ u"unit":
+ u"cps" if u"TCP_CPS" in tags or u"UDP_CPS" in tags
+ else u"pps",
+ u"ndr": {
+ u"value": {
+ u"lower": in_data.get(u"throughput", dict()).
+ get(u"NDR", dict()).get(u"LOWER", u"NaN"),
+ u"upper": in_data.get(u"throughput", dict()).
+ get(u"NDR", dict()).get(u"UPPER", u"NaN")
+ },
+ u"value_gbps": {
+ u"lower": in_data.get(u"gbps", dict()).
+ get(u"NDR", dict()).get(u"LOWER", u"NaN"),
+ u"upper": in_data.get(u"gbps", dict()).
+ get(u"NDR", dict()).get(u"UPPER", u"NaN")
+ }
+ },
+ u"pdr": {
+ u"value": {
+ u"lower": in_data.get(u"throughput", dict()).
+ get(u"PDR", dict()).get(u"LOWER", u"NaN"),
+ u"upper": in_data.get(u"throughput", dict()).
+ get(u"PDR", dict()).get(u"UPPER", u"NaN")
+ },
+ u"value_gbps": {
+ u"lower": in_data.get(u"gbps", dict()).
+ get(u"PDR", dict()).get(u"LOWER", u"NaN"),
+ u"upper": in_data.get(u"gbps", dict()).
+ get(u"PDR", dict()).get(u"UPPER", u"NaN")
+ }
+ }
+ },
+ u"latency": {
+ u"forward": {
+ u"pdr-90": in_data.get(u"latency", dict()).
+ get(u"PDR90", dict()).get(u"direction1", u"NaN"),
+ u"pdr-50": in_data.get(u"latency", dict()).
+ get(u"PDR50", dict()).get(u"direction1", u"NaN"),
+ u"pdr-10": in_data.get(u"latency", dict()).
+ get(u"PDR10", dict()).get(u"direction1", u"NaN"),
+ u"pdr-0": in_data.get(u"latency", dict()).
+ get(u"LAT0", dict()).get(u"direction1", u"NaN")
+ },
+ u"reverse": {
+ u"pdr-90": in_data.get(u"latency", dict()).
+ get(u"PDR90", dict()).get(u"direction2", u"NaN"),
+ u"pdr-50": in_data.get(u"latency", dict()).
+ get(u"PDR50", dict()).get(u"direction2", u"NaN"),
+ u"pdr-10": in_data.get(u"latency", dict()).
+ get(u"PDR10", dict()).get(u"direction2", u"NaN"),
+ u"pdr-0": in_data.get(u"latency", dict()).
+ get(u"LAT0", dict()).get(u"direction2", u"NaN")
+ }
+ }
+ }
+ elif t_type == "MRR":
+ results = {
+ u"unit": u"pps", # Old data use only pps
+ u"samples": in_data.get(u"result", dict()).get(u"samples", list()),
+ u"avg": in_data.get(u"result", dict()).get(u"receive-rate", u"NaN"),
+ u"stdev": in_data.get(u"result", dict()).
+ get(u"receive-stdev", u"NaN")
+ }
+ elif t_type == "SOAK":
+ results = {
+ u"critical-rate": {
+ u"lower": in_data.get(u"throughput", dict()).
+ get(u"LOWER", u"NaN"),
+ u"upper": in_data.get(u"throughput", dict()).
+ get(u"UPPER", u"NaN"),
+ }
+ }
+ elif t_type == "HOSTSTACK":
+ results = in_data.get(u"result", dict())
+ # elif t_type == "TCP": # Not used ???
+ # results = in_data.get(u"result", u"NaN")
+ elif t_type == "RECONF":
+ results = {
+ u"loss": in_data.get(u"result", dict()).get(u"loss", u"NaN"),
+ u"time": in_data.get(u"result", dict()).get(u"time", u"NaN")
+ }
+ else:
+ pass
+ data.add_element({u"results": results}, p_test)
+
+ data.dump(out, indent=u" ")
+
+
+def convert_xml_to_json(spec, data):
+ """Convert downloaded XML files into JSON.
+
+ Procedure:
+    - create one JSON file for each test,
+    - gzip all JSON files one by one,
+    - delete the JSON files.
+
+ :param spec: Specification read from the specification files.
+ :param data: Input data parsed from output.xml files.
+ :type spec: Specification
+ :type data: InputData
+ """
+
+ logging.info(u"Converting downloaded XML files to JSON ...")
+
+ template_name = spec.output.get(u"use-template", None)
+ structure = spec.output.get(u"structure", u"tree")
+ if template_name:
+ with open(template_name, u"r") as file_handler:
+ template = json.load(file_handler)
+ else:
+ template = None
+
+ build_dir = spec.environment[u"paths"][u"DIR[BUILD,JSON]"]
+ try:
+ rmtree(build_dir)
+ except FileNotFoundError:
+ pass # It does not exist
+
+ os.mkdir(build_dir)
+
+ for job, builds in data.data.items():
+ logging.info(f" Processing job {job}")
+ if structure == "tree":
+ os.makedirs(join(build_dir, job), exist_ok=True)
+ for build_nr, build in builds.items():
+ logging.info(f" Processing build {build_nr}")
+ if structure == "tree":
+ os.makedirs(join(build_dir, job, build_nr), exist_ok=True)
+ for test_id, test_data in build[u"tests"].items():
+ groups = re.search(re.compile(r'-(\d+[tT](\d+[cC]))-'), test_id)
+ if groups:
+ test_id = test_id.replace(groups.group(1), groups.group(2))
+ logging.info(f" Processing test {test_id}")
+ if structure == "tree":
+ dirs = test_id.split(u".")[:-1]
+ name = test_id.split(u".")[-1]
+ os.makedirs(
+ join(build_dir, job, build_nr, *dirs), exist_ok=True
+ )
+ file_name = \
+ f"{join(build_dir, job, build_nr, *dirs, name)}.json"
+ else:
+ file_name = join(
+ build_dir,
+ u'.'.join((job, build_nr, test_id, u'json'))
+ )
+ suite_id = test_id.rsplit(u".", 1)[0].replace(u" ", u"_")
+ _export_test_from_xml_to_json(
+ test_id, test_data, file_name, template,
+ {
+ u"ci": u"jenkins.fd.io",
+ u"job": job,
+ u"build": build_nr,
+ u"suite-id": suite_id,
+ u"suite-doc": build[u"suites"].get(suite_id, dict()).
+ get(u"doc", u""),
+ u"testbed": build[u"metadata"].get(u"testbed", u""),
+ u"sut-version": build[u"metadata"].get(u"version", u"")
+ }
+ )
+
+ # gzip the json files:
+ for file in get_files(build_dir, u"json"):
+ with open(file, u"rb") as src:
+ with gzip.open(f"{file}.gz", u"wb") as dst:
+ dst.writelines(src)
+ os.remove(file)
+
+ logging.info(u"Done.")
diff --git a/resources/tools/presentation/generator_files.py b/resources/tools/presentation/generator_files.py
index 11ed9b0337..aa4392e473 100644
--- a/resources/tools/presentation/generator_files.py
+++ b/resources/tools/presentation/generator_files.py
@@ -205,7 +205,12 @@ def file_details_split(file_spec, input_data, frmt=u"rst"):
chapters[chapter_l1][chapter_l2][nic][u"tables"].append(
(
table_lst.pop(idx),
- suite[u"doc"].replace(u'|br|', u'\n\n -')
+                            suite[u"doc"].replace(u'"', u"'").
+                                replace(u'\n', u' ').
+                                replace(u'\r', u'').
+                                replace(u'*[', u'\n\n - *[').
+                                replace(u"*", u"**").
+                                replace(u'\n\n - **[', u' - **[', 1)
)
)
break
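
The replace chain above turns in-line *[...]* markers in the suite doc into a bold reST list. Note the count-limited final replace must look for the already doubled asterisks (the single-asterisk pattern can no longer occur after the '*' -> '**' step). A quick illustration with a made-up doc string:

    doc = u'*[Top] Network Topologies:* TG-DUT1-TG. *[Enc] Encapsulations:* Eth-IPv4.'
    out = doc.replace(u'"', u"'").replace(u'\n', u' ').replace(u'\r', u''). \
        replace(u'*[', u'\n\n - *[').replace(u"*", u"**"). \
        replace(u'\n\n - **[', u' - **[', 1)
    # out:
    #  - **[Top] Network Topologies:** TG-DUT1-TG.
    #
    #  - **[Enc] Encapsulations:** Eth-IPv4.
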
diff --git a/resources/tools/presentation/generator_plots.py b/resources/tools/presentation/generator_plots.py
index 1d6bbaabf5..fb1b4734cf 100644
--- a/resources/tools/presentation/generator_plots.py
+++ b/resources/tools/presentation/generator_plots.py
@@ -18,16 +18,16 @@
import re
import logging
+from collections import OrderedDict
+from copy import deepcopy
+from math import log
+
import hdrh.histogram
import hdrh.codec
import pandas as pd
import plotly.offline as ploff
import plotly.graph_objs as plgo
-from collections import OrderedDict
-from copy import deepcopy
-from math import log
-
from plotly.exceptions import PlotlyError
from pal_utils import mean, stdev
@@ -200,7 +200,8 @@ def plot_hdrh_lat_by_percentile(plot, input_data):
hovertext.append(
f"<b>{desc[graph]}</b><br>"
f"Direction: {(u'W-E', u'E-W')[idx % 2]}<br>"
- f"Percentile: {previous_x:.5f}-{percentile:.5f}%<br>"
+ f"Percentile: "
+ f"{previous_x:.5f}-{percentile:.5f}%<br>"
f"Latency: {item.value_iterated_to}uSec"
)
xaxis.append(percentile)
@@ -208,7 +209,8 @@ def plot_hdrh_lat_by_percentile(plot, input_data):
hovertext.append(
f"<b>{desc[graph]}</b><br>"
f"Direction: {(u'W-E', u'E-W')[idx % 2]}<br>"
- f"Percentile: {previous_x:.5f}-{percentile:.5f}%<br>"
+ f"Percentile: "
+ f"{previous_x:.5f}-{percentile:.5f}%<br>"
f"Latency: {item.value_iterated_to}uSec"
)
previous_x = percentile
@@ -351,7 +353,7 @@ def plot_hdrh_lat_by_percentile_x_log(plot, input_data):
decoded = hdrh.histogram.HdrHistogram.decode(
test[u"latency"][graph][direction][u"hdrh"]
)
- except hdrh.codec.HdrLengthException:
+ except (hdrh.codec.HdrLengthException, TypeError):
logging.warning(
f"No data for direction {(u'W-E', u'E-W')[idx % 2]}"
)
@@ -855,10 +857,10 @@ def plot_mrr_box_name(plot, input_data):
# Add plot traces
traces = list()
- for idx in range(len(data_x)):
+ for idx, x_item in enumerate(data_x):
traces.append(
plgo.Box(
- x=[data_x[idx], ] * len(data_y[idx]),
+ x=[x_item, ] * len(data_y[idx]),
y=data_y[idx],
name=data_names[idx],
hoverinfo=u"y+name"
@@ -988,7 +990,7 @@ def plot_tsa_name(plot, input_data):
REGEX_NIC,
u"",
test_name.replace(u'-ndrpdr', u'').
- replace(u'2n1l-', u'')
+ replace(u'2n1l-', u'')
)
vals[name] = OrderedDict()
y_val_1 = test_vals[u"1"][0] / 1e6
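
The widened except around HdrHistogram.decode() above matters because tests without latency results presumably carry None in place of the base64-encoded hdrh string, which makes decode() raise TypeError rather than HdrLengthException. The same guard as a standalone sketch (assumed behaviour, not code from the patch):

    import hdrh.histogram
    import hdrh.codec

    def decode_hdrh(payload):
        """Decode an hdrh payload; return None for missing or corrupt data."""
        try:
            return hdrh.histogram.HdrHistogram.decode(payload)
        except (hdrh.codec.HdrLengthException, TypeError):
            return None
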
diff --git a/resources/tools/presentation/generator_tables.py b/resources/tools/presentation/generator_tables.py
index b03261c6d8..bb962890d0 100644
--- a/resources/tools/presentation/generator_tables.py
+++ b/resources/tools/presentation/generator_tables.py
@@ -323,7 +323,8 @@ def table_merged_details(table, input_data):
suite_name = suite[u"name"]
table_lst = list()
for test in data.keys():
- if data[test][u"parent"] not in suite_name:
+ if data[test][u"status"] != u"PASS" or \
+ data[test][u"parent"] not in suite_name:
continue
row_lst = list()
for column in table[u"columns"]:
@@ -351,10 +352,12 @@ def table_merged_details(table, input_data):
col_data = col_data.split(u" |br| ", 1)[1]
except IndexError:
pass
+ col_data = col_data.replace(u'\n', u' |br| ').\
+ replace(u'\r', u'').replace(u'"', u"'")
col_data = f" |prein| {col_data} |preout| "
elif column[u"data"].split(u" ")[1] in \
(u"conf-history", u"show-run"):
- col_data = col_data.replace(u" |br| ", u"", 1)
+ col_data = col_data.replace(u'\n', u' |br| ')
col_data = f" |prein| {col_data[:-5]} |preout| "
row_lst.append(f'"{col_data}"')
except KeyError:
@@ -386,12 +389,7 @@ def _tpc_modify_test_name(test_name, ignore_nic=False):
:rtype: str
"""
test_name_mod = test_name.\
- replace(u"-ndrpdrdisc", u""). \
replace(u"-ndrpdr", u"").\
- replace(u"-pdrdisc", u""). \
- replace(u"-ndrdisc", u"").\
- replace(u"-pdr", u""). \
- replace(u"-ndr", u""). \
replace(u"1t1c", u"1c").\
replace(u"2t1c", u"1c"). \
replace(u"2t2c", u"2c").\
@@ -425,7 +423,7 @@ def _tpc_insert_data(target, src, include_tests):
"""Insert src data to the target structure.
:param target: Target structure where the data is placed.
- :param src: Source data to be placed into the target stucture.
+ :param src: Source data to be placed into the target structure.
:param include_tests: Which results will be included (MRR, NDR, PDR).
:type target: list
:type src: dict
@@ -1252,8 +1250,8 @@ def table_perf_trending_dash_html(table, input_data):
u"a",
attrib=dict(
href=f"{lnk_dir}"
- f"{_generate_url(table.get(u'testbed', ''), item)}"
- f"{lnk_sufix}"
+ f"{_generate_url(table.get(u'testbed', ''), item)}"
+ f"{lnk_sufix}"
)
)
ref.text = item
diff --git a/resources/tools/presentation/input_data_files.py b/resources/tools/presentation/input_data_files.py
index fc629bc218..5bd6af42d6 100644
--- a/resources/tools/presentation/input_data_files.py
+++ b/resources/tools/presentation/input_data_files.py
@@ -181,22 +181,6 @@ def _unzip_file(spec, build, pid):
return False
-def _download_json(source, job, build, w_dir, arch):
- """
-
- :param source:
- :param job:
- :param build:
- :param w_dir: Path to working directory
- :param arch:
- :return:
- """
- success = False
- downloaded_name = u""
-
- return success, downloaded_name
-
-
def _download_xml(source, job, build, w_dir, arch):
"""
@@ -219,10 +203,9 @@ def _download_xml(source, job, build, w_dir, arch):
job=job, build=build[u'build'], filename=file_name
)
)
- verify = False if u"nginx" in url else True
logging.info(f" Trying to download {url}")
success, downloaded_name = _download_file(
- url, new_name, arch=arch, verify=verify, repeat=3
+ url, new_name, arch=arch, verify=(u"nginx" not in url), repeat=3
)
return success, downloaded_name
@@ -286,7 +269,6 @@ def download_and_unzip_data_file(spec, job, build, pid):
"""
download = {
- "json": _download_json,
"xml": _download_xml,
"xml-docs": _download_xml_docs
}
@@ -302,12 +284,12 @@ def download_and_unzip_data_file(spec, job, build, pid):
if not download_type:
continue
success, downloaded_name = download[download_type](
- source,
- job,
- build,
- spec.environment[u"paths"][u"DIR[WORKING,DATA]"],
- arch
- )
+ source,
+ job,
+ build,
+ spec.environment[u"paths"][u"DIR[WORKING,DATA]"],
+ arch
+ )
if success:
source[u"successful-downloads"] += 1
build[u"source"] = source[u"type"]
diff --git a/resources/tools/presentation/input_data_parser.py b/resources/tools/presentation/input_data_parser.py
index e1db03660d..d108d09e84 100644
--- a/resources/tools/presentation/input_data_parser.py
+++ b/resources/tools/presentation/input_data_parser.py
@@ -346,8 +346,6 @@ class ExecutionChecker(ResultVisitor):
u"timestamp": self._get_timestamp,
u"vpp-version": self._get_vpp_version,
u"dpdk-version": self._get_dpdk_version,
- # TODO: Remove when not needed:
- u"teardown-vat-history": self._get_vat_history,
u"teardown-papi-history": self._get_papi_history,
u"test-show-runtime": self._get_show_run,
u"testbed": self._get_testbed
@@ -608,32 +606,6 @@ class ExecutionChecker(ResultVisitor):
self._data[u"metadata"][u"generated"] = self._timestamp
self._msg_type = None
- def _get_vat_history(self, msg):
- """Called when extraction of VAT command history is required.
-
- TODO: Remove when not needed.
-
- :param msg: Message to process.
- :type msg: Message
- :returns: Nothing.
- """
- if msg.message.count(u"VAT command history:"):
- self._conf_history_lookup_nr += 1
- if self._conf_history_lookup_nr == 1:
- self._data[u"tests"][self._test_id][u"conf-history"] = str()
- else:
- self._msg_type = None
- text = re.sub(
- r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} VAT command history:",
- u"",
- msg.message,
- count=1
- ).replace(u'\n', u' |br| ').replace(u'"', u"'")
-
- self._data[u"tests"][self._test_id][u"conf-history"] += (
- f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
- )
-
def _get_papi_history(self, msg):
"""Called when extraction of PAPI command history is required.
@@ -652,9 +624,9 @@ class ExecutionChecker(ResultVisitor):
u"",
msg.message,
count=1
- ).replace(u'\n', u' |br| ').replace(u'"', u"'")
+ ).replace(u'"', u"'")
self._data[u"tests"][self._test_id][u"conf-history"] += (
- f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
+ f"**DUT{str(self._conf_history_lookup_nr)}:** {text}"
)
def _get_show_run(self, msg):
@@ -697,12 +669,13 @@ class ExecutionChecker(ResultVisitor):
except (IndexError, KeyError):
return
- dut = u"DUT{nr}".format(
+ dut = u"dut{nr}".format(
nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)
oper = {
u"host": host,
u"socket": sock,
+ u"runtime": runtime,
u"threads": OrderedDict({idx: list() for idx in range(threads_nr)})
}
@@ -917,38 +890,6 @@ class ExecutionChecker(ResultVisitor):
except (IndexError, ValueError):
pass
- # TODO: Remove when not needed
- latency[u"NDR10"] = {
- u"direction1": copy.copy(latency_default),
- u"direction2": copy.copy(latency_default)
- }
- latency[u"NDR50"] = {
- u"direction1": copy.copy(latency_default),
- u"direction2": copy.copy(latency_default)
- }
- latency[u"NDR90"] = {
- u"direction1": copy.copy(latency_default),
- u"direction2": copy.copy(latency_default)
- }
- try:
- latency[u"LAT0"][u"direction1"] = process_latency(groups.group(5))
- latency[u"LAT0"][u"direction2"] = process_latency(groups.group(6))
- latency[u"NDR10"][u"direction1"] = process_latency(groups.group(7))
- latency[u"NDR10"][u"direction2"] = process_latency(groups.group(8))
- latency[u"NDR50"][u"direction1"] = process_latency(groups.group(9))
- latency[u"NDR50"][u"direction2"] = process_latency(groups.group(10))
- latency[u"NDR90"][u"direction1"] = process_latency(groups.group(11))
- latency[u"NDR90"][u"direction2"] = process_latency(groups.group(12))
- latency[u"PDR10"][u"direction1"] = process_latency(groups.group(13))
- latency[u"PDR10"][u"direction2"] = process_latency(groups.group(14))
- latency[u"PDR50"][u"direction1"] = process_latency(groups.group(15))
- latency[u"PDR50"][u"direction2"] = process_latency(groups.group(16))
- latency[u"PDR90"][u"direction1"] = process_latency(groups.group(17))
- latency[u"PDR90"][u"direction2"] = process_latency(groups.group(18))
- return latency, u"PASS"
- except (IndexError, ValueError):
- pass
-
return latency, u"FAIL"
@staticmethod
@@ -1010,19 +951,11 @@ class ExecutionChecker(ResultVisitor):
except AttributeError:
return
- doc_str = suite.doc.\
- replace(u'"', u"'").\
- replace(u'\n', u' ').\
- replace(u'\r', u'').\
- replace(u'*[', u' |br| *[').\
- replace(u"*", u"**").\
- replace(u' |br| *[', u'*[', 1)
-
self._data[u"suites"][suite.longname.lower().
replace(u'"', u"'").
replace(u" ", u"_")] = {
u"name": suite.name.lower(),
- u"doc": doc_str,
+ u"doc": suite.doc,
u"parent": parent_name,
u"level": len(suite.longname.split(u"."))
}
@@ -1080,49 +1013,36 @@ class ExecutionChecker(ResultVisitor):
name = test.name.lower()
# Remove TC number from the TC long name (backward compatibility):
- self._test_id = re.sub(
- self.REGEX_TC_NUMBER, u"", longname.replace(u"snat", u"nat")
- )
+ self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
# Remove TC number from the TC name (not needed):
- test_result[u"name"] = re.sub(
- self.REGEX_TC_NUMBER, "", name.replace(u"snat", u"nat")
- )
+ test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
- test_result[u"parent"] = test.parent.name.lower().\
- replace(u"snat", u"nat")
+ test_result[u"parent"] = test.parent.name.lower()
test_result[u"tags"] = tags
- test_result["doc"] = test.doc.\
- replace(u'"', u"'").\
- replace(u'\n', u' ').\
- replace(u'\r', u'').\
- replace(u'[', u' |br| [').\
- replace(u' |br| [', u'[', 1)
- test_result[u"type"] = u"FUNC"
+ test_result["doc"] = test.doc
+ test_result[u"type"] = u""
test_result[u"status"] = test.status
+ test_result[u"starttime"] = test.starttime
+ test_result[u"endtime"] = test.endtime
if test.status == u"PASS":
if u"NDRPDR" in tags:
if u"TCP_PPS" in tags or u"UDP_PPS" in tags:
test_result[u"msg"] = self._get_data_from_pps_test_msg(
- test.message).replace(u'\n', u' |br| '). \
- replace(u'\r', u'').replace(u'"', u"'")
+ test.message)
elif u"TCP_CPS" in tags or u"UDP_CPS" in tags:
test_result[u"msg"] = self._get_data_from_cps_test_msg(
- test.message).replace(u'\n', u' |br| '). \
- replace(u'\r', u'').replace(u'"', u"'")
+ test.message)
else:
test_result[u"msg"] = self._get_data_from_perf_test_msg(
- test.message).replace(u'\n', u' |br| ').\
- replace(u'\r', u'').replace(u'"', u"'")
+ test.message)
elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
test_result[u"msg"] = self._get_data_from_mrr_test_msg(
- test.message).replace(u'\n', u' |br| ').\
- replace(u'\r', u'').replace(u'"', u"'")
+ test.message)
else:
- test_result[u"msg"] = test.message.replace(u'\n', u' |br| ').\
- replace(u'\r', u'').replace(u'"', u"'")
+ test_result[u"msg"] = test.message
else:
- test_result[u"msg"] = u"Test Failed."
+ test_result[u"msg"] = test.message
if u"PERFTEST" in tags:
# Replace info about cores (e.g. -1c-) with the info about threads
@@ -1157,26 +1077,26 @@ class ExecutionChecker(ResultVisitor):
)
return
- if test.status == u"PASS":
- if u"DEVICETEST" in tags:
- test_result[u"type"] = u"DEVICETEST"
- elif u"NDRPDR" in tags:
- if u"TCP_CPS" in tags or u"UDP_CPS" in tags:
- test_result[u"type"] = u"CPS"
- else:
- test_result[u"type"] = u"NDRPDR"
+ if u"DEVICETEST" in tags:
+ test_result[u"type"] = u"DEVICETEST"
+ elif u"NDRPDR" in tags:
+ if u"TCP_CPS" in tags or u"UDP_CPS" in tags:
+ test_result[u"type"] = u"CPS"
+ else:
+ test_result[u"type"] = u"NDRPDR"
+ if test.status == u"PASS":
test_result[u"throughput"], test_result[u"status"] = \
self._get_ndrpdr_throughput(test.message)
test_result[u"gbps"], test_result[u"status"] = \
self._get_ndrpdr_throughput_gbps(test.message)
test_result[u"latency"], test_result[u"status"] = \
self._get_ndrpdr_latency(test.message)
- elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
- if u"MRR" in tags:
- test_result[u"type"] = u"MRR"
- else:
- test_result[u"type"] = u"BMRR"
-
+ elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
+ if u"MRR" in tags:
+ test_result[u"type"] = u"MRR"
+ else:
+ test_result[u"type"] = u"BMRR"
+ if test.status == u"PASS":
test_result[u"result"] = dict()
groups = re.search(self.REGEX_BMRR, test.message)
if groups is not None:
@@ -1194,20 +1114,24 @@ class ExecutionChecker(ResultVisitor):
groups = re.search(self.REGEX_MRR, test.message)
test_result[u"result"][u"receive-rate"] = \
float(groups.group(3)) / float(groups.group(1))
- elif u"SOAK" in tags:
- test_result[u"type"] = u"SOAK"
+ elif u"SOAK" in tags:
+ test_result[u"type"] = u"SOAK"
+ if test.status == u"PASS":
test_result[u"throughput"], test_result[u"status"] = \
self._get_plr_throughput(test.message)
- elif u"HOSTSTACK" in tags:
- test_result[u"type"] = u"HOSTSTACK"
+ elif u"HOSTSTACK" in tags:
+ test_result[u"type"] = u"HOSTSTACK"
+ if test.status == u"PASS":
test_result[u"result"], test_result[u"status"] = \
self._get_hoststack_data(test.message, tags)
- elif u"TCP" in tags:
- test_result[u"type"] = u"TCP"
- groups = re.search(self.REGEX_TCP, test.message)
- test_result[u"result"] = int(groups.group(2))
- elif u"RECONF" in tags:
- test_result[u"type"] = u"RECONF"
+ # elif u"TCP" in tags: # This might be not used
+ # test_result[u"type"] = u"TCP"
+ # if test.status == u"PASS":
+ # groups = re.search(self.REGEX_TCP, test.message)
+ # test_result[u"result"] = int(groups.group(2))
+ elif u"RECONF" in tags:
+ test_result[u"type"] = u"RECONF"
+ if test.status == u"PASS":
test_result[u"result"] = None
try:
grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
@@ -1218,10 +1142,8 @@ class ExecutionChecker(ResultVisitor):
}
except (AttributeError, IndexError, ValueError, TypeError):
test_result[u"status"] = u"FAIL"
- else:
- test_result[u"status"] = u"FAIL"
- self._data[u"tests"][self._test_id] = test_result
- return
+ else:
+ test_result[u"status"] = u"FAIL"
self._data[u"tests"][self._test_id] = test_result
@@ -1370,13 +1292,7 @@ class ExecutionChecker(ResultVisitor):
:type teardown_kw: Keyword
:returns: Nothing.
"""
-
- if teardown_kw.name.count(u"Show Vat History On All Duts"):
- # TODO: Remove when not needed:
- self._conf_history_lookup_nr = 0
- self._msg_type = u"teardown-vat-history"
- teardown_kw.messages.visit(self)
- elif teardown_kw.name.count(u"Show Papi History On All Duts"):
+ if teardown_kw.name.count(u"Show Papi History On All Duts"):
self._conf_history_lookup_nr = 0
self._msg_type = u"teardown-papi-history"
teardown_kw.messages.visit(self)
@@ -1876,7 +1792,7 @@ class InputData:
if params is None:
params = element.get(u"parameters", None)
if params:
- params.append(u"type")
+ params.extend((u"type", u"status"))
data_to_filter = data if data else element[u"data"]
data = pd.Series()
diff --git a/resources/tools/presentation/json/template_0.1.0.json b/resources/tools/presentation/json/template_0.1.0.json
new file mode 100644
index 0000000000..dd9fed7360
--- /dev/null
+++ b/resources/tools/presentation/json/template_0.1.0.json
@@ -0,0 +1,25 @@
+{
+ "version": "0.1.0",
+ "test": {
+ "test-id": "",
+ "test-type": "",
+ "tags": [],
+ "documentation": "",
+ "message": "",
+ "execution": {
+ "ci": "",
+ "job": "",
+ "build": "",
+ "csit-commit": "",
+ "csit-gerrit-change": "",
+ "start_time": "",
+ "end_time": "",
+ "status": ""
+ },
+ "results": {}
+ },
+ "metadata": {},
+ "resource": [],
+ "network": [],
+ "log": []
+}
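
For orientation, _export_test_from_xml_to_json fills this template roughly as sketched below; all values are invented, and real files also carry the full execution block, suite metadata and log entries:

    {
        "version": "0.1.0",
        "test": {
            "test-id": "tests.vpp.perf.l2.example-ndrpdr",
            "test-type": "NDRPDR",
            "tags": ["NDRPDR", "64B", "1C"],
            "execution": {"ci": "jenkins.fd.io", "status": "PASS"},
            "results": {"throughput": {"unit": "pps"}}
        },
        "metadata": {"suite-id": "tests.vpp.perf.l2", "testbed": "example-tb"},
        "resource": [],
        "network": [],
        "log": []
    }
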
diff --git a/resources/tools/presentation/pal.py b/resources/tools/presentation/pal.py
index 5bbea297ef..7e2d9a8dbd 100644
--- a/resources/tools/presentation/pal.py
+++ b/resources/tools/presentation/pal.py
@@ -29,9 +29,10 @@ from generator_files import generate_files
from generator_report import generate_report
from generator_cpta import generate_cpta
from generator_alerts import Alerting, AlertingError
+from convert_xml_json import convert_xml_to_json
-OUTPUTS = (u"none", u"report", u"trending", u"convert_to_json")
+OUTPUTS = (u"none", u"report", u"trending", u"convert-xml-to-json")
def parse_args():
@@ -131,6 +132,7 @@ def main():
spec.read_specification()
except PresentationError as err:
logging.critical(u"Finished with error.")
+ logging.critical(repr(err))
return 1
if spec.output[u"output"] not in OUTPUTS:
@@ -170,6 +172,8 @@ def main():
alert.generate_alerts()
except AlertingError as err:
logging.warning(repr(err))
+ elif spec.output[u"output"] == u"convert-xml-to-json":
+ convert_xml_to_json(spec, data)
else:
logging.info("No output will be generated.")
diff --git a/resources/tools/presentation/run_convert.sh b/resources/tools/presentation/run_convert.sh
new file mode 100755
index 0000000000..814fab3a28
--- /dev/null
+++ b/resources/tools/presentation/run_convert.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+set -x
+
+# set default values in config array
+typeset -A CFG
+typeset -A DIR
+
+DIR[WORKING]=_tmp
+
+# Create working directories
+mkdir ${DIR[WORKING]}
+
+# Create virtual environment
+virtualenv -p $(which python3) ${DIR[WORKING]}/env
+source ${DIR[WORKING]}/env/bin/activate
+
+# FIXME: s3 config (until migrated to vault, then account will be reset)
+mkdir -p ${HOME}/.aws
+echo "[nomad-s3]" >> ${HOME}/.aws/config
+echo "[nomad-s3]
+aws_access_key_id = csit
+aws_secret_access_key = Csit1234" >> ${HOME}/.aws/credentials
+
+# Install python dependencies:
+pip3 install -r requirements.txt
+
+export PYTHONPATH=`pwd`:`pwd`/../../../
+
+python pal.py \
+ --specification specifications/converter \
+    --logging INFO
+
+RETURN_STATUS=$?
+exit ${RETURN_STATUS}
diff --git a/resources/tools/presentation/run_cpta.sh b/resources/tools/presentation/run_cpta.sh
index 8d3dd269a7..842339f7f5 100755
--- a/resources/tools/presentation/run_cpta.sh
+++ b/resources/tools/presentation/run_cpta.sh
@@ -24,7 +24,7 @@ aws_secret_access_key = Csit1234" >> ${HOME}/.aws/credentials
# Install python dependencies:
pip3 install -r requirements.txt
-export PYTHONPATH=`pwd`:`pwd`/../../../:`pwd`/../../libraries/python
+export PYTHONPATH=`pwd`:`pwd`/../../../
STATUS=$(python pal.py \
--specification specifications/trending \
diff --git a/resources/tools/presentation/run_report.sh b/resources/tools/presentation/run_report.sh
index 9cc33542e0..2a14da1b62 100755
--- a/resources/tools/presentation/run_report.sh
+++ b/resources/tools/presentation/run_report.sh
@@ -27,7 +27,7 @@ aws_secret_access_key = Csit1234" >> ${HOME}/.aws/credentials
# Install python dependencies:
pip3 install -r requirements.txt
-export PYTHONPATH=`pwd`:`pwd`/../../../:`pwd`/../../libraries/python
+export PYTHONPATH=`pwd`:`pwd`/../../../
python pal.py \
--specification specifications/report \
diff --git a/resources/tools/presentation/specification_parser.py b/resources/tools/presentation/specification_parser.py
index 4110bfff9b..a94d09f3fa 100644
--- a/resources/tools/presentation/specification_parser.py
+++ b/resources/tools/presentation/specification_parser.py
@@ -192,7 +192,7 @@ class Specification:
:returns: List of specifications of tables to be generated.
:rtype: list
"""
- return self._specification[u"tables"]
+ return self._specification.get(u"tables", list())
@property
def plots(self):
@@ -201,7 +201,7 @@ class Specification:
:returns: List of specifications of plots to be generated.
:rtype: list
"""
- return self._specification[u"plots"]
+ return self._specification.get(u"plots", list())
@property
def files(self):
@@ -210,7 +210,7 @@ class Specification:
:returns: List of specifications of files to be generated.
:rtype: list
"""
- return self._specification[u"files"]
+ return self._specification.get(u"files", list())
@property
def cpta(self):
@@ -614,6 +614,8 @@ class Specification:
idx = self._get_type_index(u"static")
if idx is None:
logging.warning(u"No static content specified.")
+ self._specification[u"static"] = dict()
+ return
for key, value in self._cfg_yaml[idx].items():
if isinstance(value, str):
@@ -816,10 +818,26 @@ class Specification:
logging.info(u"Parsing specification: INPUT")
- for data_set in self.data_sets.values():
- if data_set == "data-sets":
- continue
- for job, builds in data_set.items():
+ idx = self._get_type_index(u"input")
+ if idx is None:
+ logging.info(u"Creating the list of inputs from data sets.")
+ for data_set in self.data_sets.values():
+ if data_set == "data-sets":
+ continue
+ for job, builds in data_set.items():
+ for build in builds:
+ self.add_build(
+ job,
+ {
+ u"build": build,
+ u"status": None,
+ u"file-name": None,
+ u"source": None
+ }
+ )
+ else:
+ logging.info(u"Reading pre-defined inputs.")
+ for job, builds in self._cfg_yaml[idx][u"builds"].items():
for build in builds:
self.add_build(
job,
diff --git a/resources/tools/presentation/specifications/converter/environment.yaml b/resources/tools/presentation/specifications/converter/environment.yaml
new file mode 100644
index 0000000000..1f57445638
--- /dev/null
+++ b/resources/tools/presentation/specifications/converter/environment.yaml
@@ -0,0 +1,124 @@
+################################################################################
+### E N V I R O N M E N T ###
+################################################################################
+
+- type: "environment"
+
+ spec-files:
+  - "specifications/converter/input.yaml"  # Only for the XML --> JSON converter
+
+ paths:
+ # Top level directories:
+ ## Working directory
+ DIR[WORKING]: "_tmp"
+ ## Build directories
+ DIR[BUILD,JSON]: "_build"
+
+ # Working directories
+ ## Input data files (.zip, .xml)
+ DIR[WORKING,DATA]: "{DIR[WORKING]}/data"
+
+ # Data sources are used in this order:
+ data-sources:
+ # JSON from S3
+ - type: "json"
+ url: "https://logs.nginx.service.consul/vex-yul-rot-jenkins-1"
+ path: "{job}/{build}/{filename}"
+ file-name: "output.json.gz"
+ file-format: ".gz"
+ enabled: False
+ # XML
+ - type: "xml"
+ url: "https://logs.nginx.service.consul/vex-yul-rot-jenkins-1"
+ path: "{job}/{build}/archives/{filename}"
+ file-name: "output_info.xml.gz"
+ file-format: ".gz"
+ enabled: True
+ - type: "xml"
+ url: "https://logs.nginx.service.consul/vex-yul-rot-jenkins-1"
+ path: "{job}/{build}/{filename}"
+ file-name: "output_info.xml.gz"
+ file-format: ".gz"
+ enabled: True
+ - type: "xml"
+ url: "https://logs.fd.io/production/vex-yul-rot-jenkins-1"
+ path: "{job}/{build}/archives/{filename}"
+ file-name: "output_info.xml.gz"
+ file-format: ".gz"
+ enabled: True
+ - type: "xml"
+ url: "https://logs.fd.io/production/vex-yul-rot-jenkins-1"
+ path: "{job}/{build}/archives/{filename}"
+ file-name: "output.xml.gz"
+ file-format: ".gz"
+ enabled: True
+ - type: "xml"
+ url: "https://logs.nginx.service.consul/vex-yul-rot-jenkins-1"
+ path: "{job}/{build}/archives/{filename}"
+ file-name: "output.xml.gz"
+ file-format: ".gz"
+ enabled: True
+ - type: "xml"
+ url: "https://logs.nginx.service.consul/vex-yul-rot-jenkins-1"
+ path: "{job}/{build}/{filename}"
+ file-name: "output.xml.gz"
+ file-format: ".gz"
+ enabled: True
+ - type: "xml"
+ url: "https://logs.fd.io/production/vex-yul-rot-jenkins-1"
+ path: "{job}/{build}/{filename}"
+ file-name: "output_info.xml.gz"
+ file-format: ".gz"
+ enabled: True
+ - type: "xml"
+ url: "https://logs.fd.io/production/vex-yul-rot-jenkins-1"
+ path: "{job}/{build}/{filename}"
+ file-name: "output.xml.gz"
+ file-format: ".gz"
+ enabled: True
+ # XML from docs.nexus
+ - type: "xml-docs"
+ url: "https://docs.fd.io/csit"
+ path: "report/_static/archive"
+ file-name: "output_info.xml.gz"
+ file-format: ".gz"
+ enabled: True
+ - type: "xml-docs"
+ url: "https://docs.fd.io/csit"
+ path: "report/_static/archive"
+ file-name: "output.xml.gz"
+ file-format: ".gz"
+ enabled: True
+ - type: "xml-docs"
+ url: "https://docs.fd.io/csit"
+ path: "report/_static/archive"
+ file-name: "robot-plugin.zip"
+ file-format: ".zip"
+ enabled: True
+
+ make-dirs:
+ # List the directories which are created while preparing the environment.
+ # All directories MUST be defined in "paths" section.
+ - "DIR[WORKING,DATA]"
+
+ remove-dirs:
+ # List the directories which are deleted while cleaning the environment.
+ # All directories MUST be defined in "paths" section.
+ - "DIR[WORKING,DATA]"
+
+ build-dirs:
+ # List the directories where the results (build) is stored.
+ # All directories MUST be defined in "paths" section.
+ - "DIR[BUILD,JSON]"
+
+################################################################################
+### O U T P U T ###
+################################################################################
+
+- type: "output"
+ output: "convert-xml-to-json"
+  # structure: flat | tree
+  # - flat - all .gz files in one directory
+  # - tree - .gz files in a directory tree structured as job/build/suite/*.gz
+  structure: "tree"  # Use "flat" or "tree"
+ use-template: "json/template_0.1.0.json"
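
With the job and build number from the example input below and an invented test name, the two structure values produce these layouts:

    tree: _build/csit-vpp-perf-report-iterative-2101-3n-hsw/64/tests/vpp/perf/l2/<name>.json.gz
    flat: _build/csit-vpp-perf-report-iterative-2101-3n-hsw.64.tests.vpp.perf.l2.<name>.json.gz
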
diff --git a/resources/tools/presentation/specifications/converter/input.yaml b/resources/tools/presentation/specifications/converter/input.yaml
new file mode 100644
index 0000000000..0cf765030d
--- /dev/null
+++ b/resources/tools/presentation/specifications/converter/input.yaml
@@ -0,0 +1,21 @@
+################################################################################
+### I N P U T X M L F I L E S ###
+################################################################################
+
+# This is only an example input for the XML --> JSON converter
+
+- type: "input"
+
+ # 3n-hsw
+
+ builds:
+ csit-vpp-perf-report-iterative-2101-3n-hsw:
+ - 65 # rls2101.rel NDRPDR reconf iter env 6
+ - 69 # rls2101.rel Hoststack iter env 6
+ - 64 # rls2101.rel NDRPDR iter env 6
+ - 63 # rls2101.rel MRR iter env 6
+ csit-vpp-perf-report-iterative-2101-2n-skx:
+ - 94 # rls2101.rel NDRPDR iter env 6
+ - 68 # rls2101.rel soak env 6
+ csit-vpp-device-2101-ubuntu1804-1n-skx:
+ - 358 # rls2101.rel VPP DEV env 6
diff --git a/resources/tools/presentation/specifications/report/environment.yaml b/resources/tools/presentation/specifications/report/environment.yaml
index 0e946046b4..10d61f56e4 100644
--- a/resources/tools/presentation/specifications/report/environment.yaml
+++ b/resources/tools/presentation/specifications/report/environment.yaml
@@ -193,7 +193,7 @@
file-format: ".zip"
enabled: True
- archive-inputs: True
+ archive-inputs: False
mapping-file: ""
@@ -224,7 +224,6 @@
reverse-input: False # Needed for trending, not important for the report
- # TODO: Change in code needed, it was in type: "configuration"
limits:
nic:
x520: 24460000
@@ -258,7 +257,7 @@
################################################################################
- type: "output"
- arch-file-format: # moved from input, TODO: change it in the code
+ arch-file-format:
- ".gz"
- ".zip"
output: "report"
diff --git a/resources/tools/presentation/specifications/trending/environment.yaml b/resources/tools/presentation/specifications/trending/environment.yaml
index dfa9f680c2..95eaa7b606 100644
--- a/resources/tools/presentation/specifications/trending/environment.yaml
+++ b/resources/tools/presentation/specifications/trending/environment.yaml
@@ -256,7 +256,7 @@
################################################################################
- type: "output"
- arch-file-format: # moved from input, TODO: change it in the code
+ arch-file-format:
- ".gz"
- ".zip"
output: "trending"