-rw-r--r--  resources/tools/presentation/generator_cpta.py        |  49
-rw-r--r--  resources/tools/presentation/input_data_files.py      |  70
-rw-r--r--  resources/tools/presentation/input_data_parser.py     |  80
-rw-r--r--  resources/tools/presentation/specification_CPTA.yaml  |  94
-rw-r--r--  resources/tools/presentation/specification_parser.py  |  17
5 files changed, 143 insertions(+), 167 deletions(-)
diff --git a/resources/tools/presentation/generator_cpta.py b/resources/tools/presentation/generator_cpta.py
index 8d10e15a49..ac0a5c60e1 100644
--- a/resources/tools/presentation/generator_cpta.py
+++ b/resources/tools/presentation/generator_cpta.py
@@ -338,11 +338,7 @@ def _generate_all_charts(spec, input_data):
:rtype: dict
"""
- logs = list()
-
- logs.append(
- (u"INFO", f" Generating the chart {graph.get(u'title', u'')} ...")
- )
+ logging.info(f" Generating the chart {graph.get(u'title', u'')} ...")
job_name = list(graph[u"data"].keys())[0]
@@ -350,11 +346,9 @@ def _generate_all_charts(spec, input_data):
res = dict()
# Transform the data
- logs.append(
- (u"INFO",
+ logging.info(
f" Creating the data set for the {graph.get(u'type', u'')} "
f"{graph.get(u'title', u'')}."
- )
)
if graph.get(u"include", None):
@@ -410,13 +404,10 @@ def _generate_all_charts(spec, input_data):
for tag in group:
for tst_name, test_data in chart_data.items():
if not test_data:
- logs.append(
- (u"WARNING", f"No data for the test {tst_name}")
- )
+ logging.warning(f"No data for the test {tst_name}")
continue
if tag not in chart_tags[tst_name]:
continue
- message = f"index: {index}, test: {tst_name}"
try:
trace, rslt = _generate_trending_traces(
test_data,
@@ -426,10 +417,8 @@ def _generate_all_charts(spec, input_data):
split(u'-')[2:-1]),
color=COLORS[index])
except IndexError:
- logs.append(
- (u"ERROR", f"Out of colors: {message}")
- )
- logging.error(f"Out of colors: {message}")
+ logging.error(f"Out of colors: index: "
+ f"{index}, test: {tst_name}")
index += 1
continue
traces.extend(trace)
@@ -441,11 +430,8 @@ def _generate_all_charts(spec, input_data):
else:
for tst_name, test_data in chart_data.items():
if not test_data:
- logs.append(
- (u"WARNING", f"No data for the test {tst_name}")
- )
+ logging.warning(f"No data for the test {tst_name}")
continue
- message = f"index: {index}, test: {tst_name}"
try:
trace, rslt = _generate_trending_traces(
test_data,
@@ -455,8 +441,9 @@ def _generate_all_charts(spec, input_data):
tst_name.split(u'.')[-1].split(u'-')[2:-1]),
color=COLORS[index])
except IndexError:
- logs.append((u"ERROR", f"Out of colors: {message}"))
- logging.error(f"Out of colors: {message}")
+ logging.error(
+ f"Out of colors: index: {index}, test: {tst_name}"
+ )
index += 1
continue
traces.extend(trace)
@@ -514,25 +501,13 @@ def _generate_all_charts(spec, input_data):
f"{spec.cpta[u'output-file']}/{graph[u'output-file-name']}"
f"{spec.cpta[u'output-file-type']}")
- logs.append((u"INFO", f" Writing the file {name_file} ..."))
+ logging.info(f" Writing the file {name_file} ...")
plpl = plgo.Figure(data=traces, layout=layout)
try:
ploff.plot(plpl, show_link=False, auto_open=False,
filename=name_file)
except plerr.PlotlyEmptyDataError:
- logs.append((u"WARNING", u"No data for the plot. Skipped."))
-
- for level, line in logs:
- if level == u"INFO":
- logging.info(line)
- elif level == u"ERROR":
- logging.error(line)
- elif level == u"DEBUG":
- logging.debug(line)
- elif level == u"CRITICAL":
- logging.critical(line)
- elif level == u"WARNING":
- logging.warning(line)
+ logging.warning(u"No data for the plot. Skipped.")
return {u"job_name": job_name, u"csv_table": csv_tbl, u"results": res}
@@ -542,7 +517,7 @@ def _generate_all_charts(spec, input_data):
builds_dict[job] = list()
for build in spec.input[u"builds"][job]:
status = build[u"status"]
- if status not in (u"failed", u"not found", u"removed"):
+ if status not in (u"failed", u"not found", u"removed", None):
builds_dict[job].append(str(build[u"build"]))
# Create "build ID": "date" dict:
diff --git a/resources/tools/presentation/input_data_files.py b/resources/tools/presentation/input_data_files.py
index e1fa5b2284..8b941f2f94 100644
--- a/resources/tools/presentation/input_data_files.py
+++ b/resources/tools/presentation/input_data_files.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2020 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -42,16 +42,14 @@ SEPARATOR = u"__"
REGEX_RELEASE = re.compile(r'(\D*)(\d{4}|master)(\D*)')
-def _download_file(url, file_name, log, arch=False):
+def _download_file(url, file_name, arch=False):
"""Download a file with input data.
:param url: URL to the file to download.
:param file_name: Name of file to download.
- :param log: List of log messages.
:param arch: If True, also .gz file is downloaded
:type url: str
:type file_name: str
- :type log: list of tuples (severity, msg)
:type arch: bool
:returns: Tuple of (download status, file name); the status is True if
the download was successful, otherwise False.
:rtype: tuple(bool, str)
@@ -90,29 +88,27 @@ def _download_file(url, file_name, log, arch=False):
success = False
session = None
try:
- log.append((u"INFO", f" Connecting to {url} ..."))
+ logging.info(f" Connecting to {url} ...")
session = requests_retry_session()
response = session.get(url, stream=True)
code = response.status_code
- log.append((u"INFO", f" {code}: {responses[code]}"))
+ logging.info(f" {code}: {responses[code]}")
if code != codes[u"OK"]:
if session:
session.close()
url = url.replace(u"_info", u"")
- log.append((u"INFO", f" Connecting to {url} ..."))
+ logging.info(f" Connecting to {url} ...")
session = requests_retry_session()
response = session.get(url, stream=True)
code = response.status_code
- log.append((u"INFO", f" {code}: {responses[code]}"))
+ logging.info(f" {code}: {responses[code]}")
if code != codes[u"OK"]:
return False, file_name
file_name = file_name.replace(u"_info", u"")
dst_file_name = file_name.replace(u".gz", u"")
- log.append(
- (u"INFO", f" Downloading the file {url} to {dst_file_name} ...")
- )
+ logging.info(f" Downloading the file {url} to {dst_file_name} ...")
with open(dst_file_name, u"wb") as file_handle:
for chunk in response.iter_content(chunk_size=CHUNK_SIZE):
if chunk:
@@ -121,44 +117,37 @@ def _download_file(url, file_name, log, arch=False):
if arch and u".gz" in file_name:
if session:
session.close()
- log.append(
- (u"INFO", f" Downloading the file {url} to {file_name} ...")
- )
+ logging.info(f" Downloading the file {url} to {file_name} ...")
session = requests_retry_session()
response = session.get(url, stream=True)
if response.status_code == codes[u"OK"]:
with open(file_name, u"wb") as file_handle:
file_handle.write(response.raw.read())
else:
- log.append(
- (u"ERROR", f"Not possible to download the file {url} to "
- f"{file_name} ...")
+ logging.error(
+ f"Not possible to download the file {url} to {file_name}"
)
success = True
except RequestException as err:
- log.append(
- (u"ERROR", f"HTTP Request exception:\n{repr(err)}")
- )
+ logging.error(f"HTTP Request exception:\n{repr(err)}")
except (IOError, ValueError, KeyError) as err:
- log.append((u"ERROR", f"Download failed.\n{repr(err)}"))
+ logging.error(f"Download failed.\n{repr(err)}")
finally:
if session:
session.close()
- log.append((u"INFO", u" Download finished."))
+ logging.info(u" Download finished.")
return success, file_name
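
`_download_file()` relies on a `requests_retry_session()` helper defined elsewhere in the module; its exact signature is not shown in this diff, but a sketch of such a helper, assuming the standard requests/urllib3 retry pattern, might look like:

```python
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

def requests_retry_session(retries=3, backoff_factor=0.3,
                           status_forcelist=(500, 502, 504)):
    """Return a Session that retries transient HTTP failures."""
    session = requests.Session()
    retry = Retry(
        total=retries, read=retries, connect=retries,
        backoff_factor=backoff_factor,
        status_forcelist=status_forcelist,
    )
    adapter = HTTPAdapter(max_retries=retry)
    session.mount("http://", adapter)
    session.mount("https://", adapter)
    return session
```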
-def _unzip_file(spec, build, pid, log):
+def _unzip_file(spec, build, pid):
"""Unzip downloaded source file.
:param spec: Specification read from the specification file.
:param build: Information about the build.
- :param log: List of log messages.
:type spec: Specification
:type build: dict
- :type log: list of tuples (severity, msg)
:returns: True if the unzipping was successful, otherwise False.
:rtype: bool
"""
@@ -178,42 +167,35 @@ def _unzip_file(spec, build, pid, log):
new_name = \
f"{file_name.rsplit(u'.')[-2]}{SEPARATOR}{data_file.split(u'/')[-1]}"
- log.append((u"INFO", f" Unzipping: {data_file} from {file_name}."))
+ logging.info(f" Unzipping: {data_file} from {file_name}.")
try:
with ZipFile(file_name, u'r') as zip_file:
zip_file.extract(data_file, tmp_dir)
- log.append(
- (u"INFO", f" Renaming the file {join(tmp_dir, data_file)} to "
- f"{new_name}")
+ logging.info(
+ f" Renaming the file {join(tmp_dir, data_file)} to {new_name}"
)
rename(join(tmp_dir, data_file), new_name)
build[u"file-name"] = new_name
return True
except (BadZipfile, RuntimeError) as err:
- log.append(
- (u"ERROR", f"Failed to unzip the file {file_name}: {repr(err)}.")
- )
+ logging.error(f"Failed to unzip the file {file_name}: {repr(err)}.")
return False
except OSError as err:
- log.append(
- (u"ERROR", f"Failed to rename the file {data_file}: {repr(err)}.")
- )
+ logging.error(f"Failed to rename the file {data_file}: {repr(err)}.")
return False
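
A condensed sketch of the extract-and-rename flow `_unzip_file()` implements, with the same exception handling as above (function name and parameters are illustrative):

```python
import logging
from os import rename
from os.path import join
from zipfile import ZipFile, BadZipfile

def extract_member(file_name, data_file, tmp_dir, new_name):
    """Extract one member of a zip archive and move it to new_name."""
    try:
        with ZipFile(file_name, "r") as zip_file:
            zip_file.extract(data_file, tmp_dir)
        rename(join(tmp_dir, data_file), new_name)
        return True
    except (BadZipfile, RuntimeError) as err:
        logging.error(f"Failed to unzip the file {file_name}: {repr(err)}.")
        return False
    except OSError as err:
        logging.error(f"Failed to rename the file {data_file}: {repr(err)}.")
        return False
```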
-def download_and_unzip_data_file(spec, job, build, pid, log):
+def download_and_unzip_data_file(spec, job, build, pid):
"""Download and unzip a source file.
:param spec: Specification read from the specification file.
:param job: Name of the Jenkins job.
:param build: Information about the build.
:param pid: PID of the process executing this method.
- :param log: List of log messages.
:type spec: Specification
:type job: str
:type build: dict
:type pid: int
- :type log: list of tuples (severity, msg)
:returns: True if the download was successful, otherwise False.
:rtype: bool
"""
@@ -235,7 +217,7 @@ def download_and_unzip_data_file(spec, job, build, pid, log):
logging.info(f"Trying to download {url}")
arch = bool(spec.configuration.get(u"archive-inputs", True))
- success, downloaded_name = _download_file(url, new_name, log, arch=arch)
+ success, downloaded_name = _download_file(url, new_name, arch=arch)
if not success:
@@ -261,9 +243,7 @@ def download_and_unzip_data_file(spec, job, build, pid, log):
spec.environment[u"paths"][u"DIR[WORKING,DATA]"],
f"{job}{SEPARATOR}{build[u'build']}{SEPARATOR}{file_name[idx]}"
)
- success, downloaded_name = _download_file(
- url, new_name, log, arch=arch
- )
+ success, downloaded_name = _download_file(url, new_name, arch=arch)
if success:
file_name = file_name[idx]
if file_name.endswith(u".gz"):
@@ -296,11 +276,11 @@ def download_and_unzip_data_file(spec, job, build, pid, log):
logging.info(f"Downloading {url}")
- success, downloaded_name = _download_file(url, new_name, log)
+ success, downloaded_name = _download_file(url, new_name)
if success and downloaded_name.endswith(u".zip"):
if not is_zipfile(downloaded_name):
- log.append((u"ERROR", f"Zip file {new_name} is corrupted."))
+ logging.error(f"Zip file {new_name} is corrupted.")
success = False
if success:
@@ -310,6 +290,6 @@ def download_and_unzip_data_file(spec, job, build, pid, log):
build[u"file-name"] = downloaded_name[:-3]
if downloaded_name.endswith(u".zip"):
- success = _unzip_file(spec, build, pid, log)
+ success = _unzip_file(spec, build, pid)
return success
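
For `.gz` downloads the code records the decompressed name (`downloaded_name[:-3]`); the decompression itself happens elsewhere in the tool. A sketch of what that step typically looks like (helper name is hypothetical):

```python
import gzip
import shutil

def gunzip(file_name):
    """Expand file_name (ending in .gz) next to itself, dropping .gz."""
    with gzip.open(file_name, "rb") as f_in, \
            open(file_name[:-3], "wb") as f_out:
        shutil.copyfileobj(f_in, f_out)
    return file_name[:-3]
```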
diff --git a/resources/tools/presentation/input_data_parser.py b/resources/tools/presentation/input_data_parser.py
index cd9c1a248d..27db6a84d8 100644
--- a/resources/tools/presentation/input_data_parser.py
+++ b/resources/tools/presentation/input_data_parser.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2020 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -1394,16 +1394,14 @@ class InputData:
"""
return self.data[job][build][u"tests"]
- def _parse_tests(self, job, build, log):
+ def _parse_tests(self, job, build):
"""Process data from robot output.xml file and return JSON structured
data.
:param job: The name of the job whose build output data will be processed.
:param build: The build whose output data will be processed.
- :param log: List of log messages.
:type job: str
:type build: dict
- :type log: list of tuples (severity, msg)
:returns: JSON data structure.
:rtype: dict
"""
@@ -1417,9 +1415,8 @@ class InputData:
try:
result = ExecutionResult(data_file)
except errors.DataError as err:
- log.append(
- (u"ERROR", f"Error occurred while parsing output.xml: "
- f"{repr(err)}")
+ logging.error(
+ f"Error occurred while parsing output.xml: {repr(err)}"
)
return None
checker = ExecutionChecker(metadata, self._cfg.mapping,
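
`_parse_tests()` feeds the downloaded `output.xml` to Robot Framework's result API and walks it with the module's `ExecutionChecker` visitor. A minimal sketch of that parse-and-visit pattern, assuming Robot Framework is installed and using a hypothetical counting visitor in place of the real checker:

```python
from robot.api import ExecutionResult, ResultVisitor

class TestCounter(ResultVisitor):
    """Illustrative visitor: count test cases in a result tree."""
    def __init__(self):
        self.count = 0

    def visit_test(self, test):
        self.count += 1

result = ExecutionResult("output.xml")  # path is illustrative
counter = TestCounter()
result.visit(counter)
print(f"{counter.count} tests parsed")
```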
@@ -1444,40 +1441,30 @@ class InputData:
:type repeat: int
"""
- logs = list()
-
- logs.append(
- (u"INFO", f" Processing the job/build: {job}: {build[u'build']}")
- )
+ logging.info(f" Processing the job/build: {job}: {build[u'build']}")
state = u"failed"
success = False
data = None
do_repeat = repeat
while do_repeat:
- success = download_and_unzip_data_file(self._cfg, job, build, pid,
- logs)
+ success = download_and_unzip_data_file(self._cfg, job, build, pid)
if success:
break
do_repeat -= 1
if not success:
- logs.append(
- (u"ERROR",
+ logging.error(
f"It is not possible to download the input data file from the "
f"job {job}, build {build[u'build']}, or it is damaged. "
- f"Skipped.")
+ f"Skipped."
)
if success:
- logs.append(
- (u"INFO",
- f" Processing data from the build {build[u'build']} ...")
- )
- data = self._parse_tests(job, build, logs)
+ logging.info(f" Processing data from build {build[u'build']}")
+ data = self._parse_tests(job, build)
if data is None:
- logs.append(
- (u"ERROR",
+ logging.error(
f"Input data file from the job {job}, build "
- f"{build[u'build']} is damaged. Skipped.")
+ f"{build[u'build']} is damaged. Skipped."
)
else:
state = u"processed"
@@ -1485,13 +1472,13 @@ class InputData:
try:
remove(build[u"file-name"])
except OSError as err:
- logs.append(
- ("ERROR", f"Cannot remove the file {build[u'file-name']}: "
- f"{repr(err)}")
+ logging.error(
+ f"Cannot remove the file {build[u'file-name']}: {repr(err)}"
)
# If the time-period is defined in the specification file, remove all
# files which are outside the time period.
+ is_last = False
timeperiod = self._cfg.input.get(u"time-period", None)
if timeperiod and data:
now = dt.utcnow()
@@ -1505,26 +1492,20 @@ class InputData:
# Remove the data and the file:
state = u"removed"
data = None
- logs.append(
- (u"INFO",
- f" The build {job}/{build[u'build']} is "
- f"outdated, will be removed.")
+ is_last = True
+ logging.info(
+ f" The build {job}/{build[u'build']} is "
+ f"outdated, will be removed."
)
- logs.append((u"INFO", u" Done."))
-
- for level, line in logs:
- if level == u"INFO":
- logging.info(line)
- elif level == u"ERROR":
- logging.error(line)
- elif level == u"DEBUG":
- logging.debug(line)
- elif level == u"CRITICAL":
- logging.critical(line)
- elif level == u"WARNING":
- logging.warning(line)
-
- return {u"data": data, u"state": state, u"job": job, u"build": build}
+ logging.info(u" Done.")
+
+ return {
+ u"data": data,
+ u"state": state,
+ u"job": job,
+ u"build": build,
+ u"last": is_last
+ }
def download_and_parse_data(self, repeat=1):
"""Download the input data files, parse input data from input files and
@@ -1541,6 +1522,8 @@ class InputData:
for build in builds:
result = self._download_and_parse_build(job, build, repeat)
+ if result[u"last"]:
+ break
build_nr = result[u"build"][u"build"]
if result[u"data"]:
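
Because the builds are now ordered newest first (see the `reverse` logic in `specification_parser.py` below), the first build outside the time period implies all remaining builds are outdated too, so the loop can stop at the new `last` flag. A self-contained sketch with illustrative data:

```python
from datetime import datetime

CUTOFF = datetime(2020, 1, 1)  # illustrative time-period boundary

def process(build):
    """Hypothetical stand-in for _download_and_parse_build()."""
    return {"build": build["nr"], "last": build["date"] < CUTOFF}

builds = [  # newest first, as the reversed specification produces
    {"nr": 3, "date": datetime(2020, 3, 1)},
    {"nr": 2, "date": datetime(2020, 2, 1)},
    {"nr": 1, "date": datetime(2019, 12, 1)},
]
for build in builds:
    result = process(build)
    if result["last"]:  # first outdated build; the rest are older
        break
```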
@@ -1908,9 +1891,10 @@ class InputData:
data[job][str(build)][
test_id][param] = u"No Data"
except KeyError as err:
- logging.error(repr(err))
if continue_on_error:
+ logging.debug(repr(err))
continue
+ logging.error(repr(err))
return None
return data
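
The reordered error handling above logs a `KeyError` as a debug message when `continue_on_error` is set, and as an error only when it aborts. A sketch of the pattern (function and parameter names illustrative):

```python
import logging

def extract(rows, param, continue_on_error=False):
    """Collect rows[i][param]; behavior on KeyError depends on the flag."""
    data = []
    for row in rows:
        try:
            data.append(row[param])
        except KeyError as err:
            if continue_on_error:
                logging.debug(repr(err))  # tolerated, keep going
                continue
            logging.error(repr(err))      # fatal for this query
            return None
    return data
```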
diff --git a/resources/tools/presentation/specification_CPTA.yaml b/resources/tools/presentation/specification_CPTA.yaml
index 4d162a06e7..20efefeec3 100644
--- a/resources/tools/presentation/specification_CPTA.yaml
+++ b/resources/tools/presentation/specification_CPTA.yaml
@@ -214,6 +214,7 @@
start: 1086
>>>>>>> CHANGE (12d5c9 Trending: Fixes in plots, data)
end: "lastCompletedBuild"
+<<<<<<< HEAD (89bdc8 Trending: Fixes in plots, data)
skip:
<<<<<<< HEAD (914f8e Report: Add 2n-skx Soak and Reconf tests)
<<<<<<< HEAD (b54dc7 PAL: Local mode uses xml file name as a build number)
@@ -249,14 +250,13 @@
>>>>>>> CHANGE (51210b Trending: Update graphs - dnv)
- 1102
>>>>>>> CHANGE (b51708 Trending: Plots)
+=======
+ max-builds: 180 # Max nr of builds to use
+>>>>>>> CHANGE (1945eb PAL: Reverse download order for trending)
csit-dpdk-perf-mrr-weekly-master:
start: 107
end: "lastCompletedBuild"
- skip:
- - 110
- - 111
- - 112
- - 114
+ max-builds: 15 # Max nr of builds to use
plot-performance-trending-vpp-3n-hsw:
csit-vpp-perf-mrr-daily-master:
@@ -278,6 +278,7 @@
start: 1086
>>>>>>> CHANGE (12d5c9 Trending: Fixes in plots, data)
end: "lastCompletedBuild"
+<<<<<<< HEAD (89bdc8 Trending: Fixes in plots, data)
skip:
<<<<<<< HEAD (914f8e Report: Add 2n-skx Soak and Reconf tests)
<<<<<<< HEAD (b54dc7 PAL: Local mode uses xml file name as a build number)
@@ -313,16 +314,15 @@
>>>>>>> CHANGE (51210b Trending: Update graphs - dnv)
- 1102
>>>>>>> CHANGE (b51708 Trending: Plots)
+=======
+ max-builds: 180 # Max nr of builds to use
+>>>>>>> CHANGE (1945eb PAL: Reverse download order for trending)
plot-performance-trending-dpdk-3n-hsw:
csit-dpdk-perf-mrr-weekly-master:
start: 107
end: "lastCompletedBuild"
- skip:
- - 110
- - 111
- - 112
- - 114
+ max-builds: 15 # Max nr of builds to use
# 3n-skx
plot-performance-trending-all-3n-skx:
@@ -345,6 +345,7 @@
start: 834
>>>>>>> CHANGE (12d5c9 Trending: Fixes in plots, data)
end: "lastCompletedBuild"
+<<<<<<< HEAD (89bdc8 Trending: Fixes in plots, data)
skip:
<<<<<<< HEAD (914f8e Report: Add 2n-skx Soak and Reconf tests)
<<<<<<< HEAD (b54dc7 PAL: Local mode uses xml file name as a build number)
@@ -399,13 +400,13 @@
- 865
- 866
>>>>>>> CHANGE (12d5c9 Trending: Fixes in plots, data)
+=======
+ max-builds: 180 # Max nr of builds to use
+>>>>>>> CHANGE (1945eb PAL: Reverse download order for trending)
csit-dpdk-perf-mrr-weekly-master-3n-skx:
start: 72
end: "lastCompletedBuild"
- skip:
- - 76
- - 77
- - 79
+ max-builds: 15 # Max nr of builds to use
plot-performance-trending-vpp-3n-skx:
csit-vpp-perf-mrr-daily-master-3n-skx:
@@ -427,6 +428,7 @@
start: 834
>>>>>>> CHANGE (12d5c9 Trending: Fixes in plots, data)
end: "lastCompletedBuild"
+<<<<<<< HEAD (89bdc8 Trending: Fixes in plots, data)
skip:
<<<<<<< HEAD (914f8e Report: Add 2n-skx Soak and Reconf tests)
<<<<<<< HEAD (b54dc7 PAL: Local mode uses xml file name as a build number)
@@ -481,15 +483,15 @@
- 865
- 866
>>>>>>> CHANGE (12d5c9 Trending: Fixes in plots, data)
+=======
+ max-builds: 180 # Max nr of builds to use
+>>>>>>> CHANGE (1945eb PAL: Reverse download order for trending)
plot-performance-trending-dpdk-3n-skx:
csit-dpdk-perf-mrr-weekly-master-3n-skx:
start: 72
end: "lastCompletedBuild"
- skip:
- - 76
- - 77
- - 79
+ max-builds: 15 # Max nr of builds to use
# 2n-skx
plot-performance-trending-all-2n-skx:
@@ -512,6 +514,7 @@
start: 858
>>>>>>> CHANGE (12d5c9 Trending: Fixes in plots, data)
end: "lastCompletedBuild"
+<<<<<<< HEAD (89bdc8 Trending: Fixes in plots, data)
skip:
<<<<<<< HEAD (914f8e Report: Add 2n-skx Soak and Reconf tests)
<<<<<<< HEAD (b54dc7 PAL: Local mode uses xml file name as a build number)
@@ -549,11 +552,13 @@
- 870
- 874
>>>>>>> CHANGE (01597c Trending: Update graphs)
+=======
+ max-builds: 180 # Max nr of builds to use
+>>>>>>> CHANGE (1945eb PAL: Reverse download order for trending)
csit-dpdk-perf-mrr-weekly-master-2n-skx:
start: 78
end: "lastCompletedBuild"
- skip:
- - 79
+ max-builds: 15 # Max nr of builds to use
plot-performance-trending-vpp-2n-skx:
csit-vpp-perf-mrr-daily-master-2n-skx:
@@ -575,6 +580,7 @@
start: 858
>>>>>>> CHANGE (12d5c9 Trending: Fixes in plots, data)
end: "lastCompletedBuild"
+<<<<<<< HEAD (89bdc8 Trending: Fixes in plots, data)
skip:
<<<<<<< HEAD (914f8e Report: Add 2n-skx Soak and Reconf tests)
<<<<<<< HEAD (b54dc7 PAL: Local mode uses xml file name as a build number)
@@ -619,22 +625,21 @@
- 870
- 874
>>>>>>> CHANGE (01597c Trending: Update graphs)
+=======
+ max-builds: 180 # Max nr of builds to use
+>>>>>>> CHANGE (1945eb PAL: Reverse download order for trending)
plot-performance-trending-dpdk-2n-skx:
csit-dpdk-perf-mrr-weekly-master-2n-skx:
start: 78
end: "lastCompletedBuild"
- skip:
- - 79
+ max-builds: 15 # Max nr of builds to use
plot-performance-trending-vpp-nfv-2n-skx:
csit-vpp-perf-mrr-weekly-master-2n-skx:
start: 50
end: "lastCompletedBuild"
- skip:
- - 52
- - 54
- - 55
+ max-builds: 15 # Max nr of builds to use
<<<<<<< HEAD (d3ea22 Report: Add data)
=======
@@ -642,19 +647,13 @@
csit-vpp-perf-mrr-daily-master-2n-clx:
start: 236
end: "lastCompletedBuild"
- skip:
- - 239
- - 248
- - 251
- - 252
- - 253
+ max-builds: 180 # Max nr of builds to use
plot-performance-trending-dpdk-2n-clx:
csit-dpdk-perf-mrr-weekly-master-2n-clx:
start: 23
end: "lastCompletedBuild"
- skip:
- - 24
+ max-builds: 15 # Max nr of builds to use
>>>>>>> CHANGE (b51708 Trending: Plots)
# 3n-tsh
@@ -662,6 +661,7 @@
csit-vpp-perf-mrr-daily-master-3n-tsh:
start: 144
end: "lastCompletedBuild"
+<<<<<<< HEAD (89bdc8 Trending: Fixes in plots, data)
skip:
<<<<<<< HEAD (f7dd81 Trending: Update graphs - dnv)
<<<<<<< HEAD (b54dc7 PAL: Local mode uses xml file name as a build number)
@@ -687,11 +687,15 @@
- 156
- 157
>>>>>>> CHANGE (01597c Trending: Update graphs)
+=======
+ max-builds: 180 # Max nr of builds to use
+>>>>>>> CHANGE (1945eb PAL: Reverse download order for trending)
plot-performance-trending-vpp-3n-tsh:
csit-vpp-perf-mrr-daily-master-3n-tsh:
start: 144
end: "lastCompletedBuild"
+<<<<<<< HEAD (89bdc8 Trending: Fixes in plots, data)
skip:
<<<<<<< HEAD (f7dd81 Trending: Update graphs - dnv)
<<<<<<< HEAD (b54dc7 PAL: Local mode uses xml file name as a build number)
@@ -717,6 +721,9 @@
- 156
- 157
>>>>>>> CHANGE (01597c Trending: Update graphs)
+=======
+ max-builds: 180 # Max nr of builds to use
+>>>>>>> CHANGE (1945eb PAL: Reverse download order for trending)
plot-performance-trending-vpp-3n-dnv:
csit-vpp-perf-mrr-daily-master-3n-dnv:
@@ -734,6 +741,7 @@
start: 329
>>>>>>> CHANGE (12d5c9 Trending: Fixes in plots, data)
end: "lastCompletedBuild"
+<<<<<<< HEAD (89bdc8 Trending: Fixes in plots, data)
skip:
<<<<<<< HEAD (f7dd81 Trending: Update graphs - dnv)
<<<<<<< HEAD (b54dc7 PAL: Local mode uses xml file name as a build number)
@@ -790,6 +798,9 @@
- 391
- 392
>>>>>>> CHANGE (12d5c9 Trending: Fixes in plots, data)
+=======
+ max-builds: 180 # Max nr of builds to use
+>>>>>>> CHANGE (1945eb PAL: Reverse download order for trending)
plot-performance-trending-vpp-2n-dnv:
csit-vpp-perf-mrr-daily-master-2n-dnv:
@@ -807,6 +818,7 @@
start: 335
>>>>>>> CHANGE (12d5c9 Trending: Fixes in plots, data)
end: "lastCompletedBuild"
+<<<<<<< HEAD (89bdc8 Trending: Fixes in plots, data)
skip:
<<<<<<< HEAD (f7dd81 Trending: Update graphs - dnv)
- 145
@@ -854,6 +866,8 @@
- 396
- 397
>>>>>>> CHANGE (12d5c9 Trending: Fixes in plots, data)
+=======
+>>>>>>> CHANGE (1945eb PAL: Reverse download order for trending)
plot-layouts:
@@ -969,6 +983,7 @@
start: 1086
>>>>>>> CHANGE (12d5c9 Trending: Fixes in plots, data)
end: "lastCompletedBuild"
+ max-builds: 180 # Max nr of builds to download
skip:
<<<<<<< HEAD (914f8e Report: Add 2n-skx Soak and Reconf tests)
<<<<<<< HEAD (b54dc7 PAL: Local mode uses xml file name as a build number)
@@ -1007,6 +1022,7 @@
csit-dpdk-perf-mrr-weekly-master:
start: 113
end: "lastCompletedBuild"
+ max-builds: 15 # Max nr of builds to download
skip:
- 114
@@ -1030,6 +1046,7 @@
start: 834
>>>>>>> CHANGE (12d5c9 Trending: Fixes in plots, data)
end: "lastCompletedBuild"
+ max-builds: 180 # Max nr of builds to download
skip:
<<<<<<< HEAD (914f8e Report: Add 2n-skx Soak and Reconf tests)
<<<<<<< HEAD (b54dc7 PAL: Local mode uses xml file name as a build number)
@@ -1087,6 +1104,7 @@
csit-dpdk-perf-mrr-weekly-master-3n-skx:
start: 78
end: "lastCompletedBuild"
+ max-builds: 15 # Max nr of builds to download
skip:
- 79
@@ -1110,6 +1128,7 @@
start: 858
>>>>>>> CHANGE (12d5c9 Trending: Fixes in plots, data)
end: "lastCompletedBuild"
+ max-builds: 180 # Max nr of builds to download
skip:
<<<<<<< HEAD (914f8e Report: Add 2n-skx Soak and Reconf tests)
<<<<<<< HEAD (b54dc7 PAL: Local mode uses xml file name as a build number)
@@ -1150,6 +1169,7 @@
csit-vpp-perf-mrr-weekly-master-2n-skx:
start: 50
end: "lastCompletedBuild"
+ max-builds: 15 # Max nr of builds to download
skip:
- 52
- 54
@@ -1157,12 +1177,14 @@
csit-dpdk-perf-mrr-weekly-master-2n-skx:
start: 78
end: "lastCompletedBuild"
+ max-builds: 15 # Max nr of builds to download
skip:
- 79
csit-vpp-perf-mrr-daily-master-2n-clx:
start: 236
end: "lastCompletedBuild"
+ max-builds: 180 # Max nr of builds to download
skip:
<<<<<<< HEAD (914f8e Report: Add 2n-skx Soak and Reconf tests)
<<<<<<< HEAD (b54dc7 PAL: Local mode uses xml file name as a build number)
@@ -1197,6 +1219,7 @@
csit-dpdk-perf-mrr-weekly-master-2n-clx:
start: 23
end: "lastCompletedBuild"
+ max-builds: 15 # Max nr of builds to download
skip:
<<<<<<< HEAD (f7dd81 Trending: Update graphs - dnv)
- 18
@@ -1214,6 +1237,7 @@
csit-vpp-perf-mrr-daily-master-3n-tsh:
start: 144
end: "lastCompletedBuild"
+ max-builds: 180 # Max nr of builds to download
skip:
<<<<<<< HEAD (f7dd81 Trending: Update graphs - dnv)
<<<<<<< HEAD (b54dc7 PAL: Local mode uses xml file name as a build number)
@@ -1256,6 +1280,7 @@
start: 329
>>>>>>> CHANGE (12d5c9 Trending: Fixes in plots, data)
end: "lastCompletedBuild"
+ max-builds: 180 # Max nr of builds to download
skip:
<<<<<<< HEAD (f7dd81 Trending: Update graphs - dnv)
<<<<<<< HEAD (b54dc7 PAL: Local mode uses xml file name as a build number)
@@ -1329,6 +1354,7 @@
start: 335
>>>>>>> CHANGE (12d5c9 Trending: Fixes in plots, data)
end: "lastCompletedBuild"
+ max-builds: 180 # Max nr of builds to download
skip:
<<<<<<< HEAD (f7dd81 Trending: Update graphs - dnv)
- 145
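
The YAML changes replace per-build `skip` lists with a `max-builds` cap. A sketch of the resulting entry shape, loaded with PyYAML (assumed available; job name mirrors the specification above):

```python
import yaml

entry = yaml.safe_load("""
csit-vpp-perf-mrr-daily-master:
  start: 1086
  end: "lastCompletedBuild"
  max-builds: 180  # Max nr of builds to download
""")
print(entry["csit-vpp-perf-mrr-daily-master"]["max-builds"])  # 180
```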
diff --git a/resources/tools/presentation/specification_parser.py b/resources/tools/presentation/specification_parser.py
index 61ef42ef8d..548bbff532 100644
--- a/resources/tools/presentation/specification_parser.py
+++ b/resources/tools/presentation/specification_parser.py
@@ -528,14 +528,15 @@ class Specification:
continue
if isinstance(builds, dict):
build_end = builds.get(u"end", None)
+ max_builds = builds.get(u"max-builds", None)
try:
build_end = int(build_end)
except ValueError:
# defined as a range <start, build_type>
build_end = self._get_build_number(job, build_end)
- builds = [x for x in range(builds[u"start"],
- build_end + 1)
- if x not in builds.get(u"skip", list())]
+ builds = [x for x in range(builds[u"start"], build_end + 1)]
+ if max_builds and max_builds < len(builds):
+ builds = builds[:max_builds]
self.configuration[u"data-sets"][set_name][job] = builds
elif isinstance(builds, list):
for idx, item in enumerate(builds):
@@ -590,14 +591,23 @@ class Specification:
if builds:
if isinstance(builds, dict):
build_end = builds.get(u"end", None)
+ max_builds = builds.get(u"max-builds", None)
+ reverse = bool(builds.get(u"reverse", False))
try:
build_end = int(build_end)
except ValueError:
# defined as a range <start, build_type>
+ if build_end in (u"lastCompletedBuild",
+ u"lastSuccessfulBuild"):
+ reverse = True
build_end = self._get_build_number(job, build_end)
builds = [x for x in range(builds[u"start"],
build_end + 1)
if x not in builds.get(u"skip", list())]
+ if reverse:
+ builds.reverse()
+ if max_builds and max_builds < len(builds):
+ builds = builds[:max_builds]
self._specification[u"input"][u"builds"][job] = list()
for build in builds:
self._specification[u"input"][u"builds"][job]. \
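
In the input-builds branch, the list is reversed before the cap whenever the end is a moving target such as `lastCompletedBuild`, so the newest builds are the ones kept. A worked example:

```python
builds = list(range(1086, 1266))  # 180 builds, oldest .. newest
builds.reverse()                  # newest first
max_builds = 15
builds = builds[:max_builds]      # keep the newest max_builds builds
print(builds[0], builds[-1])      # 1265 1251
```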
@@ -608,6 +618,7 @@ class Specification:
f"No build is defined for the job {job}. Trying to "
f"continue without it."
)
+
except KeyError:
raise PresentationError(u"No data to process.")