Diffstat (limited to 'resources/tools/presentation')
 resources/tools/presentation/generator_tables.py     | 27
 resources/tools/presentation/pal.py                  | 82
 resources/tools/presentation/specification_CPTA.yaml | 10
 3 files changed, 61 insertions(+), 58 deletions(-)
diff --git a/resources/tools/presentation/generator_tables.py b/resources/tools/presentation/generator_tables.py
index 9f0096557e..65bf6d562e 100644
--- a/resources/tools/presentation/generator_tables.py
+++ b/resources/tools/presentation/generator_tables.py
@@ -567,7 +567,7 @@ def table_performance_trending_dashboard(table, input_data):
try:
tbl_dict[tst_name]["data"]. \
append(tst_data["result"]["throughput"])
- except TypeError:
+ except (TypeError, KeyError):
pass # No data in output.xml for this test
tbl_lst = list()
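
Widening the except clause matters here: tst_data can be None when a run produced no data at all (indexing None raises TypeError), but a run can also produce a "result" dict with no "throughput" entry, which raises KeyError instead. A minimal standalone sketch of both failure modes (the sample dicts are illustrative, not real output.xml content):

samples = list()
for tst_data in (
        {"result": {"throughput": 1000000}},  # normal sample
        None,                                 # no data at all -> TypeError
        {"result": {}}):                      # missing key -> KeyError
    try:
        samples.append(tst_data["result"]["throughput"])
    except (TypeError, KeyError):
        pass  # no usable data for this test run
print(samples)  # [1000000]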
@@ -579,7 +579,8 @@ def table_performance_trending_dashboard(table, input_data):
# Test name:
name = tbl_dict[tst_name]["name"]
# Throughput trend:
- trend = list(pd_data.rolling(window=win_size).median())[-2]
+ trend = list(pd_data.rolling(window=win_size, min_periods=2).
+ median())[-2]
# Anomaly:
t_data, _ = find_outliers(pd_data)
last = list(t_data)[-1]
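
By default pandas' rolling median stays NaN until a full window of samples has accumulated; with min_periods=2 a trend value becomes available as soon as two samples exist. A small self-contained illustration (the sample values are made up):

import pandas as pd

data = pd.Series([10.0, 12.0, 11.0, 13.0])
# Default: NaN until the window of 3 is full.
print(list(data.rolling(window=3).median()))
# -> [nan, nan, 11.0, 12.0]
# With min_periods=2, a median appears once two samples exist.
print(list(data.rolling(window=3, min_periods=2).median()))
# -> [nan, 11.0, 11.0, 12.0]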
@@ -593,16 +594,18 @@ def table_performance_trending_dashboard(table, input_data):
anomaly = "progression"
else:
anomaly = "normal"
- # Change:
- change = round(float(last - trend) / 1000000, 2)
- # Relative change:
- rel_change = int(relative_change(float(trend), float(last)))
-
- tbl_lst.append([name,
- round(float(last) / 1000000, 2),
- change,
- rel_change,
- anomaly])
+
+ if not isnan(last) and not isnan(trend):
+ # Change:
+ change = round(float(last - trend) / 1000000, 2)
+ # Relative change:
+ rel_change = int(relative_change(float(trend), float(last)))
+
+ tbl_lst.append([name,
+ round(float(last) / 1000000, 2),
+ change,
+ rel_change,
+ anomaly])
# Sort the table according to the relative change
tbl_lst.sort(key=lambda rel: rel[-2], reverse=True)
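
Even with min_periods=2, both last and trend can still be NaN (for example with a single sample, or when every sample is flagged as an outlier), and NaN would propagate into the Change and Relative change columns; the isnan guard simply drops such rows. A runnable sketch of the guarded computation, assuming relative_change(nr1, nr2) returns the percentage change of nr2 against nr1 (an assumption about the helper's contract, not its real code):

from math import isnan

def relative_change(nr1, nr2):
    # Assumed contract of the real helper: percent change of nr2 vs nr1.
    return float(nr2 - nr1) / float(nr1) * 100

rows = list()
for name, last, trend in (("test-a", 2.5e6, 2.0e6),
                          ("test-b", float("nan"), 2.0e6)):
    if not isnan(last) and not isnan(trend):
        rows.append([name,
                     round(float(last) / 1000000, 2),
                     round(float(last - trend) / 1000000, 2),
                     int(relative_change(float(trend), float(last)))])
print(rows)  # [['test-a', 2.5, 0.5, 25]] -- the NaN row is skipped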
diff --git a/resources/tools/presentation/pal.py b/resources/tools/presentation/pal.py
index aaeacaac15..98642c898c 100644
--- a/resources/tools/presentation/pal.py
+++ b/resources/tools/presentation/pal.py
@@ -87,48 +87,48 @@ def main():
return 1
ret_code = 0
- # try:
- env = Environment(spec.environment, args.force)
- env.set_environment()
-
- if spec.is_debug:
- if spec.debug["input-format"] == "zip":
- unzip_files(spec)
- else:
- download_data_files(spec)
-
- prepare_static_content(spec)
-
- data = InputData(spec)
- data.read_data()
-
- generate_tables(spec, data)
- generate_plots(spec, data)
- generate_files(spec, data)
-
- if spec.output["output"] == "report":
- generate_report(args.release, spec)
- logging.info("Successfully finished.")
- elif spec.output["output"] == "CPTA":
- sys.stdout.write(generate_cpta(spec, data))
- logging.info("Successfully finished.")
- else:
- logging.critical("The output '{0}' is not supported.".
- format(spec.output["output"]))
+ try:
+ env = Environment(spec.environment, args.force)
+ env.set_environment()
+
+ if spec.is_debug:
+ if spec.debug["input-format"] == "zip":
+ unzip_files(spec)
+ else:
+ download_data_files(spec)
+
+ prepare_static_content(spec)
+
+ data = InputData(spec)
+ data.read_data()
+
+ generate_tables(spec, data)
+ generate_plots(spec, data)
+ generate_files(spec, data)
+
+ if spec.output["output"] == "report":
+ generate_report(args.release, spec)
+ logging.info("Successfully finished.")
+ elif spec.output["output"] == "CPTA":
+ sys.stdout.write(generate_cpta(spec, data))
+ logging.info("Successfully finished.")
+ else:
+ logging.critical("The output '{0}' is not supported.".
+ format(spec.output["output"]))
+ ret_code = 1
+
+ except (KeyError, ValueError, PresentationError) as err:
+ logging.info("Finished with an error.")
+ logging.critical(str(err))
ret_code = 1
-
- # except (KeyError, ValueError, PresentationError) as err:
- # logging.info("Finished with an error.")
- # logging.critical(str(err))
- # ret_code = 1
- # except Exception as err:
- # logging.info("Finished with an unexpected error.")
- # logging.critical(str(err))
- # ret_code = 1
- # finally:
- # if spec is not None and not spec.is_debug:
- # clean_environment(spec.environment)
- # return ret_code
+ except Exception as err:
+ logging.info("Finished with an unexpected error.")
+ logging.critical(str(err))
+ ret_code = 1
+ finally:
+ if spec is not None and not spec.is_debug:
+ clean_environment(spec.environment)
+ return ret_code
if __name__ == '__main__':
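
This hunk re-enables the error handling that had been commented out for debugging: expected failures (bad keys or values in the specification) and unexpected ones both become a logged message plus a non-zero return code, and the finally block guarantees the working environment is cleaned up outside debug mode. A stripped-down sketch of the control flow (process() and its body are illustrative stand-ins, not pal.py's API):

import logging

def process(fail):
    ret_code = 0
    try:
        if fail:
            raise ValueError("broken input specification")
        logging.info("Successfully finished.")
    except (KeyError, ValueError) as err:
        logging.info("Finished with an error.")
        logging.critical(str(err))
        ret_code = 1
    except Exception as err:
        logging.info("Finished with an unexpected error.")
        logging.critical(str(err))
        ret_code = 1
    finally:
        pass  # clean_environment(spec.environment) would run here
    return ret_code

print(process(fail=False), process(fail=True))  # 0 1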
diff --git a/resources/tools/presentation/specification_CPTA.yaml b/resources/tools/presentation/specification_CPTA.yaml
index cbd2391ca9..510c932077 100644
--- a/resources/tools/presentation/specification_CPTA.yaml
+++ b/resources/tools/presentation/specification_CPTA.yaml
@@ -215,11 +215,11 @@
output-file-ext: ".csv"
output-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-1t1c"
data: "plot-performance-trending"
- filter: "'1T1C'"
+ filter: "'MRR' and '1T1C'"
parameters:
- "name"
- "parent"
- - "throughput"
+ - "result"
# Number of the best and the worst tests presented in the table. Use 0 (zero)
# to present all tests.
nr-of-tests-shown: 20
@@ -233,11 +233,11 @@
output-file-ext: ".csv"
output-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-2t2c"
data: "plot-performance-trending"
- filter: "'2T2C'"
+ filter: "'MRR' and '2T2C'"
parameters:
- "name"
- "parent"
- - "throughput"
+ - "result"
# Number of the best and the worst tests presented in the table. Use 0 (zero)
# to present all tests.
nr-of-tests-shown: 20
@@ -251,7 +251,7 @@
output-file-ext: ".csv"
output-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-4t4c"
data: "plot-performance-trending"
- filter: "'4T4C'"
+ filter: "'MRR' and '4T4C'"
parameters:
- "name"
- "parent"