author     Vratko Polak <vrpolak@cisco.com>    2019-12-02 19:01:35 +0100
committer  Vratko Polak <vrpolak@cisco.com>    2019-12-02 19:01:35 +0100
commit     bd799a13a06c82e9b38097ea80ec30337edd5e8e (patch)
tree       a07e553c33e73a8fa28849bd8fdbbe018a8a0868 /resources
parent     627cddca1d64edb8475407a1524efb2a22249a25 (diff)
Use Jumpavg 0.2.0 in PAL
+ As a side effect, PAL is now part of the "resources" package tree.
- Testable only with 23558 (the rest of the PAL Python 3 migration).
Change-Id: Icbd90fd71458c07bced86f4bab9fa4e68282c38c
Signed-off-by: Vratko Polak <vrpolak@cisco.com>
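The change replaces the PyPI jumpavg 0.1.3 classes (AvgStdevMetadataFactory, BitCountingClassifier) with the jumpavg 0.2.0 module vendored under resources/libraries/python. A minimal sketch of the call that input_data_parser.py now uses to turn a list of per-trial receive rates into the stored average; the sample values and the assumption that the repository root is on PYTHONPATH are mine, the import path and API names come from the diff below:

    from resources.libraries.python import jumpavg

    # Example per-trial receive rates (made-up values, normally parsed
    # from the MRR test message).
    items_float = [2.1e6, 2.3e6, 2.2e6]

    # jumpavg 0.2.0: AvgStdevStats.for_runs() replaces
    # AvgStdevMetadataFactory.from_data(); PAL keeps only the average
    # until CSIT-1180 switches to storing the whole list.
    stats = jumpavg.AvgStdevStats.for_runs(items_float)
    receive_rate = stats.avg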
Diffstat (limited to 'resources')
-rw-r--r--  resources/tools/presentation/__init__.py             | 18
-rw-r--r--  resources/tools/presentation/generator_CPTA.py       |  9
-rw-r--r--  resources/tools/presentation/generator_plots.py      |  4
-rw-r--r--  resources/tools/presentation/generator_tables.py     |  6
-rw-r--r--  resources/tools/presentation/input_data_parser.py    | 18
-rw-r--r--  resources/tools/presentation/requirements.txt        |  1
-rw-r--r--  resources/tools/presentation/specification_parser.py |  4
-rw-r--r--  resources/tools/presentation/utils.py                | 30
8 files changed, 52 insertions, 38 deletions
diff --git a/resources/tools/presentation/__init__.py b/resources/tools/presentation/__init__.py
new file mode 100644
index 0000000000..a3b7344358
--- /dev/null
+++ b/resources/tools/presentation/__init__.py
@@ -0,0 +1,18 @@
+# Copyright (c) 2019 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+__init__ file for directory presentation
+
+This makes the presentation a part of the great CSIT resources package.
+"""
diff --git a/resources/tools/presentation/generator_CPTA.py b/resources/tools/presentation/generator_CPTA.py
index f57757f451..eec401bd1f 100644
--- a/resources/tools/presentation/generator_CPTA.py
+++ b/resources/tools/presentation/generator_CPTA.py
@@ -181,7 +181,7 @@ def _generate_trending_traces(in_data, job_name, build_info,
         if "dpdk" in job_name:
             hover_text.append(hover_str.format(
                 date=date,
-                value=int(in_data[idx].avg),
+                value=int(in_data[idx]),
                 sut="dpdk",
                 build=build_info[job_name][str(idx)][1].rsplit('~', 1)[0],
                 period="weekly",
@@ -190,7 +190,7 @@ def _generate_trending_traces(in_data, job_name, build_info,
         elif "vpp" in job_name:
             hover_text.append(hover_str.format(
                 date=date,
-                value=int(in_data[idx].avg),
+                value=int(in_data[idx]),
                 sut="vpp",
                 build=build_info[job_name][str(idx)][1].rsplit('~', 1)[0],
                 period="daily",
@@ -228,7 +228,7 @@ def _generate_trending_traces(in_data, job_name, build_info,
     trace_samples = plgo.Scatter(
         x=xaxis,
-        y=[y.avg for y in data_y],
+        y=[y for y in data_y],  # Was: y.avg
         mode='markers',
         line={
             "width": 1
@@ -364,8 +364,7 @@ def _generate_all_charts(spec, input_data):
             tst_lst = list()
             for bld in builds_dict[job_name]:
                 itm = tst_data.get(int(bld), '')
-                if not isinstance(itm, str):
-                    itm = itm.avg
+                # CSIT-1180: Itm will be list, compute stats.
                 tst_lst.append(str(itm))
             csv_tbl.append("{0},".format(tst_name) + ",".join(tst_lst) + '\n')
diff --git a/resources/tools/presentation/generator_plots.py b/resources/tools/presentation/generator_plots.py
index 0e0faff5bc..3cbd35c430 100644
--- a/resources/tools/presentation/generator_plots.py
+++ b/resources/tools/presentation/generator_plots.py
@@ -1822,7 +1822,7 @@ def plot_service_density_heatmap(plot, input_data):
                                 stdev=None)
                 try:
                     if plot["include-tests"] == "MRR":
-                        result = test["result"]["receive-rate"].avg
+                        result = test["result"]["receive-rate"]  # .avg
                     elif plot["include-tests"] == "PDR":
                         result = test["throughput"]["PDR"]["LOWER"]
                     elif plot["include-tests"] == "NDR":
@@ -2110,7 +2110,7 @@ def plot_service_density_heatmap_compare(plot, input_data):
                                 stdev_c=None)
                 try:
                     if plot["include-tests"] == "MRR":
-                        result = test["result"]["receive-rate"].avg
+                        result = test["result"]["receive-rate"]  # .avg
                     elif plot["include-tests"] == "PDR":
                         result = test["throughput"]["PDR"]["LOWER"]
                     elif plot["include-tests"] == "NDR":
diff --git a/resources/tools/presentation/generator_tables.py b/resources/tools/presentation/generator_tables.py
index 1a47e81361..4a1ac0ef71 100644
--- a/resources/tools/presentation/generator_tables.py
+++ b/resources/tools/presentation/generator_tables.py
@@ -213,7 +213,7 @@ def _tpc_modify_displayed_test_name(test_name):
 def _tpc_insert_data(target, src, include_tests):
     try:
         if include_tests == "MRR":
-            target.append(src["result"]["receive-rate"].avg)
+            target.append(src["result"]["receive-rate"])  # .avg)
         elif include_tests == "PDR":
             target.append(src["throughput"]["PDR"]["LOWER"])
         elif include_tests == "NDR":
@@ -876,7 +876,7 @@ def table_nics_comparison(table, input_data):
                              "cmp-data": list()}
                 try:
                     if table["include-tests"] == "MRR":
-                        result = tst_data["result"]["receive-rate"].avg
+                        result = tst_data["result"]["receive-rate"]  # .avg
                     elif table["include-tests"] == "PDR":
                         result = tst_data["throughput"]["PDR"]["LOWER"]
                     elif table["include-tests"] == "NDR":
@@ -998,7 +998,7 @@ def table_soak_vs_ndr(table, input_data):
                 try:
                     if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
                         if table["include-tests"] == "MRR":
-                            result = tst_data["result"]["receive-rate"].avg
+                            result = tst_data["result"]["receive-rate"]
                         elif table["include-tests"] == "PDR":
                             result = tst_data["throughput"]["PDR"]["LOWER"]
                         elif table["include-tests"] == "NDR":
diff --git a/resources/tools/presentation/input_data_parser.py b/resources/tools/presentation/input_data_parser.py
index 46c8b9d5b9..e48b271489 100644
--- a/resources/tools/presentation/input_data_parser.py
+++ b/resources/tools/presentation/input_data_parser.py
@@ -34,8 +34,8 @@ from os import remove
 from datetime import datetime as dt
 from datetime import timedelta
 from json import loads
-from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory
+from resources.libraries.python import jumpavg
 from input_data_files import download_and_unzip_data_file
@@ -147,7 +147,9 @@ class ExecutionChecker(ResultVisitor):
                 "type": "MRR" | "BMRR",
                 "status": "PASS" | "FAIL",
                 "result": {
-                    "receive-rate": AvgStdevMetadata,
+                    "receive-rate": float,
+                    # Average of a list, computed using AvgStdevStats.
+                    # In CSIT-1180, replace with List[float].
                 }
             }
@@ -832,17 +834,13 @@ class ExecutionChecker(ResultVisitor):
                 items_str = groups.group(1)
                 items_float = [float(item.strip()) for item in items_str.split(",")]
-                metadata = AvgStdevMetadataFactory.from_data(items_float)
-                # Next two lines have been introduced in CSIT-1179,
-                # to be removed in CSIT-1180.
-                metadata.size = 1
-                metadata.stdev = 0.0
-                test_result["result"]["receive-rate"] = metadata
+                # Use whole list in CSIT-1180.
+                stats = jumpavg.AvgStdevStats.for_runs(items_float)
+                test_result["result"]["receive-rate"] = stats.avg
             else:
                 groups = re.search(self.REGEX_MRR, test.message)
                 test_result["result"]["receive-rate"] = \
-                    AvgStdevMetadataFactory.from_data([
-                        float(groups.group(3)) / float(groups.group(1)), ])
+                    float(groups.group(3)) / float(groups.group(1))
         elif test_result["type"] == "RECONF":
             test_result["result"] = None
diff --git a/resources/tools/presentation/requirements.txt b/resources/tools/presentation/requirements.txt
index 7845af3c76..1676983658 100644
--- a/resources/tools/presentation/requirements.txt
+++ b/resources/tools/presentation/requirements.txt
@@ -1,4 +1,3 @@
-jumpavg==0.1.3
 Sphinx==1.7.6
 sphinx-rtd-theme==0.4.0
 robotframework==2.9.2
diff --git a/resources/tools/presentation/specification_parser.py b/resources/tools/presentation/specification_parser.py
index 16c69ce18c..f99c7515fb 100644
--- a/resources/tools/presentation/specification_parser.py
+++ b/resources/tools/presentation/specification_parser.py
@@ -22,8 +22,8 @@ from yaml import load, YAMLError
 from pprint import pformat
 from errors import PresentationError
-from utils import get_last_successful_build_number
-from utils import get_last_completed_build_number
+from utils import (
+    get_last_successful_build_number, get_last_completed_build_number)
 class Specification:
diff --git a/resources/tools/presentation/utils.py b/resources/tools/presentation/utils.py
index 3f0d6ff084..3bd5a71e00 100644
--- a/resources/tools/presentation/utils.py
+++ b/resources/tools/presentation/utils.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2019 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -28,8 +28,8 @@ from shutil import move, Error
 from datetime import datetime
 from pandas import Series
+from resources.libraries.python import jumpavg
 from errors import PresentationError
-from jumpavg.BitCountingClassifier import BitCountingClassifier
 def mean(items):
@@ -270,30 +270,30 @@ def classify_anomalies(data):
     :returns: Classification and trend values
     :rtype: 2-tuple, list of strings and list of floats
     """
-    # Nan mean something went wrong.
+    # Nan means something went wrong.
     # Use 0.0 to cause that being reported as a severe regression.
-    bare_data = [0.0 if np.isnan(sample.avg) else sample
-                 for _, sample in data.iteritems()]
-    # TODO: Put analogous iterator into jumpavg library.
-    groups = BitCountingClassifier().classify(bare_data)
-    groups.reverse()  # Just to use .pop() for FIFO.
+    bare_data = [0.0 if np.isnan(sample) else sample
+                 for sample in data.itervalues()]
+    # TODO: Make BitCountingGroupList a subclass of list again?
+    group_list = jumpavg.classify(bare_data).group_list
+    group_list.reverse()  # Just to use .pop() for FIFO.
     classification = []
     avgs = []
     active_group = None
     values_left = 0
     avg = 0.0
-    for _, sample in data.iteritems():
-        if np.isnan(sample.avg):
+    for sample in data.itervalues():
+        if np.isnan(sample):
             classification.append("outlier")
-            avgs.append(sample.avg)
+            avgs.append(sample)
             continue
         if values_left < 1 or active_group is None:
             values_left = 0
             while values_left < 1:  # Ignore empty groups (should not happen).
-                active_group = groups.pop()
-                values_left = len(active_group.values)
-                avg = active_group.metadata.avg
-                classification.append(active_group.metadata.classification)
+                active_group = group_list.pop()
+                values_left = len(active_group.run_list)
+                avg = active_group.stats.avg
+                classification.append(active_group.comment)
             avgs.append(avg)
             values_left -= 1
             continue
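For trend analysis, utils.py now calls jumpavg.classify() and walks the returned group_list instead of BitCountingClassifier().classify(). A hedged sketch of that consumption pattern, with invented sample data; the attribute names run_list, stats.avg and comment are the ones the patched classify_anomalies() relies on above:

    from resources.libraries.python import jumpavg

    samples = [2.1e6, 2.2e6, 2.2e6, 1.5e6, 1.5e6]  # made-up trend values

    # classify() groups consecutive samples; each group carries its runs,
    # averaged stats and a classification comment.
    group_list = jumpavg.classify(samples).group_list
    for group in group_list:
        print(len(group.run_list), group.stats.avg, group.comment)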