author | Tibor Frank <tifrank@cisco.com> | 2018-04-19 11:28:15 +0200
---|---|---
committer | Tibor Frank <tifrank@cisco.com> | 2018-04-19 11:28:15 +0200
commit | 1265b8792b8edd44407c8073aeba2ca24dc0ad82 (patch) |
tree | 7e704322f930ca221b171d41dcd6e4205764ff93 |
parent | 98f7c4743d36b03cbe9952ff7e3e60e39f851e2d (diff) |
CSIT-1041: Trending dashboard
Change-Id: I175bf9269a9f958dc35d592a2810b7a6f37268a3
Signed-off-by: Tibor Frank <tifrank@cisco.com>
-rw-r--r-- | docs/cpta/introduction/index.rst                 | 36 |
-rw-r--r-- | resources/tools/presentation/generator_tables.py | 91 |
2 files changed, 82 insertions, 45 deletions
diff --git a/docs/cpta/introduction/index.rst b/docs/cpta/introduction/index.rst
index 516e8b36e0..26fa3a19a9 100644
--- a/docs/cpta/introduction/index.rst
+++ b/docs/cpta/introduction/index.rst
@@ -4,29 +4,31 @@ VPP MRR Performance Dashboard
 Description
 -----------
 
-Dashboard tables list a summary of per test-case VPP MRR performance trend
-values and detected anomalies (Maximum Receive Rate - received packet rate
-under line rate load). Data comes from trending MRR jobs executed every 12
-hrs (2:00, 14:00 UTC). Trend, trend compliance and anomaly calculations are
-based on a rolling window of <N> samples, currently N=14 covering last 7 days.
-Separate tables are generated for tested VPP worker-thread-core combinations
+Dashboard tables list a summary of per test-case VPP MRR performance trend
+values and detected anomalies (Maximum Receive Rate - received packet rate
+under line rate load). Data comes from trending MRR jobs executed every 12
+hrs (2:00, 14:00 UTC). Trend, trend compliance and anomaly calculations are
+based on a rolling window of <N> samples, currently N=14 covering last 7 days.
+Separate tables are generated for tested VPP worker-thread-core combinations
 (1t1c, 2t2c, 4t4c).
 
 Legend to table:
 
-    - "Test Case": name of CSIT test case, naming convention on
+    - **Test Case** : name of CSIT test case, see naming convention in
       `CSIT wiki <https://wiki.fd.io/view/CSIT/csit-test-naming>`_.
-    - "Throughput Trend [Mpps]": last value of trend calculated over a
+    - **Throughput Trend [Mpps]** : last value of trend calculated over a
       rolling window.
-    - "Trend Compliance": calculated based on detected anomalies, listed in
-      precedence order - i) "failure" if 3 consecutive outliers,
-      ii) "regression" if any regressions, iii) "progression" if any
-      progressions, iv) "normal" if data compliant with trend.
-    - "Anomaly Value [Mpps]": i) highest outlier if "failure", ii) highest
-      regression if "regression", iii) highest progression if "progression",
-      iv) "-" if normal i.e. within trend.
-    - "Change [%]": "Anomaly Value" vs. "Throughput Trend", "-" if normal.
-    - "# Outliers": number of outliers detected within a rolling window.
+    - **Trend Compliance** : calculated based on detected anomalies over a
+      rolling window, listed in precedence order - i) **failure** if 3
+      consecutive outliers, ii) **regression** if any regressions, iii)
+      **progression** if any progressions, iv) **normal** if data compliant
+      with trend; test cases listed alphabetically within compliance category.
+    - **Top Anomaly [Mpps]** : i) outlier if **failure**, ii) drop
+      if **regression**, iii) gain if **progression**, iv) **-**
+      if normal i.e. within trend.
+    - **Change [%]** : **Top Anomaly** vs. **Throughput Trend**, **-** if
+      normal.
+    - **Outliers [Number]** : number of outliers detected over a rolling window.
 
 Tables are listed in sections 1.x. Followed by daily trending graphs in
 sections 2.x. Daily trending data used to generate the graphs is listed in
diff --git a/resources/tools/presentation/generator_tables.py b/resources/tools/presentation/generator_tables.py
index 29e29d0468..4bbee51ae5 100644
--- a/resources/tools/presentation/generator_tables.py
+++ b/resources/tools/presentation/generator_tables.py
@@ -671,12 +671,12 @@ def table_performance_trending_dashboard(table, input_data):
     data = input_data.filter_data(table, continue_on_error=True)
 
     # Prepare the header of the tables
-    header = ["Test case",
+    header = ["Test Case",
               "Throughput Trend [Mpps]",
               "Trend Compliance",
-              "Anomaly Value [Mpps]",
+              "Top Anomaly [Mpps]",
               "Change [%]",
-              "#Outliers"
+              "Outliers [Number]"
               ]
     header_str = ",".join(header) + "\n"
 
@@ -724,9 +724,9 @@ def table_performance_trending_dashboard(table, input_data):
            if not isnan(value) \
                    and not isnan(median[build_nr]) \
                    and median[build_nr] != 0:
-                rel_change_lst.append(
-                    int(relative_change(float(median[build_nr]),
-                                        float(value))))
+                rel_change_lst.append(round(
+                    relative_change(float(median[build_nr]), float(value)),
+                    2))
            else:
                rel_change_lst.append(None)
 
@@ -750,17 +750,6 @@ def table_performance_trending_dashboard(table, input_data):
         if first_idx < 0:
             first_idx = 0
 
-        if "regression" in classification_lst[first_idx:]:
-            classification = "regression"
-        elif "outlier" in classification_lst[first_idx:]:
-            classification = "outlier"
-        elif "progression" in classification_lst[first_idx:]:
-            classification = "progression"
-        elif "normal" in classification_lst[first_idx:]:
-            classification = "normal"
-        else:
-            classification = None
-
         nr_outliers = 0
         consecutive_outliers = 0
         failure = False
@@ -773,23 +762,69 @@ def table_performance_trending_dashboard(table, input_data):
             else:
                 consecutive_outliers = 0
 
-        idx = len(classification_lst) - 1
-        while idx:
-            if classification_lst[idx] == classification:
-                break
-            idx -= 1
-
         if failure:
             classification = "failure"
-        elif classification == "outlier":
+        elif "regression" in classification_lst[first_idx:]:
+            classification = "regression"
+        elif "progression" in classification_lst[first_idx:]:
+            classification = "progression"
+        else:
             classification = "normal"
 
+        if classification == "normal":
+            index = len(classification_lst) - 1
+        else:
+            tmp_classification = "outlier" if classification == "failure" \
+                else classification
+            for idx in range(first_idx, len(classification_lst)):
+                if classification_lst[idx] == tmp_classification:
+                    index = idx
+                    break
+            for idx in range(index+1, len(classification_lst)):
+                if classification_lst[idx] == tmp_classification:
+                    if relative_change[idx] > relative_change[index]:
+                        index = idx
+
+        # if "regression" in classification_lst[first_idx:]:
+        #     classification = "regression"
+        # elif "outlier" in classification_lst[first_idx:]:
+        #     classification = "outlier"
+        # elif "progression" in classification_lst[first_idx:]:
+        #     classification = "progression"
+        # elif "normal" in classification_lst[first_idx:]:
+        #     classification = "normal"
+        # else:
+        #     classification = None
+        #
+        # nr_outliers = 0
+        # consecutive_outliers = 0
+        # failure = False
+        # for item in classification_lst[first_idx:]:
+        #     if item == "outlier":
+        #         nr_outliers += 1
+        #         consecutive_outliers += 1
+        #         if consecutive_outliers == 3:
+        #             failure = True
+        #     else:
+        #         consecutive_outliers = 0
+        #
+        # idx = len(classification_lst) - 1
+        # while idx:
+        #     if classification_lst[idx] == classification:
+        #         break
+        #     idx -= 1
+        #
+        # if failure:
+        #     classification = "failure"
+        # elif classification == "outlier":
+        #     classification = "normal"
+
         trend = round(float(median_lst[-1]) / 1000000, 2) \
             if not isnan(median_lst[-1]) else ''
-        sample = round(float(sample_lst[idx]) / 1000000, 2) \
-            if not isnan(sample_lst[idx]) else ''
-        rel_change = rel_change_lst[idx] \
-            if rel_change_lst[idx] is not None else ''
+        sample = round(float(sample_lst[index]) / 1000000, 2) \
+            if not isnan(sample_lst[index]) else ''
+        rel_change = rel_change_lst[index] \
+            if rel_change_lst[index] is not None else ''
         tbl_lst.append([name,
                         trend,
                         classification,