author     Vratko Polak <vrpolak@cisco.com>             2018-10-19 12:21:47 +0200
committer  Maciek Konstantynowicz <mkonstan@cisco.com>  2018-10-22 21:17:26 +0000
commit     d465d9fba33a323703a2bf40c499d74d0f017091 (patch)
tree       3678767f68a7d9faf22a93f4be0d5f9b7b807869 /resources/tools/scripts/compare_perpatch.py
parent     8e7582edf682a7ba7edcb5ec0a45d00e361ec868 (diff)
Per patch: multiple BMRR calls
This is to reduce sensitivity to unstable testbed performance. Also add
topo_cleanup before every pybot run, to avoid issues with VPP uninstall.
TRACE prints are left in place to simplify investigation of false positives
if/when they happen.

Change-Id: I9b0cdcfbbe4aa0735a0596746ac32c9e93af0897
Signed-off-by: Vratko Polak <vrpolak@cisco.com>
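For illustration, the reworked hack() function in this patch keeps only the
middle of the sorted samples: with eight = len(tmp) / 8 it returns
tmp[3*eight:-eight], dropping the lowest three eighths (where unstable runs
tend to land) and the highest eighth. A minimal standalone sketch of the same
trimming (Python 3 syntax, hence //; trim_middle and the sample values are
made up for this example):

    def trim_middle(values):
        # Sort, then keep indices [3n/8, 7n/8): drops the lowest 3/8
        # and the highest 1/8 of the samples.
        tmp = sorted(values)
        eighth = len(tmp) // 8  # the script, being Python 2, writes "/"
        # Guard added for this sketch only: with fewer than 8 samples the
        # slice tmp[0:-0] would be empty, so return the list unchanged.
        return tmp[3 * eighth:-eighth] if eighth else tmp

    # Hypothetical BMRR samples (Mpps) collected over several iterations:
    samples = [9.1, 9.3, 2.0, 9.2, 9.4, 9.0, 9.25, 12.5]
    print(trim_middle(samples))  # [9.2, 9.25, 9.3, 9.4]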
Diffstat (limited to 'resources/tools/scripts/compare_perpatch.py')
-rw-r--r--  resources/tools/scripts/compare_perpatch.py  |  81
1 file changed, 56 insertions(+), 25 deletions(-)
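The loop added in this patch reads results per iteration until an open fails,
so the expected on-disk layout is along these lines (the iteration count and
the values below are hypothetical):

    csit_parent/0/results.txt    csit_new/0/results.txt
    csit_parent/1/results.txt    csit_new/1/results.txt
    csit_parent/2/results.txt    csit_new/2/results.txt

where each results.txt holds one JSON-parseable list of floats per test case,
for example:

    [9050000.0, 9120000.0, 9080000.0]
    [4510000.0, 4490000.0, 4550000.0]

Line N of every file must belong to the same test case, and every iteration
must contain the same number of lines; otherwise the script votes -1.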
diff --git a/resources/tools/scripts/compare_perpatch.py b/resources/tools/scripts/compare_perpatch.py
index cc9ffd8992..1f8a1cf892 100644
--- a/resources/tools/scripts/compare_perpatch.py
+++ b/resources/tools/scripts/compare_perpatch.py
@@ -14,7 +14,7 @@
 """Script for determining whether per-patch perf test votes -1.
 This script assumes there exist two text files with processed BMRR results,
-located at hardcoded relative paths, having several lines
+located at hardcoded relative paths (subdirs thereof), having several lines
 of json-parseable lists of float values, corresponding to testcase results.
 This script then uses jumpavg library to determine whether there was
 a regression, progression or no change for each testcase.
@@ -38,32 +38,62 @@ def hack(value_list):
     :rtype: list of float
     """
     tmp = sorted(value_list)
-    quarter = len(tmp) / 4
-    ret = tmp[quarter:-quarter]
+    eight = len(tmp) / 8
+    ret = tmp[3*eight:-eight]
     return ret
 
-parent_lines = list()
-new_lines = list()
-with open("csit_parent/results.txt") as parent_file:
-    parent_lines = parent_file.readlines()
-with open("csit_new/results.txt") as new_file:
-    new_lines = new_file.readlines()
-if len(parent_lines) != len(new_lines):
-    print "Number of passed tests does not match!"
-    sys.exit(1)
+iteration = -1
+parent_iterations = list()
+new_iterations = list()
+num_tests = None
+while 1:
+    iteration += 1
+    parent_lines = list()
+    new_lines = list()
+    filename = "csit_parent/{iter}/results.txt".format(iter=iteration)
+    try:
+        with open(filename) as parent_file:
+            parent_lines = parent_file.readlines()
+    except IOError:
+        break
+    num_lines = len(parent_lines)
+    filename = "csit_new/{iter}/results.txt".format(iter=iteration)
+    with open(filename) as new_file:
+        new_lines = new_file.readlines()
+    if num_lines != len(new_lines):
+        print "Number of tests does not match within iteration", iteration
+        sys.exit(1)
+    if num_tests is None:
+        num_tests = num_lines
+    elif num_tests != num_lines:
+        print "Number of tests does not match previous at iteration", iteration
+        sys.exit(1)
+    parent_iterations.append(parent_lines)
+    new_iterations.append(new_lines)
 classifier = BitCountingClassifier()
-num_tests = len(parent_lines)
 exit_code = 0
-for index in range(num_tests):
-    parent_values = hack(json.loads(parent_lines[index]))
-    new_values = hack(json.loads(new_lines[index]))
+for test_index in range(num_tests):
+    val_max = 1.0
+    parent_values = list()
+    new_values = list()
+    for iteration_index in range(len(parent_iterations)):
+        parent_values.extend(
+            json.loads(parent_iterations[iteration_index][test_index]))
+        new_values.extend(
+            json.loads(new_iterations[iteration_index][test_index]))
+    print "TRACE pre-hack parent: {p}".format(p=parent_values)
+    print "TRACE pre-hack new: {n}".format(n=new_values)
+    parent_values = hack(parent_values)
+    new_values = hack(new_values)
     parent_max = BitCountingMetadataFactory.find_max_value(parent_values)
     new_max = BitCountingMetadataFactory.find_max_value(new_values)
-    cmax = max(parent_max, new_max)
-    factory = BitCountingMetadataFactory(cmax)
+    val_max = max(val_max, parent_max, new_max)
+    factory = BitCountingMetadataFactory(val_max)
     parent_stats = factory.from_data(parent_values)
-    factory = BitCountingMetadataFactory(cmax, parent_stats.avg)
-    new_stats = factory.from_data(new_values)
+    new_factory = BitCountingMetadataFactory(val_max, parent_stats.avg)
+    new_stats = new_factory.from_data(new_values)
+    print "TRACE parent: {p}".format(p=parent_values)
+    print "TRACE new: {n}".format(n=new_values)
     print "DEBUG parent: {p}".format(p=parent_stats)
     print "DEBUG new: {n}".format(n=new_stats)
     common_max = max(parent_stats.avg, new_stats.avg)
@@ -71,15 +101,16 @@ for index in range(num_tests):
     print "DEBUG difference: {d}%".format(d=100 * difference)
     classified_list = classifier.classify([parent_stats, new_stats])
     if len(classified_list) < 2:
-        print "Test index {index}: normal (no anomaly)".format(
-            index=index)
+        print "Test test_index {test_index}: normal (no anomaly)".format(
+            test_index=test_index)
         continue
     anomaly = classified_list[1].metadata.classification
     if anomaly == "regression":
-        print "Test index {index}: anomaly regression".format(index=index)
+        print "Test test_index {test_index}: anomaly regression".format(
+            test_index=test_index)
         exit_code = 1
         continue
-    print "Test index {index}: anomaly {anomaly}".format(
-        index=index, anomaly=anomaly)
+    print "Test test_index {test_index}: anomaly {anomaly}".format(
+        test_index=test_index, anomaly=anomaly)
 print "DEBUG exit code {code}".format(code=exit_code)
 sys.exit(exit_code)
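For readers unfamiliar with jumpavg, the per-testcase decision above reduces
to the following shape. This is a minimal sketch using only the calls visible
in this diff; classify_testcase is a made-up helper name, and the import
paths are assumed from the script's use of the bare class names:

    from jumpavg.BitCountingClassifier import BitCountingClassifier
    from jumpavg.BitCountingMetadataFactory import BitCountingMetadataFactory

    def classify_testcase(parent_values, new_values):
        # Common maximum, floored at 1.0 as in the patched script.
        val_max = max(
            1.0,
            BitCountingMetadataFactory.find_max_value(parent_values),
            BitCountingMetadataFactory.find_max_value(new_values))
        # Parent stats come from a plain factory; the factory for the new
        # data also receives the parent average, so the new samples are
        # encoded relative to it.
        parent_stats = BitCountingMetadataFactory(val_max).from_data(
            parent_values)
        new_stats = BitCountingMetadataFactory(
            val_max, parent_stats.avg).from_data(new_values)
        classified = BitCountingClassifier().classify(
            [parent_stats, new_stats])
        if len(classified) < 2:
            return "normal"  # single group, no anomaly
        return classified[1].metadata.classification  # e.g. "regression"

Only a "regression" verdict sets the exit code to 1, which is how the script
votes -1 as described in its docstring.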