Diffstat (limited to 'scripts/automation')
-rwxr-xr-x  scripts/automation/regression/aggregate_results.py             | 71
-rw-r--r--  scripts/automation/regression/setups/kiwi02/benchmark.yaml     |  3
-rw-r--r--  scripts/automation/regression/setups/trex-dan/benchmark.yaml   |  7
-rw-r--r--  scripts/automation/regression/setups/trex04/benchmark.yaml     |  4
-rw-r--r--  scripts/automation/regression/setups/trex12/benchmark.yaml     |  5
-rwxr-xr-x  scripts/automation/regression/unit_tests/trex_general_test.py  | 17
-rwxr-xr-x  scripts/automation/regression/unit_tests/trex_imix_test.py     | 31
7 files changed, 109 insertions(+), 29 deletions(-)
diff --git a/scripts/automation/regression/aggregate_results.py b/scripts/automation/regression/aggregate_results.py
index cab19d09..71c4c9f8 100755
--- a/scripts/automation/regression/aggregate_results.py
+++ b/scripts/automation/regression/aggregate_results.py
@@ -65,7 +65,7 @@ def add_category_of_tests(category, tests, hidden = False, category_info_dir = N
with open(category_info_file) as f:
for info_line in f.readlines():
key_value = info_line.split(':', 1)
- if key_value[0].startswith('User'): # always 'hhaim', no need to show
+ if key_value[0].strip() in trex_info_dict.keys() + ['User']: # always 'hhaim', no need to show
continue
html_output += add_th_td('%s:' % key_value[0], key_value[1])
else:
@@ -80,7 +80,7 @@ def add_category_of_tests(category, tests, hidden = False, category_info_dir = N
if not len(tests):
return html_output + pad_tag('<br><font color=red>No tests!</font>', 'b') + '</div>'
- html_output += '<br>\n<table class="reference">\n<tr><th align="left">'
+ html_output += '<br>\n<table class="reference" width="100%">\n<tr><th align="left">'
if category == ERROR_CATEGORY:
html_output += 'Setup</th><th align="left">Failed tests:'
@@ -115,10 +115,13 @@ def add_category_of_tests(category, tests, hidden = False, category_info_dir = N
html_output += '<font color="blue"><b>SKIPPED</b></font></td>'
else:
html_output += '<font color="green"><b>PASSED</b></font></td>'
- html_output += '<td align="center"> '+ test.attrib['time'] + '</td></center></tr>'
+ html_output += '<td align="center"> '+ test.attrib['time'] + '</td></tr>'
result, result_text = test.attrib.get('result', ('', ''))
if result_text:
+            start_index_errors = result_text.find('Exception: The test is failed, reasons:')
+            if start_index_errors >= 0:
+                result_text = result_text[start_index_errors + len('Exception:'):].strip() # cut the traceback, keep the failure reasons
result_text = '<b style="color:000080;">%s:</b><br>%s<br><br>' % (result.capitalize(), result_text.replace('\n', '<br>'))
stderr = '' if brief and result_text else test.get('stderr', '')
if stderr:
@@ -213,6 +216,8 @@ if __name__ == '__main__':
dest = 'output_titlefile', help='Name of output file to contain title of mail.')
argparser.add_argument('--build_status_file', default='./reports/build_status',
dest = 'build_status_file', help='Name of output file to save scenario build results (should not be wiped).')
+ argparser.add_argument('--last_passed_commit', default='./reports/last_passed_commit',
+ dest = 'last_passed_commit', help='Name of output file to save last passed commit (should not be wiped).')
args = argparser.parse_args()
@@ -221,7 +226,6 @@ if __name__ == '__main__':
scenario = os.environ.get('SCENARIO')
build_url = os.environ.get('BUILD_URL')
build_id = os.environ.get('BUILD_ID')
- trex_last_commit_hash = os.environ.get('TREX_LAST_COMMIT_HASH') # TODO: remove it, take from setups info
trex_repo = os.environ.get('TREX_CORE_REPO')
if not scenario:
print 'Warning: no environment variable SCENARIO, using default'
@@ -230,8 +234,24 @@ if __name__ == '__main__':
print 'Warning: no environment variable BUILD_URL'
if not build_id:
print 'Warning: no environment variable BUILD_ID'
+
+    trex_info_dict = OrderedDict()
+    for info_file in glob.glob('%s/report_*.info' % args.input_dir):
+        with open(info_file) as f:
+            file_lines = f.readlines()
+        if not file_lines:
+            continue # empty report, try the next file
+        for info_line in file_lines:
+            key_value = info_line.split(':', 1)
+            if len(key_value) < 2:
+                continue # not a 'key: value' line
+            not_trex_keys = ['Server', 'Router', 'User']
+            if key_value[0].strip() in not_trex_keys:
+                continue # per-setup keys, not TRex info
+            trex_info_dict[key_value[0].strip()] = key_value[1].strip()
+        break # the first non-empty report is enough
+
trex_last_commit_info = ''
- if scenario == 'trex_build' and trex_last_commit_hash and trex_repo:
+ trex_last_commit_hash = trex_info_dict.get('Git SHA')
+ if trex_last_commit_hash and trex_repo:
try:
print 'Getting TRex commit with hash %s' % trex_last_commit_hash
command = 'timeout 10 git --git-dir %s show %s --quiet' % (trex_repo, trex_last_commit_hash)
@@ -348,8 +368,12 @@ if __name__ == '__main__':
with open(start_time_file) as f:
start_time = int(f.read())
total_time = int(time.time()) - start_time
- html_output += add_th_td('Started:', datetime.datetime.fromtimestamp(start_time).strftime('%d/%m/%Y %H:%M:%S'))
- html_output += add_th_td('Total duration:', datetime.timedelta(seconds = total_time))
+ html_output += add_th_td('Regression start:', datetime.datetime.fromtimestamp(start_time).strftime('%d/%m/%Y %H:%M:%S'))
+ html_output += add_th_td('Regression duration:', datetime.timedelta(seconds = total_time))
+ for key in trex_info_dict:
+ if key == 'Git SHA':
+ continue
+ html_output += add_th_td(key, trex_info_dict[key])
if trex_last_commit_info:
html_output += add_th_td('Last commit:', trex_last_commit_info)
html_output += '</table><br>\n'
@@ -431,6 +455,12 @@ if __name__ == '__main__':
</html>\
'''
+# save html
+ with open(args.output_htmlfile, 'w') as f:
+ print('Writing output file: %s' % args.output_htmlfile)
+ f.write(html_output)
+ html_output = None
+
# mail report (only error tests, expanded)
mail_output = '''\
@@ -455,8 +485,13 @@ if __name__ == '__main__':
with open(start_time_file) as f:
start_time = int(f.read())
total_time = int(time.time()) - start_time
- mail_output += add_th_td('Started:', datetime.datetime.fromtimestamp(start_time).strftime('%d/%m/%Y %H:%M:%S'))
- mail_output += add_th_td('Total duration:', datetime.timedelta(seconds = total_time))
+ mail_output += add_th_td('Regression start:', datetime.datetime.fromtimestamp(start_time).strftime('%d/%m/%Y %H:%M:%S'))
+ mail_output += add_th_td('Regression duration:', datetime.timedelta(seconds = total_time))
+ for key in trex_info_dict:
+ if key == 'Git SHA':
+ continue
+ mail_output += add_th_td(key, trex_info_dict[key])
+
if trex_last_commit_info:
mail_output += add_th_td('Last commit:', trex_last_commit_info)
mail_output += '</table><br>\n<table width=100%><tr><td>\n'
@@ -476,9 +511,9 @@ if __name__ == '__main__':
with open(category_info_file) as f:
for info_line in f.readlines():
key_value = info_line.split(':', 1)
- if key_value[0].startswith('User'): # always 'hhaim', no need to show
+ if key_value[0].strip() in trex_info_dict.keys() + ['User']: # always 'hhaim', no need to show
continue
- mail_output += add_th_td('%s:' % key_value[0], key_value[1])
+ mail_output += add_th_td('%s:' % key_value[0].strip(), key_value[1].strip())
else:
mail_output += add_th_td('Info:', 'No info')
mail_output += '</table>\n'
@@ -489,7 +524,7 @@ if __name__ == '__main__':
if err:
mail_output += '<font color=red>%s<font>' % '\n<br>'.join(err)
if len(error_tests) > 5:
- mail_output += '\n<br><font color=red>More than 5 failed tests, showing brief output.<font>\n<br>'
+            mail_output += '\n<font color=red>More than 5 failed tests, showing brief output.</font>\n<br>'
# show only brief version (cut some info)
mail_output += add_category_of_tests(ERROR_CATEGORY, error_tests, hidden=False, expanded=True, brief=True)
else:
@@ -500,10 +535,6 @@ if __name__ == '__main__':
##### save outputs
-# html
- with open(args.output_htmlfile, 'w') as f:
- print('Writing output file: %s' % args.output_htmlfile)
- f.write(html_output)
# mail content
with open(args.output_mailfile, 'w') as f:
@@ -537,12 +568,18 @@ if __name__ == '__main__':
print('Writing output file: %s' % args.build_status_file)
pickle.dump(category_dict_status, f)
+# last successful commit
+    if (current_status in ('Successful', 'Fixed')) and trex_last_commit_hash and len(jobs_list) > 0 and scenario == 'nightly':
+ with open(args.last_passed_commit, 'w') as f:
+ print('Writing output file: %s' % args.last_passed_commit)
+ f.write(trex_last_commit_hash)
+
# mail title
mailtitle_output = scenario.capitalize()
if build_id:
mailtitle_output += ' - Build #%s' % build_id
mailtitle_output += ' - %s!' % current_status
-
+
with open(args.output_titlefile, 'w') as f:
print('Writing output file: %s' % args.output_titlefile)
f.write(mailtitle_output)
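
Note: a minimal standalone sketch of the report_*.info parsing introduced above, for reference. parse_info_file and the './reports' path are illustrative names; the 'key: value' line format and the Server/Router/User skip list come from the hunk itself:

    from collections import OrderedDict
    import glob

    def parse_info_file(path, skip_keys=('Server', 'Router', 'User')):
        # collect 'key: value' pairs from one report_*.info file
        info = OrderedDict()
        with open(path) as f:
            for line in f:
                key_value = line.split(':', 1)
                if len(key_value) < 2:
                    continue # not a 'key: value' line
                key = key_value[0].strip()
                if key in skip_keys:
                    continue # per-setup keys, shown elsewhere in the report
                info[key] = key_value[1].strip()
        return info

    # usage mirroring the hunk: the first non-empty report of the run wins
    trex_info_dict = OrderedDict()
    for info_file in glob.glob('./reports/report_*.info'):
        trex_info_dict = parse_info_file(info_file)
        if trex_info_dict:
            break
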
diff --git a/scripts/automation/regression/setups/kiwi02/benchmark.yaml b/scripts/automation/regression/setups/kiwi02/benchmark.yaml
index b50662e1..839b032a 100644
--- a/scripts/automation/regression/setups/kiwi02/benchmark.yaml
+++ b/scripts/automation/regression/setups/kiwi02/benchmark.yaml
@@ -136,3 +136,6 @@ test_rx_check_http_ipv6:
rx_sample_rate : 32
+test_jumbo:
+ multiplier : 56
+ cores : 1
diff --git a/scripts/automation/regression/setups/trex-dan/benchmark.yaml b/scripts/automation/regression/setups/trex-dan/benchmark.yaml
index 419fe7b3..f65fcf90 100644
--- a/scripts/automation/regression/setups/trex-dan/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex-dan/benchmark.yaml
@@ -150,4 +150,9 @@ test_rx_check_http_negative:
client_acl_wildcard_mask : 0.0.0.255
dual_port_mask : 1.0.0.0
pool_start : 200.0.0.0
-    pool_netmask           : 255.255.255.0
\ No newline at end of file
+ pool_netmask : 255.255.255.0
+
+
+test_jumbo:
+ multiplier : 2.8
+    cores           : 1
\ No newline at end of file
diff --git a/scripts/automation/regression/setups/trex04/benchmark.yaml b/scripts/automation/regression/setups/trex04/benchmark.yaml
index d448910e..56193f46 100644
--- a/scripts/automation/regression/setups/trex04/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex04/benchmark.yaml
@@ -57,4 +57,6 @@ test_ipv6_simple :
cpu2core_dev : 0.07
-
+test_jumbo:
+ multiplier : 2.8
+    cores           : 1
\ No newline at end of file
diff --git a/scripts/automation/regression/setups/trex12/benchmark.yaml b/scripts/automation/regression/setups/trex12/benchmark.yaml
index 98f7215e..7985f15e 100644
--- a/scripts/automation/regression/setups/trex12/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex12/benchmark.yaml
@@ -159,3 +159,8 @@ test_rx_check_http_negative:
dual_port_mask : 1.0.0.0
pool_start : 200.0.0.0
pool_netmask : 255.255.255.0
+
+
+test_jumbo:
+ multiplier : 28
+ cores : 1
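
Note: the test_jumbo entries added to the setups above feed the new test via get_benchmark_param. A minimal sketch of the lookup, assuming PyYAML and using the trex12 values (the path and loading code are illustrative; the real lookup lives in trex_general_test.py):

    import yaml # assumes PyYAML is available

    with open('scripts/automation/regression/setups/trex12/benchmark.yaml') as f:
        benchmark = yaml.safe_load(f)

    # the same keys test_jumbo fetches via self.get_benchmark_param(...)
    jumbo = benchmark['test_jumbo']
    print jumbo['multiplier'], jumbo['cores'] # -> 28 1
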
diff --git a/scripts/automation/regression/unit_tests/trex_general_test.py b/scripts/automation/regression/unit_tests/trex_general_test.py
index 6a6ad79c..fb84a3e1 100755
--- a/scripts/automation/regression/unit_tests/trex_general_test.py
+++ b/scripts/automation/regression/unit_tests/trex_general_test.py
@@ -165,12 +165,15 @@ class CTRexGeneral_Test(unittest.TestCase):
if res[name] != float(val):
self.fail('TRex results[%s]==%f and not as expected %f ' % (name, res[name], val))
- def check_CPU_benchmark (self, trex_res, err):
+ def check_CPU_benchmark (self, trex_res, err = 10, minimal_cpu = 30, maximal_cpu = 85):
#cpu_util = float(trex_res.get_last_value("trex-global.data.m_cpu_util"))
cpu_util = sum([float(x) for x in trex_res.get_value_list("trex-global.data.m_cpu_util")[-4:-1]]) / 3 # mean of 3 values before last
- if cpu_util < 30 and not self.is_virt_nics:
- self.fail("CPU is too low (%s%%), can't verify performance in such low CPU%%." % cpu_util )
+ if not self.is_virt_nics:
+            if cpu_util > maximal_cpu:
+                self.fail("CPU is too high (%s%%), probably the queue is full." % cpu_util)
+            if cpu_util < minimal_cpu:
+                self.fail("CPU is too low (%s%%), can't verify performance at such low CPU utilization." % cpu_util)
cores = self.get_benchmark_param('cores')
trex_tx_bps = trex_res.get_last_value("trex-global.data.m_total_tx_bytes")
@@ -236,16 +239,14 @@ class CTRexGeneral_Test(unittest.TestCase):
if check_latency:
-        # check that max latency does not exceed 1 msec in regular setup or 20ms in VM
+        # check that max latency does not exceed 1 msec in regular setup or 50ms in VM
- allowed_latency = 20000 if self.is_VM else 1000
+ allowed_latency = 50000 if self.is_VM else 1000
if max(trex_res.get_max_latency().values()) > allowed_latency:
- print 'LatencyError: Maximal latency exceeds %s (usec)' % allowed_latency
- #raise AbnormalResultError('Maximal latency above 1ms')
+ self.fail('LatencyError: Maximal latency exceeds %s (usec)' % allowed_latency)
# check that avg latency does not exceed 1 msec in regular setup or 3ms in VM
allowed_latency = 3000 if self.is_VM else 1000
if max(trex_res.get_avg_latency().values()) > allowed_latency:
- print 'LatencyError: Average latency exceeds %s (usec)' % allowed_latency
- #raise AbnormalResultError('Maximal latency above 1ms')
+ self.fail('LatencyError: Average latency exceeds %s (usec)' % allowed_latency)
if not self.is_loopback:
# check router number of drops --> deliberately masked- need to be figured out!!!!!
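
Note: the reworked check_CPU_benchmark bounds CPU utilization from both sides instead of only failing on low CPU. A minimal sketch of the computation with illustrative sample values (real values come from trex-global.data.m_cpu_util):

    cpu_samples = [68.0, 72.5, 74.2, 79.9] # illustrative m_cpu_util history
    cpu_util = sum(cpu_samples[-4:-1]) / 3 # mean of the 3 values before the last
    minimal_cpu, maximal_cpu = 30, 85      # the new defaults for physical NICs
    assert minimal_cpu <= cpu_util <= maximal_cpu, 'CPU utilization out of bounds'
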
diff --git a/scripts/automation/regression/unit_tests/trex_imix_test.py b/scripts/automation/regression/unit_tests/trex_imix_test.py
index b56f7f4e..9e772fa7 100755
--- a/scripts/automation/regression/unit_tests/trex_imix_test.py
+++ b/scripts/automation/regression/unit_tests/trex_imix_test.py
@@ -1,10 +1,9 @@
-
#!/router/bin/python
from trex_general_test import CTRexGeneral_Test
from CPlatform import CStaticRouteConfig
from tests_exceptions import *
#import sys
-import time;
+import time
class CTRexIMIX_Test(CTRexGeneral_Test):
"""This class defines the IMIX testcase of the T-Rex traffic generator"""
@@ -167,6 +166,34 @@ class CTRexIMIX_Test(CTRexGeneral_Test):
self.check_CPU_benchmark(trex_res, 10)
+
+ def test_jumbo(self):
+ if not self.is_loopback:
+ self.router.configure_basic_interfaces()
+ self.router.config_pbr(mode = "config")
+
+ mult = self.get_benchmark_param('multiplier')
+ core = self.get_benchmark_param('cores')
+
+ ret = self.trex.start_trex(
+ c = core,
+ m = mult,
+ p = True,
+ nc = True,
+ d = 100,
+ f = 'cap2/imix_9k.yaml',
+ l = 1000)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+        # trex_res is a CTRexResults instance, and contains the summary of the test results
+        # you may see all the result keys by calling 'print trex_res.result' here
+ print ("\nLATEST RESULT OBJECT:")
+ print trex_res
+
+ self.check_general_scenario_results(trex_res)
+ self.check_CPU_benchmark(trex_res, minimal_cpu = 0, maximal_cpu = 10)
+
def tearDown(self):
CTRexGeneral_Test.tearDown(self)
# remove nbar config here
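
Note: a hedged guess at how the start_trex keyword arguments in test_jumbo could translate to t-rex-64 command-line flags; the kwargs-to-flag mapping below is an assumption for illustration, not the client's actual implementation:

    # hypothetical flag builder; single-letter kwargs -> '-x', longer -> '--xx'
    kwargs = dict(c=1, m=28, p=True, nc=True, d=100, f='cap2/imix_9k.yaml', l=1000)
    flags = []
    for key, value in sorted(kwargs.items()):
        dash = '-' if len(key) == 1 else '--'
        flags.append(dash + key if value is True else '%s%s %s' % (dash, key, value))
    print 't-rex-64 ' + ' '.join(flags)
    # -> t-rex-64 -c 1 -d 100 -f cap2/imix_9k.yaml -l 1000 -m 28 --nc -p
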