author     Yaroslav Brustinov <ybrustin@cisco.com>    2016-01-08 14:40:31 +0200
committer  Yaroslav Brustinov <ybrustin@cisco.com>    2016-01-08 14:40:31 +0200
commit     148fd251911869db33df03f7fd3287c1f76f2fa4 (patch)
tree       df3f8b848c57d701dd71255f9949927eaff32b6e
parent     8db09096b9dcf030b7dc744fbd7ee463d8e6fd1b (diff)
building: add pkg option with --pkg-dir and --pkg-file arguments (./b pkg --pkg-dir ...) for building TRex package
regression:
    add jumbo packets test
    high latency now fails the test instead of only being printed
    add maximal and minimal expected CPU utilization
aggregate results:
    remove tracebacks at usual errors in tests
    move TRex info from the per-setup info to the top (it is now common to all setups)
    save last good commit hash
-rwxr-xr-x  linux_dpdk/ws_main.py                                            53
-rwxr-xr-x  linux_dpdk/wscript                                               14
-rwxr-xr-x  scripts/automation/regression/aggregate_results.py              71
-rw-r--r--  scripts/automation/regression/setups/kiwi02/benchmark.yaml       3
-rw-r--r--  scripts/automation/regression/setups/trex-dan/benchmark.yaml     7
-rw-r--r--  scripts/automation/regression/setups/trex04/benchmark.yaml       4
-rw-r--r--  scripts/automation/regression/setups/trex12/benchmark.yaml       5
-rwxr-xr-x  scripts/automation/regression/unit_tests/trex_general_test.py   17
-rwxr-xr-x  scripts/automation/regression/unit_tests/trex_imix_test.py      31
9 files changed, 164 insertions, 41 deletions
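
For reference, the new 'pkg' target is driven through the ./b wrapper named in the commit message. A hedged usage sketch follows: the destination directory and file name are illustrative, and the calls assume they run from the directory that contains ./b.

import subprocess

# Default: the package lands in the parent dir as <build_num>.tar.gz
subprocess.check_call(['./b', 'pkg'])

# Custom destination via the --pkg-dir / --pkg-file options added in this commit
subprocess.check_call(['./b', 'pkg', '--pkg-dir', '/tmp/releases',
                       '--pkg-file', 'trex-nightly.tar.gz'])
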
diff --git a/linux_dpdk/ws_main.py b/linux_dpdk/ws_main.py
index 604b1c1a..19ff9298 100755
--- a/linux_dpdk/ws_main.py
+++ b/linux_dpdk/ws_main.py
@@ -14,7 +14,6 @@ import re
import uuid
import subprocess
-
# these variables are mandatory ('/' are converted automatically)
top = '../'
out = 'build_dpdk'
@@ -26,6 +25,7 @@ C_VER_FILE = "version.c"
H_VER_FILE = "version.h"
BUILD_NUM_FILE = "../VERSION"
+USERS_ALLOWED_TO_RELEASE = ['hhaim']
#######################################
@@ -84,6 +84,8 @@ class SrcGroups:
def options(opt):
opt.load('compiler_cxx')
opt.load('compiler_cc')
+ opt.add_option('--pkg-dir', '--pkg_dir', dest='pkg_dir', default=False, action='store', help="Destination folder for 'pkg' option.")
+ opt.add_option('--pkg-file', '--pkg_file', dest='pkg_file', default=False, action='store', help="Destination filename for 'pkg' option.")
def configure(conf):
conf.load('g++')
@@ -885,14 +887,45 @@ class Env(object):
s= Env().get_env('TREX_EX_WEB_SRV');
return s;
-
-
-def release(bld):
+def check_release_permission():
+ if os.getenv('USER') not in USERS_ALLOWED_TO_RELEASE:
+ print 'You are not allowed to release TRex. Please contact Hanoch.'
+ return False
+ return True
+
+# build package in parent dir. can provide custom name and folder with --pkg-dir and --pkg-file
+def pkg(self):
+ build_num = get_build_num()
+ pkg_dir = self.options.pkg_dir
+ if not pkg_dir:
+ pkg_dir = os.pardir
+ pkg_file = self.options.pkg_file
+ if not pkg_file:
+ pkg_file = '%s.tar.gz' % build_num
+ tmp_path = os.path.join(pkg_dir, '_%s' % pkg_file)
+ dst_path = os.path.join(pkg_dir, pkg_file)
+ build_path = os.path.join(os.pardir, build_num)
+
+ # clean old dir if exists
+ os.system('rm -rf %s' % build_path)
+ release(self, build_path + '/')
+ os.system("cp %s/%s.tar.gz %s" % (build_path, build_num, tmp_path))
+ os.system("mv %s %s" % (tmp_path, dst_path))
+
+ # clean new dir
+ os.system('rm -rf %s' % build_path)
+
+
+def release(bld, custom_dir = None):
""" release to local folder """
+ exec_p = Env().get_release_path()
+ if custom_dir:
+ exec_p = custom_dir
+ elif not check_release_permission():
+ return
print "copy images and libs"
- exec_p =Env().get_release_path();
os.system(' mkdir -p '+exec_p);
-
+
for obj in build_types:
copy_single_system (bld,exec_p,obj);
copy_single_system1 (bld,exec_p,obj)
@@ -914,6 +947,8 @@ def release(bld):
def publish(bld):
+ if not check_release_permission():
+ return
exec_p = Env().get_release_path()
rel=get_build_num ()
@@ -925,6 +960,8 @@ def publish(bld):
def publish_ext(bld):
+ if not check_release_permission():
+ return
exec_p = Env().get_release_path()
rel=get_build_num ()
@@ -934,7 +971,9 @@ def publish_ext(bld):
os.system("ssh -i %s -l %s %s 'cd %s/release/;rm be_latest; ln -P %s be_latest' " %(Env().get_trex_ex_web_key(),Env().get_trex_ex_web_user(),Env().get_trex_ex_web_srv(),Env().get_trex_ex_web_path(),release_name))
#os.system("ssh -i %s -l %s %s 'cd %s/release/;rm latest; ln -P %s latest' " %(Env().get_trex_ex_web_key(),Env().get_trex_ex_web_user(),Env().get_trex_ex_web_srv(),Env().get_trex_ex_web_path(),release_name))
-
+#WIP
+def release_successful(self):
+ print 'Not implemented'
def test (bld):
r=commands.getstatusoutput("git log --pretty=format:'%H' -n 1")
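
The pkg() flow above copies the finished tarball into the destination under a temporary name ('_<pkg_file>') and only then renames it, so a half-written package never appears under the expected file name. Below is a minimal standalone sketch of that stage-then-rename step using shutil instead of os.system; the helper name stage_package and its arguments are illustrative, not part of the commit.

import os
import shutil

def stage_package(build_path, build_num, pkg_dir=None, pkg_file=None):
    # Copy the built tarball under a temporary name, then rename it so the
    # final file only appears once the copy is complete.
    pkg_dir = pkg_dir or os.pardir
    pkg_file = pkg_file or '%s.tar.gz' % build_num
    src = os.path.join(build_path, '%s.tar.gz' % build_num)
    tmp_path = os.path.join(pkg_dir, '_%s' % pkg_file)
    dst_path = os.path.join(pkg_dir, pkg_file)
    shutil.copy(src, tmp_path)     # partial data is visible only under the temp name
    os.rename(tmp_path, dst_path)  # atomic on the same filesystem
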
diff --git a/linux_dpdk/wscript b/linux_dpdk/wscript
index 459e2201..67434a19 100755
--- a/linux_dpdk/wscript
+++ b/linux_dpdk/wscript
@@ -9,7 +9,7 @@
import ws_main
-#mandatory ....
+#mandatory
top = ws_main.top
out = ws_main.out
@@ -28,6 +28,8 @@ def build(bld):
def build_info(bld):
ws_main.build_info(bld)
+def pkg(bld):
+ ws_main.pkg(bld)
def release(bld):
ws_main.release(bld)
@@ -39,14 +41,16 @@ def publish_ext(bld):
ws_main.publish_ext(bld)
def publish_web(bld):
- ws_main.publish_web(bld)
+ ws_main.publish_web(bld)
+def release_successful(bld):
+ ws_main.release_successful(bld)
def sync(bld):
- ws_main.sync(bld)
-
+ ws_main.sync(bld)
+
def test(bld):
- ws_main.test(bld)
+ ws_main.test(bld)
diff --git a/scripts/automation/regression/aggregate_results.py b/scripts/automation/regression/aggregate_results.py
index cab19d09..71c4c9f8 100755
--- a/scripts/automation/regression/aggregate_results.py
+++ b/scripts/automation/regression/aggregate_results.py
@@ -65,7 +65,7 @@ def add_category_of_tests(category, tests, hidden = False, category_info_dir = N
with open(category_info_file) as f:
for info_line in f.readlines():
key_value = info_line.split(':', 1)
- if key_value[0].startswith('User'): # always 'hhaim', no need to show
+ if key_value[0].strip() in trex_info_dict.keys() + ['User']: # always 'hhaim', no need to show
continue
html_output += add_th_td('%s:' % key_value[0], key_value[1])
else:
@@ -80,7 +80,7 @@ def add_category_of_tests(category, tests, hidden = False, category_info_dir = N
if not len(tests):
return html_output + pad_tag('<br><font color=red>No tests!</font>', 'b') + '</div>'
- html_output += '<br>\n<table class="reference">\n<tr><th align="left">'
+ html_output += '<br>\n<table class="reference" width="100%">\n<tr><th align="left">'
if category == ERROR_CATEGORY:
html_output += 'Setup</th><th align="left">Failed tests:'
@@ -115,10 +115,13 @@ def add_category_of_tests(category, tests, hidden = False, category_info_dir = N
html_output += '<font color="blue"><b>SKIPPED</b></font></td>'
else:
html_output += '<font color="green"><b>PASSED</b></font></td>'
- html_output += '<td align="center"> '+ test.attrib['time'] + '</td></center></tr>'
+ html_output += '<td align="center"> '+ test.attrib['time'] + '</td></tr>'
result, result_text = test.attrib.get('result', ('', ''))
if result_text:
+ start_index_errors = result_text.find('Exception: The test is failed, reasons:')
+ if start_index_errors > 0:
+ result_text = result_text[start_index_errors + 10:].strip() # cut traceback
result_text = '<b style="color:000080;">%s:</b><br>%s<br><br>' % (result.capitalize(), result_text.replace('\n', '<br>'))
stderr = '' if brief and result_text else test.get('stderr', '')
if stderr:
@@ -213,6 +216,8 @@ if __name__ == '__main__':
dest = 'output_titlefile', help='Name of output file to contain title of mail.')
argparser.add_argument('--build_status_file', default='./reports/build_status',
dest = 'build_status_file', help='Name of output file to save scenaries build results (should not be wiped).')
+ argparser.add_argument('--last_passed_commit', default='./reports/last_passed_commit',
+ dest = 'last_passed_commit', help='Name of output file to save last passed commit (should not be wiped).')
args = argparser.parse_args()
@@ -221,7 +226,6 @@ if __name__ == '__main__':
scenario = os.environ.get('SCENARIO')
build_url = os.environ.get('BUILD_URL')
build_id = os.environ.get('BUILD_ID')
- trex_last_commit_hash = os.environ.get('TREX_LAST_COMMIT_HASH') # TODO: remove it, take from setups info
trex_repo = os.environ.get('TREX_CORE_REPO')
if not scenario:
print 'Warning: no environment variable SCENARIO, using default'
@@ -230,8 +234,24 @@ if __name__ == '__main__':
print 'Warning: no environment variable BUILD_URL'
if not build_id:
print 'Warning: no environment variable BUILD_ID'
+
+ trex_info_dict = OrderedDict()
+ for file in glob.glob('%s/report_*.info' % args.input_dir):
+ with open(file) as f:
+ file_lines = f.readlines()
+ if not len(file_lines):
+ continue # to next file
+ for info_line in file_lines:
+ key_value = info_line.split(':', 1)
+ not_trex_keys = ['Server', 'Router', 'User']
+ if key_value[0].strip() in not_trex_keys:
+ continue # to next parameters
+ trex_info_dict[key_value[0].strip()] = key_value[1].strip()
+ break
+
trex_last_commit_info = ''
- if scenario == 'trex_build' and trex_last_commit_hash and trex_repo:
+ trex_last_commit_hash = trex_info_dict.get('Git SHA')
+ if trex_last_commit_hash and trex_repo:
try:
print 'Getting TRex commit with hash %s' % trex_last_commit_hash
command = 'timeout 10 git --git-dir %s show %s --quiet' % (trex_repo, trex_last_commit_hash)
@@ -348,8 +368,12 @@ if __name__ == '__main__':
with open(start_time_file) as f:
start_time = int(f.read())
total_time = int(time.time()) - start_time
- html_output += add_th_td('Started:', datetime.datetime.fromtimestamp(start_time).strftime('%d/%m/%Y %H:%M:%S'))
- html_output += add_th_td('Total duration:', datetime.timedelta(seconds = total_time))
+ html_output += add_th_td('Regression start:', datetime.datetime.fromtimestamp(start_time).strftime('%d/%m/%Y %H:%M:%S'))
+ html_output += add_th_td('Regression duration:', datetime.timedelta(seconds = total_time))
+ for key in trex_info_dict:
+ if key == 'Git SHA':
+ continue
+ html_output += add_th_td(key, trex_info_dict[key])
if trex_last_commit_info:
html_output += add_th_td('Last commit:', trex_last_commit_info)
html_output += '</table><br>\n'
@@ -431,6 +455,12 @@ if __name__ == '__main__':
</html>\
'''
+# save html
+ with open(args.output_htmlfile, 'w') as f:
+ print('Writing output file: %s' % args.output_htmlfile)
+ f.write(html_output)
+ html_output = None
+
# mail report (only error tests, expanded)
mail_output = '''\
@@ -455,8 +485,13 @@ if __name__ == '__main__':
with open(start_time_file) as f:
start_time = int(f.read())
total_time = int(time.time()) - start_time
- mail_output += add_th_td('Started:', datetime.datetime.fromtimestamp(start_time).strftime('%d/%m/%Y %H:%M:%S'))
- mail_output += add_th_td('Total duration:', datetime.timedelta(seconds = total_time))
+ mail_output += add_th_td('Regression start:', datetime.datetime.fromtimestamp(start_time).strftime('%d/%m/%Y %H:%M:%S'))
+ mail_output += add_th_td('Regression duration:', datetime.timedelta(seconds = total_time))
+ for key in trex_info_dict:
+ if key == 'Git SHA':
+ continue
+ mail_output += add_th_td(key, trex_info_dict[key])
+
if trex_last_commit_info:
mail_output += add_th_td('Last commit:', trex_last_commit_info)
mail_output += '</table><br>\n<table width=100%><tr><td>\n'
@@ -476,9 +511,9 @@ if __name__ == '__main__':
with open(category_info_file) as f:
for info_line in f.readlines():
key_value = info_line.split(':', 1)
- if key_value[0].startswith('User'): # always 'hhaim', no need to show
+ if key_value[0].strip() in trex_info_dict.keys() + ['User']: # always 'hhaim', no need to show
continue
- mail_output += add_th_td('%s:' % key_value[0], key_value[1])
+ mail_output += add_th_td('%s:' % key_value[0].strip(), key_value[1].strip())
else:
mail_output += add_th_td('Info:', 'No info')
mail_output += '</table>\n'
@@ -489,7 +524,7 @@ if __name__ == '__main__':
if err:
mail_output += '<font color=red>%s<font>' % '\n<br>'.join(err)
if len(error_tests) > 5:
- mail_output += '\n<br><font color=red>More than 5 failed tests, showing brief output.<font>\n<br>'
+ mail_output += '\n<font color=red>More than 5 failed tests, showing brief output.<font>\n<br>'
# show only brief version (cut some info)
mail_output += add_category_of_tests(ERROR_CATEGORY, error_tests, hidden=False, expanded=True, brief=True)
else:
@@ -500,10 +535,6 @@ if __name__ == '__main__':
##### save outputs
-# html
- with open(args.output_htmlfile, 'w') as f:
- print('Writing output file: %s' % args.output_htmlfile)
- f.write(html_output)
# mail content
with open(args.output_mailfile, 'w') as f:
@@ -537,12 +568,18 @@ if __name__ == '__main__':
print('Writing output file: %s' % args.build_status_file)
pickle.dump(category_dict_status, f)
+# last successful commit
+ if (current_status in ('Successful', 'Fixed')) and trex_last_commit_hash and jobs_list > 0 and scenario == 'nightly':
+ with open(args.last_passed_commit, 'w') as f:
+ print('Writing output file: %s' % args.last_passed_commit)
+ f.write(trex_last_commit_hash)
+
# mail title
mailtitle_output = scenario.capitalize()
if build_id:
mailtitle_output += ' - Build #%s' % build_id
mailtitle_output += ' - %s!' % current_status
-
+
with open(args.output_titlefile, 'w') as f:
print('Writing output file: %s' % args.output_titlefile)
f.write(mailtitle_output)
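
The aggregate_results.py change above reads one setup's report_*.info file and lifts the TRex-wide fields (everything except Server, Router and User) to the top of the report, since they are common to all setups. A self-contained sketch of that parsing step, assuming the same 'Key: Value' line format; the helper name read_trex_info is illustrative.

import glob
from collections import OrderedDict

def read_trex_info(input_dir, skip_keys=('Server', 'Router', 'User')):
    # Take the first non-empty report_*.info file; its TRex fields are
    # assumed to be common to all setups, so one file is enough.
    info = OrderedDict()
    for path in glob.glob('%s/report_*.info' % input_dir):
        with open(path) as f:
            lines = f.readlines()
        if not lines:
            continue
        for line in lines:
            key, _, value = line.partition(':')
            if key.strip() in skip_keys:
                continue
            info[key.strip()] = value.strip()
        break
    return info
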
diff --git a/scripts/automation/regression/setups/kiwi02/benchmark.yaml b/scripts/automation/regression/setups/kiwi02/benchmark.yaml
index b50662e1..839b032a 100644
--- a/scripts/automation/regression/setups/kiwi02/benchmark.yaml
+++ b/scripts/automation/regression/setups/kiwi02/benchmark.yaml
@@ -136,3 +136,6 @@ test_rx_check_http_ipv6:
rx_sample_rate : 32
+test_jumbo:
+ multiplier : 56
+ cores : 1
diff --git a/scripts/automation/regression/setups/trex-dan/benchmark.yaml b/scripts/automation/regression/setups/trex-dan/benchmark.yaml
index 419fe7b3..f65fcf90 100644
--- a/scripts/automation/regression/setups/trex-dan/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex-dan/benchmark.yaml
@@ -150,4 +150,9 @@ test_rx_check_http_negative:
client_acl_wildcard_mask : 0.0.0.255
dual_port_mask : 1.0.0.0
pool_start : 200.0.0.0
- pool_netmask : 255.255.255.0
\ No newline at end of file
+ pool_netmask : 255.255.255.0
+
+
+test_jumbo:
+ multiplier : 2.8
+ cores : 1
\ No newline at end of file
diff --git a/scripts/automation/regression/setups/trex04/benchmark.yaml b/scripts/automation/regression/setups/trex04/benchmark.yaml
index d448910e..56193f46 100644
--- a/scripts/automation/regression/setups/trex04/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex04/benchmark.yaml
@@ -57,4 +57,6 @@ test_ipv6_simple :
cpu2core_dev : 0.07
-
+test_jumbo:
+ multiplier : 2.8
+ cores : 1
\ No newline at end of file
diff --git a/scripts/automation/regression/setups/trex12/benchmark.yaml b/scripts/automation/regression/setups/trex12/benchmark.yaml
index 98f7215e..7985f15e 100644
--- a/scripts/automation/regression/setups/trex12/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex12/benchmark.yaml
@@ -159,3 +159,8 @@ test_rx_check_http_negative:
dual_port_mask : 1.0.0.0
pool_start : 200.0.0.0
pool_netmask : 255.255.255.0
+
+
+test_jumbo:
+ multiplier : 28
+ cores : 1
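
The four benchmark.yaml additions above give each setup its own test_jumbo parameters (multiplier and cores). A hedged sketch of how such an entry maps to values a test can consume; the loading code is illustrative, since the regression framework's own loader is not part of this diff.

import yaml  # assumes PyYAML is available

with open('scripts/automation/regression/setups/trex12/benchmark.yaml') as f:
    benchmark = yaml.safe_load(f)

jumbo = benchmark['test_jumbo']
print(jumbo['multiplier'], jumbo['cores'])  # 28 and 1 on the trex12 setup
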
diff --git a/scripts/automation/regression/unit_tests/trex_general_test.py b/scripts/automation/regression/unit_tests/trex_general_test.py
index 6a6ad79c..fb84a3e1 100755
--- a/scripts/automation/regression/unit_tests/trex_general_test.py
+++ b/scripts/automation/regression/unit_tests/trex_general_test.py
@@ -165,12 +165,15 @@ class CTRexGeneral_Test(unittest.TestCase):
if res[name] != float(val):
self.fail('TRex results[%s]==%f and not as expected %f ' % (name, res[name], val))
- def check_CPU_benchmark (self, trex_res, err):
+ def check_CPU_benchmark (self, trex_res, err = 10, minimal_cpu = 30, maximal_cpu = 85):
#cpu_util = float(trex_res.get_last_value("trex-global.data.m_cpu_util"))
cpu_util = sum([float(x) for x in trex_res.get_value_list("trex-global.data.m_cpu_util")[-4:-1]]) / 3 # mean of 3 values before last
- if cpu_util < 30 and not self.is_virt_nics:
- self.fail("CPU is too low (%s%%), can't verify performance in such low CPU%%." % cpu_util )
+ if not self.is_virt_nics:
+ if cpu_util > maximal_cpu:
+ self.fail("CPU is too high (%s%%), probably queue full." % cpu_util )
+ if cpu_util < minimal_cpu:
+ self.fail("CPU is too low (%s%%), can't verify performance in such low CPU%%." % cpu_util )
cores = self.get_benchmark_param('cores')
trex_tx_bps = trex_res.get_last_value("trex-global.data.m_total_tx_bytes")
@@ -236,16 +239,14 @@ class CTRexGeneral_Test(unittest.TestCase):
if check_latency:
# check that max latency does not exceed 1 msec in regular setup or 20ms in VM
- allowed_latency = 20000 if self.is_VM else 1000
+ allowed_latency = 50000 if self.is_VM else 1000
if max(trex_res.get_max_latency().values()) > allowed_latency:
- print 'LatencyError: Maximal latency exceeds %s (usec)' % allowed_latency
- #raise AbnormalResultError('Maximal latency above 1ms')
+ self.fail('LatencyError: Maximal latency exceeds %s (usec)' % allowed_latency)
# check that avg latency does not exceed 1 msec in regular setup or 3ms in VM
allowed_latency = 3000 if self.is_VM else 1000
if max(trex_res.get_avg_latency().values()) > allowed_latency:
- print 'LatencyError: Average latency exceeds %s (usec)' % allowed_latency
- #raise AbnormalResultError('Maximal latency above 1ms')
+ self.fail('LatencyError: Average latency exceeds %s (usec)' % allowed_latency)
if not self.is_loopback:
# check router number of drops --> deliberately masked- need to be figured out!!!!!
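
check_CPU_benchmark() above averages the three CPU samples taken before the last one (the last sample can be skewed by traffic ramp-down) and now fails on both too-low and too-high utilization. A standalone sketch of that window check; the function name and the use of AssertionError are illustrative.

def check_cpu_window(cpu_samples, minimal_cpu=30, maximal_cpu=85):
    # Mean of the 3 samples before the last one, as in the test above.
    cpu_util = sum(float(x) for x in cpu_samples[-4:-1]) / 3.0
    if cpu_util > maximal_cpu:
        raise AssertionError("CPU is too high (%s%%), probably queue full." % cpu_util)
    if cpu_util < minimal_cpu:
        raise AssertionError("CPU is too low (%s%%), can't verify performance." % cpu_util)
    return cpu_util
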
diff --git a/scripts/automation/regression/unit_tests/trex_imix_test.py b/scripts/automation/regression/unit_tests/trex_imix_test.py
index b56f7f4e..9e772fa7 100755
--- a/scripts/automation/regression/unit_tests/trex_imix_test.py
+++ b/scripts/automation/regression/unit_tests/trex_imix_test.py
@@ -1,10 +1,9 @@
-
#!/router/bin/python
from trex_general_test import CTRexGeneral_Test
from CPlatform import CStaticRouteConfig
from tests_exceptions import *
#import sys
-import time;
+import time
class CTRexIMIX_Test(CTRexGeneral_Test):
"""This class defines the IMIX testcase of the T-Rex traffic generator"""
@@ -167,6 +166,34 @@ class CTRexIMIX_Test(CTRexGeneral_Test):
self.check_CPU_benchmark(trex_res, 10)
+
+ def test_jumbo(self):
+ if not self.is_loopback:
+ self.router.configure_basic_interfaces()
+ self.router.config_pbr(mode = "config")
+
+ mult = self.get_benchmark_param('multiplier')
+ core = self.get_benchmark_param('cores')
+
+ ret = self.trex.start_trex(
+ c = core,
+ m = mult,
+ p = True,
+ nc = True,
+ d = 100,
+ f = 'cap2/imix_9k.yaml',
+ l = 1000)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ # trex_res is a CTRexResults instance- and contains the summary of the test results
+ # you may see all the results keys by simply calling here for 'print trex_res.result'
+ print ("\nLATEST RESULT OBJECT:")
+ print trex_res
+
+ self.check_general_scenario_results(trex_res)
+ self.check_CPU_benchmark(trex_res, minimal_cpu = 0, maximal_cpu = 10)
+
def tearDown(self):
CTRexGeneral_Test.tearDown(self)
# remove nbar config here