Diffstat (limited to 'scripts/automation/regression')
-rwxr-xr-x  scripts/automation/regression/aggregate_results.py | 115
-rw-r--r--  scripts/automation/regression/functional_tests/config.yaml (renamed from scripts/automation/regression/unit_tests/functional_tests/config.yaml) | 0
-rwxr-xr-x  scripts/automation/regression/functional_tests/functional_general_test.py (renamed from scripts/automation/regression/unit_tests/functional_tests/functional_general_test.py) | 0
-rw-r--r--  scripts/automation/regression/functional_tests/golden/basic_imix_golden.cap (renamed from scripts/automation/regression/stl/golden/basic_imix_golden.cap) | bin 198474 -> 198474 bytes
-rw-r--r--  scripts/automation/regression/functional_tests/golden/basic_imix_vm_golden.cap (renamed from scripts/automation/regression/stl/golden/basic_imix_vm_golden.cap) | bin 316552 -> 316552 bytes
-rw-r--r--  scripts/automation/regression/functional_tests/golden/basic_tuple_gen_golden.cap (renamed from scripts/automation/regression/stl/golden/basic_tuple_gen_golden.cap) | bin 38024 -> 38024 bytes
-rw-r--r--  scripts/automation/regression/functional_tests/golden/udp_590.cap (renamed from scripts/automation/regression/stl/golden/udp_590.cap) | bin 630 -> 630 bytes
-rwxr-xr-x  scripts/automation/regression/functional_tests/hltapi_stream_builder_test.py (renamed from scripts/automation/regression/unit_tests/functional_tests/hltapi_stream_builder_test.py) | 0
-rwxr-xr-x  scripts/automation/regression/functional_tests/misc_methods_test.py (renamed from scripts/automation/regression/unit_tests/functional_tests/misc_methods_test.py) | 0
-rwxr-xr-x  scripts/automation/regression/functional_tests/pkt_bld_general_test.py (renamed from scripts/automation/regression/unit_tests/functional_tests/pkt_bld_general_test.py) | 0
-rwxr-xr-x  scripts/automation/regression/functional_tests/platform_cmd_cache_test.py (renamed from scripts/automation/regression/unit_tests/functional_tests/platform_cmd_cache_test.py) | 0
-rwxr-xr-x  scripts/automation/regression/functional_tests/platform_cmd_link_test.py (renamed from scripts/automation/regression/unit_tests/functional_tests/platform_cmd_link_test.py) | 0
-rwxr-xr-x  scripts/automation/regression/functional_tests/platform_device_cfg_test.py (renamed from scripts/automation/regression/unit_tests/functional_tests/platform_device_cfg_test.py) | 2
-rwxr-xr-x  scripts/automation/regression/functional_tests/platform_dual_if_obj_test.py (renamed from scripts/automation/regression/unit_tests/functional_tests/platform_dual_if_obj_test.py) | 0
-rwxr-xr-x  scripts/automation/regression/functional_tests/platform_if_manager_test.py (renamed from scripts/automation/regression/unit_tests/functional_tests/platform_if_manager_test.py) | 2
-rwxr-xr-x  scripts/automation/regression/functional_tests/platform_if_obj_test.py (renamed from scripts/automation/regression/unit_tests/functional_tests/platform_if_obj_test.py) | 0
-rw-r--r--  scripts/automation/regression/functional_tests/scapy_pkt_builder_test.py (renamed from scripts/automation/regression/unit_tests/functional_tests/scapy_pkt_builder_test.py) | 18
-rw-r--r--  scripts/automation/regression/functional_tests/stl_basic_tests.py (renamed from scripts/automation/regression/unit_tests/functional_tests/stl_basic_tests.py) | 33
-rwxr-xr-x  scripts/automation/regression/functional_unit_tests.py | 78
-rwxr-xr-x  scripts/automation/regression/misc_methods.py | 45
-rwxr-xr-x  scripts/automation/regression/outer_packages.py | 3
-rw-r--r--  scripts/automation/regression/stateful_tests/__init__.py | 0
-rwxr-xr-x  scripts/automation/regression/stateful_tests/tests_exceptions.py (renamed from scripts/automation/regression/unit_tests/tests_exceptions.py) | 0
-rwxr-xr-x  scripts/automation/regression/stateful_tests/trex_general_test.py (renamed from scripts/automation/regression/unit_tests/trex_general_test.py) | 49
-rwxr-xr-x  scripts/automation/regression/stateful_tests/trex_imix_test.py (renamed from scripts/automation/regression/unit_tests/trex_imix_test.py) | 0
-rwxr-xr-x  scripts/automation/regression/stateful_tests/trex_ipv6_test.py (renamed from scripts/automation/regression/unit_tests/trex_ipv6_test.py) | 0
-rwxr-xr-x  scripts/automation/regression/stateful_tests/trex_nat_test.py (renamed from scripts/automation/regression/unit_tests/trex_nat_test.py) | 0
-rwxr-xr-x  scripts/automation/regression/stateful_tests/trex_nbar_test.py (renamed from scripts/automation/regression/unit_tests/trex_nbar_test.py) | 0
-rwxr-xr-x  scripts/automation/regression/stateful_tests/trex_rx_test.py (renamed from scripts/automation/regression/unit_tests/trex_rx_test.py) | 0
-rwxr-xr-x  scripts/automation/regression/stateless_tests/stl_examples_test.py | 33
-rw-r--r--  scripts/automation/regression/stateless_tests/stl_general_test.py | 68
-rw-r--r--  scripts/automation/regression/trex.py | 83
-rwxr-xr-x  scripts/automation/regression/trex_unit_test.py | 343
-rwxr-xr-x  scripts/automation/regression/unit_tests/__init__.py | 1
34 files changed, 498 insertions, 375 deletions
diff --git a/scripts/automation/regression/aggregate_results.py b/scripts/automation/regression/aggregate_results.py
index 01f9ff56..31929d50 100755
--- a/scripts/automation/regression/aggregate_results.py
+++ b/scripts/automation/regression/aggregate_results.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
import xml.etree.ElementTree as ET
+import outer_packages
import argparse
import glob
from pprint import pprint
@@ -9,6 +10,13 @@ import copy
import datetime, time
import cPickle as pickle
import subprocess, shlex
+from ansi2html import Ansi2HTMLConverter
+
+converter = Ansi2HTMLConverter(inline = True)
+convert = converter.convert
+
+def ansi2html(text):
+ return convert(text, full = False)
FUNCTIONAL_CATEGORY = 'Functional' # how to display those categories
ERROR_CATEGORY = 'Error'
@@ -27,9 +35,9 @@ def is_functional_test_name(testname):
#if testname.startswith(('platform_', 'misc_methods_', 'vm_', 'payload_gen_', 'pkt_builder_')):
# return True
#return False
- if testname.startswith('unit_tests.'):
- return False
- return True
+ if testname.startswith('functional_tests.'):
+ return True
+ return False
def is_good_status(text):
return text in ('Successful', 'Fixed', 'Passed', 'True', 'Pass')
@@ -56,15 +64,16 @@ def add_th_th(key, value):
# returns <div> with table of tests under given category.
# category - string with name of category
-# hidden - bool, true = <div> is hidden by CSS
# tests - list of tests, derived from aggregated xml report, changed a little to get easily stdout etc.
+# tests_type - stateful or stateless
# category_info_dir - folder to search for category info file
# expanded - bool, false = outputs (stdout etc.) of tests are hidden by CSS
# brief - bool, true = cut some part of tests outputs (useful for errors section with expanded flag)
-def add_category_of_tests(category, tests, hidden = False, category_info_dir = None, expanded = False, brief = False):
+def add_category_of_tests(category, tests, tests_type = None, category_info_dir = None, expanded = False, brief = False):
is_actual_category = category not in (FUNCTIONAL_CATEGORY, ERROR_CATEGORY)
- html_output = '<div style="display:%s;" id="cat_tglr_%s">\n' % ('none' if hidden else 'block', category)
-
+ category_id = '_'.join([category, tests_type]) if tests_type else category
+ category_name = ' '.join([category, tests_type.capitalize()]) if tests_type else category
+ html_output = ''
if is_actual_category:
html_output += '<br><table class="reference">\n'
@@ -80,6 +89,8 @@ def add_category_of_tests(category, tests, hidden = False, category_info_dir = N
else:
html_output += add_th_td('Info:', 'No info')
print 'add_category_of_tests: no category info %s' % category_info_file
+ if tests_type:
+ html_output += add_th_td('Tests type:', tests_type.capitalize())
if len(tests):
total_duration = 0.0
for test in tests:
@@ -88,13 +99,13 @@ def add_category_of_tests(category, tests, hidden = False, category_info_dir = N
html_output += '</table>\n'
if not len(tests):
- return html_output + pad_tag('<br><font color=red>No tests!</font>', 'b') + '</div>'
+ return html_output + pad_tag('<br><font color=red>No tests!</font>', 'b')
html_output += '<br>\n<table class="reference" width="100%">\n<tr><th align="left">'
if category == ERROR_CATEGORY:
html_output += 'Setup</th><th align="left">Failed tests:'
else:
- html_output += '%s tests:' % category
+ html_output += '%s tests:' % category_name
html_output += '</th><th align="center">Final Result</th>\n<th align="center">Time (s)</th>\n</tr>\n'
for test in tests:
functional_test = is_functional_test_name(test.attrib['name'])
@@ -103,7 +114,7 @@ def add_category_of_tests(category, tests, hidden = False, category_info_dir = N
if category == ERROR_CATEGORY:
test_id = ('err_' + test.attrib['classname'] + test.attrib['name']).replace('.', '_')
else:
- test_id = (category + test.attrib['name']).replace('.', '_')
+ test_id = (category_id + test.attrib['name']).replace('.', '_')
if expanded:
html_output += '<tr>\n<th>'
else:
@@ -128,15 +139,21 @@ def add_category_of_tests(category, tests, hidden = False, category_info_dir = N
result, result_text = test.attrib.get('result', ('', ''))
if result_text:
+ start_index_errors_stl = result_text.find('STLError: \n******')
+ if start_index_errors_stl > 0:
+ result_text = result_text[start_index_errors_stl:].strip() # cut traceback
start_index_errors = result_text.find('Exception: The test is failed, reasons:')
if start_index_errors > 0:
result_text = result_text[start_index_errors + 10:].strip() # cut traceback
+ result_text = ansi2html(result_text)
result_text = '<b style="color:000080;">%s:</b><br>%s<br><br>' % (result.capitalize(), result_text.replace('\n', '<br>'))
stderr = '' if brief and result_text else test.get('stderr', '')
if stderr:
+ stderr = ansi2html(stderr)
stderr = '<b style="color:000080;"><text color=000080>Stderr</text>:</b><br>%s<br><br>\n' % stderr.replace('\n', '<br>')
stdout = '' if brief and result_text else test.get('stdout', '')
if stdout:
+ stdout = ansi2html(stdout)
if brief: # cut off server logs
stdout = stdout.split('>>>>>>>>>>>>>>>', 1)[0]
stdout = '<b style="color:000080;">Stdout:</b><br>%s<br><br>\n' % stdout.replace('\n', '<br>')
@@ -147,7 +164,7 @@ def add_category_of_tests(category, tests, hidden = False, category_info_dir = N
else:
html_output += '<b style="color:000080;">No output</b></td></tr>'
- html_output += '\n</table>\n</div>'
+ html_output += '\n</table>'
return html_output
style_css = """
@@ -292,35 +309,40 @@ if __name__ == '__main__':
##### aggregate results to 1 single tree
aggregated_root = ET.Element('testsuite')
+ test_types = ('functional', 'stateful', 'stateless')
setups = {}
for job in jobs_list:
- xml_file = '%s/report_%s.xml' % (args.input_dir, job)
- if not os.path.exists(xml_file):
- message = '%s referenced in jobs_list.info does not exist!' % xml_file
+ setups[job] = {}
+ for test_type in test_types:
+ xml_file = '%s/report_%s_%s.xml' % (args.input_dir, job, test_type)
+ if not os.path.exists(xml_file):
+ continue
+ if os.path.basename(xml_file) == os.path.basename(args.output_xmlfile):
+ continue
+ setups[job][test_type] = []
+ print('Processing report: %s.%s' % (job, test_type))
+ tree = ET.parse(xml_file)
+ root = tree.getroot()
+ for key, value in root.attrib.items():
+ if key in aggregated_root.attrib and value.isdigit(): # sum total number of failed tests etc.
+ aggregated_root.attrib[key] = str(int(value) + int(aggregated_root.attrib[key]))
+ else:
+ aggregated_root.attrib[key] = value
+ tests = root.getchildren()
+ if not len(tests): # there should be tests:
+ message = 'No tests in xml %s' % xml_file
+ print message
+ #err.append(message)
+ for test in tests:
+ setups[job][test_type].append(test)
+ test.attrib['name'] = test.attrib['classname'] + '.' + test.attrib['name']
+ test.attrib['classname'] = job
+ aggregated_root.append(test)
+ if not sum([len(x) for x in setups[job].values()]):
+ message = 'No reports from setup %s!' % job
print message
err.append(message)
continue
- if os.path.basename(xml_file) == os.path.basename(args.output_xmlfile):
- continue
- setups[job] = []
- print('Processing setup: %s' % job)
- tree = ET.parse(xml_file)
- root = tree.getroot()
- for key, value in root.attrib.items():
- if key in aggregated_root.attrib and value.isdigit(): # sum total number of failed tests etc.
- aggregated_root.attrib[key] = str(int(value) + int(aggregated_root.attrib[key]))
- else:
- aggregated_root.attrib[key] = value
- tests = root.getchildren()
- if not len(tests): # there should be tests:
- message = 'No tests in xml %s' % xml_file
- print message
- err.append(message)
- for test in tests:
- setups[job].append(test)
- test.attrib['name'] = test.attrib['classname'] + '.' + test.attrib['name']
- test.attrib['classname'] = job
- aggregated_root.append(test)
total_tests_count = int(aggregated_root.attrib.get('tests', 0))
error_tests_count = int(aggregated_root.attrib.get('errors', 0))
@@ -426,7 +448,7 @@ if __name__ == '__main__':
if len(error_tests):
html_output += '\n<button onclick=tgl_cat("cat_tglr_{error}")>{error}</button>'.format(error = ERROR_CATEGORY)
# Setups buttons
- for category, tests in setups.items():
+ for category in setups.keys():
category_arr.append(category)
html_output += '\n<button onclick=tgl_cat("cat_tglr_%s")>%s</button>' % (category_arr[-1], category)
# Functional buttons
@@ -436,13 +458,22 @@ if __name__ == '__main__':
# Adding tests
# Error tests
if len(error_tests):
- html_output += add_category_of_tests(ERROR_CATEGORY, error_tests, hidden=False)
+ html_output += '<div style="display:block;" id="cat_tglr_%s">' % ERROR_CATEGORY
+ html_output += add_category_of_tests(ERROR_CATEGORY, error_tests)
+ html_output += '</div>'
# Setups tests
for category, tests in setups.items():
- html_output += add_category_of_tests(category, tests, hidden=True, category_info_dir=args.input_dir)
+ html_output += '<div style="display:none;" id="cat_tglr_%s">' % category
+ if 'stateful' in tests:
+ html_output += add_category_of_tests(category, tests['stateful'], 'stateful', category_info_dir=args.input_dir)
+ if 'stateless' in tests:
+ html_output += add_category_of_tests(category, tests['stateless'], 'stateless', category_info_dir=(None if 'stateful' in tests else args.input_dir))
+ html_output += '</div>'
# Functional tests
if len(functional_tests):
- html_output += add_category_of_tests(FUNCTIONAL_CATEGORY, functional_tests.values(), hidden=True)
+ html_output += '<div style="display:none;" id="cat_tglr_%s">' % FUNCTIONAL_CATEGORY
+ html_output += add_category_of_tests(FUNCTIONAL_CATEGORY, functional_tests.values())
+ html_output += '</div>'
html_output += '\n\n<script type="text/javascript">\n var category_arr = %s\n' % ['cat_tglr_%s' % x for x in category_arr]
html_output += '''
@@ -524,7 +555,7 @@ if __name__ == '__main__':
for test in error_tests:
if test.attrib['classname'] == category:
failing_category = True
- if failing_category or not len(setups[category]):
+ if failing_category or not len(setups[category]) or not sum([len(x) for x in setups[category]]):
mail_output += '<table class="reference_fail" align=left style="Margin-bottom:10;Margin-right:10;">\n'
else:
mail_output += '<table class="reference" align=left style="Margin-bottom:10;Margin-right:10;">\n'
@@ -549,9 +580,9 @@ if __name__ == '__main__':
if len(error_tests) > 5:
mail_output += '\n<font color=red>More than 5 failed tests, showing brief output.<font>\n<br>'
# show only brief version (cut some info)
- mail_output += add_category_of_tests(ERROR_CATEGORY, error_tests, hidden=False, expanded=True, brief=True)
+ mail_output += add_category_of_tests(ERROR_CATEGORY, error_tests, expanded=True, brief=True)
else:
- mail_output += add_category_of_tests(ERROR_CATEGORY, error_tests, hidden=False, expanded=True)
+ mail_output += add_category_of_tests(ERROR_CATEGORY, error_tests, expanded=True)
else:
mail_output += '<table><tr style="font-size:120;color:green;font-family:arial"><td>☺</td><td style="font-size:20">All passed.</td></tr></table>\n'
mail_output += '\n</body>\n</html>'
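
The new ansi2html() helper above converts ANSI escape codes in captured test output into inline-styled HTML spans. A minimal standalone sketch of that conversion, assuming the bundled ansi2html package; the colored input string is illustrative:

    # convert ANSI-colored test output to an HTML fragment
    from ansi2html import Ansi2HTMLConverter

    converter = Ansi2HTMLConverter(inline = True)   # emit inline styles, no <style> block

    def ansi2html(text):
        # full = False returns only the converted fragment, not a complete HTML page
        return converter.convert(text, full = False)

    colored = '\x1b[31mFAILED\x1b[0m: timeout'      # illustrative ANSI input
    print(ansi2html(colored))                       # FAILED wrapped in a red-styled <span>
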
diff --git a/scripts/automation/regression/unit_tests/functional_tests/config.yaml b/scripts/automation/regression/functional_tests/config.yaml
index 4f4c7c40..4f4c7c40 100644
--- a/scripts/automation/regression/unit_tests/functional_tests/config.yaml
+++ b/scripts/automation/regression/functional_tests/config.yaml
diff --git a/scripts/automation/regression/unit_tests/functional_tests/functional_general_test.py b/scripts/automation/regression/functional_tests/functional_general_test.py
index 525b58d2..525b58d2 100755
--- a/scripts/automation/regression/unit_tests/functional_tests/functional_general_test.py
+++ b/scripts/automation/regression/functional_tests/functional_general_test.py
diff --git a/scripts/automation/regression/stl/golden/basic_imix_golden.cap b/scripts/automation/regression/functional_tests/golden/basic_imix_golden.cap
index 6ca32299..6ca32299 100644
--- a/scripts/automation/regression/stl/golden/basic_imix_golden.cap
+++ b/scripts/automation/regression/functional_tests/golden/basic_imix_golden.cap
Binary files differ
diff --git a/scripts/automation/regression/stl/golden/basic_imix_vm_golden.cap b/scripts/automation/regression/functional_tests/golden/basic_imix_vm_golden.cap
index 43ae2368..43ae2368 100644
--- a/scripts/automation/regression/stl/golden/basic_imix_vm_golden.cap
+++ b/scripts/automation/regression/functional_tests/golden/basic_imix_vm_golden.cap
Binary files differ
diff --git a/scripts/automation/regression/stl/golden/basic_tuple_gen_golden.cap b/scripts/automation/regression/functional_tests/golden/basic_tuple_gen_golden.cap
index 7d5e7ec2..7d5e7ec2 100644
--- a/scripts/automation/regression/stl/golden/basic_tuple_gen_golden.cap
+++ b/scripts/automation/regression/functional_tests/golden/basic_tuple_gen_golden.cap
Binary files differ
diff --git a/scripts/automation/regression/stl/golden/udp_590.cap b/scripts/automation/regression/functional_tests/golden/udp_590.cap
index 29302f22..29302f22 100644
--- a/scripts/automation/regression/stl/golden/udp_590.cap
+++ b/scripts/automation/regression/functional_tests/golden/udp_590.cap
Binary files differ
diff --git a/scripts/automation/regression/unit_tests/functional_tests/hltapi_stream_builder_test.py b/scripts/automation/regression/functional_tests/hltapi_stream_builder_test.py
index c6b477aa..c6b477aa 100755
--- a/scripts/automation/regression/unit_tests/functional_tests/hltapi_stream_builder_test.py
+++ b/scripts/automation/regression/functional_tests/hltapi_stream_builder_test.py
diff --git a/scripts/automation/regression/unit_tests/functional_tests/misc_methods_test.py b/scripts/automation/regression/functional_tests/misc_methods_test.py
index 096f86d8..096f86d8 100755
--- a/scripts/automation/regression/unit_tests/functional_tests/misc_methods_test.py
+++ b/scripts/automation/regression/functional_tests/misc_methods_test.py
diff --git a/scripts/automation/regression/unit_tests/functional_tests/pkt_bld_general_test.py b/scripts/automation/regression/functional_tests/pkt_bld_general_test.py
index 5f89eaff..5f89eaff 100755
--- a/scripts/automation/regression/unit_tests/functional_tests/pkt_bld_general_test.py
+++ b/scripts/automation/regression/functional_tests/pkt_bld_general_test.py
diff --git a/scripts/automation/regression/unit_tests/functional_tests/platform_cmd_cache_test.py b/scripts/automation/regression/functional_tests/platform_cmd_cache_test.py
index 24ccf7a5..24ccf7a5 100755
--- a/scripts/automation/regression/unit_tests/functional_tests/platform_cmd_cache_test.py
+++ b/scripts/automation/regression/functional_tests/platform_cmd_cache_test.py
diff --git a/scripts/automation/regression/unit_tests/functional_tests/platform_cmd_link_test.py b/scripts/automation/regression/functional_tests/platform_cmd_link_test.py
index 7a31815b..7a31815b 100755
--- a/scripts/automation/regression/unit_tests/functional_tests/platform_cmd_link_test.py
+++ b/scripts/automation/regression/functional_tests/platform_cmd_link_test.py
diff --git a/scripts/automation/regression/unit_tests/functional_tests/platform_device_cfg_test.py b/scripts/automation/regression/functional_tests/platform_device_cfg_test.py
index 890d0cb9..3935a4c5 100755
--- a/scripts/automation/regression/unit_tests/functional_tests/platform_device_cfg_test.py
+++ b/scripts/automation/regression/functional_tests/platform_device_cfg_test.py
@@ -9,7 +9,7 @@ from nose.tools import assert_not_equal
class CDeviceCfg_Test(functional_general_test.CGeneralFunctional_Test):
def setUp(self):
- self.dev_cfg = CDeviceCfg('./unit_tests/functional_tests/config.yaml')
+ self.dev_cfg = CDeviceCfg('./functional_tests/config.yaml')
def test_get_interfaces_cfg(self):
assert_equal (self.dev_cfg.get_interfaces_cfg(),
diff --git a/scripts/automation/regression/unit_tests/functional_tests/platform_dual_if_obj_test.py b/scripts/automation/regression/functional_tests/platform_dual_if_obj_test.py
index ff54b9ee..ff54b9ee 100755
--- a/scripts/automation/regression/unit_tests/functional_tests/platform_dual_if_obj_test.py
+++ b/scripts/automation/regression/functional_tests/platform_dual_if_obj_test.py
diff --git a/scripts/automation/regression/unit_tests/functional_tests/platform_if_manager_test.py b/scripts/automation/regression/functional_tests/platform_if_manager_test.py
index 7ba6e66e..b09e8d75 100755
--- a/scripts/automation/regression/unit_tests/functional_tests/platform_if_manager_test.py
+++ b/scripts/automation/regression/functional_tests/platform_if_manager_test.py
@@ -9,7 +9,7 @@ from nose.tools import assert_not_equal
class CIfManager_Test(functional_general_test.CGeneralFunctional_Test):
def setUp(self):
- self.dev_cfg = CDeviceCfg('./unit_tests/functional_tests/config.yaml')
+ self.dev_cfg = CDeviceCfg('./functional_tests/config.yaml')
self.if_mng = CIfManager()
# main testing method to check the entire class
diff --git a/scripts/automation/regression/unit_tests/functional_tests/platform_if_obj_test.py b/scripts/automation/regression/functional_tests/platform_if_obj_test.py
index 534d4170..534d4170 100755
--- a/scripts/automation/regression/unit_tests/functional_tests/platform_if_obj_test.py
+++ b/scripts/automation/regression/functional_tests/platform_if_obj_test.py
diff --git a/scripts/automation/regression/unit_tests/functional_tests/scapy_pkt_builder_test.py b/scripts/automation/regression/functional_tests/scapy_pkt_builder_test.py
index 7e2f6271..eaff9530 100644
--- a/scripts/automation/regression/unit_tests/functional_tests/scapy_pkt_builder_test.py
+++ b/scripts/automation/regression/functional_tests/scapy_pkt_builder_test.py
@@ -80,22 +80,22 @@ class CTRexPktBuilderSanitySCapy_Test(pkt_bld_general_test.CGeneralPktBld_Test):
pkt_builder = CScapyTRexPktBuilder(pkt = pkt);
- assert_equal( pkt_builder.is_def_src_mac () ,True)
- assert_equal( pkt_builder.is_def_dst_mac () ,True)
+ assert_equal( pkt_builder.is_default_src_mac () ,True)
+ assert_equal( pkt_builder.is_default_dst_mac () ,True)
pkt = Ether(src="00:00:00:00:00:01")/IP()/UDP()
pkt_builder = CScapyTRexPktBuilder(pkt = pkt);
- assert_equal( pkt_builder.is_def_src_mac (), False)
- assert_equal( pkt_builder.is_def_dst_mac (), True)
+ assert_equal( pkt_builder.is_default_src_mac (), False)
+ assert_equal( pkt_builder.is_default_dst_mac (), True)
pkt = Ether(dst="00:00:00:00:00:01")/IP()/UDP()
pkt_builder = CScapyTRexPktBuilder(pkt = pkt);
- assert_equal( pkt_builder.is_def_src_mac (),True)
- assert_equal( pkt_builder.is_def_dst_mac (),False)
+ assert_equal( pkt_builder.is_default_src_mac (),True)
+ assert_equal( pkt_builder.is_default_dst_mac (),False)
@@ -299,7 +299,7 @@ class CTRexPktBuilderSanitySCapy_Test(pkt_bld_general_test.CGeneralPktBld_Test):
assert_equal(d['instructions'][4]['pkt_offset'],38)
def test_simple_pkt_loader(self):
- p=RawPcapReader("stl/golden/basic_imix_golden.cap")
+ p=RawPcapReader("functional_tests/golden/basic_imix_golden.cap")
print ""
for pkt in p:
print pkt[1]
@@ -308,7 +308,7 @@ class CTRexPktBuilderSanitySCapy_Test(pkt_bld_general_test.CGeneralPktBld_Test):
def test_simple_pkt_loader1(self):
- pkt_builder = CScapyTRexPktBuilder(pkt = "stl/golden/udp_590.cap", build_raw = False);
+ pkt_builder = CScapyTRexPktBuilder(pkt = "functional_tests/golden/udp_590.cap", build_raw = False);
print ""
pkt_builder.dump_as_hex()
r = pkt_builder.pkt_raw
@@ -322,7 +322,7 @@ class CTRexPktBuilderSanitySCapy_Test(pkt_bld_general_test.CGeneralPktBld_Test):
def test_simple_pkt_loader2(self):
- pkt_builder = CScapyTRexPktBuilder(pkt = "stl/golden/basic_imix_golden.cap");
+ pkt_builder = CScapyTRexPktBuilder(pkt = "functional_tests/golden/basic_imix_golden.cap");
assert_equal(pkt_builder.pkt_layers_desc (), "Ethernet:IP:UDP:Raw");
def test_simple_pkt_loader3(self):
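
The renamed is_default_src_mac()/is_default_dst_mac() helpers are exercised here but not defined in this diff. A plausible sketch of such a check, not the builder's actual implementation: Scapy keeps only explicitly assigned values in a layer's fields dict, so an untouched MAC is simply absent from it:

    from scapy.all import Ether, IP, UDP

    def is_default_src_mac(pkt):
        # explicitly assigned fields live in layer.fields; defaults do not
        return 'src' not in pkt[Ether].fields

    pkt = Ether() / IP() / UDP()
    assert is_default_src_mac(pkt)                        # nothing set -> default MAC
    pkt = Ether(src = '00:00:00:00:00:01') / IP() / UDP()
    assert not is_default_src_mac(pkt)                    # explicit src -> not default
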
diff --git a/scripts/automation/regression/unit_tests/functional_tests/stl_basic_tests.py b/scripts/automation/regression/functional_tests/stl_basic_tests.py
index cd653895..ea515401 100644
--- a/scripts/automation/regression/unit_tests/functional_tests/stl_basic_tests.py
+++ b/scripts/automation/regression/functional_tests/stl_basic_tests.py
@@ -6,9 +6,10 @@ from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import nottest
from nose.plugins.attrib import attr
-from unit_tests.trex_general_test import CTRexScenario
+from trex import CTRexScenario
from dpkt import pcap
from trex_stl_lib import trex_stl_sim
+from trex_stl_lib.trex_stl_streams import STLProfile
import sys
import os
import subprocess
@@ -73,11 +74,11 @@ class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
pkts2 = reader2.readpkts()
assert_equal(len(pkts1), len(pkts2))
-
+
for pkt1, pkt2, i in zip(pkts1, pkts2, xrange(1, len(pkts1))):
ts1 = pkt1[0]
ts2 = pkt2[0]
- if abs(ts1-ts2) > 0.000005: # 5 nsec
+ if abs(ts1-ts2) > 0.000005: # 5 nsec
raise AssertionError("TS error: cap files '{0}', '{1}' differ in cap #{2} - '{3}' vs. '{4}'".format(cap1, cap2, i, ts1, ts2))
if pkt1[1] != pkt2[1]:
@@ -102,7 +103,7 @@ class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
- def run_py_profile_path (self, profile, options,silent = False, do_no_remove=False,compare =True, test_generated=True):
+ def run_py_profile_path (self, profile, options,silent = False, do_no_remove=False,compare =True, test_generated=True, do_no_remove_generated = False):
output_cap = "a.pcap"
input_file = os.path.join('stl/', profile)
golden_file = os.path.join('exp',os.path.basename(profile).split('.')[0]+'.pcap');
@@ -118,38 +119,42 @@ class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
if compare:
self.compare_caps(output_cap, golden_file)
finally:
- if not do_no_remove:
+ if not do_no_remove:
os.unlink(output_cap)
if test_generated:
try:
- from trex_stl_lib.api import STLProfile # if test is skipped, don't load it
generated_filename = input_file.replace('.py', '_GENERATED.py').replace('.yaml', '_GENERATED.py')
if input_file.endswith('.py'):
profile = STLProfile.load_py(input_file)
elif input_file.endswith('.yaml'):
profile = STLProfile.load_yaml(input_file)
profile.dump_to_code(generated_filename)
+
rc = self.run_sim(generated_filename, output_cap, options, silent)
assert_equal(rc, True)
-
+
if compare:
self.compare_caps(output_cap, golden_file)
+ except Exception as e:
+ print e
finally:
- if not do_no_remove:
+ if not do_no_remove_generated:
os.unlink(generated_filename)
+ os.unlink(generated_filename + 'c')
+ if not do_no_remove:
os.unlink(output_cap)
def test_stl_profiles (self):
- p = [
+ p = [
["udp_1pkt_1mac_override.py","-m 1 -l 50",True],
- ["syn_attack.py","-m 1 -l 50",True], # can't compare random now
+ ["syn_attack.py","-m 1 -l 50",True], # can't compare random now
["udp_1pkt_1mac.py","-m 1 -l 50",True],
["udp_1pkt_mac.py","-m 1 -l 50",True],
["udp_1pkt.py","-m 1 -l 50",True],
["udp_1pkt_tuple_gen.py","-m 1 -l 50",True],
- ["udp_rand_len_9k.py","-m 1 -l 50",True], # can't do the compare
+ ["udp_rand_len_9k.py","-m 1 -l 50",True], # can't do the compare
["udp_1pkt_mpls.py","-m 1 -l 50",True],
["udp_1pkt_mpls_vm.py","-m 1 ",True],
["imix.py","-m 1 -l 100",True],
@@ -195,14 +200,14 @@ class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
p1 = [ ["udp_1pkt_range_clients_split_garp.py","-m 1 -l 50",True] ]
-
+
for obj in p:
try:
test_generated = obj[3]
except: # check generated if not said otherwise
test_generated = True
- self.run_py_profile_path (obj[0],obj[1],compare =obj[2], test_generated = test_generated, do_no_remove=True)
+ self.run_py_profile_path (obj[0],obj[1],compare =obj[2], test_generated = test_generated, do_no_remove=True, do_no_remove_generated = False)
def test_hlt_profiles (self):
@@ -231,7 +236,7 @@ class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
)
for obj in p:
- self.run_py_profile_path (obj[0], obj[1], compare =obj[2], do_no_remove=True)
+ self.run_py_profile_path (obj[0], obj[1], compare =obj[2], do_no_remove=True, do_no_remove_generated = False)
# valgrind tests - this runs in multi thread as it safe (no output)
def test_valgrind_various_profiles (self):
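
The test_generated branch above amounts to a load / dump_to_code / re-simulate round trip. A condensed sketch using the same trex_stl_lib calls; the profile path is illustrative:

    from trex_stl_lib.trex_stl_streams import STLProfile

    input_file = 'stl/imix.py'                              # illustrative profile
    generated  = input_file.replace('.py', '_GENERATED.py')

    profile = STLProfile.load_py(input_file)                # parse the original profile
    profile.dump_to_code(generated)                         # emit equivalent Python source

    # run_sim() on the generated file should produce the same pcap as the original,
    # which compare_caps() then checks against the golden capture
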
diff --git a/scripts/automation/regression/functional_unit_tests.py b/scripts/automation/regression/functional_unit_tests.py
deleted file mode 100755
index 30e915c4..00000000
--- a/scripts/automation/regression/functional_unit_tests.py
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/router/bin/python
-
-__copyright__ = "Copyright 2014"
-
-
-
-import os
-import sys
-import outer_packages
-import nose
-from nose.plugins import Plugin
-import logging
-from rednose import RedNose
-import termstyle
-
-
-
-
-def set_report_dir (report_dir):
- if not os.path.exists(report_dir):
- os.mkdir(report_dir)
-
-if __name__ == "__main__":
-
- # setting defaults. By default we run all the test suite
- specific_tests = False
- disableLogCapture = False
- long_test = False
- report_dir = "reports"
-
- nose_argv= sys.argv + ['-s', '-v', '--exe', '--rednose', '--detailed-errors']
-
-# for arg in sys.argv:
-# if 'unit_tests/' in arg:
-# specific_tests = True
-# if 'log-path' in arg:
-# disableLogCapture = True
-# if arg=='--collect-only': # this is a user trying simply to view the available tests. removing xunit param from nose args
-# nose_argv[5:7] = []
-
-
-
- try:
- result = nose.run(argv = nose_argv, addplugins = [RedNose()])
-
- if (result == True):
- print termstyle.green("""
- ..::''''::..
- .;'' ``;.
- :: :: :: ::
- :: :: :: ::
- :: :: :: ::
- :: .:' :: :: `:. ::
- :: : : ::
- :: `:. .:' ::
- `;..``::::''..;'
- ``::,,,,::''
-
- ___ ___ __________
- / _ \/ _ | / __/ __/ /
- / ___/ __ |_\ \_\ \/_/
- /_/ /_/ |_/___/___(_)
-
- """)
- sys.exit(0)
- else:
- sys.exit(-1)
-
- finally:
- pass
-
-
-
-
-
-
-
-
diff --git a/scripts/automation/regression/misc_methods.py b/scripts/automation/regression/misc_methods.py
index 2341b9be..783858e8 100755
--- a/scripts/automation/regression/misc_methods.py
+++ b/scripts/automation/regression/misc_methods.py
@@ -20,29 +20,28 @@ def mix_string (str):
return str.replace(' ', '_').lower()
# executes given command, returns tuple (return_code, stdout, stderr)
-def run_command(cmd):
- print 'Running command:', cmd
- proc = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- (stdout, stderr) = proc.communicate()
- if stdout:
- print 'Stdout:\n%s' % stdout
- if stderr:
- print 'Stderr:\n%s' % stderr
- print 'Return code: %s' % proc.returncode
- return (proc.returncode, stdout, stderr)
-
-
-def run_remote_command(host, passwd, command_string):
- cmd = 'ssh -tt %s \'sudo sh -c "%s"\'' % (host, command_string)
- print 'Trying connection with ssh...'
- return_code, stdout, stderr = run_command(cmd)
- if return_code == 0:
- return (return_code, stdout, stderr)
- elif passwd is not None:
- print 'Trying connection with expect + sshpass.exp...'
- cmd = 'sshpass.exp %s %s root "%s"' % (passwd, host, command_string)
- return_code, stdout, stderr = run_command(cmd)
- return (return_code, stdout, stderr)
+def run_command(cmd, background = False):
+ if background:
+ print 'Running command in background:', cmd
+ with open(os.devnull, 'w') as tempf:
+ subprocess.Popen(shlex.split(cmd), stdin=tempf, stdout=tempf, stderr=tempf)
+ return (None,)*3
+ else:
+ print 'Running command:', cmd
+ proc = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ (stdout, stderr) = proc.communicate()
+ if stdout:
+ print 'Stdout:\n%s' % stdout
+ if proc.returncode:
+ if stderr:
+ print 'Stderr:\n%s' % stderr
+ print 'Return code: %s' % proc.returncode
+ return (proc.returncode, stdout, stderr)
+
+
+def run_remote_command(host, command_string, background = False):
+ cmd = 'ssh -tt %s \'sudo sh -ec "%s"\'' % (host, command_string)
+ return run_command(cmd, background)
def generate_intf_lists (interfacesList):
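
Usage note on the reworked helpers (host and commands are illustrative): with background = True the child is detached with all streams on /dev/null and the call returns immediately with (None, None, None); the foreground path still blocks and captures output, now printing stderr only on a non-zero return code:

    import misc_methods

    # foreground: blocks, captures stdout/stderr, reports return code on failure
    rc, out, err = misc_methods.run_command('ls /tmp')

    # background: fire-and-forget, e.g. launching a long-lived server process
    misc_methods.run_remote_command('trex-host', './t-rex-64 -i', background = True)
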
diff --git a/scripts/automation/regression/outer_packages.py b/scripts/automation/regression/outer_packages.py
index 6b7c58f9..f55c247d 100755
--- a/scripts/automation/regression/outer_packages.py
+++ b/scripts/automation/regression/outer_packages.py
@@ -11,7 +11,8 @@ PATH_TO_PYTHON_LIB = os.path.abspath(os.path.join(TREX_PATH, 'external_libs'))
PATH_TO_CTRL_PLANE = os.path.abspath(os.path.join(TREX_PATH, 'automation', 'trex_control_plane'))
PATH_STL_API = os.path.abspath(os.path.join(PATH_TO_CTRL_PLANE, 'stl'))
-NIGHTLY_MODULES = ['enum34-1.0.4',
+NIGHTLY_MODULES = ['ansi2html',
+ 'enum34-1.0.4',
'nose-1.3.4',
'rednose-0.4.1',
'progressbar-2.2',
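
outer_packages makes each NIGHTLY_MODULES entry importable by putting its directory under external_libs on sys.path; adding 'ansi2html' here is what lets aggregate_results.py import it. A generic sketch of that mechanism, with the directory layout assumed from PATH_TO_PYTHON_LIB above:

    import os, sys

    PATH_TO_PYTHON_LIB = '/path/to/external_libs'   # illustrative; see outer_packages.py
    NIGHTLY_MODULES = ['ansi2html', 'enum34-1.0.4', 'nose-1.3.4']

    for module in NIGHTLY_MODULES:
        module_path = os.path.join(PATH_TO_PYTHON_LIB, module)
        if module_path not in sys.path:
            sys.path.insert(1, module_path)         # bundled copy wins over a system one
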
diff --git a/scripts/automation/regression/stateful_tests/__init__.py b/scripts/automation/regression/stateful_tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/scripts/automation/regression/stateful_tests/__init__.py
diff --git a/scripts/automation/regression/unit_tests/tests_exceptions.py b/scripts/automation/regression/stateful_tests/tests_exceptions.py
index 604efcc8..604efcc8 100755
--- a/scripts/automation/regression/unit_tests/tests_exceptions.py
+++ b/scripts/automation/regression/stateful_tests/tests_exceptions.py
diff --git a/scripts/automation/regression/unit_tests/trex_general_test.py b/scripts/automation/regression/stateful_tests/trex_general_test.py
index f367a397..21f5d8aa 100755
--- a/scripts/automation/regression/unit_tests/trex_general_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_general_test.py
@@ -26,6 +26,7 @@ Description:
from nose.plugins import Plugin
from nose.plugins.skip import SkipTest
import trex
+from trex import CTRexScenario
import misc_methods
import sys
import os
@@ -37,50 +38,14 @@ from tests_exceptions import *
from platform_cmd_link import *
import unittest
-
-class CTRexScenario():
- modes = set() # list of modes of this setup: loopback, virtual etc.
- server_logs = False
- is_test_list = False
- is_init = False
- trex_crashed = False
- configuration = None
- trex = None
- router = None
- router_cfg = None
- daemon_log_lines = 0
- setup_name = None
- setup_dir = None
- router_image = None
- trex_version = None
- scripts_path = None
- benchmark = None
- report_dir = 'reports'
- # logger = None
-
-#scenario = CTRexScenario()
-
def setUpModule(module):
-# print ("") # this is to get a newline after the dots
-# print ("setup_module before anything in this file")
-# # ff = CTRexScenario()
-# scenario.configuration = misc_methods.load_complete_config_file('config/config.yaml')
-# scenario.trex = trex.CTRexRunner(scenario.configuration[0], None)
-# scenario.router = CPlatform(scenario.configuration[1], False, scenario.configuration[2])
-# scenario.router.platform.preCheck()
-# print "Done instantiating trex scenario!"
pass
def tearDownModule(module):
-# print ("") # this is to get a newline after the dots
-# scenario.router.platform.postCheck()
-# print ("teardown_module after anything in this file")
pass
-
-
class CTRexGeneral_Test(unittest.TestCase):
- """This class defines the general testcase of the T-Rex traffic generator"""
+ """This class defines the general stateful testcase of the T-Rex traffic generator"""
def __init__ (self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
if CTRexScenario.is_test_list:
@@ -100,7 +65,8 @@ class CTRexGeneral_Test(unittest.TestCase):
self.is_VM = True if 'VM' in self.modes else False
if not CTRexScenario.is_init:
- CTRexScenario.trex_version = self.trex.get_trex_version()
+ if self.trex: # stateful
+ CTRexScenario.trex_version = self.trex.get_trex_version()
if not self.is_loopback:
# initilize the scenario based on received configuration, once per entire testing session
CTRexScenario.router = CPlatform(CTRexScenario.router_cfg['silent_mode'])
@@ -306,12 +272,13 @@ class CTRexGeneral_Test(unittest.TestCase):
test_setup_modes_conflict = self.modes & set(self.unsupported_modes)
if test_setup_modes_conflict:
self.skip("The test can't run with following modes of given setup: %s " % test_setup_modes_conflict)
- if not self.trex.is_idle():
+ if self.trex and not self.trex.is_idle():
print 'Warning: TRex is not idle at setUp, trying to stop it.'
self.trex.force_kill(confirm = False)
if not self.is_loopback:
print ''
- self.router.load_clean_config()
+ if self.trex: # stateful
+ self.router.load_clean_config()
self.router.clear_counters()
self.router.clear_packet_drop_stats()
@@ -324,6 +291,8 @@ class CTRexGeneral_Test(unittest.TestCase):
# def test_isInitialized(self):
# assert CTRexScenario.is_init == True
def tearDown(self):
+ if not self.trex:
+ return
if not self.trex.is_idle():
print 'Warning: TRex is not idle at tearDown, trying to stop it.'
self.trex.force_kill(confirm = False)
diff --git a/scripts/automation/regression/unit_tests/trex_imix_test.py b/scripts/automation/regression/stateful_tests/trex_imix_test.py
index 43dea900..43dea900 100755
--- a/scripts/automation/regression/unit_tests/trex_imix_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_imix_test.py
diff --git a/scripts/automation/regression/unit_tests/trex_ipv6_test.py b/scripts/automation/regression/stateful_tests/trex_ipv6_test.py
index bffb4754..bffb4754 100755
--- a/scripts/automation/regression/unit_tests/trex_ipv6_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_ipv6_test.py
diff --git a/scripts/automation/regression/unit_tests/trex_nat_test.py b/scripts/automation/regression/stateful_tests/trex_nat_test.py
index e7fe5ca5..e7fe5ca5 100755
--- a/scripts/automation/regression/unit_tests/trex_nat_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_nat_test.py
diff --git a/scripts/automation/regression/unit_tests/trex_nbar_test.py b/scripts/automation/regression/stateful_tests/trex_nbar_test.py
index 74d0227b..74d0227b 100755
--- a/scripts/automation/regression/unit_tests/trex_nbar_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_nbar_test.py
diff --git a/scripts/automation/regression/unit_tests/trex_rx_test.py b/scripts/automation/regression/stateful_tests/trex_rx_test.py
index 37b1c722..37b1c722 100755
--- a/scripts/automation/regression/unit_tests/trex_rx_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_rx_test.py
diff --git a/scripts/automation/regression/stateless_tests/stl_examples_test.py b/scripts/automation/regression/stateless_tests/stl_examples_test.py
new file mode 100755
index 00000000..9e4fffc9
--- /dev/null
+++ b/scripts/automation/regression/stateless_tests/stl_examples_test.py
@@ -0,0 +1,33 @@
+#!/router/bin/python
+from stl_general_test import CStlGeneral_Test, CTRexScenario
+import os, sys
+from misc_methods import run_command
+
+class STLExamples_Test(CStlGeneral_Test):
+ """This class defines the IMIX testcase of the T-Rex traffic generator"""
+
+ def setUp(self):
+ CStlGeneral_Test.setUp(self)
+ # examples connect by their own
+ if self.is_connected():
+ CTRexScenario.stl_trex.disconnect()
+
+ @classmethod
+ def tearDownClass(cls):
+ # connect back at end of tests
+ if not cls.is_connected():
+ CTRexScenario.stl_trex.connect()
+
+ def test_stl_examples(self):
+ examples_dir = '../trex_control_plane/stl/examples'
+ examples_to_test = [
+ 'stl_imix.py',
+ ]
+
+ for example in examples_to_test:
+ return_code, stdout, stderr = run_command("sh -c 'cd %s; %s %s -s %s'" % (examples_dir, sys.executable, example, CTRexScenario.configuration.trex['trex_name']))
+ assert return_code == 0, 'example %s failed.\nstdout: %s\nstderr: %s' % (return_code, stdout, stderr)
+
+ def test_stl_examples1(self):
+ print 'in test_stl_examples1'
+
diff --git a/scripts/automation/regression/stateless_tests/stl_general_test.py b/scripts/automation/regression/stateless_tests/stl_general_test.py
new file mode 100644
index 00000000..435c7eea
--- /dev/null
+++ b/scripts/automation/regression/stateless_tests/stl_general_test.py
@@ -0,0 +1,68 @@
+import os, sys
+import unittest
+from trex import CTRexScenario
+from stateful_tests.trex_general_test import CTRexGeneral_Test
+from trex_stl_lib.api import *
+import time
+from nose.tools import nottest
+
+
+class CStlGeneral_Test(CTRexGeneral_Test):
+ """This class defines the general stateless testcase of the T-Rex traffic generator"""
+
+ #once for all tests under CStlGeneral_Test
+ @classmethod
+ def setUpClass(cls):
+ cls.stl_trex = CTRexScenario.stl_trex
+
+ def setUp(self):
+ CTRexGeneral_Test.setUp(self)
+ # check basic requirements, should be verified at test_connectivity, here only skip test
+ if CTRexScenario.stl_init_error:
+ self.skip(CTRexScenario.stl_init_error)
+
+ @staticmethod
+ def connect(timeout = 20):
+ sys.stdout.write('Connecting')
+ for i in range(timeout):
+ try:
+ sys.stdout.write('.')
+ sys.stdout.flush()
+ CTRexScenario.stl_trex.connect()
+ return
+ except:
+ time.sleep(1)
+ CTRexScenario.stl_trex.connect()
+
+ @staticmethod
+ def get_port_count():
+ return CTRexScenario.stl_trex.get_port_count()
+
+ @staticmethod
+ def is_connected():
+ return CTRexScenario.stl_trex.is_connected()
+
+class STLBasic_Test(CStlGeneral_Test):
+ # will run it first explicitly, check connectivity and configure routing
+ @nottest
+ def test_connectivity(self):
+ if not self.is_loopback:
+ CTRexScenario.router.load_clean_config()
+ CTRexScenario.router.configure_basic_interfaces()
+ CTRexScenario.router.config_pbr(mode = "config")
+
+ CTRexScenario.stl_init_error = 'Client could not connect'
+ self.connect()
+ print ''
+ try:
+ stl_map_ports(CTRexScenario.stl_trex)
+ except:
+ pass
+ time.sleep(5)
+ CTRexScenario.stl_init_error = 'Client could not map ports'
+ CTRexScenario.stl_ports_map = stl_map_ports(CTRexScenario.stl_trex)
+ CTRexScenario.stl_init_error = 'Could not determine bidirectional ports'
+ print 'Ports mapping: %s' % CTRexScenario.stl_ports_map
+ if not len(CTRexScenario.stl_ports_map['bi']):
+ raise STLError('No bidirectional ports')
+ CTRexScenario.stl_init_error = None
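
test_connectivity uses a pre-set-the-error-message pattern: CTRexScenario.stl_init_error is assigned before each risky step, so an exception anywhere leaves behind a message naming exactly the step that failed, and every later test's setUp skips with it. A minimal sketch of the same pattern; names and steps are illustrative:

    class InitState:
        init_error = None            # consulted by every test's setUp

    def guarded_init(steps):
        # steps: list of (description, callable); an exception propagates,
        # but init_error keeps naming the step that raised it
        for description, action in steps:
            InitState.init_error = 'Failed: %s' % description
            action()
        InitState.init_error = None  # all steps passed
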
diff --git a/scripts/automation/regression/trex.py b/scripts/automation/regression/trex.py
index b9fd87ec..8efa41f6 100644
--- a/scripts/automation/regression/trex.py
+++ b/scripts/automation/regression/trex.py
@@ -8,10 +8,35 @@ import re
import signal
import time
from CProgressDisp import TimedProgressBar
-import unit_tests.trex_general_test
-from unit_tests.tests_exceptions import TRexInUseError
+from stateful_tests.tests_exceptions import TRexInUseError
import datetime
+class CTRexScenario:
+ modes = set() # list of modes of this setup: loopback, virtual etc.
+ server_logs = False
+ is_test_list = False
+ is_init = False
+ is_stl_init = False
+ trex_crashed = False
+ configuration = None
+ trex = None
+ stl_trex = None
+ stl_ports_map = None
+ stl_init_error = None
+ router = None
+ router_cfg = None
+ daemon_log_lines = 0
+ setup_name = None
+ setup_dir = None
+ router_image = None
+ trex_version = None
+ scripts_path = None
+ benchmark = None
+ report_dir = 'reports'
+ # logger = None
+ test_types = {'functional_tests': [], 'stateful_tests': [], 'stateless_tests': []}
+ is_copied = False
+
class CTRexRunner:
"""This is an instance for generating a CTRexRunner"""
@@ -67,7 +92,7 @@ class CTRexRunner:
trex_cmd = trex_cmd_str % (cores,
multiplier,
- duration,
+ duration,
self.yaml)
# self.trex_config['trex_latency'])
@@ -81,8 +106,8 @@ class CTRexRunner:
print "\nT-REX COMMAND: ", trex_cmd
- cmd = 'sshpass.exp %s %s root "cd %s; %s > %s"' % (self.trex_config['trex_password'],
- self.trex_config['trex_name'],
+ cmd = 'sshpass.exp %s %s root "cd %s; %s > %s"' % (self.trex_config['trex_password'],
+ self.trex_config['trex_name'],
self.trex_config['trex_version_path'],
trex_cmd,
export_path)
@@ -91,18 +116,18 @@ class CTRexRunner:
def generate_fetch_cmd (self, result_file_full_path="/tmp/trex.txt"):
""" generate_fetch_cmd(self, result_file_full_path) -> str
-
+
Generates a custom command for which will enable to fetch the resutls of the T-Rex run.
Returns a command (string) to be issued on the trex server.
-
+
Example use: fetch_trex_results() - command that will fetch the content from the default log file- /tmp/trex.txt
fetch_trex_results("/tmp/trex_secondary_file.txt") - command that will fetch the content from a custom log file- /tmp/trex_secondary_file.txt
"""
#dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
script_running_dir = os.path.dirname(os.path.realpath(__file__)) # get the current script working directory so that the sshpass could be accessed.
- cmd = script_running_dir + '/sshpass.exp %s %s root "cat %s"' % (self.trex_config['trex_password'],
- self.trex_config['trex_name'],
- result_file_full_path);
+ cmd = script_running_dir + '/sshpass.exp %s %s root "cat %s"' % (self.trex_config['trex_password'],
+ self.trex_config['trex_name'],
+ result_file_full_path);
return cmd;
@@ -153,10 +178,10 @@ class CTRexRunner:
interrupted = True
if ((end_time - start_time) < 2):
raise TRexInUseError ('T-Rex run failed since T-Rex is used by another process, or due to reachability issues')
- else:
- unit_tests.trex_general_test.CTRexScenario.trex_crashed = True
- # results = subprocess.Popen(cmd, stdout = open(os.devnull, 'wb'),
- # shell=True, preexec_fn=os.setsid)
+ else:
+ CTRexScenario.trex_crashed = True
+ # results = subprocess.Popen(cmd, stdout = open(os.devnull, 'wb'),
+ # shell=True, preexec_fn=os.setsid)
except KeyboardInterrupt:
print "\nT-Rex test interrupted by user during traffic generation!!"
results.killpg(results.pid, signal.SIGTERM) # Send the kill signal to all the process groups
@@ -174,7 +199,7 @@ class CTRexRunner:
sys.stderr.flush()
return None
else:
-
+
if tmp_path:
cmd = self.generate_fetch_cmd( tmp_path )#**kwargs)#results_file_path)
else:
@@ -198,7 +223,7 @@ class CTRexResult():
def __init__ (self, file, buffer = None):
self.file = file
self.buffer = buffer
- self.result = {}
+ self.result = {}
def load_file_lines (self):
@@ -230,7 +255,7 @@ class CTRexResult():
Parameters
----------
- key :
+ key :
Key of the self.result dictionary of the TRexResult instance
val : float
Key of the self.result dictionary of the TRexResult instance
@@ -240,8 +265,8 @@ class CTRexResult():
"""
s = _str.strip()
-
- if s[0]=="G":
+
+ if s[0]=="G":
val = val*1E9
elif s[0]=="M":
val = val*1E6
@@ -262,14 +287,14 @@ class CTRexResult():
def parse (self):
""" parse(self) -> None
- Parse the content of the result file from the TRex test and upload the data into
+ Parse the content of the result file from the TRex test and upload the data into
"""
stop_read = False
d = {
- 'total-tx' : 0,
- 'total-rx' : 0,
- 'total-pps' : 0,
- 'total-cps' : 0,
+ 'total-tx' : 0,
+ 'total-rx' : 0,
+ 'total-pps' : 0,
+ 'total-cps' : 0,
'expected-pps' : 0,
'expected-cps' : 0,
@@ -296,7 +321,7 @@ class CTRexResult():
# # continue to parse !! we try the second
# self.result[key] = val #update latest
- # check if we need to stop reading
+ # check if we need to stop reading
match = re.match(".*latency daemon has stopped.*", line)
if match:
stop_read = True
@@ -307,7 +332,7 @@ class CTRexResult():
key = misc_methods.mix_string(match.group(1))
val = float(match.group(4))
if d.has_key(key):
- if stop_read == False:
+ if stop_read == False:
self.update (key, val, match.group(5))
else:
self.result[key] = val # update latest
@@ -321,7 +346,7 @@ class CTRexResult():
key = misc_methods.mix_string(match.group(1))
val = float(match.group(4))
if d.has_key(key):
- if stop_read == False:
+ if stop_read == False:
self.update (key, val, match.group(5))
else:
self.result[key] = val # update latest
@@ -337,7 +362,7 @@ class CTRexResult():
match = re.match("\W*(\w(\w|[-])+)\W*([:]|[=])\W*(OK)(.*)", line)
if match:
key = misc_methods.mix_string(match.group(1))
- val = 0 # valid
+ val = 0 # valid
self.result[key] = val #update latest
continue
@@ -347,7 +372,7 @@ class CTRexResult():
val = float(match.group(3))
if self.result.has_key(key):
if (self.result[key] < val): # update only if larger than previous value
- self.result[key] = val
+ self.result[key] = val
else:
self.result[key] = val
continue
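
CTRexResult.update above normalizes suffixed counter readings (Gbps, Mpps, ...) to base units. A standalone sketch of the scaling shown in that hunk; the K branch is an assumption by symmetry, since the hunk is truncated after M:

    def scale_value(val, unit_str):
        s = unit_str.strip()
        if s and s[0] == 'G':
            return val * 1e9
        if s and s[0] == 'M':
            return val * 1e6
        if s and s[0] == 'K':        # assumed; not visible in the truncated hunk
            return val * 1e3
        return val

    print(scale_value(1.5, 'Gbps'))  # 1500000000.0
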
diff --git a/scripts/automation/regression/trex_unit_test.py b/scripts/automation/regression/trex_unit_test.py
index 1d75a8b6..c90d5bdc 100755
--- a/scripts/automation/regression/trex_unit_test.py
+++ b/scripts/automation/regression/trex_unit_test.py
@@ -34,14 +34,16 @@ import CustomLogger
import misc_methods
from rednose import RedNose
import termstyle
-from unit_tests.trex_general_test import CTRexScenario
+from trex import CTRexScenario
from client.trex_client import *
from common.trex_exceptions import *
+from trex_stl_lib.api import *
import trex
import socket
from pprint import pprint
import subprocess
import re
+import time
def check_trex_path(trex_path):
if os.path.isfile('%s/trex_daemon_server' % trex_path):
@@ -60,34 +62,44 @@ def get_trex_path():
raise Exception('Could not determine trex_under_test folder, try setting env.var. TREX_UNDER_TEST')
return latest_build_path
-def _start_stop_trex_remote_server(trex_data, command):
- # start t-rex server as daemon process
- # subprocess.call(["/usr/bin/python", "trex_daemon_server", "restart"], cwd = trex_latest_build)
- misc_methods.run_remote_command(trex_data['trex_name'],
- trex_data['trex_password'],
- command)
-
-def start_trex_remote_server(trex_data, kill_running = False):
- if kill_running:
- (return_code, stdout, stderr) = misc_methods.run_remote_command(trex_data['trex_name'],
- trex_data['trex_password'],
- 'ps -u root --format comm,pid,cmd | grep t-rex-64')
- if stdout:
- for process in stdout.split('\n'):
- try:
- proc_name, pid, full_cmd = re.split('\s+', process, maxsplit=2)
- if proc_name.find('t-rex-64') >= 0:
- print 'Killing remote process: %s' % full_cmd
- misc_methods.run_remote_command(trex_data['trex_name'],
- trex_data['trex_password'],
- 'kill %s' % pid)
- except:
- continue
-
- _start_stop_trex_remote_server(trex_data, DAEMON_START_COMMAND)
-
-def stop_trex_remote_server(trex_data):
- _start_stop_trex_remote_server(trex_data, DAEMON_STOP_COMMAND)
+STATEFUL_STOP_COMMAND = './trex_daemon_server stop; sleep 1; ./trex_daemon_server stop; sleep 1'
+STATEFUL_RUN_COMMAND = 'rm /var/log/trex/trex_daemon_server.log; ./trex_daemon_server start; sleep 2; ./trex_daemon_server show'
+TREX_FILES = ('_t-rex-64', '_t-rex-64-o', '_t-rex-64-debug', '_t-rex-64-debug-o')
+
+def trex_remote_command(trex_data, command, background = False, from_scripts = True):
+ if from_scripts:
+ return misc_methods.run_remote_command(trex_data['trex_name'], ('cd %s; ' % CTRexScenario.scripts_path)+ command, background)
+ return misc_methods.run_remote_command(trex_data['trex_name'], command, background)
+
+# 1 = running, 0 - not running
+def check_trex_running(trex_data):
+ commands = []
+ for filename in TREX_FILES:
+ commands.append('ps -C %s > /dev/null' % filename)
+ return_code, _, _ = trex_remote_command(trex_data, ' || '.join(commands), from_scripts = False)
+ return not return_code
+
+def kill_trex_process(trex_data):
+ return_code, stdout, _ = trex_remote_command(trex_data, 'ps -u root --format comm,pid,cmd | grep _t-rex-64 | grep -v grep || true', from_scripts = False)
+ assert return_code == 0, 'last remote command failed'
+ if stdout:
+ for process in stdout.split('\n'):
+ try:
+ proc_name, pid, full_cmd = re.split('\s+', process, maxsplit=2)
+ if proc_name.find('t-rex-64') >= 0:
+ print 'Killing remote process: %s' % full_cmd
+ trex_remote_command(trex_data, 'kill %s' % pid, from_scripts = False)
+ except:
+ continue
+
+def address_to_ip(address):
+ for i in range(10):
+ try:
+ return socket.gethostbyname(address)
+ except:
+ continue
+ return socket.gethostbyname(address)
+
class CTRexTestConfiguringPlugin(Plugin):
def options(self, parser, env = os.environ):
@@ -105,74 +117,124 @@ class CTRexTestConfiguringPlugin(Plugin):
dest='log_path',
help='Specify path for the tests` log to be saved at. Once applied, logs capturing by nose will be disabled.') # Default is CURRENT/WORKING/PATH/trex_log/trex_log.log')
parser.add_option('--verbose-mode', '--verbose_mode', action="store_true", default = False,
- dest="verbose_mode",
+ dest="verbose_mode",
help="Print RPC command and router commands.")
parser.add_option('--server-logs', '--server_logs', action="store_true", default = False,
- dest="server_logs",
+ dest="server_logs",
help="Print server side (TRex and trex_daemon) logs per test.")
parser.add_option('--kill-running', '--kill_running', action="store_true", default = False,
- dest="kill_running",
+ dest="kill_running",
help="Kills running TRex process on remote server (useful for regression).")
- parser.add_option('--functional', action="store_true", default = False,
- dest="functional",
- help="Don't connect to remote server for runnning daemon (For functional tests).")
- parser.add_option('--copy', action="store_true", default = False,
- dest="copy",
- help="Copy TRex server to temp directory and run from there.")
+ parser.add_option('--func', '--functional', action="store_true", default = False,
+ dest="functional",
+ help="Run functional tests.")
+ parser.add_option('--stl', '--stateless', action="store_true", default = False,
+ dest="stateless",
+ help="Run stateless tests.")
+ parser.add_option('--stf', '--stateful', action="store_true", default = False,
+ dest="stateful",
+ help="Run stateful tests.")
+ parser.add_option('--pkg', action="store",
+ dest="pkg",
+ help="Run with given TRex package. Make sure the path available at server machine.")
+ parser.add_option('--no-ssh', '--no_ssh', action="store_true", default = False,
+ dest="no_ssh",
+ help="Flag to disable any ssh to server machine.")
def configure(self, options, conf):
- self.functional = options.functional
self.collect_only = options.collect_only
- if self.functional or self.collect_only:
+ if self.collect_only:
+ return
+ self.functional = options.functional
+ self.stateless = options.stateless
+ self.stateful = options.stateful
+ self.pkg = options.pkg
+ self.no_ssh = options.no_ssh
+ self.verbose_mode = options.verbose_mode
+ if self.functional and (not self.pkg or self.no_ssh):
return
if CTRexScenario.setup_dir and options.config_path:
raise Exception('Please either define --cfg or use env. variable SETUP_DIR, not both.')
if not options.config_path and CTRexScenario.setup_dir:
options.config_path = CTRexScenario.setup_dir
- if options.config_path:
- self.configuration = misc_methods.load_complete_config_file(os.path.join(options.config_path, 'config.yaml'))
- self.benchmark = misc_methods.load_benchmark_config_file(os.path.join(options.config_path, 'benchmark.yaml'))
- self.enabled = True
- else:
+ if not options.config_path:
raise Exception('Please specify path to config.yaml using --cfg parameter or env. variable SETUP_DIR')
+ self.configuration = misc_methods.load_complete_config_file(os.path.join(options.config_path, 'config.yaml'))
+ self.configuration.trex['trex_name'] = address_to_ip(self.configuration.trex['trex_name']) # translate hostname to ip
+ self.benchmark = misc_methods.load_benchmark_config_file(os.path.join(options.config_path, 'benchmark.yaml'))
+ self.enabled = True
self.modes = self.configuration.trex.get('modes', [])
self.kill_running = options.kill_running
self.load_image = options.load_image
- self.verbose_mode = options.verbose_mode
self.clean_config = False if options.skip_clean_config else True
self.server_logs = options.server_logs
if options.log_path:
self.loggerPath = options.log_path
-
- def begin (self):
- if self.functional or self.collect_only:
- return
# initialize CTRexScenario global testing class, to be used by all tests
CTRexScenario.configuration = self.configuration
CTRexScenario.benchmark = self.benchmark
CTRexScenario.modes = set(self.modes)
CTRexScenario.server_logs = self.server_logs
- # launch TRex daemon on relevant setup
- start_trex_remote_server(self.configuration.trex, self.kill_running)
- CTRexScenario.trex = CTRexClient(trex_host = self.configuration.trex['trex_name'], verbose = self.verbose_mode)
+ def begin (self):
+ if self.pkg and not CTRexScenario.is_copied and not self.no_ssh:
+ new_path = '/tmp/trex-scripts'
+            rsync_template = 'rm -rf %s; mkdir -p %s; rsync -Lc %s /tmp; tar -mxzf /tmp/%s -C %s; mv %s/v*.*/* %s'
+            rsync_command = rsync_template % (new_path, new_path, self.pkg, os.path.basename(self.pkg), new_path, new_path, new_path)
+ return_code, stdout, stderr = trex_remote_command(self.configuration.trex, rsync_command, from_scripts = False)
+ if return_code:
+                print 'Failed copying TRex package to server.'
+ sys.exit(-1)
+ CTRexScenario.scripts_path = new_path
+ CTRexScenario.is_copied = True
+ if self.functional or self.collect_only:
+ return
+ # launch TRex daemon on relevant setup
+ if not self.no_ssh:
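+            # either clear a leftover TRex instance (--kill-running) or refuse to start on a busy server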
+ if self.kill_running:
+ if self.stateful:
+ trex_remote_command(self.configuration.trex, STATEFUL_STOP_COMMAND)
+ kill_trex_process(self.configuration.trex)
+ time.sleep(1)
+ elif check_trex_running(self.configuration.trex):
+ print 'TRex is already running'
+ sys.exit(-1)
+
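+        # stateful runs use the daemon-based CTRexClient; stateless runs start 't-rex-64 -i' and attach an STLClient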
+ if self.stateful:
+ if not self.no_ssh:
+ trex_remote_command(self.configuration.trex, STATEFUL_RUN_COMMAND)
+ CTRexScenario.trex = CTRexClient(trex_host = self.configuration.trex['trex_name'], verbose = self.verbose_mode)
+ elif self.stateless:
+ if not self.no_ssh:
+ trex_remote_command(self.configuration.trex, './t-rex-64 -i', background = True)
+ CTRexScenario.stl_trex = STLClient(username = 'TRexRegression',
+ server = self.configuration.trex['trex_name'],
+ verbose_level = self.verbose_mode)
if 'loopback' not in self.modes:
- CTRexScenario.router_cfg = dict( config_dict = self.configuration.router,
- forceImageReload = self.load_image,
- silent_mode = not self.verbose_mode,
- forceCleanConfig = self.clean_config,
- tftp_config_dict = self.configuration.tftp )
+ CTRexScenario.router_cfg = dict(config_dict = self.configuration.router,
+ forceImageReload = self.load_image,
+ silent_mode = not self.verbose_mode,
+ forceCleanConfig = self.clean_config,
+ tftp_config_dict = self.configuration.tftp)
try:
CustomLogger.setup_custom_logger('TRexLogger', self.loggerPath)
except AttributeError:
CustomLogger.setup_custom_logger('TRexLogger')
-
+
def finalize(self, result):
if self.functional or self.collect_only:
return
- CTRexScenario.is_init = False
- stop_trex_remote_server(self.configuration.trex)
+ CTRexScenario.is_init = False
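+        # drop the Python client handles; the server processes themselves are stopped over ssh below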
+ if self.stateful:
+ CTRexScenario.trex = None
+ if self.stateless:
+            CTRexScenario.stl_trex = None
+ if not self.no_ssh:
+ if self.stateful:
+ trex_remote_command(self.configuration.trex, STATEFUL_STOP_COMMAND)
+ kill_trex_process(self.configuration.trex)
def save_setup_info():
@@ -195,102 +257,111 @@ def set_report_dir (report_dir):
if not os.path.exists(report_dir):
os.mkdir(report_dir)
-
if __name__ == "__main__":
-
+
# setting defaults. By default we run all the test suite
specific_tests = False
- disableLogCapture = False
- long_test = False
- xml_name = 'unit_test.xml'
CTRexScenario.report_dir = 'reports'
- CTRexScenario.scripts_path = get_trex_path()
- COMMON_RUN_COMMAND = 'rm /var/log/trex/trex_daemon_server.log; ./trex_daemon_server start; sleep 2; ./trex_daemon_server show'
- COMMON_STOP_COMMAND = './trex_daemon_server stop; sleep 1; ./trex_daemon_server stop; sleep 1'
- if '--copy' in sys.argv:
- new_path = '/tmp/trex_scripts'
- DAEMON_STOP_COMMAND = 'cd %s; %s' % (new_path, COMMON_STOP_COMMAND)
- DAEMON_START_COMMAND = 'mkdir -p %s; cd %s; %s; rsync -L -az %s/ %s; %s' % (new_path, new_path, COMMON_STOP_COMMAND,
- CTRexScenario.scripts_path, new_path, COMMON_RUN_COMMAND)
- else:
- DAEMON_STOP_COMMAND = 'cd %s; %s' % (CTRexScenario.scripts_path, COMMON_STOP_COMMAND)
- DAEMON_START_COMMAND = DAEMON_STOP_COMMAND + COMMON_RUN_COMMAND
-
+ need_to_copy = False
setup_dir = os.getenv('SETUP_DIR', '').rstrip('/')
CTRexScenario.setup_dir = check_setup_path(setup_dir)
+ CTRexScenario.scripts_path = get_trex_path()
if not CTRexScenario.setup_dir:
CTRexScenario.setup_dir = check_setup_path(os.path.join('setups', setup_dir))
-
- if CTRexScenario.setup_dir:
- CTRexScenario.setup_name = os.path.basename(CTRexScenario.setup_dir)
- xml_name = 'report_%s.xml' % CTRexScenario.setup_name
+
nose_argv = ['', '-s', '-v', '--exe', '--rednose', '--detailed-errors']
if '--collect-only' in sys.argv: # this is a user trying simply to view the available tests. no need xunit.
- CTRexScenario.is_test_list = True
+ CTRexScenario.is_test_list = True
+ xml_arg = ''
else:
- nose_argv += ['--with-xunit', '--xunit-file=%s/%s' % (CTRexScenario.report_dir, xml_name)]
+ xml_name = 'unit_test.xml'
+ if CTRexScenario.setup_dir:
+ CTRexScenario.setup_name = os.path.basename(CTRexScenario.setup_dir)
+ xml_name = 'report_%s.xml' % CTRexScenario.setup_name
+        xml_arg = '--xunit-file=%s/%s' % (CTRexScenario.report_dir, xml_name)
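+        # the per-suite runs below insert a suffix (_functional/_stateful/_stateless) into this file name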
set_report_dir(CTRexScenario.report_dir)
+ sys_args = sys.argv[:]
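+    # selected test paths are collected into test_types and removed from sys_args, so nose does not get them twice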
for i, arg in enumerate(sys.argv):
- if 'unit_tests/' in arg:
- specific_tests = True
- sys.argv[i] = arg[arg.find('unit_tests/'):]
if 'log-path' in arg:
- disableLogCapture = True
-
- nose_argv += sys.argv
-
- # Run all of the unit tests or just the selected ones
- if not specific_tests:
- if '--functional' in sys.argv:
- nose_argv += ['unit_tests/functional_tests']
+ nose_argv += ['--nologcapture']
else:
- nose_argv += ['unit_tests']
- if disableLogCapture:
- nose_argv += ['--nologcapture']
+ for tests_type in CTRexScenario.test_types.keys():
+ if tests_type in arg:
+ specific_tests = True
+ CTRexScenario.test_types[tests_type].append(arg[arg.find(tests_type):])
+ sys_args.remove(arg)
+ if not specific_tests:
+ for key in ('--func', '--functional'):
+ if key in sys_args:
+ CTRexScenario.test_types['functional_tests'].append('functional_tests')
+ sys_args.remove(key)
+ for key in ('--stf', '--stateful'):
+ if key in sys_args:
+ CTRexScenario.test_types['stateful_tests'].append('stateful_tests')
+ sys_args.remove(key)
+ for key in ('--stl', '--stateless'):
+ if key in sys_args:
+ CTRexScenario.test_types['stateless_tests'].append('stateless_tests')
+ sys_args.remove(key)
+ # Run all of the tests or just the selected ones
+    if not any(CTRexScenario.test_types.values()):
+ for key in CTRexScenario.test_types.keys():
+ CTRexScenario.test_types[key].append(key)
+
+ nose_argv += sys_args
+
+ config_plugin = CTRexTestConfiguringPlugin()
+ red_nose = RedNose()
+ result = True
try:
- config_plugin = CTRexTestConfiguringPlugin()
- red_nose = RedNose()
- try:
- result = nose.run(argv = nose_argv, addplugins = [red_nose, config_plugin])
- except socket.error: # handle consecutive tests exception, try once again
- print "TRex connectivity error identified. Possibly due to consecutive nightly runs.\nRetrying..."
- result = nose.run(argv = nose_argv, addplugins = [red_nose, config_plugin])
- finally:
- save_setup_info()
-
- if (result == True and not CTRexScenario.is_test_list):
- print termstyle.green("""
- ..::''''::..
- .;'' ``;.
- :: :: :: ::
- :: :: :: ::
- :: :: :: ::
- :: .:' :: :: `:. ::
- :: : : ::
- :: `:. .:' ::
- `;..``::::''..;'
- ``::,,,,::''
-
- ___ ___ __________
- / _ \/ _ | / __/ __/ /
- / ___/ __ |_\ \_\ \/_/
- /_/ /_/ |_/___/___(_)
-
- """)
- sys.exit(0)
- else:
- sys.exit(-1)
-
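+        # each selected suite gets its own nose run and its own xunit report file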
+ if len(CTRexScenario.test_types['functional_tests']):
+ additional_args = ['--func'] + CTRexScenario.test_types['functional_tests']
+ if xml_arg:
+ additional_args += ['--with-xunit', xml_arg.replace('.xml', '_functional.xml')]
+ result = nose.run(argv = nose_argv + additional_args, addplugins = [red_nose, config_plugin])
+ if len(CTRexScenario.test_types['stateful_tests']):
+ additional_args = ['--stf'] + CTRexScenario.test_types['stateful_tests']
+ if xml_arg:
+ additional_args += ['--with-xunit', xml_arg.replace('.xml', '_stateful.xml')]
+ result = result and nose.run(argv = nose_argv + additional_args, addplugins = [red_nose, config_plugin])
+ if len(CTRexScenario.test_types['stateless_tests']):
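+            # prepend the basic connectivity test so the rest of the stateless suite runs against a verified server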
+ additional_args = ['--stl', 'stateless_tests/stl_general_test.py:STLBasic_Test.test_connectivity'] + CTRexScenario.test_types['stateless_tests']
+ if xml_arg:
+ additional_args += ['--with-xunit', xml_arg.replace('.xml', '_stateless.xml')]
+ result = result and nose.run(argv = nose_argv + additional_args, addplugins = [red_nose, config_plugin])
finally:
- pass
-
-
-
+ save_setup_info()
+
+    if result and not CTRexScenario.is_test_list:
+ print termstyle.green("""
+ ..::''''::..
+ .;'' ``;.
+ :: :: :: ::
+ :: :: :: ::
+ :: :: :: ::
+ :: .:' :: :: `:. ::
+ :: : : ::
+ :: `:. .:' ::
+ `;..``::::''..;'
+ ``::,,,,::''
+
+ ___ ___ __________
+ / _ \/ _ | / __/ __/ /
+ / ___/ __ |_\ \_\ \/_/
+ /_/ /_/ |_/___/___(_)
+
+ """)
+ sys.exit(0)
+ sys.exit(-1)
+
-
diff --git a/scripts/automation/regression/unit_tests/__init__.py b/scripts/automation/regression/unit_tests/__init__.py
deleted file mode 100755
index 8b137891..00000000
--- a/scripts/automation/regression/unit_tests/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-