Diffstat (limited to 'scripts/automation/regression/stateful_tests')
-rwxr-xr-x  scripts/automation/regression/stateful_tests/tests_exceptions.py      48
-rwxr-xr-x  scripts/automation/regression/stateful_tests/trex_client_pkg_test.py   2
-rwxr-xr-x  scripts/automation/regression/stateful_tests/trex_general_test.py     47
-rwxr-xr-x  scripts/automation/regression/stateful_tests/trex_imix_test.py        41
-rwxr-xr-x  scripts/automation/regression/stateful_tests/trex_ipv6_test.py        17
-rwxr-xr-x  scripts/automation/regression/stateful_tests/trex_nat_test.py         31
-rwxr-xr-x  scripts/automation/regression/stateful_tests/trex_nbar_test.py        47
-rwxr-xr-x  scripts/automation/regression/stateful_tests/trex_rx_test.py          56
8 files changed, 145 insertions, 144 deletions
diff --git a/scripts/automation/regression/stateful_tests/tests_exceptions.py b/scripts/automation/regression/stateful_tests/tests_exceptions.py
index 604efcc8..360f44a5 100755
--- a/scripts/automation/regression/stateful_tests/tests_exceptions.py
+++ b/scripts/automation/regression/stateful_tests/tests_exceptions.py
@@ -1,37 +1,37 @@
#!/router/bin/python
class TRexInUseError(Exception):
- def __init__(self, value):
- self.value = value
- def __str__(self):
- return repr(self.value)
+ def __init__(self, value):
+ self.value = value
+ def __str__(self):
+ return repr(self.value)
class TRexRunFailedError(Exception):
- def __init__(self, value):
- self.value = value
- def __str__(self):
- return repr(self.value)
+ def __init__(self, value):
+ self.value = value
+ def __str__(self):
+ return repr(self.value)
class TRexIncompleteRunError(Exception):
- def __init__(self, value):
- self.value = value
- def __str__(self):
- return repr(self.value)
+ def __init__(self, value):
+ self.value = value
+ def __str__(self):
+ return repr(self.value)
class TRexLowCpuUtilError(Exception):
- def __init__(self, value):
- self.value = value
- def __str__(self):
- return repr(self.value)
+ def __init__(self, value):
+ self.value = value
+ def __str__(self):
+ return repr(self.value)
class AbnormalResultError(Exception):
- def __init__(self, value):
- self.value = value
- def __str__(self):
- return repr(self.value)
+ def __init__(self, value):
+ self.value = value
+ def __str__(self):
+ return repr(self.value)
class ClassificationMissmatchError(Exception):
- def __init__(self, value):
- self.value = value
- def __str__(self):
- return repr(self.value)
+ def __init__(self, value):
+ self.value = value
+ def __str__(self):
+ return repr(self.value)
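The six exception classes in this file are identical except for their names; the hunk above only converts their bodies from tab to four-space indentation. A minimal sketch, not part of this patch, of how the duplication could be collapsed into one shared base class:

    # Hypothetical refactor (not in the patch): shared __init__/__str__ logic.
    class TRexError(Exception):
        def __init__(self, value):
            self.value = value
        def __str__(self):
            return repr(self.value)

    class TRexInUseError(TRexError): pass
    class TRexRunFailedError(TRexError): pass
    class TRexIncompleteRunError(TRexError): pass
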
diff --git a/scripts/automation/regression/stateful_tests/trex_client_pkg_test.py b/scripts/automation/regression/stateful_tests/trex_client_pkg_test.py
index e2040e73..4ad7fba3 100755
--- a/scripts/automation/regression/stateful_tests/trex_client_pkg_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_client_pkg_test.py
@@ -1,5 +1,5 @@
#!/router/bin/python
-from trex_general_test import CTRexGeneral_Test, CTRexScenario
+from .trex_general_test import CTRexGeneral_Test, CTRexScenario
from misc_methods import run_command
from nose.plugins.attrib import attr
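The one-line change above (repeated in every file below) switches from Python 2's implicit relative import to the explicit dotted form, the only relative syntax Python 3 accepts. A minimal sketch of a form that behaves the same under both interpreters, assuming the module sits in the same package as trex_general_test:

    # Explicit relative import: required on Python 3; on Python 2 the
    # __future__ line disables the old implicit-relative lookup so both
    # interpreters resolve the module the same way.
    from __future__ import absolute_import
    from .trex_general_test import CTRexGeneral_Test, CTRexScenario
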
diff --git a/scripts/automation/regression/stateful_tests/trex_general_test.py b/scripts/automation/regression/stateful_tests/trex_general_test.py
index 5a13e5ff..2e5adc40 100755
--- a/scripts/automation/regression/stateful_tests/trex_general_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_general_test.py
@@ -34,7 +34,7 @@ import os
from CPlatform import *
import termstyle
import threading
-from tests_exceptions import *
+from .tests_exceptions import *
from platform_cmd_link import *
import unittest
from glob import glob
@@ -77,11 +77,11 @@ class CTRexGeneral_Test(unittest.TestCase):
CTRexScenario.router.load_platform_data_from_file(device_cfg)
CTRexScenario.router.launch_connection(device_cfg)
running_image = CTRexScenario.router.get_running_image_details()['image']
- print 'Current router image: %s' % running_image
+ print('Current router image: %s' % running_image)
if CTRexScenario.router_cfg['forceImageReload']:
needed_image = device_cfg.get_image_name()
if not CTRexScenario.router.is_image_matches(needed_image):
- print 'Setting router image: %s' % needed_image
+ print('Setting router image: %s' % needed_image)
CTRexScenario.router.config_tftp_server(device_cfg)
CTRexScenario.router.load_platform_image(needed_image)
CTRexScenario.router.set_boot_image(needed_image)
@@ -91,14 +91,14 @@ class CTRexGeneral_Test(unittest.TestCase):
if not CTRexScenario.router.is_image_matches(needed_image):
self.fail('Unable to set router image: %s, current image is: %s' % (needed_image, running_image))
else:
- print 'Matches needed image: %s' % needed_image
+ print('Matches needed image: %s' % needed_image)
CTRexScenario.router_image = running_image
if self.modes:
- print termstyle.green('\t!!!\tRunning with modes: %s, not suitable tests will be skipped.\t!!!' % list(self.modes))
+ print(termstyle.green('\t!!!\tRunning with modes: %s, not suitable tests will be skipped.\t!!!' % list(self.modes)))
CTRexScenario.is_init = True
- print termstyle.green("Done instantiating T-Rex scenario!\n")
+ print(termstyle.green("Done instantiating T-Rex scenario!\n"))
# raise RuntimeError('CTRexScenario class is not initialized!')
self.router = CTRexScenario.router
@@ -138,7 +138,10 @@ class CTRexGeneral_Test(unittest.TestCase):
def check_CPU_benchmark (self, trex_res, err = 10, minimal_cpu = 30, maximal_cpu = 85):
#cpu_util = float(trex_res.get_last_value("trex-global.data.m_cpu_util"))
cpu_util = sum([float(x) for x in trex_res.get_value_list("trex-global.data.m_cpu_util")[-4:-1]]) / 3 # mean of 3 values before last
-
+
+ if '1G' in self.modes:
+ minimal_cpu /= 10
+
if not self.is_virt_nics:
if cpu_util > maximal_cpu:
self.fail("CPU is too high (%s%%), probably queue full." % cpu_util )
@@ -149,11 +152,11 @@ class CTRexGeneral_Test(unittest.TestCase):
trex_tx_bps = trex_res.get_last_value("trex-global.data.m_total_tx_bytes")
test_norm_cpu = 100.0*(trex_tx_bps/(cores*cpu_util))/1e6
- print "TRex CPU utilization: %g%%, norm_cpu is : %d Mb/core" % (round(cpu_util), int(test_norm_cpu))
+ print("TRex CPU utilization: %g%%, norm_cpu is : %d Mb/core" % (round(cpu_util), int(test_norm_cpu)))
#expected_norm_cpu = self.get_benchmark_param('cpu_to_core_ratio')
- #calc_error_precent = abs(100.0*(test_norm_cpu/expected_norm_cpu)-100.0)
+ #calc_error_precent = abs(100.0*(test_norm_cpu/expected_norm_cpu)-100.0)
# if calc_error_precent > err:
# msg ='Normalized bandwidth to CPU utilization ratio is %2.0f Mb/core expected %2.0f Mb/core more than %2.0f %% - ERROR' % (test_norm_cpu, expected_norm_cpu, err)
@@ -250,8 +253,8 @@ class CTRexGeneral_Test(unittest.TestCase):
# check for trex-router packet consistency
# TODO: check if it's ok
- print 'router drop stats: %s' % pkt_drop_stats
- print 'TRex drop stats: %s' % trex_drops
+ print('router drop stats: %s' % pkt_drop_stats)
+ print('TRex drop stats: %s' % trex_drops)
#self.assertEqual(pkt_drop_stats, trex_drops, "TRex's and router's drop stats don't match.")
except KeyError as e:
@@ -279,12 +282,12 @@ class CTRexGeneral_Test(unittest.TestCase):
# We encountered error, don't fail the test immediately
def fail(self, reason = 'Unknown error'):
- print 'Error: %s' % reason
+ print('Error: %s' % reason)
self.fail_reasons.append(reason)
# skip running of the test, counts as 'passed' but prints 'skipped'
def skip(self, message = 'Unknown reason'):
- print 'Skip: %s' % message
+ print('Skip: %s' % message)
self.skipping = True
raise SkipTest(message)
@@ -297,10 +300,10 @@ class CTRexGeneral_Test(unittest.TestCase):
if test_setup_modes_conflict:
self.skip("The test can't run with following modes of given setup: %s " % test_setup_modes_conflict)
if self.trex and not self.trex.is_idle():
- print 'Warning: TRex is not idle at setUp, trying to stop it.'
+ print('Warning: TRex is not idle at setUp, trying to stop it.')
self.trex.force_kill(confirm = False)
if not self.is_loopback:
- print ''
+ print('')
if self.trex: # stateful
self.router.load_clean_config()
self.router.clear_counters()
@@ -316,24 +319,24 @@ class CTRexGeneral_Test(unittest.TestCase):
# assert CTRexScenario.is_init == True
def tearDown(self):
if self.trex and not self.trex.is_idle():
- print 'Warning: TRex is not idle at tearDown, trying to stop it.'
+ print('Warning: TRex is not idle at tearDown, trying to stop it.')
self.trex.force_kill(confirm = False)
if not self.skipping:
# print server logs of test run
if self.trex and CTRexScenario.server_logs:
try:
- print termstyle.green('\n>>>>>>>>>>>>>>> Daemon log <<<<<<<<<<<<<<<')
+ print(termstyle.green('\n>>>>>>>>>>>>>>> Daemon log <<<<<<<<<<<<<<<'))
daemon_log = self.trex.get_trex_daemon_log()
log_size = len(daemon_log)
- print ''.join(daemon_log[CTRexScenario.daemon_log_lines:])
+ print(''.join(daemon_log[CTRexScenario.daemon_log_lines:]))
CTRexScenario.daemon_log_lines = log_size
except Exception as e:
- print "Can't get TRex daemon log:", e
+ print("Can't get TRex daemon log:", e)
try:
- print termstyle.green('>>>>>>>>>>>>>>>> Trex log <<<<<<<<<<<<<<<<')
- print ''.join(self.trex.get_trex_log())
+ print(termstyle.green('>>>>>>>>>>>>>>>> Trex log <<<<<<<<<<<<<<<<'))
+ print(''.join(self.trex.get_trex_log()))
except Exception as e:
- print "Can't get TRex log:", e
+ print("Can't get TRex log:", e)
if len(self.fail_reasons):
raise Exception('The test is failed, reasons:\n%s' % '\n'.join(self.fail_reasons))
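Most hunks in this file wrap print statements into calls. One subtlety: multi-argument calls such as print("Can't get TRex daemon log:", e) also parse under Python 2, but there they print a tuple unless the print function is imported explicitly. A minimal sketch of the fully compatible form (the hunks shown here do not add the __future__ import):

    from __future__ import print_function   # no-op on Python 3

    e = RuntimeError('connection refused')
    # Python 2 without the import: ("Can't get TRex daemon log:", RuntimeError(...))
    # With it, and on Python 3:    Can't get TRex daemon log: connection refused
    print("Can't get TRex daemon log:", e)
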
diff --git a/scripts/automation/regression/stateful_tests/trex_imix_test.py b/scripts/automation/regression/stateful_tests/trex_imix_test.py
index c93480c3..95a5471d 100755
--- a/scripts/automation/regression/stateful_tests/trex_imix_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_imix_test.py
@@ -1,7 +1,7 @@
#!/router/bin/python
-from trex_general_test import CTRexGeneral_Test
+from .trex_general_test import CTRexGeneral_Test
from CPlatform import CStaticRouteConfig
-from tests_exceptions import *
+from .tests_exceptions import *
#import sys
import time
from nose.tools import nottest
@@ -11,7 +11,6 @@ class CTRexIMIX_Test(CTRexGeneral_Test):
def __init__(self, *args, **kwargs):
# super(CTRexIMIX_Test, self).__init__()
CTRexGeneral_Test.__init__(self, *args, **kwargs)
- pass
def setUp(self):
super(CTRexIMIX_Test, self).setUp() # launch super test class setUp process
@@ -43,8 +42,8 @@ class CTRexIMIX_Test(CTRexGeneral_Test):
# trex_res is a CTRexResult instance- and contains the summary of the test results
# you may see all the results keys by simply calling here for 'print trex_res.result'
- print ("\nLATEST RESULT OBJECT:")
- print trex_res
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
self.check_general_scenario_results(trex_res)
self.check_CPU_benchmark(trex_res)
@@ -62,7 +61,7 @@ class CTRexIMIX_Test(CTRexGeneral_Test):
trex_development = True)
trex_res = self.trex.sample_to_run_finish()
- print trex_res
+ print(trex_res)
def test_routing_imix (self):
# test initializtion
@@ -87,8 +86,8 @@ class CTRexIMIX_Test(CTRexGeneral_Test):
# trex_res is a CTRexResult instance- and contains the summary of the test results
# you may see all the results keys by simply calling here for 'print trex_res.result'
- print ("\nLATEST RESULT OBJECT:")
- print trex_res
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
self.check_general_scenario_results(trex_res)
@@ -123,10 +122,10 @@ class CTRexIMIX_Test(CTRexGeneral_Test):
# trex_res is a CTRexResult instance- and contains the summary of the test results
# you may see all the results keys by simply calling here for 'print trex_res.result'
- print ("\nLATEST RESULT OBJECT:")
- print trex_res
- print ("\nLATEST DUMP:")
- print trex_res.get_latest_dump()
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+ print("\nLATEST DUMP:")
+ print(trex_res.get_latest_dump())
self.check_general_scenario_results(trex_res)
self.check_CPU_benchmark(trex_res)
@@ -157,15 +156,15 @@ class CTRexIMIX_Test(CTRexGeneral_Test):
# trex_res is a CTRexResults instance- and contains the summary of the test results
# you may see all the results keys by simply calling here for 'print trex_res.result'
- print ("\nLATEST RESULT OBJECT:")
- print trex_res
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
self.check_general_scenario_results(trex_res)
self.check_CPU_benchmark(trex_res)
- def test_jumbo(self, duration = 100):
+ def test_jumbo(self, duration = 100, **kwargs):
if not self.is_loopback:
self.router.configure_basic_interfaces(mtu = 9216)
self.router.config_pbr(mode = "config")
@@ -180,14 +179,15 @@ class CTRexIMIX_Test(CTRexGeneral_Test):
nc = True,
d = duration,
f = 'cap2/imix_9k.yaml',
- l = 1000)
+ l = 1000,
+ **kwargs)
trex_res = self.trex.sample_to_run_finish()
# trex_res is a CTRexResults instance- and contains the summary of the test results
# you may see all the results keys by simply calling here for 'print trex_res.result'
- print ("\nLATEST RESULT OBJECT:")
- print trex_res
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
self.check_general_scenario_results(trex_res)
self.check_CPU_benchmark(trex_res, minimal_cpu = 0, maximal_cpu = 10)
@@ -197,9 +197,12 @@ class CTRexIMIX_Test(CTRexGeneral_Test):
def test_warm_up(self):
try:
self._testMethodName = 'test_jumbo'
- self.test_jumbo(duration = 30)
+ self.test_jumbo(duration = 5, trex_development = True)
except Exception as e:
print('Ignoring this error: %s' % e)
+ if self.fail_reasons:
+ print('Ignoring this error(s):\n%s' % '\n'.join(self.fail_reasons))
+ self.fail_reasons = []
def tearDown(self):
CTRexGeneral_Test.tearDown(self)
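test_warm_up above now runs test_jumbo for only 5 seconds with trex_development = True (threaded through the new **kwargs parameter) and then discards any recorded failures. Clearing fail_reasons matters because this class's fail() records instead of raising, while the base tearDown() raises if anything remains. A condensed sketch of that interaction, assuming nothing beyond what trex_general_test.py shows:

    class Base(object):
        def __init__(self):
            self.fail_reasons = []
        def fail(self, reason):                  # deferred failure, not raised
            self.fail_reasons.append(reason)
        def tearDown(self):
            if self.fail_reasons:
                raise Exception('\n'.join(self.fail_reasons))

    t = Base()
    t.fail('warm-up hiccup')
    t.fail_reasons = []      # what test_warm_up now does for warm-up errors
    t.tearDown()             # passes instead of failing the warm-up
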
diff --git a/scripts/automation/regression/stateful_tests/trex_ipv6_test.py b/scripts/automation/regression/stateful_tests/trex_ipv6_test.py
index bffb4754..6aba9ae0 100755
--- a/scripts/automation/regression/stateful_tests/trex_ipv6_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_ipv6_test.py
@@ -1,14 +1,13 @@
#!/router/bin/python
-from trex_general_test import CTRexGeneral_Test
-from tests_exceptions import *
+from .trex_general_test import CTRexGeneral_Test
+from .tests_exceptions import *
import time
from nose.tools import assert_equal
class CTRexIPv6_Test(CTRexGeneral_Test):
"""This class defines the IPv6 testcase of the T-Rex traffic generator"""
def __init__(self, *args, **kwargs):
- super(CTRexIPv6_Test, self).__init__(*args, **kwargs)
- pass
+ super(CTRexIPv6_Test, self).__init__(*args, **kwargs)
def setUp(self):
super(CTRexIPv6_Test, self).setUp() # launch super test class setUp process
@@ -43,8 +42,8 @@ class CTRexIPv6_Test(CTRexGeneral_Test):
# trex_res is a CTRexResult instance- and contains the summary of the test results
# you may see all the results keys by simply calling here for 'print trex_res.result'
- print ("\nLATEST RESULT OBJECT:")
- print trex_res
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
self.check_general_scenario_results(trex_res)
@@ -80,8 +79,8 @@ class CTRexIPv6_Test(CTRexGeneral_Test):
# trex_res is a CTRexResult instance- and contains the summary of the test results
# you may see all the results keys by simply calling here for 'print trex_res.result'
- print ("\nLATEST RESULT OBJECT:")
- print trex_res
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
trex_tx_pckt = float(trex_res.get_last_value("trex-global.data.m_total_tx_pkts"))
trex_drops = int(trex_res.get_total_drops())
@@ -95,7 +94,7 @@ class CTRexIPv6_Test(CTRexGeneral_Test):
def tearDown(self):
CTRexGeneral_Test.tearDown(self)
- # remove config here
+ # remove config here
pass
if __name__ == "__main__":
diff --git a/scripts/automation/regression/stateful_tests/trex_nat_test.py b/scripts/automation/regression/stateful_tests/trex_nat_test.py
index e7fe5ca5..512ad4e4 100755
--- a/scripts/automation/regression/stateful_tests/trex_nat_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_nat_test.py
@@ -1,6 +1,6 @@
#!/router/bin/python
-from trex_general_test import CTRexGeneral_Test
-from tests_exceptions import *
+from .trex_general_test import CTRexGeneral_Test
+from .tests_exceptions import *
import time
from CPlatform import CStaticRouteConfig, CNatConfig
from nose.tools import assert_equal
@@ -9,13 +9,11 @@ from nose.tools import assert_equal
class CTRexNoNat_Test(CTRexGeneral_Test):#(unittest.TestCase):
"""This class defines the NAT testcase of the T-Rex traffic generator"""
def __init__(self, *args, **kwargs):
- super(CTRexNoNat_Test, self).__init__(*args, **kwargs)
+ super(CTRexNoNat_Test, self).__init__(*args, **kwargs)
self.unsupported_modes = ['loopback'] # NAT requires device
- pass
def setUp(self):
super(CTRexNoNat_Test, self).setUp() # launch super test class setUp process
- pass
def check_nat_stats (self, nat_stats):
pass
@@ -46,10 +44,10 @@ class CTRexNoNat_Test(CTRexGeneral_Test):#(unittest.TestCase):
trex_res = self.trex.sample_to_run_finish()
- print ("\nLATEST RESULT OBJECT:")
- print trex_res
- print ("\nLATEST DUMP:")
- print trex_res.get_latest_dump()
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+ print("\nLATEST DUMP:")
+ print(trex_res.get_latest_dump())
expected_nat_opened = self.get_benchmark_param('nat_opened')
@@ -77,9 +75,8 @@ class CTRexNoNat_Test(CTRexGeneral_Test):#(unittest.TestCase):
class CTRexNat_Test(CTRexGeneral_Test):#(unittest.TestCase):
"""This class defines the NAT testcase of the T-Rex traffic generator"""
def __init__(self, *args, **kwargs):
- super(CTRexNat_Test, self).__init__(*args, **kwargs)
+ super(CTRexNat_Test, self).__init__(*args, **kwargs)
self.unsupported_modes = ['loopback'] # NAT requires device
- pass
def setUp(self):
super(CTRexNat_Test, self).setUp() # launch super test class setUp process
@@ -124,10 +121,10 @@ class CTRexNat_Test(CTRexGeneral_Test):#(unittest.TestCase):
trex_res = self.trex.sample_to_run_finish()
- print ("\nLATEST RESULT OBJECT:")
- print trex_res
- print ("\nLATEST DUMP:")
- print trex_res.get_latest_dump()
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+ print("\nLATEST DUMP:")
+ print(trex_res.get_latest_dump())
trex_nat_stats = trex_res.get_last_value("trex-global.data", ".*nat.*") # extract all nat data
if self.get_benchmark_param('allow_timeout_dev'):
@@ -153,7 +150,7 @@ class CTRexNat_Test(CTRexGeneral_Test):#(unittest.TestCase):
# raiseraise AbnormalResultError('Normalized bandwidth to CPU utilization ratio exceeds 3%')
nat_stats = self.router.get_nat_stats()
- print nat_stats
+ print(nat_stats)
self.assert_gt(nat_stats['total_active_trans'], 5000, 'total active translations is not high enough')
self.assert_gt(nat_stats['dynamic_active_trans'], 5000, 'total dynamic active translations is not high enough')
@@ -161,8 +158,8 @@ class CTRexNat_Test(CTRexGeneral_Test):#(unittest.TestCase):
self.assert_gt(nat_stats['num_of_hits'], 50000, 'total nat hits is not high enough')
def tearDown(self):
- CTRexGeneral_Test.tearDown(self)
self.router.clear_nat_translations()
+ CTRexGeneral_Test.tearDown(self)
if __name__ == "__main__":
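The tearDown reorder above is not cosmetic: the base CTRexGeneral_Test.tearDown() raises when failures were recorded, so calling it first would skip clear_nat_translations() on a failed run. Running the router cleanup before the base call guarantees the NAT table is cleared either way. One possible further hardening, not part of the patch, assuming the same two calls:

    def tearDown(self):
        try:
            self.router.clear_nat_translations()   # cleanup first...
        finally:
            CTRexGeneral_Test.tearDown(self)       # ...and the base tearDown runs
                                                   # even if cleanup itself raises
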
diff --git a/scripts/automation/regression/stateful_tests/trex_nbar_test.py b/scripts/automation/regression/stateful_tests/trex_nbar_test.py
index 1453c02b..69c3f605 100755
--- a/scripts/automation/regression/stateful_tests/trex_nbar_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_nbar_test.py
@@ -1,6 +1,6 @@
#!/router/bin/python
-from trex_general_test import CTRexGeneral_Test
-from tests_exceptions import *
+from .trex_general_test import CTRexGeneral_Test
+from .tests_exceptions import *
from interfaces_e import IFType
from nose.tools import nottest
from misc_methods import print_r
@@ -8,9 +8,8 @@ from misc_methods import print_r
class CTRexNbar_Test(CTRexGeneral_Test):
"""This class defines the NBAR testcase of the T-Rex traffic generator"""
def __init__(self, *args, **kwargs):
- super(CTRexNbar_Test, self).__init__(*args, **kwargs)
+ super(CTRexNbar_Test, self).__init__(*args, **kwargs)
self.unsupported_modes = ['loopback'] # obviously no NBar in loopback
- pass
def setUp(self):
super(CTRexNbar_Test, self).setUp() # launch super test class setUp process
@@ -21,8 +20,8 @@ class CTRexNbar_Test(CTRexGeneral_Test):
def match_classification (self):
nbar_benchmark = self.get_benchmark_param("nbar_classification")
test_classification = self.router.get_nbar_stats()
- print "TEST CLASSIFICATION:"
- print test_classification
+ print("TEST CLASSIFICATION:")
+ print(test_classification)
missmatchFlag = False
missmatchMsg = "NBAR classification contians a missmatch on the following protocols:"
fmt = '\n\t{0:15} | Expected: {1:>3.2f}%, Got: {2:>3.2f}%'
@@ -31,7 +30,7 @@ class CTRexNbar_Test(CTRexGeneral_Test):
for cl_intf in self.router.get_if_manager().get_if_list(if_type = IFType.Client):
client_intf = cl_intf.get_name()
- for protocol, bench in nbar_benchmark.iteritems():
+ for protocol, bench in nbar_benchmark.items():
if protocol != 'total':
try:
bench = float(bench)
@@ -44,11 +43,11 @@ class CTRexNbar_Test(CTRexGeneral_Test):
missmatchMsg += fmt.format(protocol, bench, protocol_test_res)
except KeyError as e:
missmatchFlag = True
- print e
- print "Changes missmatchFlag to True. ", "\n\tProtocol {0} isn't part of classification results on interface {intf}".format( protocol, intf = client_intf )
+ print(e)
+ print("Changes missmatchFlag to True. ", "\n\tProtocol {0} isn't part of classification results on interface {intf}".format( protocol, intf = client_intf ))
missmatchMsg += "\n\tProtocol {0} isn't part of classification results on interface {intf}".format( protocol, intf = client_intf )
except ZeroDivisionError as e:
- print "ZeroDivisionError: %s" % protocol
+ print("ZeroDivisionError: %s" % protocol)
pass
if missmatchFlag:
self.fail(missmatchMsg)
@@ -78,10 +77,10 @@ class CTRexNbar_Test(CTRexGeneral_Test):
# trex_res is a CTRexResult instance- and contains the summary of the test results
# you may see all the results keys by simply calling here for 'print trex_res.result'
- print ("\nLATEST RESULT OBJECT:")
- print trex_res
- print ("\nLATEST DUMP:")
- print trex_res.get_latest_dump()
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+ print("\nLATEST DUMP:")
+ print(trex_res.get_latest_dump())
self.check_general_scenario_results(trex_res, check_latency = False) # NBAR can cause latency
@@ -89,16 +88,16 @@ class CTRexNbar_Test(CTRexGeneral_Test):
trex_tx_pckt = trex_res.get_last_value("trex-global.data.m_total_tx_pkts")
cpu_util = trex_res.get_last_value("trex-global.data.m_cpu_util")
cpu_util_hist = trex_res.get_value_list("trex-global.data.m_cpu_util")
- print "cpu util is:", cpu_util
- print cpu_util_hist
+ print("cpu util is:", cpu_util)
+ print(cpu_util_hist)
test_norm_cpu = 2 * trex_tx_pckt / (core * cpu_util)
- print "test_norm_cpu is:", test_norm_cpu
+ print("test_norm_cpu is:", test_norm_cpu)
if self.get_benchmark_param('cpu2core_custom_dev'):
# check this test by custom deviation
deviation_compare_value = self.get_benchmark_param('cpu2core_dev')
- print "Comparing test with custom deviation value- {dev_val}%".format( dev_val = int(deviation_compare_value*100) )
+ print("Comparing test with custom deviation value- {dev_val}%".format( dev_val = int(deviation_compare_value*100) ))
# need to be fixed !
#if ( abs((test_norm_cpu/self.get_benchmark_param('cpu_to_core_ratio')) - 1) > deviation_compare_value):
@@ -134,10 +133,10 @@ class CTRexNbar_Test(CTRexGeneral_Test):
# trex_res is a CTRexResult instance- and contains the summary of the test results
# you may see all the results keys by simply calling here for 'print trex_res.result'
- print ("\nLATEST RESULT OBJECT:")
- print trex_res
- print ("\nLATEST DUMP:")
- print trex_res.get_latest_dump()
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+ print("\nLATEST DUMP:")
+ print(trex_res.get_latest_dump())
self.check_general_scenario_results(trex_res)
@@ -170,8 +169,8 @@ class CTRexNbar_Test(CTRexGeneral_Test):
# trex_res is a CTRexResult instance- and contains the summary of the test results
# you may see all the results keys by simply calling here for 'print trex_res.result'
- print ("\nLATEST RESULT OBJECT:")
- print trex_res
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
self.check_general_scenario_results(trex_res, check_latency = False)
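Besides the print conversions, this file replaces dict.iteritems(), which was removed in Python 3, with items(). On Python 2, items() builds a list instead of an iterator; for a small benchmark dict of protocol shares the extra cost is negligible. A minimal sketch with a hypothetical benchmark dict:

    nbar_benchmark = {'http': 60.0, 'rtp': 30.0, 'total': 100.0}   # made-up values
    # iteritems() would raise AttributeError on Python 3; items() works on both.
    for protocol, bench in nbar_benchmark.items():
        if protocol != 'total':
            print('%s expected share: %.2f%%' % (protocol, float(bench)))
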
diff --git a/scripts/automation/regression/stateful_tests/trex_rx_test.py b/scripts/automation/regression/stateful_tests/trex_rx_test.py
index 37b1c722..a6cc4bc6 100755
--- a/scripts/automation/regression/stateful_tests/trex_rx_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_rx_test.py
@@ -1,7 +1,7 @@
#!/router/bin/python
-from trex_general_test import CTRexGeneral_Test
+from .trex_general_test import CTRexGeneral_Test
from CPlatform import CStaticRouteConfig, CNatConfig
-from tests_exceptions import *
+from .tests_exceptions import *
#import sys
import time
import copy
@@ -13,11 +13,9 @@ class CTRexRx_Test(CTRexGeneral_Test):
def __init__(self, *args, **kwargs):
CTRexGeneral_Test.__init__(self, *args, **kwargs)
self.unsupported_modes = ['virt_nics'] # TODO: fix
- pass
def setUp(self):
CTRexGeneral_Test.setUp(self)
- pass
def check_rx_errors(self, trex_res, allow_error_tolerance = True):
@@ -54,13 +52,14 @@ class CTRexRx_Test(CTRexGeneral_Test):
path = 'rx-check.data.stats.m_total_rx'
total_rx = trex_res.get_last_value(path)
- if not total_rx:
+ if total_rx is None:
raise AbnormalResultError('No TRex results by path: %s' % path)
+ elif not total_rx:
+ raise AbnormalResultError('Total rx_check (%s) packets is zero.' % path)
-
- print 'Total packets checked: %s' % total_rx
- print 'Latency counters: %s' % latency_counters_display
- print 'rx_check counters: %s' % rx_counters
+ print('Total packets checked: %s' % total_rx)
+ print('Latency counters: %s' % latency_counters_display)
+ print('rx_check counters: %s' % rx_counters)
except KeyError as e:
self.fail('Expected key in TRex result was not found.\n%s' % traceback.print_exc())
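The reworked check above separates two cases that the old `if not total_rx:` lumped together: get_last_value() returning None (the stats path is missing from the results entirely) versus a present counter that is zero (rx_check saw no packets). A minimal standalone sketch of the distinction, with a stand-in for the exception from tests_exceptions.py:

    class AbnormalResultError(Exception):        # stand-in for tests_exceptions
        pass

    def check_total_rx(total_rx, path = 'rx-check.data.stats.m_total_rx'):
        if total_rx is None:                     # path absent from the results
            raise AbnormalResultError('No TRex results by path: %s' % path)
        elif not total_rx:                       # path present, but zero packets
            raise AbnormalResultError('Total rx_check (%s) packets is zero.' % path)

    check_total_rx(12345)                        # a healthy count passes silently
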
@@ -77,11 +76,11 @@ class CTRexRx_Test(CTRexGeneral_Test):
if self.is_loopback or error_percentage > error_tolerance:
self.fail('Too much errors in rx_check. (~%s%% of traffic)' % error_percentage)
else:
- print 'There are errors in rx_check (%f%%), not exceeding allowed limit (%s%%)' % (error_percentage, error_tolerance)
+ print('There are errors in rx_check (%f%%), not exceeding allowed limit (%s%%)' % (error_percentage, error_tolerance))
else:
- print 'No errors in rx_check.'
+ print('No errors in rx_check.')
except Exception as e:
- print traceback.print_exc()
+ print(traceback.print_exc())
self.fail('Errors in rx_check: %s' % e)
def test_rx_check_sfr(self):
@@ -108,8 +107,8 @@ class CTRexRx_Test(CTRexGeneral_Test):
trex_res = self.trex.sample_to_run_finish()
- print ("\nLATEST RESULT OBJECT:")
- print trex_res
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
#print ("\nLATEST DUMP:")
#print trex_res.get_latest_dump()
@@ -144,8 +143,8 @@ class CTRexRx_Test(CTRexGeneral_Test):
trex_res = self.trex.sample_to_run_finish()
- print ("\nLATEST RESULT OBJECT:")
- print trex_res
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
self.check_general_scenario_results(trex_res)
self.check_CPU_benchmark(trex_res)
@@ -176,8 +175,8 @@ class CTRexRx_Test(CTRexGeneral_Test):
trex_res = self.trex.sample_to_run_finish()
- print ("\nLATEST RESULT OBJECT:")
- print trex_res
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
#print ("\nLATEST DUMP:")
#print trex_res.get_latest_dump()
@@ -210,8 +209,8 @@ class CTRexRx_Test(CTRexGeneral_Test):
trex_res = self.trex.sample_to_run_finish()
- print ("\nLATEST RESULT OBJECT:")
- print trex_res
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
self.check_general_scenario_results(trex_res)
self.check_CPU_benchmark(trex_res)
@@ -241,15 +240,15 @@ class CTRexRx_Test(CTRexGeneral_Test):
learn_verify = True,
l_pkt_mode = 2)
- print 'Run for 40 seconds, expect no errors'
+ print('Run for 40 seconds, expect no errors')
trex_res = self.trex.sample_x_seconds(40)
- print ("\nLATEST RESULT OBJECT:")
- print trex_res
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
self.check_general_scenario_results(trex_res)
self.check_CPU_benchmark(trex_res)
self.check_rx_errors(trex_res)
- print 'Run until finish, expect errors'
+ print('Run until finish, expect errors')
old_errors = copy.deepcopy(self.fail_reasons)
nat_dict = self.get_benchmark_param('nat_dict', test_name = 'test_nat_simple')
nat_obj = CNatConfig(nat_dict)
@@ -257,14 +256,15 @@ class CTRexRx_Test(CTRexGeneral_Test):
self.router.config_zbf()
trex_res = self.trex.sample_to_run_finish()
self.router.config_no_zbf()
- self.router.clear_nat_translations()
- print ("\nLATEST RESULT OBJECT:")
- print trex_res
+ self.router.config_no_nat(nat_obj)
+ #self.router.clear_nat_translations()
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
self.check_rx_errors(trex_res, allow_error_tolerance = False)
if self.fail_reasons == old_errors:
self.fail('Expected errors here, got none.')
else:
- print 'Got errors as expected.'
+ print('Got errors as expected.')
self.fail_reasons = old_errors
def tearDown(self):