author     Hanoh Haim <hhaim@cisco.com>    2016-01-20 15:32:22 +0200
committer  Hanoh Haim <hhaim@cisco.com>    2016-01-20 15:32:22 +0200
commit     1d8c0a97f418555058efd9b149b1fec0cb808eb7
tree       0b0469282684d5d97f8aca477f69c3d60d29ef4b
parent     fbbbb8fe6025da27ee3d703c3803e07cb7ecfd51
parent     0798c924dfcdfcf0c02ae74136013daa5d7d16f4

merge
-rw-r--r--   .gitignore                                                                       |   4
-rwxr-xr-x   scripts/automation/regression/aggregate_results.py                              |   9
-rw-r--r--   scripts/automation/regression/setups/kiwi02/benchmark.yaml                      |   5
-rw-r--r--   scripts/automation/regression/setups/trex-dan/benchmark.yaml                    |   5
-rw-r--r--   scripts/automation/regression/setups/trex14/benchmark.yaml                      |   7
-rwxr-xr-x   scripts/automation/regression/unit_tests/functional_tests/pkt_builder_test.py   |   8
-rwxr-xr-x   scripts/automation/regression/unit_tests/functional_tests/test.pcap             | bin 346 -> 0 bytes
-rwxr-xr-x   scripts/automation/regression/unit_tests/functional_tests/test2.pcap            | bin 93 -> 0 bytes
-rwxr-xr-x   scripts/automation/regression/unit_tests/functional_tests/test_cmp.pcap         | bin 346 -> 0 bytes
-rwxr-xr-x   scripts/automation/regression/unit_tests/trex_nat_test.py                       |  10
-rwxr-xr-x   scripts/automation/regression/unit_tests/trex_rx_test.py                        |  20
11 files changed, 43 insertions(+), 25 deletions(-)
diff --git a/.gitignore b/.gitignore
index a2bef60f..68c9f5e6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -101,3 +101,7 @@ Thumbs.db
*.vpw
*.vtg
*.vpwhist
+
+# Regression #
+##############
+scripts/automation/regression/reports/*
diff --git a/scripts/automation/regression/aggregate_results.py b/scripts/automation/regression/aggregate_results.py
index 71c4c9f8..0ef3b5af 100755
--- a/scripts/automation/regression/aggregate_results.py
+++ b/scripts/automation/regression/aggregate_results.py
@@ -18,9 +18,12 @@ def pad_tag(text, tag):
return '<%s>%s</%s>' % (tag, text, tag)
def is_functional_test_name(testname):
- if testname.startswith('platform_') or testname.startswith('misc_methods_'):
- return True
- return False
+ #if testname.startswith(('platform_', 'misc_methods_', 'vm_', 'payload_gen_', 'pkt_builder_')):
+ # return True
+ #return False
+ if testname.startswith('unit_tests.'):
+ return False
+ return True
def is_good_status(text):
return text in ('Successful', 'Fixed', 'Passed', 'True', 'Pass')
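With the change above, aggregate_results.py stops whitelisting name prefixes and instead treats every test whose dotted name is outside the unit_tests package as functional. A minimal sketch of how the new predicate behaves; the sample test names below are illustrative, not taken from the regression suite.

def is_functional_test_name(testname):
    # Anything that is not under the unit_tests package counts as functional.
    if testname.startswith('unit_tests.'):
        return False
    return True

# Illustrative names only -- the real suite's names may differ.
print(is_functional_test_name('pkt_builder_test.test_get_layer'))           # True
print(is_functional_test_name('platform_cmd_cache_test'))                   # True
print(is_functional_test_name('unit_tests.trex_nat_test.test_nat_simple'))  # False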
diff --git a/scripts/automation/regression/setups/kiwi02/benchmark.yaml b/scripts/automation/regression/setups/kiwi02/benchmark.yaml
index ccd566cc..936af03a 100644
--- a/scripts/automation/regression/setups/kiwi02/benchmark.yaml
+++ b/scripts/automation/regression/setups/kiwi02/benchmark.yaml
@@ -36,7 +36,7 @@ test_rx_check :
exp_bw : 1
exp_latency : 1
-test_nat_simple :
+test_nat_simple : &test_nat_simple
stat_route_dict :
clients_start : 16.0.0.1
servers_start : 48.0.0.1
@@ -56,6 +56,9 @@ test_nat_simple :
exp_latency : 1
allow_timeout_dev : YES
+test_nat_simple_mode1 : *test_nat_simple
+test_nat_simple_mode2 : *test_nat_simple
+
test_nat_learning :
stat_route_dict :
clients_start : 16.0.0.1
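In the benchmark.yaml change above, '&test_nat_simple' is a YAML anchor and '*test_nat_simple' is an alias to it, so test_nat_simple_mode1 and test_nat_simple_mode2 reuse the exact same benchmark mapping as test_nat_simple without duplicating its fields. A minimal sketch with PyYAML; the field values are placeholders, not this setup's real numbers.

import yaml  # PyYAML, assumed to be available

snippet = """
test_nat_simple : &test_nat_simple
    multiplier        : 10000   # placeholder value
    cores             : 1       # placeholder value
    allow_timeout_dev : YES

test_nat_simple_mode1 : *test_nat_simple
test_nat_simple_mode2 : *test_nat_simple
"""

data = yaml.safe_load(snippet)
# Both aliases resolve to the very same mapping object as the anchored block.
assert data['test_nat_simple_mode1'] is data['test_nat_simple']
assert data['test_nat_simple_mode2']['cores'] == 1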
diff --git a/scripts/automation/regression/setups/trex-dan/benchmark.yaml b/scripts/automation/regression/setups/trex-dan/benchmark.yaml
index ae814551..3232dfec 100644
--- a/scripts/automation/regression/setups/trex-dan/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex-dan/benchmark.yaml
@@ -36,7 +36,7 @@ test_rx_check :
exp_bw : 1
exp_latency : 1
-test_nat_simple :
+test_nat_simple : &test_nat_simple
stat_route_dict :
clients_start : 16.0.0.1
servers_start : 48.0.0.1
@@ -56,6 +56,9 @@ test_nat_simple :
exp_latency : 1
allow_timeout_dev : YES
+test_nat_simple_mode1 : *test_nat_simple
+test_nat_simple_mode2 : *test_nat_simple
+
test_nat_learning :
stat_route_dict :
clients_start : 16.0.0.1
diff --git a/scripts/automation/regression/setups/trex14/benchmark.yaml b/scripts/automation/regression/setups/trex14/benchmark.yaml
index b0e4f31d..4870eefe 100644
--- a/scripts/automation/regression/setups/trex14/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex14/benchmark.yaml
@@ -36,7 +36,7 @@ test_rx_check :
exp_bw : 13
exp_latency : 1
-test_nat_simple :
+test_nat_simple : &test_nat_simple
stat_route_dict :
clients_start : 16.0.0.1
servers_start : 48.0.0.1
@@ -56,6 +56,9 @@ test_nat_simple :
exp_latency : 1
allow_timeout_dev : YES
+test_nat_simple_mode1 : *test_nat_simple
+test_nat_simple_mode2 : *test_nat_simple
+
test_nat_learning :
stat_route_dict :
clients_start : 16.0.0.1
@@ -147,6 +150,8 @@ test_rx_check_http_negative:
multiplier : 13000
cores : 1
rx_sample_rate : 16
+ # allow 0.03% errors, bad router
+ error_tolerance : 0.03
stat_route_dict :
clients_start : 16.0.0.1
servers_start : 48.0.0.1
diff --git a/scripts/automation/regression/unit_tests/functional_tests/pkt_builder_test.py b/scripts/automation/regression/unit_tests/functional_tests/pkt_builder_test.py
index fd157c8a..96393d1e 100755
--- a/scripts/automation/regression/unit_tests/functional_tests/pkt_builder_test.py
+++ b/scripts/automation/regression/unit_tests/functional_tests/pkt_builder_test.py
@@ -188,7 +188,7 @@ class CTRexPktBuilder_Test(pkt_bld_general_test.CGeneralPktBld_Test):
def test_get_layer(self):
assert_equal(self.pkt_bld.get_layer('no_such_layer'), None)
assert(not(self.pkt_bld.get_layer('l2') is self.pkt_bld._packet))
- assert(type(self.pkt_bld.get_layer('l2')).__name__, "ethernet")
+ assert_equal(type(self.pkt_bld.get_layer('l2')).__name__, "Ethernet")
def test_dump_to_pcap(self):
# set Ethernet layer attributes
@@ -212,11 +212,11 @@ class CTRexPktBuilder_Test(pkt_bld_general_test.CGeneralPktBld_Test):
# finally, set IP header len with relation to payload data
self.pkt_bld.set_layer_attr("l3_ip", "len", len(self.pkt_bld.get_layer('l3_ip')))
- filepath = "unit_tests/functional_tests/test.pcap"
+ filepath = "reports/test.pcap"
self.pkt_bld.dump_pkt_to_pcap(filepath)
assert os.path.isfile(filepath)
- # remove pcap after creation - masked for now
- # os.remove(filepath)
+ # remove pcap after ensuring it exists
+ os.remove(filepath)
filepath = "/not/a/valid/path/test.pcap"
assert_raises(IOError, self.pkt_bld.dump_pkt_to_pcap, filepath)
# check that dump is not available for empty packet
diff --git a/scripts/automation/regression/unit_tests/functional_tests/test.pcap b/scripts/automation/regression/unit_tests/functional_tests/test.pcap
deleted file mode 100755
index 131b7883..00000000
--- a/scripts/automation/regression/unit_tests/functional_tests/test.pcap
+++ /dev/null
Binary files differ
diff --git a/scripts/automation/regression/unit_tests/functional_tests/test2.pcap b/scripts/automation/regression/unit_tests/functional_tests/test2.pcap
deleted file mode 100755
index 1d35d9c1..00000000
--- a/scripts/automation/regression/unit_tests/functional_tests/test2.pcap
+++ /dev/null
Binary files differ
diff --git a/scripts/automation/regression/unit_tests/functional_tests/test_cmp.pcap b/scripts/automation/regression/unit_tests/functional_tests/test_cmp.pcap
deleted file mode 100755
index 4c92859f..00000000
--- a/scripts/automation/regression/unit_tests/functional_tests/test_cmp.pcap
+++ /dev/null
Binary files differ
diff --git a/scripts/automation/regression/unit_tests/trex_nat_test.py b/scripts/automation/regression/unit_tests/trex_nat_test.py
index c70c03a1..e50831e4 100755
--- a/scripts/automation/regression/unit_tests/trex_nat_test.py
+++ b/scripts/automation/regression/unit_tests/trex_nat_test.py
@@ -101,17 +101,17 @@ class CTRexNat_Test(CTRexGeneral_Test):#(unittest.TestCase):
self.router.configure_basic_interfaces()
- stat_route_dict = self.get_benchmark_param('stat_route_dict', test_name="test_nat_simple")
+ stat_route_dict = self.get_benchmark_param('stat_route_dict')
stat_route_obj = CStaticRouteConfig(stat_route_dict)
self.router.config_static_routing(stat_route_obj, mode = "config")
- nat_dict = self.get_benchmark_param('nat_dict', test_name="test_nat_simple")
+ nat_dict = self.get_benchmark_param('nat_dict')
nat_obj = CNatConfig(nat_dict)
self.router.config_nat(nat_obj)
# self.trex.set_yaml_file('cap2/http_simple.yaml')
- mult = self.get_benchmark_param('multiplier', test_name="test_nat_simple")
- core = self.get_benchmark_param('cores', test_name="test_nat_simple")
+ mult = self.get_benchmark_param('multiplier')
+ core = self.get_benchmark_param('cores')
# trex_res = self.trex.run(nc=False,multiplier = mult, cores = core, duration = 100, l = 1000, learn = True)
ret = self.trex.start_trex(
@@ -130,7 +130,7 @@ class CTRexNat_Test(CTRexGeneral_Test):#(unittest.TestCase):
print trex_res.get_latest_dump()
trex_nat_stats = trex_res.get_last_value("trex-global.data", ".*nat.*") # extract all nat data
- if self.get_benchmark_param('allow_timeout_dev', test_name="test_nat_simple"):
+ if self.get_benchmark_param('allow_timeout_dev'):
nat_timeout_ratio = trex_nat_stats['m_total_nat_time_out']/trex_nat_stats['m_total_nat_open']
if nat_timeout_ratio > 0.005:
self.fail('TRex nat_timeout ratio %f > 0.5%%' % nat_timeout_ratio)
diff --git a/scripts/automation/regression/unit_tests/trex_rx_test.py b/scripts/automation/regression/unit_tests/trex_rx_test.py
index 4f404616..37b1c722 100755
--- a/scripts/automation/regression/unit_tests/trex_rx_test.py
+++ b/scripts/automation/regression/unit_tests/trex_rx_test.py
@@ -20,7 +20,7 @@ class CTRexRx_Test(CTRexGeneral_Test):
pass
- def check_rx_errors(self, trex_res):
+ def check_rx_errors(self, trex_res, allow_error_tolerance = True):
try:
# counters to check
@@ -69,7 +69,7 @@ class CTRexRx_Test(CTRexGeneral_Test):
total_errors = sum(rx_counters.values()) + sum(latency_counters_compare.values())
error_tolerance = self.get_benchmark_param('error_tolerance')
- if not error_tolerance:
+ if not error_tolerance or not allow_error_tolerance:
error_tolerance = 0
error_percentage = float(total_errors) * 100 / total_rx
@@ -114,7 +114,7 @@ class CTRexRx_Test(CTRexGeneral_Test):
#print trex_res.get_latest_dump()
self.check_general_scenario_results(trex_res)
- self.check_CPU_benchmark(trex_res, 10)
+ self.check_CPU_benchmark(trex_res)
self.check_rx_errors(trex_res)
@@ -148,7 +148,7 @@ class CTRexRx_Test(CTRexGeneral_Test):
print trex_res
self.check_general_scenario_results(trex_res)
- self.check_CPU_benchmark(trex_res, 10)
+ self.check_CPU_benchmark(trex_res)
self.check_rx_errors(trex_res)
@@ -182,7 +182,7 @@ class CTRexRx_Test(CTRexGeneral_Test):
#print trex_res.get_latest_dump()
self.check_general_scenario_results(trex_res)
- self.check_CPU_benchmark(trex_res, 10)
+ self.check_CPU_benchmark(trex_res)
self.check_rx_errors(trex_res)
@@ -214,10 +214,10 @@ class CTRexRx_Test(CTRexGeneral_Test):
print trex_res
self.check_general_scenario_results(trex_res)
- self.check_CPU_benchmark(trex_res, 10)
+ self.check_CPU_benchmark(trex_res)
self.check_rx_errors(trex_res)
- @nottest
+ #@nottest
def test_rx_check_http_negative(self):
if self.is_loopback:
self.skip('This test uses NAT, not relevant for loopback')
@@ -234,7 +234,7 @@ class CTRexRx_Test(CTRexGeneral_Test):
m = mult,
p = True,
rx_check = sample_rate,
- d = 50,
+ d = 60,
f = 'cap2/http_simple.yaml',
l = 1000,
k = 10,
@@ -256,11 +256,11 @@ class CTRexRx_Test(CTRexGeneral_Test):
self.router.config_nat(nat_obj)
self.router.config_zbf()
trex_res = self.trex.sample_to_run_finish()
- self.router.config_no_nat(nat_obj)
self.router.config_no_zbf()
+ self.router.clear_nat_translations()
print ("\nLATEST RESULT OBJECT:")
print trex_res
- self.check_rx_errors(trex_res)
+ self.check_rx_errors(trex_res, allow_error_tolerance = False)
if self.fail_reasons == old_errors:
self.fail('Expected errors here, got none.')
else:
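The rx changes above give check_rx_errors an allow_error_tolerance flag, so the negative test can ignore the per-setup error_tolerance (the 0.03% added for trex14's flaky router) that the other rx tests may use. A minimal re-statement of that decision with hypothetical counter values; this is a sketch, not the TRex code itself.

def exceeds_rx_error_budget(total_errors, total_rx, error_tolerance, allow_error_tolerance = True):
    # Mirrors the tolerance logic from check_rx_errors: the tolerance is ignored
    # when the benchmark defines none or when the caller forbids it.
    if not error_tolerance or not allow_error_tolerance:
        error_tolerance = 0
    error_percentage = float(total_errors) * 100 / total_rx
    return error_percentage > error_tolerance

# Hypothetical numbers: 30 bad packets out of 200,000 received is 0.015%.
print(exceeds_rx_error_budget(30, 200000, 0.03))                                  # False - within the 0.03% budget
print(exceeds_rx_error_budget(30, 200000, 0.03, allow_error_tolerance = False))   # True  - the negative test tolerates nothing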