author    imarom <imarom@cisco.com>  2017-01-22 16:20:45 +0200
committer imarom <imarom@cisco.com>  2017-01-22 16:20:45 +0200
commit    904eacd9be1230efb7ae0ab7997ec131b588ec8a (patch)
tree      8e4bcd1b1a5f683efdb8f3eeb962acefc3201961 /scripts/automation/regression/stateless_tests
parent    d2f1c8451e2e8ffc47b208f68f9b16697d706d60 (diff)
parent    b81cdb6c2d6d118c1c346e7c8dae6a5e747d867d (diff)
Merge branch 'master' into capture
Signed-off-by: imarom <imarom@cisco.com>

Conflicts:
    scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py
    scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py
    scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py
    src/main_dpdk.cpp
Diffstat (limited to 'scripts/automation/regression/stateless_tests')
-rwxr-xr-x  scripts/automation/regression/stateless_tests/stl_benchmark_test.py    82
-rw-r--r--  scripts/automation/regression/stateless_tests/stl_performance_test.py  65
-rw-r--r--  scripts/automation/regression/stateless_tests/stl_rx_test.py            2
3 files changed, 113 insertions(+), 36 deletions(-)
diff --git a/scripts/automation/regression/stateless_tests/stl_benchmark_test.py b/scripts/automation/regression/stateless_tests/stl_benchmark_test.py
index 6940efd3..fbc58765 100755
--- a/scripts/automation/regression/stateless_tests/stl_benchmark_test.py
+++ b/scripts/automation/regression/stateless_tests/stl_benchmark_test.py
@@ -4,6 +4,7 @@ from trex_stl_lib.api import *
import os, sys
from collections import deque
from time import time, sleep
+import pprint
class STLBenchmark_Test(CStlGeneral_Test):
"""Benchark stateless performance"""
@@ -14,9 +15,21 @@ class STLBenchmark_Test(CStlGeneral_Test):
stabilize = 5 # ensure stabilization over this period
print('')
- for profile_bench in self.get_benchmark_param('profiles'):
+        # Example shape of get_benchmark_param('profiles'):
+        #   profiles = [{'bw_per_core': 1,
+        #                'cpu_util': 1,
+        #                'kwargs': {'packet_len': 64},
+        #                'name': 'stl/udp_for_benchmarks.py'}]
+
+ profiles = self.get_benchmark_param('profiles')
+        dp_cores = self.stl_trex.system_info.get('dp_core_count', 1)
+
+ for profile_bench in profiles:
+
cpu_utils = deque([0] * stabilize, maxlen = stabilize)
- bws_per_core = deque([0] * stabilize, maxlen = stabilize)
+ bps = deque([0] * stabilize, maxlen = stabilize)
+ pps = deque([0] * stabilize, maxlen = stabilize)
+
kwargs = profile_bench.get('kwargs', {})
print('Testing profile %s, kwargs: %s' % (profile_bench['name'], kwargs))
profile = STLProfile.load(os.path.join(CTRexScenario.scripts_path, profile_bench['name']), **kwargs)
@@ -32,13 +45,30 @@ class STLBenchmark_Test(CStlGeneral_Test):
for i in range(timeout + 1):
stats = self.stl_trex.get_stats()
cpu_utils.append(stats['global']['cpu_util'])
- bws_per_core.append(stats['global']['bw_per_core'])
+ bps.append(stats['global']['tx_bps'])
+ pps.append(stats['global']['tx_pps'])
+
if i > stabilize and min(cpu_utils) > max(cpu_utils) * 0.95:
break
sleep(0.5)
agv_cpu_util = sum(cpu_utils) / stabilize
- agv_bw_per_core = sum(bws_per_core) / stabilize
+ agv_pps = sum(pps) / stabilize
+ agv_bps = sum(bps) / stabilize
+
+            if agv_cpu_util == 0.0:
+                agv_cpu_util = 1.0
+
+            agv_mpps = agv_pps / 1e6
+            agv_gbps = agv_bps / 1e9
+
+            agv_gbps_norm = agv_gbps * 100.0 / agv_cpu_util
+            agv_mpps_norm = agv_mpps * 100.0 / agv_cpu_util
+
+            agv_gbps_norm_pc = agv_gbps_norm / dp_cores
+            agv_mpps_norm_pc = agv_mpps_norm / dp_cores
+
if critical_test and i == timeout and agv_cpu_util > 10:
raise Exception('Timeout on waiting for stabilization, last CPU util values: %s' % list(cpu_utils))
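The stabilization logic above keeps the last `stabilize` CPU-util samples in a fixed-size deque and stops polling once the smallest recent sample is within 5% of the largest. A minimal standalone sketch of the same idea (the `poll_cpu_util` callable is hypothetical, not part of the TRex API):

    from collections import deque
    from time import sleep

    def wait_for_stable_cpu(poll_cpu_util, stabilize = 5, timeout = 60):
        # keep only the last `stabilize` samples
        samples = deque([0.0] * stabilize, maxlen = stabilize)
        for i in range(timeout + 1):
            samples.append(poll_cpu_util())
            # stable: the recent samples vary by less than 5%
            if i > stabilize and min(samples) > max(samples) * 0.95:
                return sum(samples) / stabilize
            sleep(0.5)
        raise RuntimeError('CPU util did not stabilize: %s' % list(samples))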
@@ -48,24 +78,32 @@ class STLBenchmark_Test(CStlGeneral_Test):
raise Exception('Too much queue_full: %s' % stats['global']['queue_full'])
if not cpu_utils[-1]:
raise Exception('CPU util is zero, last values: %s' % list(cpu_utils))
- print('Done (%ss), CPU util: %4g, bw_per_core: %6sGb/core' % (int(time() - start_time), agv_cpu_util, round(agv_bw_per_core, 2)))
- # TODO: add check of benchmark based on results from regression
-
- # report benchmarks
- if self.GAManager:
- try:
- pass
- #profile_repr = '%s.%s %s' % (CTRexScenario.setup_name,
- # os.path.basename(profile_bench['name']),
- # repr(kwargs).replace("'", ''))
- #self.GAManager.gaAddAction(Event = 'stateless_test', action = profile_repr,
- # label = 'bw_per_core', value = int(agv_bw_per_core))
- # TODO: report expected once acquired
- #self.GAManager.gaAddAction(Event = 'stateless_test', action = profile_repr,
- # label = 'bw_per_core_exp', value = int(expected_norm_cpu))
- #self.GAManager.emptyAndReportQ()
- except Exception as e:
- print('Sending GA failed: %s' % e)
+            print('Done (%ss), CPU util: %4g, norm_pps_per_core: %6s Mpps, norm_bw_per_core: %6s Gb/core' % (int(time() - start_time), agv_cpu_util, round(agv_mpps_norm_pc, 2), round(agv_gbps_norm_pc, 2)))
+
+            # report benchmarks to ELK
+            if self.elk:
+                streams = kwargs.get('stream_count', 1)
+                elk_obj = self.get_elk_obj()
+                print("\n* Reporting to ELK *\n")
+                name = profile_bench['name']
+                elk_obj['test'] = { "name"          : name,
+                                    "type"          : "stateless-range",
+                                    "cores"         : dp_cores,
+                                    "cpu%"          : agv_cpu_util,
+                                    "mpps"          : agv_mpps,
+                                    "streams_count" : streams,
+                                    "mpps_pc"       : agv_mpps_norm_pc,
+                                    "gbps_pc"       : agv_gbps_norm_pc,
+                                    "gbps"          : agv_gbps,
+                                    "avg-pktsize"   : round(1000.0 * agv_gbps / (8.0 * agv_mpps)),
+                                    "latency"       : { "min" : -1.0,
+                                                        "max" : -1.0,
+                                                        "avr" : -1.0 } }
+                #pprint.pprint(elk_obj)
+                self.elk.perf.push_data(elk_obj)
+
def tearDown(self):
self.stl_trex.reset()
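As a worked example of the normalization added above (all numbers are illustrative, not measured): with dp_cores = 4 data-plane cores and an average of 30 Mpps / 20 Gb/s at 50% CPU:

    agv_gbps, agv_mpps = 20.0, 30.0   # illustrative, not measured
    agv_cpu_util       = 50.0
    dp_cores           = 4

    agv_gbps_norm = agv_gbps * 100.0 / agv_cpu_util   # 40.0 Gb/s at 100% CPU
    agv_mpps_norm = agv_mpps * 100.0 / agv_cpu_util   # 60.0 Mpps at 100% CPU

    agv_gbps_norm_pc = agv_gbps_norm / dp_cores       # 10.0 Gb/s per DP core
    agv_mpps_norm_pc = agv_mpps_norm / dp_cores       # 15.0 Mpps per DP core

    # gbps/mpps is kbit per packet, so 1000*gbps/(8*mpps) is bytes per packet
    avg_pktsize = round(1000.0 * agv_gbps / (8.0 * agv_mpps))   # 83 bytes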
diff --git a/scripts/automation/regression/stateless_tests/stl_performance_test.py b/scripts/automation/regression/stateless_tests/stl_performance_test.py
index 641f0a33..a1f4dd3b 100644
--- a/scripts/automation/regression/stateless_tests/stl_performance_test.py
+++ b/scripts/automation/regression/stateless_tests/stl_performance_test.py
@@ -1,6 +1,7 @@
import os
from .stl_general_test import CStlGeneral_Test, CTRexScenario
from trex_stl_lib.api import *
+import pprint
def avg (values):
return (sum(values) / float(len(values)))
@@ -67,6 +68,42 @@ class PerformanceReport(object):
ga.emptyAndReportQ()
+    def norm_scenario (self):
+        s = self.scenario
+        s = '+'.join(s.split(' '))
+        s = '+'.join(s.split('-'))
+        s = '+'.join(s.split(','))
+        l = s.split('+')
+        lr = []
+        for obj in l:
+            if len(obj):
+                lr.append(obj)
+        s = '-'.join(lr)
+        return s
+
+    def report_to_elk(self, elk, elk_obj, golden_mpps):
+        print("\n* Reporting to ELK *\n")
+        elk_obj['test'] = { "name"          : self.norm_scenario(),
+                            "type"          : "stateless",
+                            "cores"         : self.core_count,
+                            "cpu%"          : self.avg_cpu,
+                            "mpps"          : self.avg_mpps,
+                            "streams_count" : 1,
+                            "mpps_pc"       : self.avg_mpps_per_core,
+                            "gbps_pc"       : self.avg_gbps_per_core,
+                            "gbps"          : self.avg_gbps,
+                            "avg-pktsize"   : 1000.0 * self.avg_gbps / (8.0 * self.avg_mpps),
+                            "latency"       : { "min" : -1.0,
+                                                "max" : -1.0,
+                                                "avr" : -1.0 } }
+
+        #pprint.pprint(elk_obj)
+        # push to ELK
+        elk.perf.push_data(elk_obj)
+
+
class STLPerformance_Test(CStlGeneral_Test):
"""Tests for stateless client"""
@@ -238,24 +275,25 @@ class STLPerformance_Test(CStlGeneral_Test):
############################################# test's infra functions ###########################################
- def execute_single_scenario (self, scenario_cfg, iterations = 4):
+ def execute_single_scenario (self, scenario_cfg):
golden = scenario_cfg['mpps_per_core_golden']
-
- for i in range(iterations, -1, -1):
- report = self.execute_single_scenario_iteration(scenario_cfg)
- rc = report.check_golden(golden)
+ report = self.execute_single_scenario_iteration(scenario_cfg)
+
+ if self.GAManager:
+ report.report_to_analytics(self.GAManager, golden)
- if (rc == PerformanceReport.GOLDEN_NORMAL) or (rc == PerformanceReport.GOLDEN_BETTER):
- if self.GAManager:
- report.report_to_analytics(self.GAManager, golden)
+ #report to elk
+ if self.elk:
+ elk_obj = self.get_elk_obj()
+ report.report_to_elk(self.elk,elk_obj, golden)
- return
+ rc = report.check_golden(golden)
- if rc == PerformanceReport.GOLDEN_BETTER:
- return
+ if rc == PerformanceReport.GOLDEN_NORMAL or rc == PerformanceReport.GOLDEN_BETTER:
+ return
- print("\n*** Measured Mpps per core '{0}' is lower than expected golden '{1} - re-running scenario...{2} attempts left".format(report.avg_mpps_per_core, scenario_cfg['mpps_per_core_golden'], i))
+ print("\n*** Measured Mpps per core '{0}' is lower than expected golden '{1}'".format(report.avg_mpps_per_core, scenario_cfg['mpps_per_core_golden']))
assert 0, "performance failure"
@@ -296,7 +334,8 @@ class STLPerformance_Test(CStlGeneral_Test):
# sample bps/pps
for _ in range(0, 20):
stats = self.c.get_stats(ports = 0)
- if stats['global'][ 'queue_full']>10000:
+            max_queue_full = 100000 if self.is_VM else 10000
+            if stats['global']['queue_full'] > max_queue_full:
assert 0, "Queue is full need to tune the multiplier"
# CPU results are not valid cannot use them
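The new max_queue_full threshold encodes the observation that virtual setups hit queue_full far more often, so they get a 10x larger budget before a sample is rejected. A minimal sketch of the same guard (the stats dict layout follows the get_stats() output used above):

    def check_queue_full(stats, is_vm):
        # VMs are allowed an order of magnitude more queue_full events
        max_queue_full = 100000 if is_vm else 10000
        if stats['global']['queue_full'] > max_queue_full:
            raise AssertionError('Queue is full - need to tune the multiplier')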
diff --git a/scripts/automation/regression/stateless_tests/stl_rx_test.py b/scripts/automation/regression/stateless_tests/stl_rx_test.py
index 4dad712f..8812ac48 100644
--- a/scripts/automation/regression/stateless_tests/stl_rx_test.py
+++ b/scripts/automation/regression/stateless_tests/stl_rx_test.py
@@ -52,7 +52,7 @@ class STLRX_Test(CStlGeneral_Test):
'allow_packets_drop_num': 1, # allow 1 pkt drop
},
- 'librte_pmd_mlx5': {
+ 'net_mlx5': {
'rate_percent': 80,
'total_pkts': 1000,
'rate_latency': 1,
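The key rename tracks DPDK's move to net_-prefixed driver names (librte_pmd_mlx5 became net_mlx5), so the per-driver overrides keep matching the name the NIC driver reports. A hedged sketch of how such a per-driver table is typically consulted (the lookup helper and defaults argument are assumptions, not the test's actual code):

    DRIVER_OVERRIDES = {
        'net_mlx5': {'rate_percent': 80, 'total_pkts': 1000, 'rate_latency': 1},
    }

    def params_for(driver, defaults):
        # fall back to the generic limits when the driver has no special entry
        merged = dict(defaults)
        merged.update(DRIVER_OVERRIDES.get(driver, {}))
        return merged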