From 7235003e49a22e044aa8da18006ae46f7df9e2cf Mon Sep 17 00:00:00 2001
From: imarom
Date: Tue, 25 Oct 2016 12:47:47 +0300
Subject: added performance test (starting with trex09)

Signed-off-by: imarom
---
 .../regression/setups/trex09/benchmark.yaml        |  51 ++++
 .../stateless_tests/stl_performance_test.py        | 332 +++++++++++++++++++++
 .../stl/trex_stl_lib/trex_stl_stats.py             |   2 +-
 3 files changed, 384 insertions(+), 1 deletion(-)
 create mode 100644 scripts/automation/regression/stateless_tests/stl_performance_test.py

(limited to 'scripts/automation')

diff --git a/scripts/automation/regression/setups/trex09/benchmark.yaml b/scripts/automation/regression/setups/trex09/benchmark.yaml
index 416bfa68..ed51a46c 100644
--- a/scripts/automation/regression/setups/trex09/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex09/benchmark.yaml
@@ -181,3 +181,54 @@ test_CPU_benchmark:
     bw_per_core : 1
 
+# performance tests
+
+test_performance_vm_single_cpu:
+    cfg:
+        mult                 : "90%"
+        mpps_per_core_golden :
+            min: 16.4
+            max: 17.3
+
+
+test_performance_vm_single_cpu_cached:
+    cfg:
+        mult                 : "90%"
+        mpps_per_core_golden :
+            min: 30.5
+            max: 31.2
+
+
+
+test_performance_syn_attack_single_cpu:
+    cfg:
+        mult                 : "90%"
+        mpps_per_core_golden :
+            min: 13.8
+            max: 14.5
+
+test_performance_vm_multi_cpus:
+    cfg:
+        core_count           : 3
+        mult                 : "90%"
+        mpps_per_core_golden :
+            min: 15.9
+            max: 16.5
+
+
+test_performance_vm_multi_cpus_cached:
+    cfg:
+        core_count           : 3
+        mult                 : "90%"
+        mpps_per_core_golden :
+            min: 29.6
+            max: 30.5
+
+test_performance_syn_attack_multi_cpus:
+    cfg:
+        core_count           : 3
+        mult                 : "90%"
+        mpps_per_core_golden :
+            min: 13.0
+            max: 13.8
+
diff --git a/scripts/automation/regression/stateless_tests/stl_performance_test.py b/scripts/automation/regression/stateless_tests/stl_performance_test.py
new file mode 100644
index 00000000..b17eba96
--- /dev/null
+++ b/scripts/automation/regression/stateless_tests/stl_performance_test.py
@@ -0,0 +1,332 @@
+import os
+from .stl_general_test import
CStlGeneral_Test, CTRexScenario +from trex_stl_lib.api import * + +def avg (values): + return (sum(values) / float(len(values))) + +# performance report object +class PerformanceReport(object): + GOLDEN_NORMAL = 1 + GOLDEN_FAIL = 2 + GOLDEN_BETTER = 3 + + def __init__ (self, + scenario, + machine_name, + core_count, + avg_cpu, + avg_gbps, + avg_mpps, + avg_gbps_per_core, + avg_mpps_per_core, + ): + + self.scenario = scenario + self.machine_name = machine_name + self.core_count = core_count + self.avg_cpu = avg_cpu + self.avg_gbps = avg_gbps + self.avg_mpps = avg_mpps + self.avg_gbps_per_core = avg_gbps_per_core + self.avg_mpps_per_core = avg_mpps_per_core + + def show (self): + + print("\n") + print("scenario: {0}".format(self.scenario)) + print("machine name: {0}".format(self.machine_name)) + print("DP core count: {0}".format(self.core_count)) + print("average CPU: {0}".format(self.avg_cpu)) + print("average Gbps: {0}".format(self.avg_gbps)) + print("average Mpps: {0}".format(self.avg_mpps)) + print("average pkt size (bytes): {0}".format( (self.avg_gbps * 1000 / 8) / self.avg_mpps)) + print("average Gbps per core (at 100% CPU): {0}".format(self.avg_gbps_per_core)) + print("average Mpps per core (at 100% CPU): {0}".format(self.avg_mpps_per_core)) + + + def check_golden (self, golden_mpps): + if self.avg_mpps_per_core < golden_mpps['min']: + return self.GOLDEN_FAIL + + if self.avg_mpps_per_core > golden_mpps['max']: + return self.GOLDEN_BETTER + + return self.GOLDEN_NORMAL + + + +class STLPerformance_Test(CStlGeneral_Test): + """Tests for stateless client""" + + def setUp(self): + CStlGeneral_Test.setUp(self) + + self.c = CTRexScenario.stl_trex + self.c.connect() + self.c.reset() + + + + def tearDown (self): + self.c.disconnect() + CStlGeneral_Test.tearDown(self) + + + def build_perf_profile_vm (self, pkt_size, cache_size = None): + size = pkt_size - 4; # HW will add 4 bytes ethernet FCS + src_ip = '16.0.0.1' + dst_ip = '48.0.0.1' + + base_pkt = 
Ether()/IP(src=src_ip,dst=dst_ip)/UDP(dport=12,sport=1025) + pad = max(0, size - len(base_pkt)) * 'x' + + vm = STLScVmRaw( [ STLVmFlowVar ( "ip_src", min_value="10.0.0.1", max_value="10.0.0.255", size=4, step=1,op="inc"), + STLVmWrFlowVar (fv_name="ip_src", pkt_offset= "IP.src" ), + STLVmFixIpv4(offset = "IP") + ], + cache_size = cache_size + ); + + pkt = STLPktBuilder(pkt = base_pkt/pad, vm = vm) + return STLStream(packet = pkt, mode = STLTXCont()) + + + def build_perf_profile_syn_attack (self, pkt_size): + size = pkt_size - 4; # HW will add 4 bytes ethernet FCS + + # TCP SYN + base_pkt = Ether()/IP(dst="48.0.0.1")/TCP(dport=80,flags="S") + pad = max(0, size - len(base_pkt)) * 'x' + + # vm + vm = STLScVmRaw( [ STLVmFlowVar(name="ip_src", + min_value="16.0.0.0", + max_value="18.0.0.254", + size=4, op="random"), + + STLVmFlowVar(name="src_port", + min_value=1025, + max_value=65000, + size=2, op="random"), + + STLVmWrFlowVar(fv_name="ip_src", pkt_offset= "IP.src" ), + + STLVmFixIpv4(offset = "IP"), # fix checksum + + STLVmWrFlowVar(fv_name="src_port", + pkt_offset= "TCP.sport") # fix udp len + + ] + ) + + pkt = STLPktBuilder(pkt = base_pkt, + vm = vm) + + return STLStream(packet = pkt, + random_seed = 0x1234,# can be remove. 
will give the same random value any run + mode = STLTXCont()) + + + + # single CPU, VM, no cache, 64 bytes + def test_performance_vm_single_cpu (self): + setup_cfg = self.get_benchmark_param('cfg') + scenario_cfg = {} + + scenario_cfg['name'] = "VM - 64 bytes, single CPU" + scenario_cfg['streams'] = self.build_perf_profile_vm(64) + scenario_cfg['core_count'] = 1 + + scenario_cfg['mult'] = setup_cfg['mult'] + scenario_cfg['mpps_per_core_golden'] = setup_cfg['mpps_per_core_golden'] + + + + self.execute_single_scenario(scenario_cfg) + + + # single CPU, VM, cached, 64 bytes + def test_performance_vm_single_cpu_cached (self): + setup_cfg = self.get_benchmark_param('cfg') + scenario_cfg = {} + + scenario_cfg['name'] = "VM - 64 bytes, single CPU, cache size 1024" + scenario_cfg['streams'] = self.build_perf_profile_vm(64, cache_size = 1024) + scenario_cfg['core_count'] = 1 + + scenario_cfg['mult'] = setup_cfg['mult'] + scenario_cfg['mpps_per_core_golden'] = setup_cfg['mpps_per_core_golden'] + + self.execute_single_scenario(scenario_cfg) + + + # single CPU, syn attack, 64 bytes + def test_performance_syn_attack_single_cpu (self): + setup_cfg = self.get_benchmark_param('cfg') + scenario_cfg = {} + + scenario_cfg['name'] = "syn attack - 64 bytes, single CPU" + scenario_cfg['streams'] = self.build_perf_profile_syn_attack(64) + scenario_cfg['core_count'] = 1 + + scenario_cfg['mult'] = setup_cfg['mult'] + scenario_cfg['mpps_per_core_golden'] = setup_cfg['mpps_per_core_golden'] + + self.execute_single_scenario(scenario_cfg) + + + # two CPUs, VM, no cache, 64 bytes + def test_performance_vm_multi_cpus (self): + setup_cfg = self.get_benchmark_param('cfg') + scenario_cfg = {} + + scenario_cfg['name'] = "VM - 64 bytes, two CPUs" + scenario_cfg['streams'] = self.build_perf_profile_vm(64) + + scenario_cfg['core_count'] = setup_cfg['core_count'] + scenario_cfg['mult'] = setup_cfg['mult'] + scenario_cfg['mpps_per_core_golden'] = setup_cfg['mpps_per_core_golden'] + + 
self.execute_single_scenario(scenario_cfg) + + + + # two CPUs, VM, cached, 64 bytes + def test_performance_vm_multi_cpus_cached (self): + setup_cfg = self.get_benchmark_param('cfg') + scenario_cfg = {} + + scenario_cfg['name'] = "VM - 64 bytes, single CPU, cache size 1024" + scenario_cfg['streams'] = self.build_perf_profile_vm(64, cache_size = 1024) + + + scenario_cfg['core_count'] = setup_cfg['core_count'] + scenario_cfg['mult'] = setup_cfg['mult'] + scenario_cfg['mpps_per_core_golden'] = setup_cfg['mpps_per_core_golden'] + + self.execute_single_scenario(scenario_cfg) + + + # two CPUs, syn attack, 64 bytes + def test_performance_syn_attack_multi_cpus (self): + setup_cfg = self.get_benchmark_param('cfg') + scenario_cfg = {} + + scenario_cfg['name'] = "syn attack - 64 bytes, two CPUs" + scenario_cfg['streams'] = self.build_perf_profile_syn_attack(64) + + scenario_cfg['core_count'] = setup_cfg['core_count'] + scenario_cfg['mult'] = setup_cfg['mult'] + scenario_cfg['mpps_per_core_golden'] = setup_cfg['mpps_per_core_golden'] + + self.execute_single_scenario(scenario_cfg) + + + +############################################# test's infra functions ########################################### + + def execute_single_scenario (self, scenario_cfg, iterations = 4): + golden = scenario_cfg['mpps_per_core_golden'] + + + for i in range(iterations, -1, -1): + report = self.execute_single_scenario_iteration(scenario_cfg) + rc = report.check_golden(golden) + + if rc == PerformanceReport.GOLDEN_NORMAL: + return + + if rc == PerformanceReport.GOLDEN_BETTER: + return + + print("\n*** Measured Mpps per core '{0}' is lower than expected golden '{1} - re-running scenario...{2} attempts left".format(report.avg_mpps_per_core, scenario_cfg['mpps_per_core_golden'], i)) + + assert 0, "performance failure" + + + + + def execute_single_scenario_iteration (self, scenario_cfg): + + print("\nExecuting performance scenario: '{0}'\n".format(scenario_cfg['name'])) + + self.c.reset(ports = [0]) + 
self.c.add_streams(ports = [0], streams = scenario_cfg['streams']) + + # use one core + core_mask = (2 ** scenario_cfg['core_count']) - 1 + self.c.start(ports = [0], mult = scenario_cfg['mult'], core_mask = [core_mask]) + + # stablize + print("Step 1 - waiting for stabilization... (10 seconds)") + for _ in range(10): + time.sleep(1) + sys.stdout.write('.') + sys.stdout.flush() + + print("\n") + + samples = {'cpu' : [], 'bps': [], 'pps': []} + + # let the server gather samples + print("Step 2 - Waiting for samples... (40 seconds)") + + for i in range(0, 1): + + # sample bps/pps + for _ in range(0, 20): + stats = self.c.get_stats(ports = 0) + samples['bps'].append(stats[0]['tx_bps']) + samples['pps'].append(stats[0]['tx_pps']) + time.sleep(1) + sys.stdout.write('.') + sys.stdout.flush() + + # sample CPU per core + rc = self.c._transmit('get_utilization') + if not rc: + raise Exception(rc) + + data = rc.data()['cpu'] + # filter + data = [s for s in data if s['ports'][0] == 0] + + assert len(data) == scenario_cfg['core_count'] , "sampling info does not match core count" + + for s in data: + samples['cpu'] += s['history'] + + + stats = self.c.get_stats(ports = 0) + self.c.stop(ports = [0]) + + + + avg_values = {k:avg(v) for k, v in samples.iteritems()} + avg_cpu = avg_values['cpu'] * scenario_cfg['core_count'] + avg_gbps = avg_values['bps'] / 1e9 + avg_mpps = avg_values['pps'] / 1e6 + + avg_gbps_per_core = avg_gbps * (100.0 / avg_cpu) + avg_mpps_per_core = avg_mpps * (100.0 / avg_cpu) + + report = PerformanceReport(scenario = scenario_cfg['name'], + machine_name = os.uname()[1], + core_count = scenario_cfg['core_count'], + avg_cpu = avg_cpu, + avg_gbps = avg_gbps, + avg_mpps = avg_mpps, + avg_gbps_per_core = avg_gbps_per_core, + avg_mpps_per_core = avg_mpps_per_core) + + + report.show() + + print("") + golden = scenario_cfg['mpps_per_core_golden'] + print("golden Mpps per core (at 100% CPU): min: {0}, max {1}".format(golden['min'], golden['max'])) + + + return report + 
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py
index 48854b76..5d9cdcaa 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py
@@ -902,7 +902,7 @@ class CGlobalStats(CTRexStats):
             # absolute
             stats['cpu_util'] = self.get("m_cpu_util")
             stats['rx_cpu_util'] = self.get("m_rx_cpu_util")
-            stats['bw_per_core'] = self.get("m_bw_per_core")
+            stats['bw_per_core'] = self.get("m_bw_per_core")
             stats['tx_bps'] = self.get("m_tx_bps")
             stats['tx_pps'] = self.get("m_tx_pps")
--
cgit 1.2.3-korg


From 3477703325cafe2a629e60c60e619dc576c16d62 Mon Sep 17 00:00:00 2001
From: imarom
Date: Tue, 25 Oct 2016 15:02:41 +0200
Subject: performance test did disconnect after end of tests

Signed-off-by: imarom
---
 scripts/automation/regression/stateless_tests/stl_performance_test.py | 1 -
 1 file changed, 1 deletion(-)

(limited to 'scripts/automation')

diff --git a/scripts/automation/regression/stateless_tests/stl_performance_test.py b/scripts/automation/regression/stateless_tests/stl_performance_test.py
index b17eba96..a77c5c27 100644
--- a/scripts/automation/regression/stateless_tests/stl_performance_test.py
+++ b/scripts/automation/regression/stateless_tests/stl_performance_test.py
@@ -69,7 +69,6 @@ class STLPerformance_Test(CStlGeneral_Test):
 
 
     def tearDown (self):
-        self.c.disconnect()
         CStlGeneral_Test.tearDown(self)
--
cgit 1.2.3-korg