author | 2016-04-28 20:32:44 +0300
---|---
committer | 2016-04-28 20:32:44 +0300
commit | 101c7b8b5f1e7e9eeb4df9d79e68c15cd0d49ef7 (patch)
tree | bcf6aee5b6249467e4c9a82b5dfb77dca5321076
parent | 798efac6db9d057073dae13b818815422ae926cc (diff)
increase accuracy of low values in json report (0.03 etc.)
-rwxr-xr-x | scripts/automation/regression/stateless_tests/stl_benchmark_test.py | 17
-rw-r--r-- | src/main_dpdk.cpp | 11
2 files changed, 19 insertions, 9 deletions
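
The core change replaces the fixed "%.1f" float formatting in the JSON report with scientific notation ("%.3e") whenever the value's magnitude is below 10, so small readings such as 0.03 are no longer rounded down to 0.0. A minimal standalone sketch of that formatting rule (the helper and field names are illustrative, not TRex code):

#include <cstdio>
#include <string>

// Hypothetical helper mirroring the magnitude check added to
// CGlobalStats::get_field() in this commit.
static std::string format_field(const std::string &name, float f) {
    char buff[200];
    if (f >= 10 || f <= -10)
        snprintf(buff, sizeof(buff), "\"%s\":%.1f,", name.c_str(), f);  // large values: one decimal place
    else
        snprintf(buff, sizeof(buff), "\"%s\":%.3e,", name.c_str(), f);  // small values: scientific notation, three decimal digits
    return std::string(buff);
}

int main() {
    std::printf("%s\n", format_field("cpu_util", 0.03f).c_str());  // "cpu_util":3.000e-02,  (old format printed 0.0)
    std::printf("%s\n", format_field("cpu_util", 45.7f).c_str());  // "cpu_util":45.7,
    return 0;
}
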
diff --git a/scripts/automation/regression/stateless_tests/stl_benchmark_test.py b/scripts/automation/regression/stateless_tests/stl_benchmark_test.py
index 258717dc..9bf20579 100755
--- a/scripts/automation/regression/stateless_tests/stl_benchmark_test.py
+++ b/scripts/automation/regression/stateless_tests/stl_benchmark_test.py
@@ -9,8 +9,11 @@ class STLBenchmark_Test(CStlGeneral_Test):
     """Benchark stateless performance"""

     def test_CPU_benchmark(self):
-        timeout = 30 # max time to wait for stabilization
+        timeout = 60 # max time to wait for stabilization
         stabilize = 5 # ensure stabilization over this period
+        cores = self.configuration.trex['trex_cores']
+        ports = self.stl_trex.get_port_count()
+        print('')

         for profile_bench in self.get_benchmark_param('profiles'):
             cpu_utils = deque([0] * stabilize, maxlen = stabilize)
@@ -27,7 +30,7 @@ class STLBenchmark_Test(CStlGeneral_Test):
             for i in range(timeout + 1):
                 stats = self.stl_trex.get_stats()
                 cpu_utils.append(stats['global']['cpu_util'])
-                if i > stabilize and min(cpu_utils) > max(cpu_utils) * 0.98 - 0.1:
+                if i > stabilize and min(cpu_utils) > max(cpu_utils) * 0.98:
                     break
                 sleep(0.5)

@@ -39,13 +42,13 @@ class STLBenchmark_Test(CStlGeneral_Test):
                 raise Exception('Too much queue_full: %s' % stats['global']['queue_full'])
             if not cpu_utils[-1]:
                 raise Exception('CPU util is zero, last values: %s' % cpu_utils)
-            if not stats['global']['tx_bps']:
-                raise Exception('TX bps is zero: %s' % stats['global']['tx_bps'])
-            bw_per_core = 2 * 2 * 100 * stats['global']['tx_bps'] / (cpu_utils[-1] * self.stl_trex.get_port_count() * 1e6)
-            print('Done (%ss), CPU util: %4g, bw_per_core: %6sMb/core' % (int(time() - start_time), cpu_utils[-1], int(bw_per_core)))
+            bw_per_core = 2 * 2 * (100 / cpu_utils[-1]) * stats['global']['tx_bps'] / (ports * cores * 1e9)
+            print('Done (%ss), CPU util: %4g, bw_per_core: %6sGb/core' % (int(time() - start_time), cpu_utils[-1], round(bw_per_core, 2)))
             # TODO: add check of benchmark based on results from regression
+
+
     def tearDown(self):
         self.stl_trex.reset()
         self.stl_trex.clear_stats()
-
+        CStlGeneral_Test.tearDown(self)

diff --git a/src/main_dpdk.cpp b/src/main_dpdk.cpp
index 1496513d..9a66db51 100644
--- a/src/main_dpdk.cpp
+++ b/src/main_dpdk.cpp
@@ -2335,7 +2335,10 @@ private:

 std::string CGlobalStats::get_field(std::string name,float &f){
     char buff[200];
-    snprintf(buff, sizeof(buff), "\"%s\":%.1f,",name.c_str(),f);
+    if(f >= 10 || f <= -10)
+        snprintf(buff, sizeof(buff), "\"%s\":%.1f,",name.c_str(),f);
+    else
+        snprintf(buff, sizeof(buff), "\"%s\":%.3e,",name.c_str(),f);
     return (std::string(buff));
 }

@@ -2347,7 +2350,10 @@ std::string CGlobalStats::get_field(std::string name,uint64_t &f){

 std::string CGlobalStats::get_field_port(int port,std::string name,float &f){
     char buff[200];
-    snprintf(buff, sizeof(buff), "\"%s-%d\":%.1f,",name.c_str(),port,f);
+    if(f >= 10 || f <= -10)
+        snprintf(buff, sizeof(buff), "\"%s-%d\":%.1f,",name.c_str(),port,f);
+    else
+        snprintf(buff, sizeof(buff), "\"%s-%d\":%.3e,",name.c_str(),port,f);
     return (std::string(buff));
 }

@@ -2442,6 +2448,7 @@ void CGlobalStats::DumpAllPorts(FILE *fd){
     fprintf (fd," Cpu Utilization : %2.1f %% %2.1f Gb/core \n",m_cpu_util,(2*(m_tx_bps/1e9)*100.0/(m_cpu_util*m_threads)));
+    fprintf (fd,"tx_bps: %2.1f, m_cpu_util: %2.5f, m_threads: %2.1f", (double) m_tx_bps, (double) m_cpu_util, (double) m_threads);
     fprintf (fd," Platform_factor : %2.1f \n",m_platform_factor);
     fprintf (fd," Total-Tx : %s ",double_to_human_str(m_tx_bps,"bps",KBYE_1000).c_str());
     if ( CGlobalInfo::is_learn_mode() ) {
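
For reference, the console line touched in DumpAllPorts() derives its Gb/core figure as 2 * (tx_bps / 1e9) * 100 / (cpu_util_percent * threads), and the benchmark test now reports a comparable per-core Gb value from the stats API. A small sketch of that arithmetic with made-up inputs (variable names are illustrative, not TRex identifiers):

#include <cstdio>

int main() {
    // Example inputs only; real values come from the TRex global stats.
    double tx_bps   = 7.5e9;  // total transmit rate in bits per second
    double cpu_util = 45.0;   // data-plane CPU utilization, in percent
    double threads  = 4.0;    // number of worker threads
    // Same expression as the fprintf() in DumpAllPorts(): scale TX to Gb/s,
    // normalize by the utilization percentage, and divide across threads.
    double gb_per_core = 2.0 * (tx_bps / 1e9) * 100.0 / (cpu_util * threads);
    std::printf("%.1f Gb/core\n", gb_per_core);  // prints "8.3 Gb/core" for these inputs
    return 0;
}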