path: root/doc
author     itraviv <itraviv@cisco.com>    2016-12-11 17:35:37 +0200
committer  itraviv <itraviv@cisco.com>    2016-12-11 17:35:52 +0200
commit     d83a3f21745d6245f1cc2040e3e9cd704c7aadb0 (patch)
tree       83a208b6455b19a6dcbe5d999cd67215921737a5 /doc
parent     296951010f94dbadba90d06ee4f56e1b221afd67 (diff)
changed trex_analytics to support detailed mode: it now generates a detailed table for test runs, and machine information per setup was added to the asciidoc. To use detailed mode, run ./b build --performance-detailed from the doc folder on trex-05
Signed-off-by: itraviv <itraviv@cisco.com>
Diffstat (limited to 'doc')
-rwxr-xr-x  doc/AnalyticsConnect.py       182
-rwxr-xr-x  doc/AnalyticsWebReport.py       9
-rwxr-xr-x  doc/TRexDataAnalysis.py        63
-rwxr-xr-x  doc/trex_analytics.asciidoc    26
-rwxr-xr-x  doc/ws_main.py                  8
5 files changed, 164 insertions, 124 deletions
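
For orientation, here is a hedged sketch of the end-to-end flow this commit extends; the real driver is AnalyticsWebReport.main (diff below), and the dates here are only examples:

    # Hedged sketch of the detailed-mode data flow; names come from the diffs below.
    from AnalyticsConnect import initialize_analyticsreporting, get_report, export_to_tuples
    from TRexDataAnalysis import create_all_data

    start_date, end_date = '2016-11-27', '2016-12-11'    # example range
    analytics = initialize_analyticsreporting()          # GA Reporting API v4 auth
    response = get_report(analytics, start_date, end_date)
    df, setups = export_to_tuples(response)              # {setup: {test: [result tuples]}}
    create_all_data(df, setups, start_date, end_date,
                    save_path='build/images', add_stats='yes',
                    detailed_test_stats='yes')           # also writes _detailed_table.csv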
diff --git a/doc/AnalyticsConnect.py b/doc/AnalyticsConnect.py
index 10619532..bb473c52 100755
--- a/doc/AnalyticsConnect.py
+++ b/doc/AnalyticsConnect.py
@@ -21,112 +21,117 @@ VIEW_ID = '120207451'
def initialize_analyticsreporting():
- """Initializes an analyticsreporting service object.
-
- Returns:
- analytics an authorized analyticsreporting service object.
- """
-
- credentials = ServiceAccountCredentials.from_p12_keyfile(
- SERVICE_ACCOUNT_EMAIL, KEY_FILE_LOCATION, scopes=SCOPES)
-
- http = credentials.authorize(httplib2.Http())
-
- # Build the service object.
- analytics = build('analytics', 'v4', http=http, discoveryServiceUrl=DISCOVERY_URI)
-
- return analytics
-
-
-def get_report(analytics,start_date='2016-11-27',end_date='2016-11-27'):
- # Use the Analytics Service Object to query the Analytics Reporting API V4.
- return analytics.reports().batchGet(
- body={
- 'reportRequests': [
- {
- 'viewId': VIEW_ID,
- 'dateRanges': [{'startDate': start_date, 'endDate': end_date}],
- 'metrics': [{'expression': 'ga:metric1','formattingType':'CURRENCY'},
- {'expression': 'ga:metric2','formattingType':'CURRENCY'},
- {'expression': 'ga:metric3','formattingType':'CURRENCY'},
- {'expression': 'ga:totalEvents'}],
- 'dimensions': [{"name":"ga:eventAction"},{"name": "ga:dimension1"},{"name": "ga:dimension2"},{"name": "ga:dimension3"},{"name": "ga:dimension4"}],
- 'pageSize': 10000
+ """Initializes an analyticsreporting service object.
+
+ Returns:
+ analytics an authorized analyticsreporting service object.
+ """
+
+ credentials = ServiceAccountCredentials.from_p12_keyfile(
+ SERVICE_ACCOUNT_EMAIL, KEY_FILE_LOCATION, scopes=SCOPES)
+
+ http = credentials.authorize(httplib2.Http())
+
+ # Build the service object.
+ analytics = build('analytics', 'v4', http=http, discoveryServiceUrl=DISCOVERY_URI)
+
+ return analytics
+
+
+def get_report(analytics, start_date='2016-11-27', end_date='2016-11-27'):
+ # Use the Analytics Service Object to query the Analytics Reporting API V4.
+ return analytics.reports().batchGet(
+ body={
+ 'reportRequests': [
+ {
+ 'viewId': VIEW_ID,
+ 'dateRanges': [{'startDate': start_date, 'endDate': end_date}],
+ 'metrics': [{'expression': 'ga:metric1', 'formattingType': 'CURRENCY'},
+ {'expression': 'ga:metric2', 'formattingType': 'CURRENCY'},
+ {'expression': 'ga:metric3', 'formattingType': 'CURRENCY'},
+ {'expression': 'ga:totalEvents'}],
+ 'dimensions': [{"name": "ga:eventAction"}, {"name": "ga:dimension1"}, {"name": "ga:dimension2"},
+ {"name": "ga:dimension3"},
+ {"name": "ga:date"}, {"name": "ga:hour"}, {"name": "ga:minute"}],
+ 'pageSize': 10000
+ }
+ ]
}
- ]
- }
- ).execute()
+ ).execute()
def print_response(response):
- """Parses and prints the Analytics Reporting API V4 response"""
-
- for report in response.get('reports', []):
- columnHeader = report.get('columnHeader', {})
- dimensionHeaders = columnHeader.get('dimensions', [])
- metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', [])
- rows = report.get('data', {}).get('rows', [])
+ """Parses and prints the Analytics Reporting API V4 response"""
- for row in rows:
- dimensions = row.get('dimensions', [])
- dateRangeValues = row.get('metrics', [])
+ for report in response.get('reports', []):
+ columnHeader = report.get('columnHeader', {})
+ dimensionHeaders = columnHeader.get('dimensions', [])
+ metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', [])
+ rows = report.get('data', {}).get('rows', [])
- for header, dimension in zip(dimensionHeaders, dimensions):
- print header + ': ' + dimension
+ for row in rows:
+ dimensions = row.get('dimensions', [])
+ dateRangeValues = row.get('metrics', [])
- for i, values in enumerate(dateRangeValues):
- print 'Date range (' + str(i) + ')'
- for metricHeader, value in zip(metricHeaders, values.get('values')):
- print metricHeader.get('name') + ': ' + value
+ for header, dimension in zip(dimensionHeaders, dimensions):
+ print header + ': ' + dimension
+ for i, values in enumerate(dateRangeValues):
+ print 'Date range (' + str(i) + ')'
+ for metricHeader, value in zip(metricHeaders, values.get('values')):
+ print metricHeader.get('name') + ': ' + value
def export_to_tuples(response):
- # counter = 0
- setups = set()
- df = {}
- for report in response.get('reports', []):
- rows = report.get('data', {}).get('rows', [])
- for row in rows:
- data = []
- dimensions = row.get('dimensions', [])
- # print 'this is dimensions'
- # print dimensions
- data.append(dimensions[1]) #test name
- data.append(dimensions[2]) # state
- # data.append(dimensions[3]) # setup
- data.append(dimensions[4]) # test_type
- dateRangeValues = row.get('metrics', [])
- value = dateRangeValues[0].get('values',[])[0] #MPPS
- golden_min = dateRangeValues[0].get('values',[])[1] #golden min
- golden_max = dateRangeValues[0].get('values',[])[2] #golden max
- data.append(value)
- data.append(golden_min)
- data.append(golden_max)
- if dimensions[3] in setups:
- if dimensions[1] in df[dimensions[3]]:
- df[dimensions[3]][dimensions[1]].append(tuple(data))
- else:
- df[dimensions[3]][dimensions[1]] = [tuple(data)]
- else:
- df[dimensions[3]] = {}
- df[dimensions[3]][dimensions[1]] = [tuple(data)]
- setups.add(dimensions[3])
- # print 'counter is: %d' % counter
- return df, setups
+ # counter = 0
+ setups = set()
+ df = {}
+ for report in response.get('reports', []):
+ rows = report.get('data', {}).get('rows', [])
+ for row in rows:
+ data = []
+ dimensions = row.get('dimensions', [])
+ # print 'this is dimensions'
+ # print dimensions
+ data.append(dimensions[1]) # test name
+ data.append(dimensions[2]) # state
+ # data.append(dimensions[3]) # setup
+ data.append(dimensions[4]) # date in YYYYMMDD format
+ data.append(dimensions[5]) # hour
+ data.append(dimensions[6]) # minute
+ dateRangeValues = row.get('metrics', [])
+ value = dateRangeValues[0].get('values', [])[0] # MPPS
+ golden_min = dateRangeValues[0].get('values', [])[1] # golden min
+ golden_max = dateRangeValues[0].get('values', [])[2] # golden max
+ data.append(value)
+ # counter += 1
+ data.append(golden_min)
+ data.append(golden_max)
+ data.append(dimensions[0]) # build id
+ if dimensions[3] in setups:
+ if dimensions[1] in df[dimensions[3]]:
+ df[dimensions[3]][dimensions[1]].append(tuple(data))
+ else:
+ df[dimensions[3]][dimensions[1]] = [tuple(data)]
+ else:
+ df[dimensions[3]] = {}
+ df[dimensions[3]][dimensions[1]] = [tuple(data)]
+ setups.add(dimensions[3])
+ # print 'counter is: %d' % counter
+ return df, setups
def main():
- analytics = initialize_analyticsreporting()
- response = get_report(analytics)
- df, setups = export_to_tuples(response)
- # pprint(df)
- return df,setups
+ analytics = initialize_analyticsreporting()
+ response = get_report(analytics)
+ df, setups = export_to_tuples(response)
+ # pprint(df)
+ return df, setups
+
if __name__ == '__main__':
main()
-
"""
response structure (when fetched with "export to tuples"):
@@ -188,4 +193,3 @@ response structure (when fetched with "export to tuples"):
"""
-
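
Taken together, the appends above build a 9-field tuple per result row, which TRexDataAnalysis.py later indexes by position. A hedged sketch of the returned structure (the setup, test, and state values are illustrative, not from the source):

    # Illustrative shape of the (df, setups) pair returned by export_to_tuples().
    df_example = {
        'trex07': {                    # keyed by setup      (ga:dimension3)
            'some_test': [             # keyed by test name  (ga:dimension1)
                ('some_test',          # [0] test name       (ga:dimension1)
                 'stateless',          # [1] state           (ga:dimension2)
                 '20161211',           # [2] date, YYYYMMDD  (ga:date)
                 '17', '35',           # [3],[4] hour, minute
                 '9.95',               # [5] MPPS result     (ga:metric1)
                 '9.0', '11.0',        # [6],[7] golden min / max (ga:metric2/3)
                 'build-1234'),        # [8] build id        (ga:eventAction)
            ],
        },
    }
    setups_example = {'trex07'}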
diff --git a/doc/AnalyticsWebReport.py b/doc/AnalyticsWebReport.py
index bd4a9a2b..1806cab9 100755
--- a/doc/AnalyticsWebReport.py
+++ b/doc/AnalyticsWebReport.py
@@ -6,7 +6,7 @@ import time
import datetime
-def main(verbose = False):
+def main(verbose=False, detailed_test_stats=''):
if verbose:
print('Retrieving data from Google Analytics')
analytics = ac.initialize_analyticsreporting()
@@ -18,10 +18,13 @@ def main(verbose = False):
dest_path = os.path.join(os.getcwd(), 'build', 'images')
if verbose:
print('Saving data to %s' % dest_path)
- tr.create_all_data(ga_all_data_dict, setups, start_date, current_date, save_path = dest_path,
- add_stats='yes')
+ if detailed_test_stats:
+ print('generating detailed table for test results')
+ tr.create_all_data(ga_all_data_dict, setups, start_date, current_date, save_path=dest_path,
+ add_stats='yes', detailed_test_stats=detailed_test_stats)
if verbose:
print('Done without errors.')
+
if __name__ == "__main__":
main()
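
A minimal usage sketch of the updated entry point; the 'yes' sentinel mirrors what ws_main.py passes below, and only its truthiness is checked:

    import AnalyticsWebReport as analytics

    analytics.main(verbose=True)                             # previous behaviour: plots + trend stats
    analytics.main(verbose=True, detailed_test_stats='yes')  # additionally emits _detailed_table.csv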
diff --git a/doc/TRexDataAnalysis.py b/doc/TRexDataAnalysis.py
index fb855a16..ed674262 100755
--- a/doc/TRexDataAnalysis.py
+++ b/doc/TRexDataAnalysis.py
@@ -2,34 +2,46 @@
import pandas as pd
import numpy as np
import matplotlib
+
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import os
+import time
-def generate_dframe_for_test(test_name, test_data):
+def generate_dframe_for_test(setup_name, test_name, test_data):
test_results = []
+ test_dates = []
+ test_build_ids = []
test_mins = set()
test_maxs = set()
for query in test_data:
- test_results.append(float(query[3]))
- test_mins.add(float(query[4]))
- test_maxs.add(float(query[5]))
+ test_results.append(float(query[5]))
+ date_formatted = time.strftime("%d-%m-%Y", time.strptime(query[2], "%Y%m%d"))
+ time_of_res = date_formatted + '-' + query[3] + ':' + query[4]
+ test_dates.append(time_of_res)
+ test_build_ids.append(query[8])
+ test_mins.add(float(query[6]))
+ test_maxs.add(float(query[7]))
df = pd.DataFrame({test_name: test_results})
+ df_detailed = pd.DataFrame({(test_name + ' Results'): test_results, (test_name + ' Date'): test_dates,
+ "Setup": ([setup_name] * len(test_results)), "Build Id": test_build_ids})
stats = tuple([float(df.mean()), min(test_mins), max(test_maxs)]) # stats = (avg_mpps,min,max)
- return df, stats
+ return df, stats, df_detailed
def generate_dframe_arr_and_stats_of_tests_per_setup(date, setup_name, setup_dict):
dframe_arr_trend = []
stats_arr = []
dframe_arr_latest = []
+ dframe_arr_detailed = []
test_names = setup_dict.keys()
for test in test_names:
- df, stats = generate_dframe_for_test(test, setup_dict[test])
+ df, stats, df_detailed = generate_dframe_for_test(setup_name, test, setup_dict[test])
+ dframe_arr_detailed.append(df_detailed)
dframe_arr_trend.append(df)
stats_arr.append(stats)
- df_latest = float(setup_dict[test][-1][3])
+ df_latest = float(setup_dict[test][-1][6])
dframe_arr_latest.append(df_latest)
dframe_arr_latest = pd.DataFrame({'Date': [date] * len(dframe_arr_latest),
'Setup': [setup_name],
@@ -38,7 +50,7 @@ def generate_dframe_arr_and_stats_of_tests_per_setup(date, setup_name, setup_dic
index=range(1, len(dframe_arr_latest) + 1))
stats_df = pd.DataFrame(stats_arr, index=setup_dict.keys(), columns=['Avg MPPS', 'Golden Min', 'Golden Max'])
stats_df.index.name = 'Test Name'
- return dframe_arr_trend, stats_df, dframe_arr_latest
+ return dframe_arr_trend, stats_df, dframe_arr_latest, dframe_arr_detailed
def create_plot_for_dframe_arr(dframe_arr, setup_name, start_date, end_date, show='no', save_path='',
@@ -78,20 +90,43 @@ def create_bar_plot_for_latest_runs_per_setup(dframe_all_tests_latest, setup_nam
plt.show()
-def create_all_data_per_setup(setup_dict, setup_name, start_date, end_date, show='no', save_path='', add_stats=''):
- dframe_arr, stats_arr, dframe_latest_arr = generate_dframe_arr_and_stats_of_tests_per_setup(end_date, setup_name,
- setup_dict)
+def create_all_data_per_setup(setup_dict, setup_name, start_date, end_date, show='no', save_path='', add_stats='',
+ detailed_test_stats=''):
+ dframe_arr, stats_arr, dframe_latest_arr, dframe_detailed = generate_dframe_arr_and_stats_of_tests_per_setup(
+ end_date, setup_name,
+ setup_dict)
+ if detailed_test_stats:
+ detailed_table = create_detailed_table(dframe_detailed, setup_name, save_path)
+ else:
+ detailed_table = []
create_bar_plot_for_latest_runs_per_setup(dframe_latest_arr, setup_name, show=show, save_path=save_path)
create_plot_for_dframe_arr(dframe_arr, setup_name, start_date, end_date, show, save_path)
if add_stats:
stats_arr = stats_arr.round(2)
stats_arr.to_csv(os.path.join(save_path, setup_name + '_trend_stats.csv'))
plt.close('all')
+ return detailed_table
+
+
+def create_detailed_table(dframe_arr_detailed, setup_name, save_path=''):
+ result = reduce(lambda x, y: pd.merge(x, y, on=('Build Id', 'Setup')), dframe_arr_detailed)
+ return result
-def create_all_data(ga_data, setup_names, start_date, end_date, save_path='', add_stats=''):
+# WARNING: if the file _detailed_table.csv already exists, this script deletes it, to prevent stale data
+# from accumulating, since new results are appended to the file
+def create_all_data(ga_data, setup_names, start_date, end_date, save_path='', add_stats='', detailed_test_stats=''):
+ total_detailed_data = []
+ if detailed_test_stats:
+ if os.path.exists(os.path.join(save_path, '_detailed_table.csv')):
+ os.remove(os.path.join(save_path, '_detailed_table.csv'))
for setup_name in setup_names:
if setup_name == 'trex11':
continue
- create_all_data_per_setup(ga_data[setup_name], setup_name, start_date, end_date, show='no', save_path=save_path,
- add_stats=add_stats)
+ detailed_setup_data = create_all_data_per_setup(ga_data[setup_name], setup_name, start_date, end_date,
+ show='no', save_path=save_path,
+ add_stats=add_stats, detailed_test_stats=detailed_test_stats)
+ total_detailed_data.append(detailed_setup_data)
+ if detailed_test_stats:
+ total_detailed_dframe = pd.DataFrame().append(total_detailed_data)
+ total_detailed_dframe.to_csv(os.path.join(save_path, '_detailed_table.csv'))
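
create_detailed_table above reduces the per-test frames with pairwise merges on ('Build Id', 'Setup'), so each build ends up as one wide row. A toy illustration with invented values:

    import pandas as pd
    from functools import reduce  # builtin in Python 2; imported so this also runs on Python 3

    t1 = pd.DataFrame({'Build Id': ['b100', 'b101'], 'Setup': ['trex07', 'trex07'],
                       'testA Results': [9.9, 10.1], 'testA Date': ['10-12-2016-17:35', '11-12-2016-17:35']})
    t2 = pd.DataFrame({'Build Id': ['b100', 'b101'], 'Setup': ['trex07', 'trex07'],
                       'testB Results': [4.2, 4.4], 'testB Date': ['10-12-2016-17:40', '11-12-2016-17:40']})

    merged = reduce(lambda x, y: pd.merge(x, y, on=('Build Id', 'Setup')), [t1, t2])
    print(merged)  # one row per (Build Id, Setup), with each test's Results/Date columns side by side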
diff --git a/doc/trex_analytics.asciidoc b/doc/trex_analytics.asciidoc
index 35c3a3e4..5f4cbfe0 100755
--- a/doc/trex_analytics.asciidoc
+++ b/doc/trex_analytics.asciidoc
@@ -26,11 +26,9 @@ endif::backend-xhtml11[]
.Setup Details
[options='header',halign='center',cols="1,5"]
|=================
-| Server: | UCSC-C240-M4SX
| CPU: | 2 x Intel(R) Xeon(R) CPU E5-2667 v3 @ 3.20GHz
-| RAM: | 65536 @ 2133 MHz
-| NICs: | 2 x Intel Corporation Ethernet Controller X710
-| OS: | Fedora 18
+| NICs: | 1 x 100G (2 interfaces) Mellanox ConnectX-4
+| OS: | CentOS 7
|=================
image:images/trex07_latest_test_runs.png[title="trex07 test runs",align="left",width={p_width}, link="images/trex07_latest_test_runs.png"]
@@ -52,10 +50,8 @@ include::build/images/trex07_trend_stats.csv[]
.Setup Details
[options='header',halign='center',cols="1,5"]
|=================
-| Server: | UCSC-C240-M4SX
| CPU: | 2 x Intel(R) Xeon(R) CPU E5-2667 v3 @ 3.20GHz
-| RAM: | 65536 @ 2133 MHz
-| NICs: | 2 x Intel Corporation Ethernet Controller X710
+| NICs: | 2 x 40G (4 interfaces) Intel XL710
| OS: | Fedora 18
|=================
@@ -79,9 +75,7 @@ include::build/images/trex08_trend_stats.csv[]
.Setup Details
[options='header',halign='center',cols="1,5"]
|=================
-| Server: | UCSC-C240-M4SX
| CPU: | 2 x Intel(R) Xeon(R) CPU E5-2667 v3 @ 3.20GHz
-| RAM: | 65536 @ 2133 MHz
| NICs: | 2x10G (X710) (8 interfaces)
| OS: | Fedora 18
|=================
@@ -105,11 +99,12 @@ include::build/images/trex09_trend_stats.csv[]
//////////////////////////////////////////////////////////
== Setup: TRex11
.Setup Details
-[options='header',halign='center']
-|====================================================================================================================
-|Name |OS |NICs |Routers
-| trex11 | Fedora 18| 2x10G (X710) (8 interfaces), 1x10G (2 interfaces), 1x1G (4 interfaces) | Loopback on X710 + ASA 5520 + ASA 5512 + ASA 5585-ssp10
-|====================================================================================================================
+[options='header',halign='center',cols="1,5"]
+|=================
+| CPU: | 2 x Intel(R) Xeon(R) CPU E5-2667 v3 @ 3.20GHz
+| NICs: | 1x40G (2 interfaces) Cisco VIC
+| OS: | Ubuntu 14
+|=================
image:images/trex11_latest_test_runs.png[title="trex11 test runs",align="left",width={p_width}, link="images/trex11_latest_test_runs.png"]
@@ -132,8 +127,7 @@ include::build/images/trex11_trend_stats.csv[]
[options='header',halign='center',cols="1,5"]
|=================
| CPU: | 2 x Intel(R) Xeon(R) CPU E5-2650 0 @ 2.00GHz
-| RAM: | 31 Gib
-| NICs: | 2x10G (X710) (4 interfaces)
+| NICs: | 2x10G (4 interfaces) Intel 82599EB
| OS: | Fedora 18
|=================
diff --git a/doc/ws_main.py b/doc/ws_main.py
index 3fdc2be1..54975d02 100755
--- a/doc/ws_main.py
+++ b/doc/ws_main.py
@@ -251,6 +251,7 @@ def scansize(self):
def options(opt):
opt.add_option('--exe', action='store_true', default=False, help='Execute the program after it is compiled')
opt.add_option('--performance', action='store_true', help='Build a performance report based on google analytics')
+ opt.add_option('--performance-detailed', action='store_true', help='Print detailed test results (date, time, build id and results) to a csv file named _detailed_table.csv.')
def configure(conf):
search_path = '~/.local/bin /usr/local/bin/ /usr/bin'
@@ -891,7 +892,10 @@ def build_cp(bld,dir,root,callback):
def create_analytic_report(task):
try:
import AnalyticsWebReport as analytics
- analytics.main(verbose = Logs.verbose)
+ if task.generator.bld.options.performance_detailed:
+ analytics.main(verbose = Logs.verbose, detailed_test_stats='yes')
+ else:
+ analytics.main(verbose = Logs.verbose)
except Exception as e:
raise Exception('Error importing or using AnalyticsWebReport script: %s' % e)
@@ -921,7 +925,7 @@ def build(bld):
bld(rule=my_copy, target=x)
bld.add_group()
- if bld.options.performance:
+ if bld.options.performance or bld.options.performance_detailed:
bld(rule=create_analytic_report)
bld.add_group()
bld(rule=convert_to_html_toc_book, source='trex_analytics.asciidoc waf.css', target='trex_analytics.html',scan=ascii_doc_scan);
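
With both hooks in place, either flag triggers create_analytic_report; per the commit message, detailed mode is run from the doc folder:

    ./b build --performance            # trend report, as before
    ./b build --performance-detailed   # additionally writes build/images/_detailed_table.csv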