Diffstat (limited to 'doc')
-rwxr-xr-x  doc/AnalyticsConnect.py       182
-rwxr-xr-x  doc/AnalyticsWebReport.py      18
-rwxr-xr-x  doc/TRexDataAnalysis.py       136
-rwxr-xr-x  doc/trex_analytics.asciidoc    75
-rwxr-xr-x  doc/ws_main.py                 16
5 files changed, 424 insertions, 3 deletions
diff --git a/doc/AnalyticsConnect.py b/doc/AnalyticsConnect.py
new file mode 100755
index 00000000..2b603ab1
--- /dev/null
+++ b/doc/AnalyticsConnect.py
@@ -0,0 +1,182 @@
+"""Hello Analytics Reporting API V4."""
+
+import argparse
+
+from apiclient.discovery import build
+from oauth2client.service_account import ServiceAccountCredentials
+
+import httplib2
+from oauth2client import client
+from oauth2client import file
+from oauth2client import tools
+
+from pprint import pprint
+import time
+
+SCOPES = ['https://www.googleapis.com/auth/analytics.readonly']
+DISCOVERY_URI = ('https://analyticsreporting.googleapis.com/$discovery/rest')
+KEY_FILE_LOCATION = '/auto/srg-sce-swinfra-usr/emb/users/itraviv/GoogleAnalytics/GA_ReportingAPI/My Project-da37fc42de8f.p12'
+SERVICE_ACCOUNT_EMAIL = 'trex-cisco@i-jet-145907.iam.gserviceaccount.com'
+VIEW_ID = '120207451'
+
+
+def initialize_analyticsreporting():
+    """Initializes an Analytics Reporting API V4 service object.
+
+    Returns:
+      analytics: an authorized Analytics Reporting service object.
+    """
+
+    credentials = ServiceAccountCredentials.from_p12_keyfile(
+        SERVICE_ACCOUNT_EMAIL, KEY_FILE_LOCATION, scopes=SCOPES)
+
+    http = credentials.authorize(httplib2.Http())
+
+    # Build the service object.
+    analytics = build('analytics', 'v4', http=http, discoveryServiceUrl=DISCOVERY_URI)
+
+    return analytics
+
+
+def get_report(analytics, start_date='2016-11-06', end_date='2016-11-13'):
+    # Use the Analytics Service Object to query the Analytics Reporting API V4.
+    return analytics.reports().batchGet(
+        body={
+            'reportRequests': [
+                {
+                    'viewId': VIEW_ID,
+                    'dateRanges': [{'startDate': start_date, 'endDate': end_date}],
+                    'metrics': [{'expression': 'ga:metric1', 'formattingType': 'CURRENCY'},
+                                {'expression': 'ga:metric2', 'formattingType': 'CURRENCY'},
+                                {'expression': 'ga:metric3', 'formattingType': 'CURRENCY'},
+                                {'expression': 'ga:totalEvents'}],
+                    'dimensions': [{"name": "ga:eventAction"}, {"name": "ga:dimension1"}, {"name": "ga:dimension2"}, {"name": "ga:dimension3"}, {"name": "ga:dimension4"}]
+                }
+            ]
+        }
+    ).execute()
+
+
+def print_response(response):
+    """Parses and prints the Analytics Reporting API V4 response."""
+
+    for report in response.get('reports', []):
+        columnHeader = report.get('columnHeader', {})
+        dimensionHeaders = columnHeader.get('dimensions', [])
+        metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', [])
+        rows = report.get('data', {}).get('rows', [])
+
+        for row in rows:
+            dimensions = row.get('dimensions', [])
+            dateRangeValues = row.get('metrics', [])
+
+            for header, dimension in zip(dimensionHeaders, dimensions):
+                print header + ': ' + dimension
+
+            for i, values in enumerate(dateRangeValues):
+                print 'Date range (' + str(i) + ')'
+                for metricHeader, value in zip(metricHeaders, values.get('values')):
+                    print metricHeader.get('name') + ': ' + value
+
+
+def export_to_dict(response):
+    df = {'Test_name': [], 'State': [], 'Setup': [], 'Test_type': [], 'MPPS': [], 'MPPS-Golden min': [], 'MPPS-Golden max': []}
+    for report in response.get('reports', []):
+        rows = report.get('data', {}).get('rows', [])
+        for row in rows:
+            dimensions = row.get('dimensions', [])
+            # print 'this is dimensions'
+            # print dimensions
+            df['Test_name'].append(dimensions[1])
+            df['State'].append(dimensions[2])
+            df['Setup'].append(dimensions[3])
+            df['Test_type'].append(dimensions[4])
+            dateRangeValues = row.get('metrics', [])
+            value = dateRangeValues[0].get('values', [])[0]
+            golden_min = dateRangeValues[0].get('values', [])[1]
+            golden_max = dateRangeValues[0].get('values', [])[2]
+            # print value
+            df['MPPS'].append(value)
+            df['MPPS-Golden min'].append(golden_min)
+            df['MPPS-Golden max'].append(golden_max)
+    return df
+
+
+def export_to_tuples(response):
+    setups = set()
+    df = {}
+    for report in response.get('reports', []):
+        rows = report.get('data', {}).get('rows', [])
+        for row in rows:
+            data = []
+            dimensions = row.get('dimensions', [])
+            # print 'this is dimensions'
+            # print dimensions
+            data.append(dimensions[1])  # test name
+            data.append(dimensions[2])  # state
+            # data.append(dimensions[3])  # setup
+            data.append(dimensions[4])  # test_type
+            dateRangeValues = row.get('metrics', [])
+            value = dateRangeValues[0].get('values', [])[0]       # MPPS
+            golden_min = dateRangeValues[0].get('values', [])[1]  # golden min
+            golden_max = dateRangeValues[0].get('values', [])[2]  # golden max
+            data.append(value)
+            data.append(golden_min)
+            data.append(golden_max)
+            if dimensions[3] in setups:
+                if dimensions[1] in df[dimensions[3]]:
+                    df[dimensions[3]][dimensions[1]].append(tuple(data))
+                else:
+                    df[dimensions[3]][dimensions[1]] = [tuple(data)]
+            else:
+                df[dimensions[3]] = {}
+                df[dimensions[3]][dimensions[1]] = [tuple(data)]
+                setups.add(dimensions[3])
+    return df, setups
+
+
+def main():
+    analytics = initialize_analyticsreporting()
+    response = get_report(analytics)
+    print_response(response)
+    g_dict = export_to_dict(response)
+    print g_dict
+    pprint(g_dict)
+
+    # pprint(response)
+if __name__ == '__main__':
+    main()
+
+"""
+ response = {u'reports': [{u'columnHeader': {u'dimensions': [u'ga:dimension1',
+ u'ga:dimension2',
+ u'ga:dimension3',
+ u'ga:dimension4'],
+ u'metricHeader': {u'metricHeaderEntries': [{u'name': u'ga:metric1',
+ u'type': u'CURRENCY'}]}},
+ u'data': {u'isDataGolden': True,
+ u'maximums': [{u'values': [u'8532.0']}],
+ u'minimums': [{u'values': [u'2133.0']}],
+ u'rowCount': 4,
+ u'rows': [{u'dimensions': [u'test_name_to_date_9-10-161',
+ u'State_Less',
+ u'Setup_Name1',
+ u'Test_Type'],
+ u'metrics': [{u'values': [u'2133.0']}]},
+ {u'dimensions': [u'test_name_to_date_9-10-162',
+ u'State_Less',
+ u'Setup_Name2',
+ u'Test_Type'],
+ u'metrics': [{u'values': [u'4266.0']}]},
+ {u'dimensions': [u'test_name_to_date_9-10-163',
+ u'State_Less',
+ u'Setup_Name3',
+ u'Test_Type'],
+ u'metrics': [{u'values': [u'6399.0']}]},
+ {u'dimensions': [u'test_name_to_date_9-10-164',
+ u'State_Less',
+ u'Setup_Name4',
+ u'Test_Type'],
+ u'metrics': [{u'values': [u'8532.0']}]}],
+ u'totals': [{u'values': [u'21330.0']}]}}]}
+ """ \ No newline at end of file
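Editor's note: the nested structure returned by export_to_tuples() above is easiest to see with a concrete shape. The sketch below is illustrative only -- the setup and test names and all numbers are invented, not taken from this commit -- but the nesting (setup -> test name -> list of per-run tuples) and the tuple layout follow the code above:

    # Hypothetical example of the (df, setups) pair produced by export_to_tuples().
    # Each leaf is a list of (test_name, state, test_type, MPPS, golden_min, golden_max)
    # tuples, one per row returned by Google Analytics; values arrive as strings.
    df = {
        'Setup_Name1': {
            'test_name_A': [
                ('test_name_A', 'State_Less', 'Test_Type', '2133.0', '2000.0', '2200.0'),
                ('test_name_A', 'State_Less', 'Test_Type', '2150.0', '2000.0', '2200.0'),
            ],
        },
    }
    setups = set(['Setup_Name1'])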
diff --git a/doc/AnalyticsWebReport.py b/doc/AnalyticsWebReport.py
new file mode 100755
index 00000000..9bf186bd
--- /dev/null
+++ b/doc/AnalyticsWebReport.py
@@ -0,0 +1,18 @@
+import os
+import sys
+import AnalyticsConnect as ac
+import TRexDataAnalysis as tr
+import time
+
+
+def main():
+    analytics = ac.initialize_analyticsreporting()
+    # print 'retrieving data from Google Analytics'
+    current_date = time.strftime("%Y-%m-%d")
+    response = ac.get_report(analytics, '2016-11-06', current_date)
+    ga_all_data_dict, setups = ac.export_to_tuples(response)
+    tr.create_all_data(ga_all_data_dict, setups, '2016-11-06', current_date, save_path=os.getcwd() + '/images/',
+                       add_stats='yes')
+
+if __name__ == "__main__":
+    main()
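Editor's note: main() writes its plots and CSVs under os.getcwd() + '/images/', and matplotlib's savefig() fails if that directory is missing. A minimal sketch of a guard a caller could add (hypothetical wrapper, not part of this commit):

    import os
    import AnalyticsWebReport

    # Make sure the output directory main() writes into actually exists.
    images_dir = os.path.join(os.getcwd(), 'images')
    if not os.path.isdir(images_dir):
        os.makedirs(images_dir)
    AnalyticsWebReport.main()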
diff --git a/doc/TRexDataAnalysis.py b/doc/TRexDataAnalysis.py
new file mode 100755
index 00000000..3561b0f1
--- /dev/null
+++ b/doc/TRexDataAnalysis.py
@@ -0,0 +1,136 @@
+#!/scratch/Anaconda2.4.0/bin/python
+import pandas as pd
+import numpy as np
+import matplotlib
+matplotlib.use('Agg')
+from matplotlib import pyplot as plt
+import os
+
+PATH_FOR_GRAPHS = 'Z:/trex/trex-doc/images/'
+
+
+def convert_dict_to_dframe(data, categories, index=''):
+    data_input = {}
+    for category in categories:
+        data_input[category] = data[category]
+    if index:
+        df = pd.DataFrame(data_input, index=data[index])
+    else:
+        df = pd.DataFrame(data_input)
+    return df
+
+
+def plot_bar_by_category(data_frame, category, index='', graph_name='graph.png', show='', gtitle='', save_path=''):
+    if index:
+        data_frame = data_frame.sort_index(by=index)
+        print data_frame[index]
+    else:
+        print data_frame
+    data_frame = pd.DataFrame(data_frame[category], columns=category).astype(float)
+    data_frame.plot(kind='bar')
+    plt.xticks(rotation='horizontal')
+    plt.title(gtitle)
+    if save_path:
+        plt.savefig(save_path + graph_name)
+    if show:
+        plt.show()
+
+
+def generate_csv(data_frame, file_name, save_path=(os.getcwd() + "/")):
+    f = open(save_path + file_name, 'w')
+    data_frame.to_csv(f)
+    f.close()
+
+
+# category is an array of category names that will appear as metrics
+def plot_bar_by_test_name(data_frame, test_name, category, graph_name='graph.png', show='', gtitle='', save_path=''):
+    data_frame = data_frame[data_frame['Test_name'] == test_name]
+    plot_bar_by_category(data_frame, category, 'Test_name', graph_name, show, gtitle=test_name, save_path=save_path)
+
+
+def generate_dframe_for_test(test_name, test_data):
+    test_results = []
+    test_mins = set()
+    test_maxs = set()
+    for query in test_data:
+        test_results.append(float(query[3]))
+        test_mins.add(float(query[4]))
+        test_maxs.add(float(query[5]))
+    df = pd.DataFrame({test_name: test_results})
+    stats = tuple([float(df.mean()), min(test_mins), max(test_maxs)])  # stats = (avg_mpps, min, max)
+    return df, stats
+
+
+def generate_dframe_arr_and_stats_of_tests_per_setup(date, setup_name, setup_dict):
+    dframe_arr_trend = []
+    stats_arr = []
+    dframe_arr_latest = []
+    test_names = setup_dict.keys()
+    for test in test_names:
+        df, stats = generate_dframe_for_test(test, setup_dict[test])
+        dframe_arr_trend.append(df)
+        stats_arr.append(stats)
+        df_latest = float(setup_dict[test][-1][3])
+        dframe_arr_latest.append(df_latest)
+    dframe_arr_latest = pd.DataFrame({'Date': [date] * len(dframe_arr_latest),
+                                      'Setup': [setup_name] * len(dframe_arr_latest),
+                                      'Test Name': test_names,
+                                      'MPPS': dframe_arr_latest},
+                                     index=range(1, len(dframe_arr_latest) + 1))
+    stats_df = pd.DataFrame(stats_arr, index=setup_dict.keys(), columns=['Avg MPPS', 'Golden Min', 'Golden Max'])
+    stats_df.index.name = 'Test Name'
+    return dframe_arr_trend, stats_df, dframe_arr_latest
+
+
+def create_plot_for_dframe_arr(dframe_arr, setup_name, start_date, end_date, show='no', save_path='',
+                               file_name='trend_graph'):
+    dframe_all = pd.concat(dframe_arr, axis=1)
+    dframe_all = dframe_all.astype(float)
+    dframe_all.plot()
+    plt.legend(fontsize='small', loc='best')
+    plt.ylabel('MPPS')
+    plt.title('Setup: ' + setup_name)
+    plt.tick_params(
+        axis='x',
+        which='both',
+        bottom='off',
+        top='off',
+        labelbottom='off')
+    plt.xlabel('Time Period: ' + start_date + ' - ' + end_date)
+    if save_path:
+        plt.savefig(save_path + setup_name + file_name + '.png')
+    if show == 'yes':
+        plt.show()
+
+
+def create_bar_plot_for_latest_runs_per_setup(dframe_all_tests_latest, setup_name, show='no', save_path=''):
+    plt.figure()
+    dframe_all_tests_latest['MPPS'].plot(kind='bar', legend=False)
+    dframe_all_tests_latest = dframe_all_tests_latest[['Test Name', 'Setup', 'Date', 'MPPS']]
+    plt.xticks(rotation='horizontal')
+    plt.xlabel('Index of Tests')
+    plt.ylabel('MPPS')
+    plt.title("Test Runs for Setup: " + setup_name)
+    if save_path:
+        plt.savefig(save_path + setup_name + '_latest_test_runs.png')
+    dframe_all_tests_latest = dframe_all_tests_latest.round(2)
+    dframe_all_tests_latest.to_csv(save_path + setup_name + '_latest_test_runs_stats.csv')
+    if show == 'yes':
+        plt.show()
+
+
+def create_all_data_per_setup(setup_dict, setup_name, start_date, end_date, show='no', save_path='', add_stats=''):
+    dframe_arr, stats_arr, dframe_latest_arr = generate_dframe_arr_and_stats_of_tests_per_setup(end_date, setup_name,
+                                                                                                setup_dict)
+    create_bar_plot_for_latest_runs_per_setup(dframe_latest_arr, setup_name, show=show, save_path=save_path)
+    create_plot_for_dframe_arr(dframe_arr, setup_name, start_date, end_date, show, save_path)
+    if add_stats:
+        stats_arr = stats_arr.round(2)
+        stats_arr.to_csv(save_path + setup_name + '_trend_stats.csv')
+    plt.close('all')
+
+
+def create_all_data(ga_data, setup_names, start_date, end_date, save_path='', add_stats=''):
+    for setup_name in setup_names:
+        create_all_data_per_setup(ga_data[setup_name], setup_name, start_date, end_date, show='no',
+                                  save_path=save_path, add_stats=add_stats)
diff --git a/doc/trex_analytics.asciidoc b/doc/trex_analytics.asciidoc
new file mode 100755
index 00000000..84946dfe
--- /dev/null
+++ b/doc/trex_analytics.asciidoc
@@ -0,0 +1,75 @@
+TRex Analytics Report
+======================
+:email: trex.tgen@gmail.com
+:quotes.++:
+:numbered:
+:web_server_url: https://trex-tgn.cisco.com/trex
+:local_web_server_url: csi-wiki-01:8181/trex
+:toclevels: 6
+:tabledef-default.subs: normal,callouts
+
+include::trex_ga.asciidoc[]
+
+// PDF version - image width variable
+ifdef::backend-docbook[]
+:p_width: 450
+endif::backend-docbook[]
+
+// HTML version - image width variable
+ifdef::backend-xhtml11[]
+:p_width: 800
+endif::backend-xhtml11[]
+
+
+= Analytics
+
+== Setup: TRex09
+.Setup Details
+[options='header',halign='center']
+|====================================================================================================================
+|Name |OS |NICs |Routers
+| trex09 | Fedora 18| 2x10G (X710) (8 interfaces), 1x10G (2 interfaces), 1x1G (4 interfaces) | Loopback on X710 + ASA 5520 + ASA 5512 + ASA 5585-ssp10
+|====================================================================================================================
+
+image:images/trex09_latest_test_runs.png[title="trex09 test runs",align="left",width={p_width}, link="images/trex09_latest_test_runs.png"]
+
+[format="csv", options="header",halign='center']
+|===
+include::images/trex09_latest_test_runs_stats.csv[]
+|===
+
+=== Trend: Analysis Over Time
+
+image:images/trex09trend_graph.png[title="trex09trend_graph",align="left",width={p_width}, link="images/trex09trend_graph.png"]
+
+[format="csv", options="header",halign='center']
+|===
+include::images/trex09_trend_stats.csv[]
+|===
+
+== Setup: Kiwi02
+.Setup Details
+[options='header',halign='center']
+|================================================================================================
+|Name |OS |NICs |Routers
+|kiwi02| Fedora 18| 2x10G (4 interfaces)| ESP100
+|================================================================================================
+image:images/kiwi02_latest_test_runs.png[title="kiwi02 test runs",align="left",width={p_width}, link="images/kiwi02_latest_test_runs.png"]
+
+[format="csv", options="header",halign='center']
+|===
+include::images/kiwi02_latest_test_runs_stats.csv[]
+|===
+
+=== Trend: Analysis Over Time
+
+image:images/kiwi02trend_graph.png[title="kiwi02trend_graph",align="left",width={p_width}, link="images/kiwi02trend_graph.png"]
+
+[format="csv", options="header",halign='center']
+|===
+include::images/kiwi02_trend_stats.csv[]
+|===
+
+
+
+
diff --git a/doc/ws_main.py b/doc/ws_main.py
index 237ea743..9d2882c8 100755
--- a/doc/ws_main.py
+++ b/doc/ws_main.py
@@ -250,6 +250,7 @@ def scansize(self):
 def options(opt):
     opt.add_option('--exe', action='store_true', default=False, help='Execute the program after it is compiled')
+    opt.add_option('--performance', action='store_true', help='Build a performance report based on Google Analytics')
 def configure(conf):
     search_path = '~/.local/bin /usr/local/bin/ /usr/bin'
@@ -887,8 +888,12 @@ def build_cp(bld,dir,root,callback):
-
-
+def create_analytic_report(task):
+    try:
+        import AnalyticsWebReport as analytics
+        analytics.main()
+    except Exception as e:
+        raise Exception('Error importing or using AnalyticsWebReport script: %s' % e)
@@ -916,6 +921,11 @@ def build(bld):
         bld(rule=my_copy, target=x)
     bld.add_group()
+    if bld.options.performance:
+        bld(rule=create_analytic_report)
+        bld(rule=convert_to_html_toc_book, source='trex_analytics.asciidoc waf.css', target='trex_analytics.html', scan=ascii_doc_scan);
+        return
+
     bld(rule=my_copy, target='my_chart.js')
     build_cp(bld, 'hlt_args.asciidoc', 'stl/trex_stl_lib', parse_hlt_args)
@@ -973,7 +983,7 @@ def build(bld):
     bld(rule=convert_to_html_toc_book,
         source='trex_rpc_server_spec.asciidoc waf.css', target='trex_rpc_server_spec.html', scan=ascii_doc_scan);
-
+
     bld(rule=convert_to_html_toc_book,
         source='trex_scapy_rpc_server.asciidoc waf.css', target='trex_scapy_rpc_server.html', scan=ascii_doc_scan);