Diffstat (limited to 'scripts')
-rwxr-xr-x  scripts/automation/regression/functional_tests/scapy_server_test.py         179
-rwxr-xr-x  scripts/automation/trex_control_plane/stl/examples/scapy_server.py          272
-rwxr-xr-x  scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/GAObjClass.py  179
3 files changed, 572 insertions(+), 58 deletions(-)
diff --git a/scripts/automation/regression/functional_tests/scapy_server_test.py b/scripts/automation/regression/functional_tests/scapy_server_test.py
new file mode 100755
index 00000000..1c6d2bd0
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/scapy_server_test.py
@@ -0,0 +1,179 @@
+# scapy server unit test
+
+import sys,os
+scapy_server_path = os.path.abspath(os.path.join(os.pardir, 'trex_control_plane', 'stl', 'examples'))
+print scapy_server_path
+stl_pathname = os.path.abspath(os.path.join(os.pardir, os.pardir, 'trex_control_plane','stl'))
+sys.path.append(scapy_server_path)
+sys.path.append(stl_pathname)
+
+
+
+#import stl_path
+import trex_stl_lib
+from trex_stl_lib.api import *
+from copy import deepcopy
+
+import tempfile
+import md5
+
+import outer_packages
+from platform_cmd_link import *
+import functional_general_test
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+from nose.tools import nottest
+from nose.plugins.attrib import attr
+
+from scapy_server import *
+
+
+class scapy_server_tester(functional_general_test.CGeneralFunctional_Test):
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+    '''
+    test for DB and field update - exercises check_update()
+    '''
+ def test_check_update(self):
+ allData = get_all()
+ allDataParsed = json.loads(allData)
+ dbMD5 = allDataParsed['DB_md5']
+ fieldMD5 = allDataParsed['fields_md5']
+ result = check_update(dbMD5,fieldMD5)
+ result = json.loads(result)
+ '''
+ if result[0][0] == 'Success' and result[1][0] == 'Success':
+ print 'check_update_test [md5 comparison test]: Success'
+ else:
+ print 'check_update_test [md5 comparison test]: md5s of fields or db do not match the source'
+ '''
+ resT1 = (result[0][0] == 'Success' and result[1][0] == 'Success')
+ assert_equal(resT1,True)
+
+ result = check_update(json.dumps('falseMD5'),json.dumps('falseMD5'))
+ result = json.loads(result)
+ '''
+ if result[0][0] == 'Fail' and result[1][0] == 'Fail':
+ print 'check_update_test [wrong md5s return failure]: Success'
+ else:
+ print 'check_update_test [wrong md5s return failure]: md5s of fields or db return Success for invalid value'
+ '''
+ resT2 = (result[0][0] == 'Fail' and result[1][0] == 'Fail')
+ assert_equal(resT2,True)
+
+ result = check_update(dbMD5,json.dumps('falseMD5'))
+ result = json.loads(result)
+ '''
+ if result[0][0] == 'Fail' and result[1][0] == 'Success':
+ print 'check_update_test [wrong field md5 returns error, correct db md5]: Success'
+ else:
+ print 'md5 of field return Success for invalid value'
+ '''
+ resT3 = (result[0][0] == 'Fail' and result[1][0] == 'Success')
+ assert_equal(resT3,True)
+
+ result = check_update(json.dumps('falseMD5'),fieldMD5)
+ result = json.loads(result)
+ '''
+ if result[0][0] == 'Success' and result[1][0] == 'Fail':
+ print 'check_update_test [wrong db md5 returns error, correct field md5]: Success'
+ else:
+ print 'md5 of db return Success for invalid value'
+ '''
+ resT4 = (result[0][0] == 'Success' and result[1][0] == 'Fail')
+ assert_equal(resT4,True)
+
+
+ def test_check_updating_db(self):
+        # assume we got an old DB
+ result = check_update(json.dumps('falseMD5'),json.dumps('falseMD5'))
+ result = json.loads(result)
+ if result[0][0] == 'Fail' or result[1][0] == 'Fail':
+ newAllData = get_all()
+ allDataParsed = json.loads(newAllData)
+ dbMD5 = allDataParsed['DB_md5']
+ fieldMD5 = allDataParsed['fields_md5']
+ result = check_update(dbMD5,fieldMD5)
+ result = json.loads(result)
+ '''
+ if result[0][0] == 'Success' and result[1][0] == 'Success':
+ print 'check_updating_db [got old db and updated it]: Success'
+ else:
+                print 'check_updating_db [got old db and updated it]: FAILED'
+ '''
+ resT1 = (result[0][0] == 'Success' and result[1][0] == 'Success')
+ assert_equal(resT1,True)
+ else:
+ raise Exception("scapy_server_test: check_updating_db failed")
+
+
+# testing pkt = Ether()/IP()/TCP()/"test" by default
+ def test_build_packet(self,original_pkt = json.dumps('Ether()/IP()/TCP()/"test"')):
+ test_pkt = original_pkt
+ original_pkt = eval(json.loads(original_pkt))
+ test_res = build_pkt(test_pkt)
+ test_res = json.loads(test_res)
+ test_pkt_buffer = json.loads(test_res[2])
+ test_pkt_buffer = test_pkt_buffer.decode('base64')
+ '''
+ if test_pkt_buffer == str(original_pkt):
+ print 'build_pkt test [scapy packet and string-defined packet comparison]: Success'
+ else:
+ print 'build_pkt test [scapy packet and string-defined packet comparison]: FAILED'
+ '''
+ resT1 = (test_pkt_buffer == str(original_pkt))
+ assert_equal(resT1,True)
+
+
+# testing offsets of packet IP() by default
+ def test_get_all_offsets(self,original_pkt = json.dumps('IP()')):
+ test_pkt = original_pkt
+ original_pkt = eval(json.loads(original_pkt))
+ tested_offsets_by_layers = get_all_pkt_offsets(test_pkt)
+ tested_offsets_by_layers = json.loads(tested_offsets_by_layers)
+ layers = json.loads(test_pkt).split('/')
+ offsets_by_layers = {}
+ for layer in layers:
+ fields_list = []
+ for f in original_pkt.fields_desc:
+ size = f.get_size_bytes()
+                if f.name == 'load':
+ size = len(original_pkt)
+ fields_list.append([f.name, f.offset, size])
+ original_pkt = original_pkt.payload
+ offsets_by_layers[layer] = fields_list
+ '''
+ if tested_offsets_by_layers == offsets_by_layers:
+ print 'Success'
+ else:
+ print 'test_get_all_offsets[comparison of offsets in given packet]: FAILED'
+ '''
+ resT1 = (tested_offsets_by_layers == offsets_by_layers)
+ assert_equal(resT1,True)
+
+ def test_multi_packet(self):
+ e0 = json.dumps('Ether()')
+ e1 = json.dumps('Ether()/IP()')
+ e2 = json.dumps('TCP()')
+ e3 = json.dumps('UDP()')
+ e4 = json.dumps('Ether()/IP()/TCP()/"test"')
+ e5 = json.dumps('Ether()/IP()/UDP()')
+ packets = [e0,e1,e2,e3,e4,e5]
+
+ for packet in packets:
+ self.test_get_all_offsets(packet)
+
+ for packet in packets:
+ self.test_build_packet(packet)
+
+
+
+
+
+
+
+
diff --git a/scripts/automation/trex_control_plane/stl/examples/scapy_server.py b/scripts/automation/trex_control_plane/stl/examples/scapy_server.py
new file mode 100755
index 00000000..4762f1a6
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/scapy_server.py
@@ -0,0 +1,272 @@
+import stl_path
+import trex_stl_lib
+from trex_stl_lib.api import *
+from copy import deepcopy
+import sys
+import tempfile
+import md5
+
+#print ls()
+
+from cStringIO import StringIO
+"""
+old_stdout = sys.stdout
+sys.stdout = mystdout = StringIO()
+
+ls()
+
+sys.stdout = old_stdout
+
+a= mystdout.getvalue()
+
+f = open('scapy_supported_formats.txt','w')
+f.write(a)
+f.close()
+"""
+#-------------------------------------------------------TREE IMPLEMENTATION ------------------------
+class node(object):
+ def __init__(self, value):#, children = []):
+ self.value = value
+ self.children = []
+
+ def __str__(self, level=0):
+ ret = "\t"*level+repr(self.value)+"\n"
+ for child in self.children:
+ ret += child.__str__(level+1)
+ return ret
+#----------------------------------------------------------------------------------------------------
+
+
+def build_lib():
+ lib = protocol_struct()
+ lib = lib.split('\n')
+ all_protocols=[]
+ for entry in lib:
+ entry = entry.split(':')
+ all_protocols.append(entry[0].strip())
+    del all_protocols[-1]
+ return all_protocols
+
+def protocol_struct(protocol=''):
+ if '_' in protocol:
+ return []
+ if not protocol=='':
+ if protocol not in all_protocols:
+ return 'protocol not supported'
+ protocol = eval(protocol)
+ old_stdout = sys.stdout
+ sys.stdout = mystdout = StringIO()
+ if not protocol=='':
+ ls(protocol)
+ else:
+ ls()
+ sys.stdout = old_stdout
+ a= mystdout.getvalue()
+# print a
+ return a
+
+def parse_description_line(line):
+ line_arr = [x.strip() for x in re.split(': | = ',line)]
+ return tuple(line_arr)
+
+def parse_entire_description(d):
+ d = d.split('\n')
+ description_list = [parse_description_line(x) for x in d]
+    del description_list[-1]
+ return description_list
+
+def get_protocol_details(p_name):
+ protocol_str = protocol_struct(p_name)
+ if protocol_str=='protocol not supported':
+ return 'protocol not supported'
+    if len(protocol_str) == 0:
+ return []
+ tupled_protocol = parse_entire_description(protocol_str)
+ return tupled_protocol
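+
+# usage sketch (illustrative): each returned entry is a tuple parsed from
+# scapy's ls() text, roughly (field_name, field_type, default_value):
+#   get_protocol_details('IP')   -> [('version', 'BitField', '4 (4)'), ...]
+#   get_protocol_details('Foo9') -> 'protocol not supported'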
+
+
+class scapyRegex:
+ def __init__(self,FieldName,regex='empty'):
+ self.FieldName = FieldName
+ self.regex = regex
+
+ def stringRegex(self):
+ return self.regex
+
+all_protocols = build_lib()
+
+Raw = {'Raw':''}
+high_level_protocols = ['Raw']
+transport_protocols = {'TCP':Raw,'UDP':Raw}
+network_protocols = {'IP':transport_protocols ,'ARP':''}
+low_level_protocols = { 'Ether': network_protocols }
+regexDB= {'MACField' : scapyRegex('MACField','^([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F])$'),
+ 'IPField' : scapyRegex('IPField','^(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])$')}
+
+
+def build_nodes(data,dictionary):
+ n = node(data)
+ if len(dictionary)==0:
+ n.children=[]
+ return n
+ for obj in dictionary.keys():
+ if not (obj==''):
+ x = build_nodes(obj,dictionary[obj])
+ n.children.append(x)
+ return n
+
+def build_tree():
+ root = node('ALL')
+ root.children = []
+ root = build_nodes('ALL',low_level_protocols)
+ return root
+
+protocol_tree = build_tree()
+
+def print_tree(root,level=0):
+ output = "\t"*level+str(root.value)
+ print output
+ if len(root.children)==0:
+ return
+ for child in root.children:
+ print_tree(child,level+1)
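+
+# sketch of the fixed hierarchy rendered by print_tree(protocol_tree):
+#   ALL
+#       Ether
+#           IP
+#               TCP
+#                   Raw
+#               UDP
+#                   Raw
+#           ARP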
+
+def get_all_protocols():
+ return json.dumps(all_protocols)
+
+def get_tree():
+ old_stdout = sys.stdout
+ sys.stdout = mystdout = StringIO()
+    print_tree(protocol_tree)
+ sys.stdout = old_stdout
+ a= mystdout.getvalue()
+ return json.dumps(a)
+
+def get_all_db():
+ db = {}
+ for pro in all_protocols:
+ details = get_protocol_details(pro)
+ db[pro] = json.dumps(details)
+ return db
+
+def get_all_fields():
+ fields = []
+ for pro in all_protocols:
+ details = get_protocol_details(pro)
+        for i in range(len(details)):
+            if len(details[i]) == 3:
+ fields.append(details[i][1])
+ uniqeFields = list(set(fields))
+ fieldDict = {}
+ for f in uniqeFields:
+ if f in regexDB:
+ fieldDict[f] = regexDB[f].stringRegex()
+ else:
+ fieldDict[f] = scapyRegex(f).stringRegex()
+ return fieldDict
+
+
+class pktResult:
+ def __init__(self,result,errorCode,errorDesc):
+ self.result = result
+ self.errorCode = errorCode
+ self.errorDesc = errorDesc
+
+ def convert2tuple(self):
+ return tuple([self.result,self.errorCode,self.errorDesc])
+
+
+
+# pkt_descriptor in JSON format only!
+# returned tuple in JSON format: (pktResultTuple , show2data, hexdump(buffer))
+# pktResultTuple is: ( result, errorCode, error description )
+
+def build_pkt(pkt_descriptor):
+ try:
+ pkt = eval(json.loads(pkt_descriptor))
+ old_stdout = sys.stdout
+ sys.stdout = mystdout = StringIO()
+ pkt.show2()
+ sys.stdout = old_stdout
+ show2data = mystdout.getvalue() #show2 data
+ bufferData = str(pkt) #pkt buffer
+ bufferData = bufferData.encode('base64')
+ pktRes = pktResult('Success',0,'None').convert2tuple()
+ res = [pktRes,json.dumps(show2data),json.dumps(bufferData)]
+ JSONres = json.dumps(res)
+ return JSONres
+ except:
+ pktRes = pktResult('Pkt build Failed',str(sys.exc_info()[0]),str(sys.exc_info()[1])).convert2tuple()
+ res = [pktRes,[],[]]
+ res = json.dumps(res)
+ return res
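+
+# usage sketch (the descriptor must be a JSON-encoded scapy expression):
+#   res = json.loads(build_pkt(json.dumps('Ether()/IP()')))
+#   res[0]                              -> ['Success', 0, 'None']
+#   json.loads(res[2]).decode('base64') -> the raw packet bytes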
+
+#input: container
+#output: md5 in json format encoded in base64
+def getMD5(container):
+ resMD5 = md5.new(json.dumps(container))
+ resMD5 = json.dumps(resMD5.digest().encode('base64'))
+ return resMD5
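+
+# e.g. getMD5({'a': 1}) returns a JSON string holding the base64 of
+# md5.new(json.dumps({'a': 1})).digest()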
+
+
+def get_all():
+ fields=get_all_fields()
+ db=get_all_db()
+ fieldMD5 = getMD5(fields)
+ dbMD5 = getMD5(db)
+ res = {}
+ res['db'] = db
+ res['fields'] = fields
+ res['DB_md5'] = dbMD5
+# print dbMD5
+ res['fields_md5'] = fieldMD5
+ return json.dumps(res)
+
+#input in json string encoded base64
+def check_update(dbMD5,fieldMD5):
+ fields=get_all_fields()
+ db=get_all_db()
+ currentDBMD5 = json.loads(getMD5(db))
+ currentFieldMD5 = json.loads(getMD5(fields))
+ dbMD5_parsed = json.loads(dbMD5)
+ fieldMD5_parsed = json.loads(fieldMD5)
+ res = []
+# print 'this is current DB MD5: %s ' % currentDBMD5
+# print 'this is given DB MD5: %s ' % dbMD5_parsed
+ if fieldMD5_parsed == currentFieldMD5:
+ resField = pktResult('Success',0,'None').convert2tuple()
+ else:
+ resField = pktResult('Fail',0,'Field DB is not up to date').convert2tuple()
+ if dbMD5_parsed == currentDBMD5:
+ resDB = pktResult('Success',0,'None').convert2tuple()
+ else:
+ resDB = pktResult('Fail',0,'Protocol DB is not up to date').convert2tuple()
+ return json.dumps([resField,resDB])
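+
+# usage sketch (mirrors the regression test):
+#   data = json.loads(get_all())
+#   res = json.loads(check_update(data['DB_md5'], data['fields_md5']))
+#   # res is [fieldResult, dbResult]; each is ('Success'|'Fail', errCode, errDesc)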
+
+#pkt_desc as json
+#dictionary of offsets per protocol. tuple for each field: (name, offset, size) at json format
+def get_all_pkt_offsets(pkt_desc):
+ pkt_desc= json.loads(pkt_desc)
+ pkt_protocols = pkt_desc.split('/')
+ scapy_pkt = eval(pkt_desc)
+ total_protocols = len(pkt_protocols)
+ res = {}
+ for i in range(total_protocols):
+ fields = []
+ for field in scapy_pkt.fields_desc:
+ size = field.get_size_bytes()
+            if field.name == 'load':
+ size = len(scapy_pkt)
+ fields.append([field.name, field.offset, size])
+ res[pkt_protocols[i]] = fields
+ scapy_pkt=scapy_pkt.payload
+ return json.dumps(res)
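+
+# usage sketch (assumes a scapy build exposing field.offset and
+# field.get_size_bytes(), as used above; values are illustrative):
+#   json.loads(get_all_pkt_offsets(json.dumps('Ether()/IP()')))
+#   -> {'Ether()': [['dst', 0, 6], ['src', 6, 6], ['type', 12, 2]],
+#       'IP()': [...]}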
+
+
+
+# stub: returns the server's exported commands as JSON. The command list is
+# illustrative and mirrors the functions defined above.
+def get_supported_cmds():
+    return json.dumps(['get_all', 'check_update', 'build_pkt',
+                       'get_all_pkt_offsets', 'get_all_protocols', 'get_tree'])
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/GAObjClass.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/GAObjClass.py
index 164aae7a..3993ad5e 100755
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/GAObjClass.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/GAObjClass.py
@@ -1,4 +1,3 @@
-#import requests # need external lib for that
try: # Python2
import Queue
from urllib2 import *
@@ -9,7 +8,7 @@ except: # Python3
import threading
import sys
from time import sleep
-
+from pprint import pprint
"""
GAObjClass is a class designed to send Google Analytics information.
@@ -26,57 +25,88 @@ A maximum of 20 hits can be specified per request.
The total size of all hit payloads cannot be greater than 16K bytes.
No single hit payload can be greater than 8K bytes.
"""
-
-url_single = 'http://www.google-analytics.com/collect' #sending single event
-url_batched = 'http://www.google-analytics.com/batch' #sending batched events
-url_debug = 'http://www.google-analytics.com/debug/collect' #verifying hit is valid
+url_single = 'https://www.google-analytics.com/collect' #sending single event
+url_batched = 'https://www.google-analytics.com/batch' #sending batched events
+url_debug = 'https://www.google-analytics.com/debug/collect' #verifying hit is valid
url_conn = 'http://172.217.2.196' # testing internet connection to this address (google-analytics server)
+#..................................................................class GA_ObjClass................................................................
+class GA_ObjClass:
+ def __init__(self,cid,trackerID,appName,appVer):
+ self.cid = cid
+ self.trackerID = trackerID
+ self.appName = appName
+ self.appVer = appVer
+ self.payload = ''
+ self.payload = GA_ObjClass.generate_payload(self)
+ self.size = sys.getsizeof(self.payload)
+
+ def generate_payload(self):
+ self.payload+='v=1&t=event&tid='+str(self.trackerID)
+ self.payload+='&cid='+str(self.cid)
+ self.payload+='&an='+str(self.appName)
+ self.payload+='&av='+str(self.appVer)
+ return self.payload
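+
+# e.g. a base payload (Measurement Protocol v1) looks like:
+#   'v=1&t=event&tid=UA-75220362-4&cid=Foo&an=TRex&av=1.11.232'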
+
#..................................................................class GA_EVENT_ObjClass................................................................
-class GA_EVENT_ObjClass:
+class GA_EVENT_ObjClass(GA_ObjClass):
def __init__(self,cid,trackerID,command,action,label,value,appName,appVer):
- self.cid = cid
- self.trackerID = trackerID
+ GA_ObjClass.__init__(self,cid,trackerID,appName,appVer)
self.command = command
self.action = action
self.label = label
self.value = value
- self.appName = appName
- self.appVer = appVer
- self.generate_payload()
+ self.payload = self.generate_payload()
self.size = sys.getsizeof(self.payload)
def generate_payload(self):
- self.payload ='v=1&t=event&tid='+str(self.trackerID)
- self.payload+='&cid='+str(self.cid)
self.payload+='&ec='+str(self.command)
self.payload+='&ea='+str(self.action)
self.payload+='&el='+str(self.label)
self.payload+='&ev='+str(self.value)
- self.payload+='&an='+str(self.appName)
- self.payload+='&av='+str(self.appVer)
+ return self.payload
#..................................................................class GA_EXCEPTION_ObjClass................................................................
#ExceptionFatal - BOOLEAN
-class GA_EXCEPTION_ObjClass:
+class GA_EXCEPTION_ObjClass(GA_ObjClass):
def __init__(self,cid,trackerID,ExceptionName,ExceptionFatal,appName,appVer):
- self.cid = cid
- self.trackerID = trackerID
+ GA_ObjClass.__init__(self,cid,trackerID,appName,appVer)
self.ExceptionName = ExceptionName
self.ExceptionFatal = ExceptionFatal
- self.appName = appName
- self.appVer = appVer
- self.generate_payload()
+ self.payload = self.generate_payload()
def generate_payload(self):
- self.payload ='v=1&t=exception&tid='+str(self.trackerID)
- self.payload+='&cid='+str(self.cid)
self.payload+='&exd='+str(self.ExceptionName)
self.payload+='&exf='+str(self.ExceptionFatal)
- self.payload+='&an='+str(self.appName)
- self.payload+='&av='+str(self.appVer)
+ return self.payload
+
+
+
+#..................................................................class GA_TESTING_ObjClass................................................................
+class GA_TESTING_ObjClass(GA_ObjClass):
+ def __init__(self,cid,uuid,trackerID,TRexMode,test_name,setup_name,appName,appVer,commitID,bandwidthPerCore,goldenBPC):
+ GA_ObjClass.__init__(self,cid,trackerID,appName,appVer)
+ self.uid = uuid
+ self.TRexMode = TRexMode
+ self.test_name = test_name
+ self.setup_name = setup_name
+ self.commitID = commitID
+ self.bandwidthPerCore = bandwidthPerCore
+ self.goldenBPC = goldenBPC
+ self.payload = self.generate_payload()
+ self.size = sys.getsizeof(self.payload)
+ def generate_payload(self):
+ self.payload+='&ec='+str(self.TRexMode)
+ self.payload+='&ea=RegressionReport'
+ self.payload+='&cd5='+str(self.uid)
+ self.payload+='&cd1='+str(self.test_name)
+ self.payload+='&cd2='+str(self.setup_name)
+ self.payload+='&cd3='+str(self.commitID)
+ self.payload+='&cm1='+str(self.bandwidthPerCore)
+ self.payload+='&cm2='+str(self.goldenBPC)
+ return self.payload
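+
+# note: the custom dimension/metric slots above (cd1=test_name, cd2=setup_name,
+# cd3=commitID, cd5=uid, cm1=bandwidthPerCore, cm2=goldenBPC) must match the
+# property's custom definitions configured in Google Analytics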
#.....................................................................class ga_Thread.................................................................
"""
@@ -85,7 +115,6 @@ Google analytics thread manager:
will report and empty the queue of Google Analytics items to the GA server every Timeout seconds (parameter given on initialization)
will perform connectivity check every timeout*10 seconds
-
"""
class ga_Thread (threading.Thread):
@@ -93,7 +122,6 @@ class ga_Thread (threading.Thread):
threading.Thread.__init__(self)
self.threadID = threadID
self.gManager = gManager
-
def run(self):
keepAliveCounter=0
#sys.stdout.write('thread started \n')
@@ -116,9 +144,6 @@ class ga_Thread (threading.Thread):
self.gManager.threadLock.release()
# sys.stdout.write('finished \n')
# sys.stdout.flush()
-
-
-
#.....................................................................class GAmanager.................................................................
"""
@@ -145,6 +170,7 @@ BlockingMode - set to 1 if you wish every Google Analytic Object will be submitt
 *** Restriction - Google limits the rate of hits sent: 1 event per second, per session. session length is 30min ***
"""
+
class GAmanager:
def __init__(self,GoogleID,UserID,appName,appVer,QueueSize,Timeout,UserPermission,BlockingMode):
self.UserID = UserID
@@ -173,13 +199,13 @@ class GAmanager:
self.gaAddObject(GA_EXCEPTION_ObjClass(self.UserID,self.GoogleID,ExceptionName,ExceptionFatal,self.appName,self.appVer))
def gaAddObject(self,Object):
- if self.BlockingMode==1:
- while self.GA_q.full():
+ if (self.BlockingMode==1):
+ while (self.GA_q.full()):
sleep(self.Timeout)
# sys.stdout.write('blocking mode=1 \n queue full - sleeping for timeout \n') # within Timout, the thread will empty part of the queue
# sys.stdout.flush()
lockState = self.threadLock.acquire(self.BlockingMode)
- if lockState==1:
+ if (lockState==1):
# sys.stdout.write('got lock, adding item \n')
# sys.stdout.flush()
try:
@@ -198,21 +224,23 @@ class GAmanager:
obj_list.append(self.GA_q.get_nowait().payload)
items+=1
# print items
+ return obj_list
def reportBatched(self,batched):
req = Request(url_batched, data=batched.encode('ascii'))
urlopen(req)
- #requests.post(url_batched,data=batched)
-
+# pprint(r.json())
+
def emptyAndReportQ(self):
obj_list = []
- self.emptyQueueToList(obj_list)
- if not len(obj_list):
+ obj_list = self.emptyQueueToList(obj_list)
+ if (len(obj_list)==0):
return
batched = '\n'.join(obj_list)
+# print sys.getsizeof(batched)
# print batched # - for debug
self.reportBatched(batched)
-
+
def printSelf(self):
print('remaining in queue:')
while not self.GA_q.empty():
@@ -231,24 +259,65 @@ class GAmanager:
self.thread.start()
+#.....................................................................class GAmanager_Regression.................................................................
+"""
+ *-*-*-*-Google Analytics Regression Manager-*-*-*-*
+ attributes:
+GoogleID - the tracker ID that Google uses in order to track the activity of a property. for regression use: 'UA-75220362-4'
+AnalyticsUserID - text value - used by Google to distinguish between users sending data (will not be presented on reports). use it only as a way to tell different users apart
+UUID - text - will be presented on analysis. put here UUID
+TRexMode - text - will be presented on analysis. put here TRexMode
+appName - text - will be presented on analysis. put here appName as string describing app name
+appVer - text - will be presented on analysis. put here the appVer
+QueueSize - integer - determines the queue size. the queue will hold pending requests before submission. RECOMMENDED VALUE: 20
+Timeout - integer (seconds) - the timeout in seconds between automated reports when activating reporting thread
+UserPermission - boolean (1/0) - required in order to send packets, should be 1.
+BlockingMode - boolean (1/0) - required when each tracked event is critical and program should halt until the event is reported
+SetupName - text - will be presented on analysis. put here setup name as string.
+CommitID - text - will be presented on analysis. put here CommitID
+"""
+class GAmanager_Regression(GAmanager):
+ def __init__(self,GoogleID,AnalyticsUserID,UUID,TRexMode,appName,appVer,
+ QueueSize,Timeout,UserPermission,BlockingMode,SetupName,CommitID):
+ GAmanager.__init__(self,GoogleID,AnalyticsUserID,appName,appVer,
+ QueueSize,Timeout,UserPermission,BlockingMode)
+ self.UUID = UUID
+ self.TRexMode = TRexMode
+ self.SetupName = SetupName
+ self.CommitID = CommitID
+
+ def gaAddTestQuery(self,TestName,BandwidthPerCore,GoldenBPC):
+ self.gaAddObject(GA_TESTING_ObjClass(self.UserID,self.UUID,self.GoogleID,
+ self.TRexMode,TestName,self.SetupName,
+ self.appName,self.appVer,self.CommitID,
+ BandwidthPerCore,GoldenBPC))
+
+
+
#***************************************------TEST--------------**************************************
-if __name__ == '__main__':
- g = GAmanager(GoogleID='UA-75220362-4',UserID="Foo",QueueSize=100,Timeout=5,UserPermission=1,BlockingMode=1,appName='TRex',appVer='1.11.232') #timeout in seconds
-#for i in range(0,35,1):
-#i = 42
- g.gaAddAction(Event='stl',action='stl/udp_1pkt_simple.py {packet_count:1000,packet_len:9000}',label='Boo',value=20)
- #g.gaAddAction(Event='test',action='start',label='Boo1',value=20)
+#if __name__ == '__main__':
+
+#g= GAmanager_Regression(GoogleID='UA-75220362-4',AnalyticsUserID=3845,UUID='trex18UUID_GA_TEST',TRexMode='stateFull_GA_TEST',
+# appName='TRex_GA_TEST',appVer='1.1_GA_TEST',QueueSize=20,Timeout=11,UserPermission=1,BlockingMode=0,SetupName='setup1_GA_TEST',CommitID='commitID1_GA_TEST')
+#for j in range(1,3,1):
+#for i in range(100,118,1):
+# g.gaAddTestQuery('test_name_GA_TEST',i+0.5,150)
+# sleep(11)
+# print "finished batch"
+#g.emptyAndReportQ()
+
+#g.printSelf()
+#g.emptyAndReportQ()
+
+#g = GAmanager(GoogleID='UA-75220362-4',UserID=1,QueueSize=100,Timeout=5,UserPermission=1,BlockingMode=0,appName='TRex',appVer='1.11.232') #timeout in seconds
+#for i in range(0,35,1):
+# g.gaAddAction(Event='test',action='start',label='1',value=i)
#g.gaAddException('MEMFAULT',1)
-#g.gaAddException('MEMFAULT',1)
-#g.gaAddException('MEMFAULT',1)
-#g.gaAddException('MEMFAULT',1)
-#g.gaAddException('MEMFAULT',1)
-#g.gaAddException('MEMFAULT',1)
- g.emptyAndReportQ()
-# g.printSelf()
+#g.emptyAndReportQ()
+#g.printSelf()
#print g.payload
#print g.size
@@ -278,12 +347,6 @@ if __name__ == '__main__':
# sys.stdout.flush()
-# add timing mechanism - DONE
-# add exception mechanism - DONE
-# add version mechanism - DONE
-# ask Itay for unique ID generation per user
-
-