summaryrefslogtreecommitdiffstats
path: root/scripts
diff options
context:
space:
mode:
authoritraviv <itraviv@cisco.com>2016-08-18 15:24:21 +0300
committeritraviv <itraviv@cisco.com>2016-08-18 16:26:09 +0300
commitb64ee3961384a4b0ddb9613a5940c58a517de30d (patch)
tree3fd60c06bbe8c8067f94063e0849f465919a21fd /scripts
parenta08d3b9ba1c5010827029bab030ef61d73368fa3 (diff)
parent6796bb99573f15c77a007434feabb30291ac1670 (diff)
Merge branch 'scapy_server'
Diffstat (limited to 'scripts')
-rwxr-xr-xscripts/automation/regression/functional_tests/scapy_server_test.py221
-rwxr-xr-xscripts/automation/trex_control_plane/examples/zmq_server_client.py45
-rwxr-xr-xscripts/automation/trex_control_plane/stl/services/scapy_server/scapy_service.py334
-rw-r--r--scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_zmq_client.py44
-rwxr-xr-xscripts/automation/trex_control_plane/stl/services/scapy_server/scapy_zmq_server.py152
-rwxr-xr-xscripts/automation/trex_control_plane/stl/services/scapy_server/zmq_for_scapy_server_test.py14
-rwxr-xr-xscripts/automation/trex_control_plane/stl/trex_stl_lib/utils/GAObjClass.py179
7 files changed, 931 insertions, 58 deletions
diff --git a/scripts/automation/regression/functional_tests/scapy_server_test.py b/scripts/automation/regression/functional_tests/scapy_server_test.py
new file mode 100755
index 00000000..ea3ec0da
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/scapy_server_test.py
@@ -0,0 +1,221 @@
+# scapy server unit test
+
+import sys,os
+from nose.plugins.skip import SkipTest
+if sys.version_info.major == 3:
+ raise SkipTest("Python3 currently not supported")
+scapy_server_path = os.path.abspath(os.path.join(os.pardir, 'trex_control_plane', 'stl', 'services','scapy_server'))
+stl_pathname = os.path.abspath(os.path.join(os.pardir, os.pardir, 'trex_control_plane','stl'))
+sys.path.append(scapy_server_path)
+sys.path.append(stl_pathname)
+
+
+import trex_stl_lib
+from trex_stl_lib.api import *
+from copy import deepcopy
+
+import tempfile
+import hashlib
+from platform_cmd_link import *
+import functional_general_test
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+from nose.tools import nottest
+from nose.plugins.attrib import attr
+import binascii
+from scapy_service import *
+from pprint import pprint
+import zmq
+import json
+import scapy_zmq_server
+import threading
+from scapy_zmq_client import Scapy_server_wrapper
+
class scapy_service_tester(functional_general_test.CGeneralFunctional_Test):
    """Unit tests for Scapy_service, run against an in-process instance.

    scapy_server_tester (below) re-runs the same tests over ZMQ by swapping
    self.s for a remote wrapper, so every test must only go through self.s.
    """

    def setUp(self):
        self.s = Scapy_service()

    def tearDown(self):
        pass

    def _assert_check_update_fails(self, db_md5, field_md5, expected_msg):
        # check_update() must raise an exception carrying exactly expected_msg
        # for the given (stale) md5 pair.
        try:
            self.s.check_update(db_md5, field_md5)
        except Exception as e:
            # str(e) is portable; e.message exists on Python 2 only.
            assert_equal(str(e), expected_msg)
        else:
            raise AssertionError('check_update accepted stale md5 pair (%s, %s)' % (db_md5, field_md5))

    '''
    test for db and field update - checking check_update_test()
    '''
    def test_check_update(self):
        allData = self.s.get_all()
        dbMD5 = allData['db_md5']
        fieldMD5 = allData['fields_md5']
        # The current md5 pair must be accepted...
        assert_equal(self.s.check_update(dbMD5, fieldMD5), True)
        # ...and any stale md5 rejected.  The fields DB is checked first.
        self._assert_check_update_fails('falseMD5', 'falseMD5', 'Fields DB is not up to date')
        self._assert_check_update_fails(dbMD5, 'falseMD5', 'Fields DB is not up to date')
        self._assert_check_update_fails('falseMD5', fieldMD5, 'Protocol DB is not up to date')

    def test_check_updating_db(self):
        # Simulate a client holding stale md5s: after refreshing via get_all(),
        # check_update() must accept the new pair.
        try:
            self.s.check_update('falseMD5', 'falseMD5')
        except Exception:
            newAllData = self.s.get_all()
            dbMD5 = newAllData['db_md5']
            fieldMD5 = newAllData['fields_md5']
            result = self.s.check_update(dbMD5, fieldMD5)
            assert_equal(result, True)
        else:
            raise Exception("scapy_server_test: check_updating_db failed")

    def _build_packet_test_method(self, original_pkt):
        # build_pkt() must return the exact base64 buffer Scapy builds locally.
        test_res = self.s.build_pkt(original_pkt)
        test_pkt_buffer = test_res['buffer']
        scapy_pkt = eval(original_pkt)
        assert_equal(test_pkt_buffer, binascii.b2a_base64(str(scapy_pkt)))

    # testing offsets of a packet
    def _get_all_offsets_test_method(self, original_pkt):
        test_pkt = original_pkt
        original_pkt = eval(original_pkt)
        original_pkt.build()  # build() populates the per-field offsets read below
        tested_offsets_by_layers = self.s._get_all_pkt_offsets(test_pkt)
        layers = test_pkt.split('/')
        offsets_by_layers = {}
        for layer in layers:
            fields_dict = {}
            layer_name = layer.partition('(')[0]  # clear layer name to include only alpha-numeric
            layer_name = re.sub(r'\W+', '', layer_name)
            for f in original_pkt.fields_desc:
                size = f.get_size_bytes()
                # The payload field is reported as a 'Raw' layer spanning the
                # rest of the packet.  BUG FIX: use == not 'is' - identity of
                # string literals is an interning accident, not a guarantee.
                if f.name == 'load':
                    size = len(original_pkt)
                    layer_name = 'Raw'
                fields_dict[f.name] = [f.offset, size]
            fields_dict['global_offset'] = original_pkt.offset
            original_pkt = original_pkt.payload
            offsets_by_layers[layer_name] = fields_dict
        assert_equal(tested_offsets_by_layers, offsets_by_layers)

    def _offsets_and_buffer_test_method(self, mac_src, mac_dst, ip_src, ip_dst):
        # Cross-check reported offsets against the raw bytes of a built packet.
        pkt = Ether(src=mac_src, dst=mac_dst) / IP(src=ip_src, dst=ip_dst) / TCP()
        pkt_descriptor = "Ether(src='" + mac_src + "',dst='" + mac_dst + "')/IP(src='" + ip_src + "',dst='" + ip_dst + "')/TCP()"
        pkt_offsets = self.s._get_all_pkt_offsets(pkt_descriptor)
        pkt_buffer = str(pkt)

        def field_hex(layer, field):
            # Slice the raw buffer at [global_offset + field_offset, +size).
            start = pkt_offsets[layer][field][0] + pkt_offsets[layer]['global_offset']
            end = start + pkt_offsets[layer][field][1]
            return binascii.b2a_hex(pkt_buffer[start:end])

        assert_equal(field_hex('Ether', 'dst'), mac_dst.translate(None, ':'))
        assert_equal(field_hex('Ether', 'src'), mac_src.translate(None, ':'))
        assert_equal(field_hex('IP', 'dst'), binascii.hexlify(socket.inet_aton(ip_dst)))
        assert_equal(field_hex('IP', 'src'), binascii.hexlify(socket.inet_aton(ip_src)))

    def test_multi_packet(self):
        packets= [
            'Ether()',
            'Ether()/IP()',
            'TCP()',
            'UDP()',
            'Ether()/IP()/TCP()/"test"',
            'Ether()/IP()/UDP()',
            'Ether()/IP(src="16.0.0.1",dst="48.0.0.1")',
            'Ether()/IP(src="16.0.0.1",dst="48.0.0.1")',
            'Ether()/Dot1Q(vlan=12)/Dot1Q(vlan=12)/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)',
            'Ether()/Dot1Q(vlan=12)/IP(src="16.0.0.1",dst="48.0.0.1")/TCP(dport=12,sport=1025)',
            'Ether()/Dot1Q(vlan=12)/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)',
            'Ether()/Dot1Q(vlan=12)/IPv6(src="::5")/TCP(dport=12,sport=1025)',
            'Ether()/IP()/UDP()/IPv6(src="::5")/TCP(dport=12,sport=1025)',
            'Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)',
            'Ether()/IP(dst="48.0.0.1")/TCP(dport=80,flags="S")',
            'Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)',
            'Ether() / IP(src = "16.0.0.1", dst = "48.0.0.1") / UDP(dport = 12, sport = 1025)',
            'Ether()/IP()/UDP()',
            'Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)',
            'Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)',
            'Ether()/IP(src="16.0.0.2",dst="48.0.0.1")/UDP(dport=12,sport=1025)',
            'Ether()/IP(src="16.0.0.3",dst="48.0.0.1")/UDP(dport=12,sport=1025)',
            r'Ether()/IP()/IPv6()/IP(dst="48.0.0.1",options=IPOption("\x01\x01\x01\x00"))/UDP(dport=12,sport=1025)',
            r'Ether()/IP(dst="48.0.0.1",options=IPOption("\x01\x01\x01\x00"))/UDP(dport=12,sport=1025)',
            'Ether()',
            'Ether()/IP()/UDP(sport=1337,dport=4789)/VXLAN(vni=42)/Ether()/IP()/("x"*20)',
            'Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=3797,sport=3544)/IPv6(dst="2001:0:4137:9350:8000:f12a:b9c8:2815",src="2001:4860:0:2001::68")/UDP(dport=12,sport=1025)/ICMPv6Unknown()',
            'Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(sport=1025)/DNS()',
            'Ether()/MPLS(label=17,cos=1,s=0,ttl=255)/MPLS(label=0,cos=1,s=1,ttl=12)/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/("x"*20)',
            'Ether()/MPLS(label=17,cos=1,s=0,ttl=255)/MPLS(label=12,cos=1,s=1,ttl=12)/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/("x"*20)',
            'Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/ICMP(type=3)',
            'Ether()/IP()/GRE()/("x"*2)']

        for packet in packets:
            self._get_all_offsets_test_method(packet)

        for packet in packets:
            self._build_packet_test_method(packet)

    def test_offsets_and_buffer(self):
        self._offsets_and_buffer_test_method('ab:cd:ef:12:34:56','98:76:54:32:1a:bc','127.1.1.1','192.168.1.1')
        self._offsets_and_buffer_test_method('bb:bb:bb:bb:bb:bb','aa:aa:aa:aa:aa:aa','1.1.1.1','0.0.0.0')
+
class scapy_server_thread(threading.Thread):
    """Background thread hosting a Scapy ZMQ server.

    run() blocks inside scapy_zmq_server.main() until the server receives a
    'shut_down' request, so callers must send one before join()-ing.
    """

    def __init__(self, thread_id, server_port=5555):
        super(scapy_server_thread, self).__init__()
        self.thread_id = thread_id
        self.server_port = server_port

    def run(self):
        print('\nStarted scapy thread server')
        scapy_zmq_server.main(self.server_port)
        print('Thread server closed')
+
# Scapy_server_wrapper is the CLIENT for the scapy server, it wraps the CLIENT: its default port is set to 5555, default server ip set to localhost
class scapy_server_tester(scapy_service_tester):
    # Re-runs every scapy_service_tester test through a real ZMQ client/server
    # pair: a fresh server thread is started per test and self.s is replaced by
    # the remote wrapper, which exposes the same methods as Scapy_service.
    def setUp(self):
        self.thread1 = scapy_server_thread(thread_id=1)
        self.thread1.start()
        self.s = Scapy_server_wrapper(dest_scapy_port=5555,server_ip_address='localhost')

    def tearDown(self):
        # The 'shut_down' RPC makes the server loop exit, so the thread can be joined.
        self.s.call_method('shut_down',[])
        self.thread1.join()
+
+
+
+
diff --git a/scripts/automation/trex_control_plane/examples/zmq_server_client.py b/scripts/automation/trex_control_plane/examples/zmq_server_client.py
new file mode 100755
index 00000000..15f37f1a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/examples/zmq_server_client.py
@@ -0,0 +1,45 @@
+import sys
+import os
+python2_zmq_path = os.path.abspath(os.path.join(os.pardir,os.pardir,os.pardir,
+ 'external_libs','pyzmq-14.5.0','python2','fedora18','64bit'))
+sys.path.append(python2_zmq_path)
+import zmq
+import json
+from argparse import *
+
+parser = ArgumentParser(description=' Runs a Scapy Server Client example ')
+parser.add_argument('-p','--dest-scapy-port',type=int, default = 4507, dest='dest_scapy_port',
+ help='Select port to which this Scapy Server client will send to.\n default is 4507\n',action='store')
+parser.add_argument('-s','--server',type=str, default = 'localhost', dest='dest_scapy_ip',
+ help='Remote server IP address .\n default is localhost\n',action='store')
+
+args = parser.parse_args()
+
+dest_scapy_port = args.dest_scapy_port
+dest_scapy_ip = args.dest_scapy_ip
+
+context = zmq.Context()
+
+# Socket to talk to server
+print 'Connecting:'
+socket = context.socket(zmq.REQ)
+socket.connect("tcp://"+str(dest_scapy_ip)+":"+str(dest_scapy_port))
+try:
+ while True:
+ command = raw_input("enter RPC command [enter quit to exit]:\n")
+ if (command == 'quit'):
+ break
+ user_parameter = raw_input("input for command [should be left blank if not needed]:\n")
+ json_rpc_req = { "jsonrpc":"2.0","method": command ,"params":[user_parameter], "id":"1"}
+ request = json.dumps(json_rpc_req)
+ print("Sending request in json format %s" % request)
+ socket.send(request)
+
+ # Get the reply.
+ message = socket.recv()
+ print("Received reply %s [ %s ]" % (request, message))
+except KeyboardInterrupt:
+ print('Terminated By Ctrl+C')
+ socket.close()
+ context.destroy()
+
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_service.py b/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_service.py
new file mode 100755
index 00000000..311dc8bd
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_service.py
@@ -0,0 +1,334 @@
+
+import os
+import sys
+stl_pathname = os.path.abspath(os.path.join(os.pardir, os.pardir))
+additional_stl_udp_pkts = os.path.abspath(os.path.join(os.pardir, os.pardir,'stl'))
+sys.path.append(stl_pathname)
+sys.path.append(additional_stl_udp_pkts)
+import trex_stl_lib
+from trex_stl_lib.api import *
+from copy import deepcopy
+import sys
+import tempfile
+import hashlib
+import binascii
+from pprint import pprint
+from scapy.layers.dns import *
+from udp_1pkt_vxlan import VXLAN
+from udp_1pkt_mpls import MPLS
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from io import StringIO
+
+
+
+
class Scapy_service_api():
    """Abstract API exposed by the Scapy service (implemented by Scapy_service).

    BUG FIX: in the original, every method's documentation string was placed
    *before* its def, making it a no-op class-level expression - the methods
    had no __doc__ at all.  The strings now live inside the methods.
    """

    def get_all(self):
        """ get_all(self)

        Sends all the protocols and fields that Scapy Service supports.
        also sends the md5 of the Protocol DB and Fields DB used to check if the DB's are up to date

        Parameters
        ----------
        None

        Returns
        -------
        Dictionary (of protocol DB and scapy fields DB)

        Raises
        ------
        Raises an exception when a DB error occurs (i.e a layer is not loaded properly and has missing components)
        """
        pass

    def check_update(self, db_md5, field_md5):
        """ check_update(self,db_md5,field_md5)

        Checks if the Scapy Service running on the server has a newer version of the databases that the client has

        Parameters
        ----------
        db_md5 - The md5 that was delivered with the protocol database that the client owns, when first received at the client
        field_md5 - The md5 that was delivered with the fields database that the client owns, when first received at the client

        Returns
        -------
        True/False according to the databases' versions (determined by their md5)

        Raises
        ------
        Raises an exception (ScapyException) when protocol DB/Fields DB is not up to date
        """
        pass

    def build_pkt(self, pkt_descriptor):
        """ build_pkt(self,pkt_descriptor) -> Dictionary (of Offsets,Show2 and Buffer)

        Performs calculations on the given packet and returns results for that packet.

        Parameters
        ----------
        pkt_descriptor - A string describing a network packet, in Scapy Format

        Returns
        -------
        - The packets offsets: each field in every layer is mapped inside the Offsets Dictionary
        - The Show2: A description of each field and its value in every layer of the packet
        - The Buffer: The Hexdump of packet encoded in base64

        Raises
        ------
        will raise an exception when the Scapy string format is illegal, contains syntax error, contains non-supported
        protocol, etc.
        """
        pass

    def get_tree(self):
        """ get_tree(self) -> Dictionary describing an example of hierarchy in layers

        Scapy service holds a tree of layers that can be stacked to a recommended packet
        according to the hierarchy

        Parameters
        ----------
        None

        Returns
        -------
        Returns an example hierarchy tree of layers that can be stacked to a packet

        Raises
        ------
        None
        """
        pass
+
+
+
class ScapyException(Exception): pass


class Scapy_service(Scapy_service_api):
    """Scapy-backed implementation of Scapy_service_api.

    Builds a database of every protocol/field Scapy knows (by capturing the
    output of scapy's ls()) and answers packet build / offset queries.
    """

#----------------------------------------------------------------------------------------------------
    class scapyRegex:
        # Pairs a Scapy field type with a validation regex for client-side
        # input checking; 'empty' marks fields with no known pattern.
        def __init__(self,FieldName,regex='empty'):
            self.FieldName = FieldName
            self.regex = regex

        def stringRegex(self):
            return self.regex
#----------------------------------------------------------------------------------------------------
    def __init__(self):
        self.Raw = {'Raw':''}
        self.high_level_protocols = ['Raw']
        self.transport_protocols = {'TCP':self.Raw,'UDP':self.Raw}
        self.network_protocols = {'IP':self.transport_protocols ,'ARP':''}
        self.low_level_protocols = { 'Ether': self.network_protocols }
        self.regexDB= {'MACField' : self.scapyRegex('MACField','^([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F])$'),
                      'IPField' : self.scapyRegex('IPField','^(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])$')}
        self.all_protocols = self._build_lib()
        self.protocol_tree = {'ALL':{'Ether':{'ARP':{},'IP':{'TCP':{'RAW':'payload'},'UDP':{'RAW':'payload'}}}}}

    def _all_protocol_structs(self):
        # ls() prints to stdout; redirect it to capture the full listing.
        old_stdout = sys.stdout
        sys.stdout = mystdout = StringIO()
        ls()
        sys.stdout = old_stdout
        all_protocol_data= mystdout.getvalue()
        return all_protocol_data

    def _protocol_struct(self,protocol):
        # Names containing '_' are scapy-internal helpers, not real protocols.
        if '_' in protocol:
            return []
        if not protocol=='':
            if protocol not in self.all_protocols:
                return 'protocol not supported'
        protocol = eval(protocol)
        old_stdout = sys.stdout
        sys.stdout = mystdout = StringIO()
        ls(protocol)
        sys.stdout = old_stdout
        protocol_data= mystdout.getvalue()
        return protocol_data

    def _build_lib(self):
        # The protocol name is the first ':'-separated token of each ls() line.
        lib = self._all_protocol_structs()
        lib = lib.splitlines()
        all_protocols=[]
        for entry in lib:
            entry = entry.split(':')
            all_protocols.append(entry[0].strip())
        del all_protocols[len(all_protocols)-1]  # drop the trailing empty entry
        return all_protocols

    def _parse_description_line(self,line):
        # "name : FieldType = default" -> ('name', 'FieldType', 'default')
        line_arr = [x.strip() for x in re.split(': | = ',line)]
        return tuple(line_arr)

    def _parse_entire_description(self,description):
        description = description.split('\n')
        description_list = [self._parse_description_line(x) for x in description]
        del description_list[len(description_list)-1]  # drop the trailing empty line
        return description_list

    def _get_protocol_details(self,p_name):
        protocol_str = self._protocol_struct(p_name)
        if protocol_str=='protocol not supported':
            return 'protocol not supported'
        # BUG FIX: 'is 0' tested identity and only worked via small-int caching;
        # use a value comparison.
        if len(protocol_str) == 0:
            return []
        tupled_protocol = self._parse_entire_description(protocol_str)
        return tupled_protocol

    def _print_tree(self):
        pprint(self.protocol_tree)

    def _get_all_db(self):
        # Map every known protocol name to its parsed field description tuples.
        db = {}
        for pro in self.all_protocols:
            details = self._get_protocol_details(pro)
            db[pro] = details
        return db

    def _get_all_fields(self):
        # Collect the unique field *types* across all protocols and attach a
        # validation regex to each (from regexDB when one is known).
        fields = []
        for pro in self.all_protocols:
            details = self._get_protocol_details(pro)
            for i in range(0,len(details),1):
                if len(details[i]) == 3:
                    fields.append(details[i][1])
        uniqueFields = list(set(fields))
        fieldDict = {}
        for f in uniqueFields:
            if f in self.regexDB:
                fieldDict[f] = self.regexDB[f].stringRegex()
            else:
                fieldDict[f] = self.scapyRegex(f).stringRegex()
        return fieldDict

    def _show2_to_dict(self,pkt):
        # Capture pkt.show2() output and convert it to {layer: {field: value}}.
        old_stdout = sys.stdout
        sys.stdout = mystdout = StringIO()
        pkt.show2()
        sys.stdout = old_stdout
        show2data = mystdout.getvalue() #show2 data
        listedShow2Data = show2data.split('###')
        show2Dict = {}
        # show2 output alternates "### [ Layer ] ###" headers and field bodies.
        for i in range(1,len(listedShow2Data)-1,2):
            protocol_fields = listedShow2Data[i+1]
            protocol_fields = protocol_fields.split('\n')[1:-1]
            protocol_fields = [f.strip() for f in protocol_fields]
            protocol_fields_dict = {}
            for f in protocol_fields:
                field_data = f.split('=')
                if len(field_data)!= 1 :
                    field_name = field_data[0].strip()
                    protocol_fields_dict[field_name] = field_data[1].strip()
            layer_name = re.sub(r'\W+', '',listedShow2Data[i]) #clear layer name to include only alpha-numeric
            show2Dict[layer_name] = protocol_fields_dict
        return show2Dict

#pkt_desc as string
#returns a dictionary of offsets per layer: for each field a list [offset, size_in_bytes],
#plus a 'global_offset' entry holding the layer's offset inside the packet
    def _get_all_pkt_offsets(self,pkt_desc):
        pkt_protocols = pkt_desc.split('/')
        scapy_pkt = eval(pkt_desc)
        scapy_pkt.build()  # build() populates the per-field offsets read below
        total_protocols = len(pkt_protocols)
        res = {}
        for i in range(total_protocols):
            fields = {}
            for field in scapy_pkt.fields_desc:
                size = field.get_size_bytes()
                layer_name = pkt_protocols[i].partition('(')[0] #clear layer name to include only alpha-numeric
                layer_name = re.sub(r'\W+', '',layer_name)
                # BUG FIX: 'is' -> '==' ; identity of string literals is an
                # interning accident, not a guarantee.
                if field.name == 'load':
                    layer_name ='Raw'
                    size = len(scapy_pkt)
                fields[field.name]=[field.offset, size]
            fields['global_offset'] = scapy_pkt.offset
            res[layer_name] = fields
            scapy_pkt=scapy_pkt.payload
        return res

#input: container
#output: md5 encoded in base64
    def _get_md5(self,container):
        container = json.dumps(container)
        m = hashlib.md5()
        m.update(container.encode('ascii'))
        res_md5 = binascii.b2a_base64(m.digest())
        return res_md5

    def get_version(self):
        return {'built_by':'itraviv','version':'v1.01'}

    def supported_methods(self,method_name=''):
        # With no argument: map every method name to (arg_count, arg_names).
        # With a name: report whether that method exists.
        if method_name=='':
            methods = {}
            for f in dir(Scapy_service):
                if inspect.ismethod(eval('Scapy_service.'+f)):
                    param_list = inspect.getargspec(eval('Scapy_service.'+f))[0]
                    del param_list[0] #deleting the parameter "self" that appears in every method
                                      #because the server automatically operates on an instance,
                                      #and this can cause confusion
                    methods[f] = (len(param_list), param_list)
            return methods
        if method_name in dir(Scapy_service):
            return True
        return False

#--------------------------------------------API implementation-------------
    def get_tree(self):
        return self.protocol_tree

# pkt_descriptor in string format
    def build_pkt(self,pkt_descriptor):
        # NOTE(review): eval() of a client-supplied string means any remote
        # caller gets arbitrary code execution - acceptable only on a trusted
        # internal network.
        pkt = eval(pkt_descriptor)
        show2data = self._show2_to_dict(pkt)
        bufferData = str(pkt) #pkt buffer
        bufferData = binascii.b2a_base64(bufferData)
        pkt_offsets = self._get_all_pkt_offsets(pkt_descriptor)
        res = {}
        res['show2'] = show2data
        res['buffer'] = bufferData
        res['offsets'] = pkt_offsets
        return res

    def get_all(self):
        fields=self._get_all_fields()
        db=self._get_all_db()
        fields_md5 = self._get_md5(fields)
        db_md5 = self._get_md5(db)
        res = {}
        res['db'] = db
        res['fields'] = fields
        res['db_md5'] = db_md5
        res['fields_md5'] = fields_md5
        return res

#input in string encoded base64
    def check_update(self,db_md5,field_md5):
        fields=self._get_all_fields()
        db=self._get_all_db()
        current_db_md5 = self._get_md5(db)
        current_field_md5 = self._get_md5(fields)
        # BUG FIX: str.decode("base64") is Python-2-only; binascii.a2b_base64
        # does the same decode portably (and still tolerates the trailing
        # newline b2a_base64 appends).
        if (binascii.a2b_base64(field_md5) == binascii.a2b_base64(current_field_md5)):
            if (binascii.a2b_base64(db_md5) == binascii.a2b_base64(current_db_md5)):
                return True
            else:
                raise ScapyException("Protocol DB is not up to date")
        else:
            raise ScapyException("Fields DB is not up to date")
+
+
+#---------------------------------------------------------------------------
+
+
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_zmq_client.py b/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_zmq_client.py
new file mode 100644
index 00000000..24e1593e
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_zmq_client.py
@@ -0,0 +1,44 @@
+
+
+import zmq
+import json
+
+
class Scapy_server_wrapper():
    """JSON-RPC-over-ZMQ client for the Scapy server.

    Exposes the same calls as Scapy_service so test code can use either
    implementation interchangeably.
    """

    def __init__(self,dest_scapy_port=5555,server_ip_address='localhost'):
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.REQ)
        self.dest_scapy_port =dest_scapy_port
        endpoint = "tcp://"+str(server_ip_address)+":"+str(self.dest_scapy_port)
        self.socket.connect(endpoint) #ip address of csi-trex-11

    def call_method(self,method_name,method_params):
        """Issue one JSON-RPC request; return 'result' or an {'error': ...} dict."""
        json_rpc_req = { "jsonrpc":"2.0","method": method_name ,"params": method_params, "id":"1"}
        self.socket.send_string(json.dumps(json_rpc_req))
        # REQ sockets block here until the matching reply arrives.
        reply = json.loads(self.socket.recv_string())
        if 'result' in reply.keys():
            return reply['result']
        return {'error': reply['error']}

    def get_all(self):
        return self.call_method('get_all',[])

    def check_update(self,db_md5,field_md5):
        """Re-raise the server-side check_update exceptions on the client side."""
        result = self.call_method('check_update',[db_md5,field_md5])
        if result!=True:
            if 'error' in result.keys():
                # 'message:' (with the colon) is the key the server really uses.
                if "Fields DB is not up to date" in result['error']['message:']:
                    raise Exception("Fields DB is not up to date")
                if "Protocol DB is not up to date" in result['error']['message:']:
                    raise Exception("Protocol DB is not up to date")
        return result

    def build_pkt(self,pkt_descriptor):
        return self.call_method('build_pkt',[pkt_descriptor])

    def _get_all_pkt_offsets(self,pkt_desc):
        return self.call_method('_get_all_pkt_offsets',[pkt_desc])
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_zmq_server.py b/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_zmq_server.py
new file mode 100755
index 00000000..0b88668a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_zmq_server.py
@@ -0,0 +1,152 @@
+
+import time
+import sys
+import os
+
+python2_zmq_path = os.path.abspath(os.path.join(os.pardir,os.pardir,os.pardir,os.pardir,
+ os.pardir,'external_libs','pyzmq-14.5.0','python2','fedora18','64bit'))
+stl_pathname = os.path.abspath(os.path.join(os.pardir, os.pardir))
+sys.path.append(stl_pathname)
+sys.path.append(python2_zmq_path)
+import zmq
+import inspect
+from scapy_service import *
+from argparse import *
+import socket
+
+
class ParseException(Exception): pass      # JSON-RPC -32700
class InvalidRequest(Exception): pass      # JSON-RPC -32600
class MethodNotFound(Exception): pass      # JSON-RPC -32601
class InvalidParams(Exception): pass       # JSON-RPC -32603

class Scapy_wrapper:
    """Validates incoming JSON-RPC requests and dispatches them to Scapy_service."""

    def __init__(self):
        self.scapy_master = Scapy_service()

    def parse_req_msg(self,JSON_req):
        """Parse a JSON-RPC request string into (method, params, req_id).

        Raises ParseException / InvalidRequest / MethodNotFound /
        InvalidParams, each carrying the request id for the error response.
        """
        # BUG FIX: req_id must be bound *before* json.loads - when the request
        # was not valid JSON, the original code hit NameError (req_id unbound)
        # inside the ValueError handler instead of raising ParseException.
        req_id = b'null'
        try:
            req = json.loads(JSON_req)
            if (type(req)!= type({})):
                raise ParseException(req_id)
            json_rpc_keys = ['jsonrpc','id','method']
            if ((set(req.keys())!=set(json_rpc_keys)) and (set(req.keys())!=set(json_rpc_keys+['params']))) :
                if 'id' in req.keys():
                    req_id = req['id']
                raise InvalidRequest(req_id)
            req_id = req['id']
            # 'shut_down' is handled by the server loop itself, not dispatched.
            if (req['method']=='shut_down'):
                return 'shut_down',[],req_id
            if not (self.scapy_master.supported_methods(req['method'])):
                raise MethodNotFound(req_id)
            scapy_method = getattr(self.scapy_master, req['method'])
            arg_num_for_method = len(inspect.getargspec(scapy_method)[0])
            if (arg_num_for_method>1) :
                if not ('params' in req.keys()):
                    raise InvalidRequest(req_id)
                params_len = len(req['params'])+1 # +1 because "self" is considered parameter in args for method
                if not (params_len==arg_num_for_method):
                    raise InvalidParams(req_id)
                return req['method'],req['params'],req_id
            else:
                return req['method'],[],req_id
        except ValueError:
            raise ParseException(req_id)

    def create_error_response(self,error_code,error_msg,req_id='null'):
        # Note: the 'message:' key (with a colon) is relied on by the client
        # wrapper - do not "fix" it on one side only.
        return {"jsonrpc": "2.0", "error": {"code": error_code, "message:": error_msg}, "id": req_id}

    def create_success_response(self,result,req_id=b'null'):
        return {"jsonrpc": "2.0", "result": result, "id": req_id }

    def get_exception(self):
        return sys.exc_info()

    def execute(self,method,params):
        # getattr+call instead of eval-of-a-built-string: works for any
        # argument values, not only those whose repr() round-trips via eval().
        func = getattr(self.scapy_master, method)
        if len(params)>0:
            return func(*params)
        return func()

    def error_handler(self,e,req_id):
        """Map an exception raised during request handling to a JSON-RPC error dict."""
        try:
            raise e
        except ParseException as e:
            response = self.create_error_response(-32700,'Parse error ',req_id)
        except InvalidRequest as e:
            response = self.create_error_response(-32600,'Invalid Request',req_id)
        except MethodNotFound as e:
            response = self.create_error_response(-32601,'Method not found',req_id)
        except InvalidParams as e:
            response = self.create_error_response(-32603,'Invalid params',req_id)
        except SyntaxError as e:
            response = self.create_error_response(-32097,'SyntaxError',req_id)
        except Exception as e:
            if hasattr(e,'message'):
                response = self.create_error_response(-32098,'Scapy Server: '+str(e.message),req_id)
            else:
                response = self.create_error_response(-32096,'Scapy Server: Unknown Error',req_id)
        finally:
            return response
+
class Scapy_server():
    """ZMQ REP server exposing Scapy_service over JSON-RPC."""

    def __init__(self, port=4507):
        self.scapy_wrapper = Scapy_wrapper()
        self.port = port
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.REP)
        self.socket.bind("tcp://*:"+str(port))
        self.IP_address = socket.gethostbyname(socket.gethostname())

    def activate(self):
        """Serve requests until a 'shut_down' request is received or Ctrl+C."""
        print ('***Scapy Server Started***\nListening on port: %d' % self.port)
        print ('Server IP address: %s' % self.IP_address)
        try:
            while True:
                message = self.socket.recv_string()
                # BUG FIX: bind method/req_id before parsing, so the
                # shut_down check (and the error response) cannot hit an
                # unbound local when the request fails to parse.
                method = None
                req_id = 'null'
                try:
                    method,params,req_id = self.scapy_wrapper.parse_req_msg(message)
                    if (method == 'shut_down'):
                        print ('Shut down by remote user')
                        result = 'Server shut down command received - server had shut down'
                    else:
                        result = self.scapy_wrapper.execute(method,params)
                    response = self.scapy_wrapper.create_success_response(result,req_id)
                except Exception as e:
                    response = self.scapy_wrapper.error_handler(e,req_id)
                finally:
                    json_response = json.dumps(response)
                    # Send reply back to client
                    self.socket.send_string(json_response)
                if (method == 'shut_down'):
                    break

        except KeyboardInterrupt:
            # BUG FIX: dropped the stray b'' prefix - under Python 3 it printed
            # the bytes repr instead of the message.
            print('Terminated By Ctrl+C')

        finally:
            self.socket.close()
            self.context.destroy()
+
+
+
def main(arg1=4507):
    # arg1 is the TCP port the server listens on.
    server = Scapy_server(arg1)
    server.activate()


if __name__ == '__main__':
    port = 4507
    if len(sys.argv) > 1:
        parser = ArgumentParser(description=' Runs Scapy Server ')
        parser.add_argument('-s','--scapy-port',type=int, default = 4507, dest='scapy_port',
                            help='Select port to which Scapy Server will listen to.\n default is 4507\n',action='store')
        args = parser.parse_args()
        port = args.scapy_port
    sys.exit(main(port))
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/zmq_for_scapy_server_test.py b/scripts/automation/trex_control_plane/stl/services/scapy_server/zmq_for_scapy_server_test.py
new file mode 100755
index 00000000..8f7f7b01
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/zmq_for_scapy_server_test.py
@@ -0,0 +1,14 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/GAObjClass.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/GAObjClass.py
index 164aae7a..3993ad5e 100755
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/GAObjClass.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/GAObjClass.py
@@ -1,4 +1,3 @@
-#import requests # need external lib for that
try: # Python2
import Queue
from urllib2 import *
@@ -9,7 +8,7 @@ except: # Python3
import threading
import sys
from time import sleep
-
+from pprint import pprint
"""
GAObjClass is a class destined to send Google Analytics Information.
@@ -26,57 +25,88 @@ A maximum of 20 hits can be specified per request.
The total size of all hit payloads cannot be greater than 16K bytes.
No single hit payload can be greater than 8K bytes.
"""
-
-url_single = 'http://www.google-analytics.com/collect' #sending single event
-url_batched = 'http://www.google-analytics.com/batch' #sending batched events
-url_debug = 'http://www.google-analytics.com/debug/collect' #verifying hit is valid
+url_single = 'https://www.google-analytics.com/collect' #sending single event
+url_batched = 'https://www.google-analytics.com/batch' #sending batched events
+url_debug = 'https://www.google-analytics.com/debug/collect' #verifying hit is valid
url_conn = 'http://172.217.2.196' # testing internet connection to this address (google-analytics server)
+#..................................................................class GA_ObjClass................................................................
+class GA_ObjClass:
+ def __init__(self,cid,trackerID,appName,appVer):
+ self.cid = cid
+ self.trackerID = trackerID
+ self.appName = appName
+ self.appVer = appVer
+ self.payload = ''
+ self.payload = GA_ObjClass.generate_payload(self)
+ self.size = sys.getsizeof(self.payload)
+
+ def generate_payload(self):
+ self.payload+='v=1&t=event&tid='+str(self.trackerID)
+ self.payload+='&cid='+str(self.cid)
+ self.payload+='&an='+str(self.appName)
+ self.payload+='&av='+str(self.appVer)
+ return self.payload
+
#..................................................................class GA_EVENT_ObjClass................................................................
-class GA_EVENT_ObjClass:
+class GA_EVENT_ObjClass(GA_ObjClass):
def __init__(self,cid,trackerID,command,action,label,value,appName,appVer):
- self.cid = cid
- self.trackerID = trackerID
+ GA_ObjClass.__init__(self,cid,trackerID,appName,appVer)
self.command = command
self.action = action
self.label = label
self.value = value
- self.appName = appName
- self.appVer = appVer
- self.generate_payload()
+ self.payload = self.generate_payload()
self.size = sys.getsizeof(self.payload)
def generate_payload(self):
- self.payload ='v=1&t=event&tid='+str(self.trackerID)
- self.payload+='&cid='+str(self.cid)
self.payload+='&ec='+str(self.command)
self.payload+='&ea='+str(self.action)
self.payload+='&el='+str(self.label)
self.payload+='&ev='+str(self.value)
- self.payload+='&an='+str(self.appName)
- self.payload+='&av='+str(self.appVer)
+ return self.payload
#..................................................................class GA_EXCEPTION_ObjClass................................................................
#ExceptionFatal - BOOLEAN
-class GA_EXCEPTION_ObjClass:
+class GA_EXCEPTION_ObjClass(GA_ObjClass):
def __init__(self,cid,trackerID,ExceptionName,ExceptionFatal,appName,appVer):
- self.cid = cid
- self.trackerID = trackerID
+ GA_ObjClass.__init__(self,cid,trackerID,appName,appVer)
self.ExceptionName = ExceptionName
self.ExceptionFatal = ExceptionFatal
- self.appName = appName
- self.appVer = appVer
- self.generate_payload()
+ self.payload = self.generate_payload()
def generate_payload(self):
- self.payload ='v=1&t=exception&tid='+str(self.trackerID)
- self.payload+='&cid='+str(self.cid)
self.payload+='&exd='+str(self.ExceptionName)
self.payload+='&exf='+str(self.ExceptionFatal)
- self.payload+='&an='+str(self.appName)
- self.payload+='&av='+str(self.appVer)
+ return self.payload
+
+
+
+#..................................................................class GA_TESTING_ObjClass................................................................
+class GA_TESTING_ObjClass(GA_ObjClass):
+ def __init__(self,cid,uuid,trackerID,TRexMode,test_name,setup_name,appName,appVer,commitID,bandwidthPerCore,goldenBPC):
+ GA_ObjClass.__init__(self,cid,trackerID,appName,appVer)
+ self.uid = uuid
+ self.TRexMode = TRexMode
+ self.test_name = test_name
+ self.setup_name = setup_name
+ self.commitID = commitID
+ self.bandwidthPerCore = bandwidthPerCore
+ self.goldenBPC = goldenBPC
+ self.payload = self.generate_payload()
+ self.size = sys.getsizeof(self.payload)
+ def generate_payload(self):
+ self.payload+='&ec='+str(self.TRexMode)
+ self.payload+='&ea=RegressionReport'
+ self.payload+='&cd5='+str(self.uid)
+ self.payload+='&cd1='+str(self.test_name)
+ self.payload+='&cd2='+str(self.setup_name)
+ self.payload+='&cd3='+str(self.commitID)
+ self.payload+='&cm1='+str(self.bandwidthPerCore)
+ self.payload+='&cm2='+str(self.goldenBPC)
+ return self.payload
#.....................................................................class ga_Thread.................................................................
"""
@@ -85,7 +115,6 @@ Google analytics thread manager:
will report and empty the queue of Google Analytics items to the GA server, every Timeout (parameter given on initialization)
will perform connectivity check every timeout*10 seconds
-
"""
class ga_Thread (threading.Thread):
@@ -93,7 +122,6 @@ class ga_Thread (threading.Thread):
threading.Thread.__init__(self)
self.threadID = threadID
self.gManager = gManager
-
def run(self):
keepAliveCounter=0
#sys.stdout.write('thread started \n')
@@ -116,9 +144,6 @@ class ga_Thread (threading.Thread):
self.gManager.threadLock.release()
# sys.stdout.write('finished \n')
# sys.stdout.flush()
-
-
-
#.....................................................................class GAmanager.................................................................
"""
@@ -145,6 +170,7 @@ BlockingMode - set to 1 if you wish every Google Analytic Object will be submitt
*** Restriction - Google's restriction for amount of packages being sent per session per second is: 1 event per second, per session. session length is 30min ***
"""
+
class GAmanager:
def __init__(self,GoogleID,UserID,appName,appVer,QueueSize,Timeout,UserPermission,BlockingMode):
self.UserID = UserID
@@ -173,13 +199,13 @@ class GAmanager:
self.gaAddObject(GA_EXCEPTION_ObjClass(self.UserID,self.GoogleID,ExceptionName,ExceptionFatal,self.appName,self.appVer))
def gaAddObject(self,Object):
- if self.BlockingMode==1:
- while self.GA_q.full():
+ if (self.BlockingMode==1):
+ while (self.GA_q.full()):
sleep(self.Timeout)
# sys.stdout.write('blocking mode=1 \n queue full - sleeping for timeout \n') # within Timout, the thread will empty part of the queue
# sys.stdout.flush()
lockState = self.threadLock.acquire(self.BlockingMode)
- if lockState==1:
+ if (lockState==1):
# sys.stdout.write('got lock, adding item \n')
# sys.stdout.flush()
try:
@@ -198,21 +224,23 @@ class GAmanager:
obj_list.append(self.GA_q.get_nowait().payload)
items+=1
# print items
+ return obj_list
def reportBatched(self,batched):
req = Request(url_batched, data=batched.encode('ascii'))
urlopen(req)
- #requests.post(url_batched,data=batched)
-
+# pprint(r.json())
+
def emptyAndReportQ(self):
obj_list = []
- self.emptyQueueToList(obj_list)
- if not len(obj_list):
+ obj_list = self.emptyQueueToList(obj_list)
+ if (len(obj_list)==0):
return
batched = '\n'.join(obj_list)
+# print sys.getsizeof(batched)
# print batched # - for debug
self.reportBatched(batched)
-
+
def printSelf(self):
print('remaining in queue:')
while not self.GA_q.empty():
@@ -231,24 +259,65 @@ class GAmanager:
self.thread.start()
+#.....................................................................class GAmanager_Regression.................................................................
+"""
+ *-*-*-*-Google Analytics Regression Manager-*-*-*-*
+ attributes:
+GoogleID - the tracker ID that Google uses in order to track the activity of a property. for regression use: 'UA-75220362-4'
+AnalyticsUserID - text value - used by Google to differentiate between 2 users sending data. (will not be presented on reports). use only as a way to differentiate between different users
+UUID - text - will be presented on analysis. put here UUID
+TRexMode - text - will be presented on analysis. put here TRexMode
+appName - text - will be presented on analysis. put here appName as string describing app name
+appVer - text - will be presented on analysis. put here the appVer
+QueueSize - integer - determines the queue size. the queue will hold pending request before submission. RECOMMENDED VALUE: 20
+Timeout - integer (seconds) - the timeout in seconds between automated reports when activating reporting thread
+UserPermission - boolean (1/0) - required in order to send packets, should be 1.
+BlockingMode - boolean (1/0) - required when each tracked event is critical and program should halt until the event is reported
+SetupName - text - will be presented on analysis. put here setup name as string.
+CommitID - text - will be presented on analysis. put here CommitID
+"""
+class GAmanager_Regression(GAmanager):
+ def __init__(self,GoogleID,AnalyticsUserID,UUID,TRexMode,appName,appVer,
+ QueueSize,Timeout,UserPermission,BlockingMode,SetupName,CommitID):
+ GAmanager.__init__(self,GoogleID,AnalyticsUserID,appName,appVer,
+ QueueSize,Timeout,UserPermission,BlockingMode)
+ self.UUID = UUID
+ self.TRexMode = TRexMode
+ self.SetupName = SetupName
+ self.CommitID = CommitID
+
+ def gaAddTestQuery(self,TestName,BandwidthPerCore,GoldenBPC):
+ self.gaAddObject(GA_TESTING_ObjClass(self.UserID,self.UUID,self.GoogleID,
+ self.TRexMode,TestName,self.SetupName,
+ self.appName,self.appVer,self.CommitID,
+ BandwidthPerCore,GoldenBPC))
+
+
+
#***************************************------TEST--------------**************************************
-if __name__ == '__main__':
- g = GAmanager(GoogleID='UA-75220362-4',UserID="Foo",QueueSize=100,Timeout=5,UserPermission=1,BlockingMode=1,appName='TRex',appVer='1.11.232') #timeout in seconds
-#for i in range(0,35,1):
-#i = 42
- g.gaAddAction(Event='stl',action='stl/udp_1pkt_simple.py {packet_count:1000,packet_len:9000}',label='Boo',value=20)
- #g.gaAddAction(Event='test',action='start',label='Boo1',value=20)
+#if __name__ == '__main__':
+
+#g= GAmanager_Regression(GoogleID='UA-75220362-4',AnalyticsUserID=3845,UUID='trex18UUID_GA_TEST',TRexMode='stateFull_GA_TEST',
+# appName='TRex_GA_TEST',appVer='1.1_GA_TEST',QueueSize=20,Timeout=11,UserPermission=1,BlockingMode=0,SetupName='setup1_GA_TEST',CommitID='commitID1_GA_TEST')
+#for j in range(1,3,1):
+#for i in range(100,118,1):
+# g.gaAddTestQuery('test_name_GA_TEST',i+0.5,150)
+# sleep(11)
+# print "finished batch"
+#g.emptyAndReportQ()
+
+#g.printSelf()
+#g.emptyAndReportQ()
+
+#g = GAmanager(GoogleID='UA-75220362-4',UserID=1,QueueSize=100,Timeout=5,UserPermission=1,BlockingMode=0,appName='TRex',appVer='1.11.232') #timeout in seconds
+#for i in range(0,35,1):
+# g.gaAddAction(Event='test',action='start',label='1',value=i)
#g.gaAddException('MEMFAULT',1)
-#g.gaAddException('MEMFAULT',1)
-#g.gaAddException('MEMFAULT',1)
-#g.gaAddException('MEMFAULT',1)
-#g.gaAddException('MEMFAULT',1)
-#g.gaAddException('MEMFAULT',1)
- g.emptyAndReportQ()
-# g.printSelf()
+#g.emptyAndReportQ()
+#g.printSelf()
#print g.payload
#print g.size
@@ -278,12 +347,6 @@ if __name__ == '__main__':
# sys.stdout.flush()
-# add timing mechanism - DONE
-# add exception mechanism - DONE
-# add version mechanism - DONE
-# ask Itay for unique ID generation per user
-
-