path: root/test/template_bd.py
blob: 080b2e6bcc57f1ef41ad56f6f22bf83e145cbb57
#!/usr/bin/env python

from abc import abstractmethod, ABCMeta

from scapy.layers.l2 import Ether, Raw
from scapy.layers.inet import IP, UDP

from util import ip4_range


class BridgeDomain(object):
    """ Bridge domain abstraction """
    __metaclass__ = ABCMeta
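    # This template is meant to be mixed into a concrete test class that
    # also derives from VppTestCase, which is assumed to supply the
    # assertEqual()/pg_start() helpers and the packet-generator interfaces
    # used below.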

    @property
    def frame_request(self):
        """ Ethernet frame modeling a generic request """
        return (Ether(src='00:00:00:00:00:01', dst='00:00:00:00:00:02') /
                IP(src='1.2.3.4', dst='4.3.2.1') /
                UDP(sport=10000, dport=20000) /
                Raw('\xa5' * 100))

    @property
    def frame_reply(self):
        """ Ethernet frame modeling a generic reply """
        return (Ether(src='00:00:00:00:00:02', dst='00:00:00:00:00:01') /
                IP(src='4.3.2.1', dst='1.2.3.4') /
                UDP(sport=20000, dport=10000) /
                Raw('\xa5' * 100))

    @abstractmethod
    def encap_mcast(self, pkt, src_ip, src_mac, vni):
        """ Encapsulate mcast packet """
        pass

    @abstractmethod
    def encapsulate(self, pkt, vni):
        """ Encapsulate packet """
        pass

    @abstractmethod
    def decapsulate(self, pkt):
        """ Decapsulate packet """
        pass

    @abstractmethod
    def check_encapsulation(self, pkt, vni, local_only=False,
                            mcast_pkt=False):
        """ Verify the encapsulation """
        pass
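
    # Besides implementing the abstract methods above, a concrete test is
    # expected to provide the pg0..pg3 packet-generator interfaces and the
    # attributes referenced by the tests below: single_tunnel_bd,
    # ucast_flood_bd, mcast_flood_bd and n_ucast_tunnels.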

    def assert_eq_pkts(self, pkt1, pkt2):
        """ Verify the Ether, IP, UDP, payload are equal in both
        packets
        """
        self.assertEqual(pkt1[Ether].src, pkt2[Ether].src)
        self.assertEqual(pkt1[Ether].dst, pkt2[Ether].dst)
        self.assertEqual(pkt1[IP].src, pkt2[IP].src)
        self.assertEqual(pkt1[IP].dst, pkt2[IP].dst)
        self.assertEqual(pkt1[UDP].sport, pkt2[UDP].sport)
        self.assertEqual(pkt1[UDP].dport, pkt2[UDP].dport)
        self.assertEqual(pkt1[Raw], pkt2[Raw])

    def test_decap(self):
        """ Decapsulation test
        Send encapsulated frames from pg0
        Verify receipt of decapsulated frames on pg1
        """

        encapsulated_pkt = self.encapsulate(self.frame_request,
                                            self.single_tunnel_bd)

        self.pg0.add_stream([encapsulated_pkt])

        self.pg1.enable_capture()

        self.pg_start()

        # Pick first received frame and check if it's the non-encapsulated
        # frame
        out = self.pg1.get_capture(1)
        pkt = out[0]
        self.assert_eq_pkts(pkt, self.frame_request)

    def test_encap(self):
        """ Encapsulation test
        Send frames from pg1
        Verify receipt of encapsulated frames on pg0
        """
        self.pg1.add_stream([self.frame_reply])

        self.pg0.enable_capture()

        self.pg_start()

        # Pick first received frame and check that it is correctly
        # encapsulated.
        out = self.pg0.get_capture(1)
        pkt = out[0]
        self.check_encapsulation(pkt, self.single_tunnel_bd)

        payload = self.decapsulate(pkt)
        self.assert_eq_pkts(payload, self.frame_reply)

    def test_ucast_flood(self):
        """ Unicast flood test
        Send frames from pg3
        Verify receipt of encapsulated frames on pg0
        """
        self.pg3.add_stream([self.frame_reply])

        self.pg0.enable_capture()

        self.pg_start()

        # Get a packet from each tunnel and assert it is correctly
        # encapsulated.
        out = self.pg0.get_capture(self.n_ucast_tunnels)
        for pkt in out:
            self.check_encapsulation(pkt, self.ucast_flood_bd, True)
            payload = self.decapsulate(pkt)
            self.assert_eq_pkts(payload, self.frame_reply)

    def test_mcast_flood(self):
        """ Multicast flood test
        Send frames from pg2
        Verify receipt of encapsulated frames on pg0
        """
        self.pg2.add_stream([self.frame_reply])

        self.pg0.enable_capture()

        self.pg_start()

        # Pick first received frame and check that it is correctly
        # encapsulated.
        out = self.pg0.get_capture(1)
        pkt = out[0]
        self.check_encapsulation(pkt, self.mcast_flood_bd,
                                 local_only=False, mcast_pkt=True)

        payload = self.decapsulate(pkt)
        self.assert_eq_pkts(payload, self.frame_reply)

    def test_mcast_rcv(self):
        """ Multicast receive test
        Send 20 encapsulated frames from pg0; only 10 match unicast tunnels
        Verify receipt of 10 decap frames on pg2
        """
        mac = self.pg0.remote_mac
        ip_range_start = 10
        ip_range_end = 30
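        # ip4_range() is expected to yield 20 source addresses
        # (remote_ip4 + 10 .. remote_ip4 + 29); per the docstring only 10 of
        # them match tunnels set up by the concrete test, hence the capture
        # of exactly 10 frames below.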
        mcast_stream = [
            self.encap_mcast(self.frame_request, ip, mac, self.mcast_flood_bd)
            for ip in ip4_range(self.pg0.remote_ip4,
                                ip_range_start, ip_range_end)]
        self.pg0.add_stream(mcast_stream)
        self.pg2.enable_capture()
        self.pg_start()
        out = self.pg2.get_capture(10)
        for pkt in out:
            self.assert_eq_pkts(pkt, self.frame_request)
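

# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original template): a minimal,
# hypothetical subclass showing how the abstract hooks above might be filled
# in for a VXLAN-style tunnel.  It assumes scapy ships a VXLAN layer, that
# the concrete test also mixes in VppTestCase, and it uses the customary
# VXLAN defaults (UDP port 4789, flags 0x08) rather than values taken from
# this file.
# ---------------------------------------------------------------------------
try:
    # Hypothetical dependency for the sketch; without scapy's VXLAN layer
    # the example class still imports but cannot be exercised.
    from scapy.layers.vxlan import VXLAN
except ImportError:
    VXLAN = None


class ExampleVxlanBD(BridgeDomain):
    """ Hypothetical VXLAN flavour of the bridge-domain template """

    def encapsulate(self, pkt, vni):
        # Wrap the inner frame in outer Ether/IP/UDP/VXLAN headers as if it
        # arrived from the remote tunnel endpoint on pg0.
        return (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
                IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
                UDP(sport=4789, dport=4789, chksum=0) /
                VXLAN(vni=vni, flags=0x08) /
                pkt)

    def encap_mcast(self, pkt, src_ip, src_mac, vni):
        # As encapsulate(), but sourced from an arbitrary remote peer so the
        # multicast-receive test can fan packets out over many tunnels.
        return (Ether(src=src_mac, dst=self.pg0.local_mac) /
                IP(src=src_ip, dst=self.pg0.local_ip4) /
                UDP(sport=4789, dport=4789, chksum=0) /
                VXLAN(vni=vni, flags=0x08) /
                pkt)

    def decapsulate(self, pkt):
        # Strip the outer headers and hand back the inner Ethernet frame.
        return pkt[VXLAN].payload

    def check_encapsulation(self, pkt, vni, local_only=False,
                            mcast_pkt=False):
        # The outer IP must target the remote tunnel endpoint, the UDP
        # destination port must be the VXLAN port and the VNI must match.
        self.assertEqual(pkt[IP].dst, self.pg0.remote_ip4)
        self.assertEqual(pkt[UDP].dport, 4789)
        self.assertEqual(pkt[VXLAN].vni, vni)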
pan class="fm">__call__(self, **kwargs): return self._func(**kwargs) class VPP(): """VPP interface. This class provides the APIs to VPP. The APIs are loaded from provided .api.json files and makes functions accordingly. These functions are documented in the VPP .api files, as they are dynamically created. Additionally, VPP can send callback messages; this class provides a means to register a callback function to receive these messages in a background thread. """ def process_json_file(self, apidef_file): api = json.load(apidef_file) types = {} for t in api['enums']: t[0] = 'vl_api_' + t[0] + '_t' types[t[0]] = {'type': 'enum', 'data': t} for t in api['unions']: t[0] = 'vl_api_' + t[0] + '_t' types[t[0]] = {'type': 'union', 'data': t} for t in api['types']: t[0] = 'vl_api_' + t[0] + '_t' types[t[0]] = {'type': 'type', 'data': t} i = 0 while True: unresolved = {} for k, v in types.items(): t = v['data'] if not vpp_get_type(t[0]): if v['type'] == 'enum': try: VPPEnumType(t[0], t[1:]) except ValueError: unresolved[k] = v elif v['type'] == 'union': try: VPPUnionType(t[0], t[1:]) except ValueError: unresolved[k] = v elif v['type'] == 'type': try: VPPType(t[0], t[1:]) except ValueError: unresolved[k] = v if len(unresolved) == 0: break if i > 3: raise ValueError('Unresolved type definitions {}' .format(unresolved)) types = unresolved i += 1 for m in api['messages']: try: self.messages[m[0]] = VPPMessage(m[0], m[1:]) except NotImplementedError: self.logger.error('Not implemented error for {}'.format(m[0])) def __init__(self, apifiles=None, testmode=False, async_thread=True, logger=None, loglevel=None, read_timeout=5, use_socket=False, server_address='/run/vpp-api.sock'): """Create a VPP API object. apifiles is a list of files containing API descriptions that will be loaded - methods will be dynamically created reflecting these APIs. If not provided this will load the API files from VPP's default install location. logger, if supplied, is the logging logger object to log to. loglevel, if supplied, is the log level this logger is set to report at (from the loglevels in the logging module). """ if logger is None: logger = logging.getLogger(__name__) if loglevel is not None: logger.setLevel(loglevel) self.logger = logger self.messages = {} self.id_names = [] self.id_msgdef = [] self.header = VPPType('header', [['u16', 'msgid'], ['u32', 'client_index']]) self.apifiles = [] self.event_callback = None self.message_queue = queue.Queue() self.read_timeout = read_timeout self.async_thread = async_thread if use_socket: from . vpp_transport_socket import VppTransport else: from . vpp_transport_shmem import VppTransport if not apifiles: # Pick up API definitions from default directory try: apifiles = self.find_api_files() except RuntimeError: # In test mode we don't care that we can't find the API files if testmode: apifiles = [] else: raise for file in apifiles: with open(file) as apidef_file: self.process_json_file(apidef_file) self.apifiles = apifiles # Basic sanity check if len(self.messages) == 0 and not testmode: raise ValueError(1, 'Missing JSON message definitions') self.transport = VppTransport(self, read_timeout=read_timeout, server_address=server_address) # Make sure we allow VPP to clean up the message rings. 
atexit.register(vpp_atexit, weakref.ref(self)) class ContextId(object): """Thread-safe provider of unique context IDs.""" def __init__(self): self.context = 0 self.lock = threading.Lock() def __call__(self): """Get a new unique (or, at least, not recently used) context.""" with self.lock: self.context += 1 return self.context get_context = ContextId() def get_type(self, name): return vpp_get_type(name) @classmethod def find_api_dir(cls): """Attempt to find the best directory in which API definition files may reside. If the value VPP_API_DIR exists in the environment then it is first on the search list. If we're inside a recognized location in a VPP source tree (src/scripts and src/vpp-api/python) then entries from there to the likely locations in build-root are added. Finally the location used by system packages is added. :returns: A single directory name, or None if no such directory could be found. """ dirs = [] if 'VPP_API_DIR' in os.environ: dirs.append(os.environ['VPP_API_DIR']) # perhaps we're in the 'src/scripts' or 'src/vpp-api/python' dir; # in which case, plot a course to likely places in the src tree import __main__ as main if hasattr(main, '__file__'): # get the path of the calling script localdir = os.path.dirname(os.path.realpath(main.__file__)) else: # use cwd if there is no calling script localdir = os.getcwd() localdir_s = localdir.split(os.path.sep) def dmatch(dir): """Match dir against right-hand components of the script dir""" d = dir.split('/') # param 'dir' assumes a / separator length = len(d) return len(localdir_s) > length and localdir_s[-length:] == d def sdir(srcdir, variant): """Build a path from srcdir to the staged API files of 'variant' (typically '' or '_debug')""" # Since 'core' and 'plugin' files are staged # in separate directories, we target the parent dir. return os.path.sep.join(( srcdir, 'build-root', 'install-vpp%s-native' % variant, 'vpp', 'share', 'vpp', 'api', )) srcdir = None if dmatch('src/scripts'): srcdir = os.path.sep.join(localdir_s[:-2]) elif dmatch('src/vpp-api/python'): srcdir = os.path.sep.join(localdir_s[:-3]) elif dmatch('test'): # we're apparently running tests srcdir = os.path.sep.join(localdir_s[:-1]) if srcdir: # we're in the source tree, try both the debug and release # variants. dirs.append(sdir(srcdir, '_debug')) dirs.append(sdir(srcdir, '')) # Test for staged copies of the scripts # For these, since we explicitly know if we're running a debug versus # release variant, target only the relevant directory if dmatch('build-root/install-vpp_debug-native/vpp/bin'): srcdir = os.path.sep.join(localdir_s[:-4]) dirs.append(sdir(srcdir, '_debug')) if dmatch('build-root/install-vpp-native/vpp/bin'): srcdir = os.path.sep.join(localdir_s[:-4]) dirs.append(sdir(srcdir, '')) # finally, try the location system packages typically install into dirs.append(os.path.sep.join(('', 'usr', 'share', 'vpp', 'api'))) # check the directories for existance; first one wins for dir in dirs: if os.path.isdir(dir): return dir return None @classmethod def find_api_files(cls, api_dir=None, patterns='*'): """Find API definition files from the given directory tree with the given pattern. If no directory is given then find_api_dir() is used to locate one. If no pattern is given then all definition files found in the directory tree are used. :param api_dir: A directory tree in which to locate API definition files; subdirectories are descended into. If this is None then find_api_dir() is called to discover it. 
:param patterns: A list of patterns to use in each visited directory when looking for files. This can be a list/tuple object or a comma-separated string of patterns. Each value in the list will have leading/trialing whitespace stripped. The pattern specifies the first part of the filename, '.api.json' is appended. The results are de-duplicated, thus overlapping patterns are fine. If this is None it defaults to '*' meaning "all API files". :returns: A list of file paths for the API files found. """ if api_dir is None: api_dir = cls.find_api_dir() if api_dir is None: raise RuntimeError("api_dir cannot be located") if isinstance(patterns, list) or isinstance(patterns, tuple): patterns = [p.strip() + '.api.json' for p in patterns] else: patterns = [p.strip() + '.api.json' for p in patterns.split(",")] api_files = [] for root, dirnames, files in os.walk(api_dir): # iterate all given patterns and de-dup the result files = set(sum([fnmatch.filter(files, p) for p in patterns], [])) for filename in files: api_files.append(os.path.join(root, filename)) return api_files @property def api(self): if not hasattr(self, "_api"): raise Exception("Not connected, api definitions not available") return self._api def make_function(self, msg, i, multipart, do_async): if (do_async): def f(**kwargs): return self._call_vpp_async(i, msg, **kwargs) else: def f(**kwargs): return self._call_vpp(i, msg, multipart, **kwargs) f.__name__ = str(msg.name) f.__doc__ = ", ".join(["%s %s" % (msg.fieldtypes[j], k) for j, k in enumerate(msg.fields)]) return f def _register_functions(self, do_async=False): self.id_names = [None] * (self.vpp_dictionary_maxid + 1) self.id_msgdef = [None] * (self.vpp_dictionary_maxid + 1) self._api = VppApiDynamicMethodHolder() for name, msg in vpp_iterator(self.messages): n = name + '_' + msg.crc[2:] i = self.transport.get_msg_index(n.encode()) if i > 0: self.id_msgdef[i] = msg self.id_names[i] = name # TODO: Fix multipart (use services) multipart = True if name.find('_dump') > 0 else False f = self.make_function(msg, i, multipart, do_async) setattr(self._api, name, FuncWrapper(f)) else: self.logger.debug( 'No such message type or failed CRC checksum: %s', n) def connect_internal(self, name, msg_handler, chroot_prefix, rx_qlen, do_async): pfx = chroot_prefix.encode() if chroot_prefix else None rv = self.transport.connect(name.encode(), pfx, msg_handler, rx_qlen) if rv != 0: raise IOError(2, 'Connect failed') self.vpp_dictionary_maxid = self.transport.msg_table_max_index() self._register_functions(do_async=do_async) # Initialise control ping crc = self.messages['control_ping'].crc self.control_ping_index = self.transport.get_msg_index( ('control_ping' + '_' + crc[2:]).encode()) self.control_ping_msgdef = self.messages['control_ping'] if self.async_thread: self.event_thread = threading.Thread( target=self.thread_msg_handler) self.event_thread.daemon = True self.event_thread.start() return rv def connect(self, name, chroot_prefix=None, do_async=False, rx_qlen=32): """Attach to VPP. name - the name of the client. chroot_prefix - if VPP is chroot'ed, the prefix of the jail do_async - if true, messages are sent without waiting for a reply rx_qlen - the length of the VPP message receive queue between client and server. """ msg_handler = self.transport.get_callback(do_async) return self.connect_internal(name, msg_handler, chroot_prefix, rx_qlen, do_async) def connect_sync(self, name, chroot_prefix=None, rx_qlen=32): """Attach to VPP in synchronous mode. Application must poll for events. 
name - the name of the client. chroot_prefix - if VPP is chroot'ed, the prefix of the jail rx_qlen - the length of the VPP message receive queue between client and server. """ return self.connect_internal(name, None, chroot_prefix, rx_qlen, do_async=False) def disconnect(self): """Detach from VPP.""" rv = self.transport.disconnect() self.message_queue.put("terminate event thread") return rv def msg_handler_sync(self, msg): """Process an incoming message from VPP in sync mode. The message may be a reply or it may be an async notification. """ r = self.decode_incoming_msg(msg) if r is None: return # If we have a context, then use the context to find any # request waiting for a reply context = 0 if hasattr(r, 'context') and r.context > 0: context = r.context if context == 0: # No context -> async notification that we feed to the callback self.message_queue.put_nowait(r) else: raise IOError(2, 'RPC reply message received in event handler') def decode_incoming_msg(self, msg): if not msg: self.logger.warning('vpp_api.read failed') return (i, ci), size = self.header.unpack(msg, 0) if self.id_names[i] == 'rx_thread_exit': return # # Decode message and returns a tuple. # msgobj = self.id_msgdef[i] if not msgobj: raise IOError(2, 'Reply message undefined') r, size = msgobj.unpack(msg) return r def msg_handler_async(self, msg): """Process a message from VPP in async mode. In async mode, all messages are returned to the callback. """ r = self.decode_incoming_msg(msg) if r is None: return msgname = type(r).__name__ if self.event_callback: self.event_callback(msgname, r) def _control_ping(self, context): """Send a ping command.""" self._call_vpp_async(self.control_ping_index, self.control_ping_msgdef, context=context) def validate_args(self, msg, kwargs): d = set(kwargs.keys()) - set(msg.field_by_name.keys()) if d: raise ValueError('Invalid argument {} to {}' .format(list(d), msg.name)) def _call_vpp(self, i, msg, multipart, **kwargs): """Given a message, send the message and await a reply. msgdef - the message packing definition i - the message type index multipart - True if the message returns multiple messages in return. context - context number - chosen at random if not supplied. The remainder of the kwargs are the arguments to the API call. The return value is the message or message array containing the response. It will raise an IOError exception if there was no response within the timeout window. """ if 'context' not in kwargs: context = self.get_context() kwargs['context'] = context else: context = kwargs['context'] kwargs['_vl_msg_id'] = i try: if self.transport.socket_index: kwargs['client_index'] = self.transport.socket_index except AttributeError: pass self.validate_args(msg, kwargs) b = msg.pack(kwargs) self.transport.suspend() self.transport.write(b) if multipart: # Send a ping after the request - we use its response # to detect that we have seen all results. self._control_ping(context) # Block until we get a reply. rl = [] while (True): msg = self.transport.read() if not msg: raise IOError(2, 'VPP API client: read failed') r = self.decode_incoming_msg(msg) msgname = type(r).__name__ if context not in r or r.context == 0 or context != r.context: # Message being queued self.message_queue.put_nowait(r) continue if not multipart: rl = r break if msgname == 'control_ping_reply': break rl.append(r) self.transport.resume() return rl def _call_vpp_async(self, i, msg, **kwargs): """Given a message, send the message and await a reply. 
msgdef - the message packing definition i - the message type index context - context number - chosen at random if not supplied. The remainder of the kwargs are the arguments to the API call. """ if 'context' not in kwargs: context = self.get_context() kwargs['context'] = context else: context = kwargs['context'] try: if self.transport.socket_index: kwargs['client_index'] = self.transport.socket_index except AttributeError: kwargs['client_index'] = 0 kwargs['_vl_msg_id'] = i b = msg.pack(kwargs) self.transport.write(b) def register_event_callback(self, callback): """Register a callback for async messages. This will be called for async notifications in sync mode, and all messages in async mode. In sync mode, replies to requests will not come here. callback is a fn(msg_type_name, msg_type) that will be called when a message comes in. While this function is executing, note that (a) you are in a background thread and may wish to use threading.Lock to protect your datastructures, and (b) message processing from VPP will stop (so if you take a long while about it you may provoke reply timeouts or cause VPP to fill the RX buffer). Passing None will disable the callback. """ self.event_callback = callback def thread_msg_handler(self): """Python thread calling the user registered message handler. This is to emulate the old style event callback scheme. Modern clients should provide their own thread to poll the event queue. """ while True: r = self.message_queue.get() if r == "terminate event thread": break msgname = type(r).__name__ if self.event_callback: self.event_callback(msgname, r) # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4