From 61debaaaf2230c75d2435a6c866293e2d6359c44 Mon Sep 17 00:00:00 2001 From: Ole Trøan Date: Mon, 18 Jun 2018 18:30:09 +0000 Subject: Revert "Python API: Add enum and union support." This reverts commit a5ee900fb75201bbfceaf13c8bc57a13ed094988. Some of the unit tests breaks. Backing out until fixed. Change-Id: I1846fb417db44a2a772f7b59cda8bcfe6d39f8c3 Signed-off-by: Ole Troan --- .gitignore | 2 + extras/rpm/vpp-suse.spec | 2 +- extras/rpm/vpp.spec | 2 +- src/vpp-api/python/setup.py | 11 +- src/vpp-api/python/tests/test_vpp_serializer.py | 114 --- src/vpp-api/python/vpp_papi.py | 891 ++++++++++++++++++++++++ src/vpp-api/python/vpp_papi/__init__.py | 1 - src/vpp-api/python/vpp_papi/vpp_papi.py | 678 ------------------ src/vpp-api/python/vpp_papi/vpp_serializer.py | 332 --------- test/test_papi.py | 452 +++++++++++- test/vpp_papi_provider.py | 2 +- 11 files changed, 1352 insertions(+), 1135 deletions(-) delete mode 100755 src/vpp-api/python/tests/test_vpp_serializer.py create mode 100644 src/vpp-api/python/vpp_papi.py delete mode 100644 src/vpp-api/python/vpp_papi/__init__.py delete mode 100644 src/vpp-api/python/vpp_papi/vpp_papi.py delete mode 100644 src/vpp-api/python/vpp_papi/vpp_serializer.py diff --git a/.gitignore b/.gitignore index 38c117dbdbd..04a3b91d6ca 100644 --- a/.gitignore +++ b/.gitignore @@ -92,6 +92,8 @@ GTAGS /src/vpp-api/python/build /src/vpp-api/python/dist /src/vpp-api/python/vpp_papi.egg-info +/src/vpp-api/python/vpp_papi/memclnt.py +/src/vpp-api/python/vpp_papi/vpe.py # Build files in the test directory /test/*.ok diff --git a/extras/rpm/vpp-suse.spec b/extras/rpm/vpp-suse.spec index 2649081d5e9..cc53e86ae95 100644 --- a/extras/rpm/vpp-suse.spec +++ b/extras/rpm/vpp-suse.spec @@ -330,7 +330,7 @@ export NO_BRP_CHECK_RPATH=true %files api-python %dir %{python_sitelib}/vpp_papi* -%{python_sitelib}/vpp_* +%{python_sitelib}/vpp_papi* %files devel %dir %{python_sitelib}/jvppgen diff --git a/extras/rpm/vpp.spec b/extras/rpm/vpp.spec index c6951bf4fab..e49d7e3770a 100644 --- a/extras/rpm/vpp.spec +++ b/extras/rpm/vpp.spec @@ -397,7 +397,7 @@ fi %files api-python %defattr(644,root,root) -%{python2_sitelib}/vpp_* +%{python2_sitelib}/vpp_papi* %files selinux-policy %defattr(-,root,root,0755) diff --git a/src/vpp-api/python/setup.py b/src/vpp-api/python/setup.py index 20b9901d521..abda43de606 100644 --- a/src/vpp-api/python/setup.py +++ b/src/vpp-api/python/setup.py @@ -13,20 +13,21 @@ # limitations under the License. 
try: - from setuptools import setup, find_packages + from setuptools import setup except ImportError: - from distutils.core import setup, find_packages + from distutils.core import setup setup (name = 'vpp_papi', - version = '1.5', + version = '1.4', description = 'VPP Python binding', author = 'Ole Troan', author_email = 'ot@cisco.com', url = 'https://wiki.fd.io/view/VPP/Python_API', + python_requires='>=2.7, >=3.3', license = 'Apache-2.0', test_suite = 'tests', - install_requires=['cffi >= 1.6', 'enum34'], - packages=find_packages(), + install_requires=['cffi >= 1.6'], + py_modules=['vpp_papi'], long_description = '''VPP Python language binding.''', zip_safe = True, ) diff --git a/src/vpp-api/python/tests/test_vpp_serializer.py b/src/vpp-api/python/tests/test_vpp_serializer.py deleted file mode 100755 index 6b867f9e6fe..00000000000 --- a/src/vpp-api/python/tests/test_vpp_serializer.py +++ /dev/null @@ -1,114 +0,0 @@ -#!/usr/bin/env python3 - -import unittest -from vpp_serializer import VPPType, VPPEnumType, VPPUnionType -from socket import inet_pton, AF_INET, AF_INET6 -import logging - -logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG) -es_logger = logging.getLogger('vpp_serializer') -es_logger.setLevel(logging.DEBUG) - -class VPPMessage(VPPType): - pass - - -class TestAddType(unittest.TestCase): - - def test_union(self): - un = VPPUnionType('test_union', - [['u8', 'is_bool'], - ['u32', 'is_int']]) - - b = un.pack({'is_int': 0x1234}) - self.assertEqual(len(b), 4) - nt = un.unpack(b) - self.assertEqual(nt.is_bool, 52) - self.assertEqual(nt.is_int, 0x1234) - - def test_address(self): - af = VPPEnumType('vl_api_address_family_t', [["ADDRESS_IP4", 0], - ["ADDRESS_IP6", 1], - {"enumtype": "u32"}]) - ip4 = VPPType('vl_api_ip4_address_t', [['u8', 'address', 4]]) - ip6 = VPPType('vl_api_ip6_address_t', [['u8', 'address', 16]]) - VPPUnionType('vl_api_address_union_t', - [["vl_api_ip4_address_t", "ip4"], - ["vl_api_ip6_address_t", "ip6"]]) - - address = VPPType('address', [['vl_api_address_family_t', 'af'], - ['vl_api_address_union_t', 'un']]) - - b = ip4.pack({'address': inet_pton(AF_INET, '1.1.1.1')}) - self.assertEqual(len(b), 4) - nt = ip4.unpack(b) - self.assertEqual(nt.address, inet_pton(AF_INET, '1.1.1.1')) - - b = ip6.pack({'address': inet_pton(AF_INET6, '1::1')}) - self.assertEqual(len(b), 16) - - b = address.pack({'af': af.ADDRESS_IP4, - 'un': - {'ip4': - {'address': inet_pton(AF_INET, '2.2.2.2')}}}) - self.assertEqual(len(b), 20) - - nt = address.unpack(b) - print('NT union', nt) - self.assertEqual(nt.af, af.ADDRESS_IP4) - self.assertEqual(nt.un.ip4.address, - inet_pton(AF_INET, '2.2.2.2')) - self.assertEqual(nt.un.ip6.address, - inet_pton(AF_INET6, '::0202:0202')) - - def test_arrays(self): - # Test cases - # 1. Fixed list - # 2. Fixed list of variable length sub type - # 3. 
Variable length type - # - ip4 = VPPType('ip4_address', [['u8', 'address', 4]]) - listip4 = VPPType('list_ip4_t', [['ip4_address', 'addresses', 4]]) - valistip4 = VPPType('list_ip4_t', - [['u8', 'count'], - ['ip4_address', 'addresses', 0, 'count']]) - - valistip4_legacy = VPPType('list_ip4_t', - [['u8', 'foo'], - ['ip4_address', 'addresses', 0]]) - - addresses = [] - for i in range(4): - addresses.append({'address': inet_pton(AF_INET, '2.2.2.2')}) - b = listip4.pack({'addresses': addresses}) - self.assertEqual(len(b), 16) - nt = listip4.unpack(b) - - self.assertEqual(nt.addresses[0].address, - inet_pton(AF_INET, '2.2.2.2')) - - b = valistip4.pack({'count': len(addresses), 'addresses': addresses}) - self.assertEqual(len(b), 17) - - nt = valistip4.unpack(b) - print('NT', nt) - - b = valistip4_legacy.pack({'foo': 1, 'addresses': addresses}) - self.assertEqual(len(b), 17) - nt = valistip4_legacy.unpack(b) - print('NT', nt) - - - def test_message(self): - foo = VPPMessage('foo', [['u16', '_vl_msg_id'], - ['u8', 'client_index'], - ['u8', 'something'], - {"crc": "0x559b9f3c"}]) - b = foo.pack({'_vl_msg_id': 1, 'client_index': 5, - 'something': 200}) - self.assertEqual(len(b), 4) - nt = foo.unpack(b) - print('NT', nt) - -if __name__ == '__main__': - unittest.main() diff --git a/src/vpp-api/python/vpp_papi.py b/src/vpp-api/python/vpp_papi.py new file mode 100644 index 00000000000..ece0e4ee52d --- /dev/null +++ b/src/vpp-api/python/vpp_papi.py @@ -0,0 +1,891 @@ +#!/usr/bin/env python +# +# Copyright (c) 2016 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from __future__ import print_function +import sys +import os +import logging +import collections +import struct +import json +import threading +import fnmatch +import weakref +import atexit +from cffi import FFI +import cffi + +if sys.version[0] == '2': + import Queue as queue +else: + import queue as queue + +ffi = FFI() +ffi.cdef(""" +typedef void (*vac_callback_t)(unsigned char * data, int len); +typedef void (*vac_error_callback_t)(void *, unsigned char *, int); +int vac_connect(char * name, char * chroot_prefix, vac_callback_t cb, + int rx_qlen); +int vac_disconnect(void); +int vac_read(char **data, int *l, unsigned short timeout); +int vac_write(char *data, int len); +void vac_free(void * msg); + +int vac_get_msg_index(unsigned char * name); +int vac_msg_table_size(void); +int vac_msg_table_max_index(void); + +void vac_rx_suspend (void); +void vac_rx_resume (void); +void vac_set_error_handler(vac_error_callback_t); + """) + +# Barfs on failure, no need to check success. 
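The cdef() above only declares the C signatures of the vac_* client calls; the dlopen() that follows binds those declarations to the shared library, after which every declared symbol is callable as an attribute of the returned handle. A minimal sketch of the same cffi ABI-mode pattern, assuming libvppapiclient.so is installed and on the dynamic loader path:

    from cffi import FFI

    _ffi = FFI()
    _ffi.cdef("int vac_msg_table_size(void);")  # declare the C signature only
    _lib = _ffi.dlopen('libvppapiclient.so')    # bind it; raises OSError if missing
    # _lib.vac_msg_table_size is now a callable wrapping the C function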
+vpp_api = ffi.dlopen('libvppapiclient.so') + + +def vpp_atexit(vpp_weakref): + """Clean up VPP connection on shutdown.""" + vpp_instance = vpp_weakref() + if vpp_instance.connected: + vpp_instance.logger.debug('Cleaning up VPP on exit') + vpp_instance.disconnect() + + +vpp_object = None + + +def vpp_iterator(d): + if sys.version[0] == '2': + return d.iteritems() + else: + return d.items() + + +@ffi.callback("void(unsigned char *, int)") +def vac_callback_sync(data, len): + vpp_object.msg_handler_sync(ffi.buffer(data, len)) + + +@ffi.callback("void(unsigned char *, int)") +def vac_callback_async(data, len): + vpp_object.msg_handler_async(ffi.buffer(data, len)) + + +@ffi.callback("void(void *, unsigned char *, int)") +def vac_error_handler(arg, msg, msg_len): + vpp_object.logger.warning("VPP API client:: %s", ffi.string(msg, msg_len)) + + +class Empty(object): + pass + + +class FuncWrapper(object): + def __init__(self, func): + self._func = func + self.__name__ = func.__name__ + + def __call__(self, **kwargs): + return self._func(**kwargs) + + +class VPP(): + """VPP interface. + + This class provides the APIs to VPP. The APIs are loaded + from provided .api.json files and makes functions accordingly. + These functions are documented in the VPP .api files, as they + are dynamically created. + + Additionally, VPP can send callback messages; this class + provides a means to register a callback function to receive + these messages in a background thread. + """ + def __init__(self, apifiles=None, testmode=False, async_thread=True, + logger=None, loglevel=None, + read_timeout=0): + """Create a VPP API object. + + apifiles is a list of files containing API + descriptions that will be loaded - methods will be + dynamically created reflecting these APIs. If not + provided this will load the API files from VPP's + default install location. + + logger, if supplied, is the logging logger object to log to. + loglevel, if supplied, is the log level this logger is set + to report at (from the loglevels in the logging module). + """ + global vpp_object + vpp_object = self + + if logger is None: + logger = logging.getLogger(__name__) + if loglevel is not None: + logger.setLevel(loglevel) + + self.logger = logger + + self.messages = {} + self.id_names = [] + self.id_msgdef = [] + self.connected = False + self.header = struct.Struct('>HI') + self.apifiles = [] + self.event_callback = None + self.message_queue = queue.Queue() + self.read_timeout = read_timeout + self.vpp_api = vpp_api + self.async_thread = async_thread + + if not apifiles: + # Pick up API definitions from default directory + try: + apifiles = self.find_api_files() + except RuntimeError: + # In test mode we don't care that we can't find the API files + if testmode: + apifiles = [] + else: + raise + + for file in apifiles: + with open(file) as apidef_file: + api = json.load(apidef_file) + for t in api['types']: + self.add_type(t[0], t[1:]) + + for m in api['messages']: + self.add_message(m[0], m[1:]) + self.apifiles = apifiles + + # Basic sanity check + if len(self.messages) == 0 and not testmode: + raise ValueError(1, 'Missing JSON message definitions') + + # Make sure we allow VPP to clean up the message rings. 
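The atexit hook registered below receives a weak reference rather than the VPP object itself, so the registration does not keep the instance alive; vpp_atexit() dereferences the weakref at interpreter shutdown and disconnects only if the client is still connected. The same pattern in isolation, with illustrative names:

    import atexit
    import weakref

    class Client(object):
        connected = True

        def disconnect(self):
            self.connected = False

    def cleanup(ref):
        obj = ref()                      # None if already garbage collected
        if obj is not None and obj.connected:
            obj.disconnect()

    c = Client()
    atexit.register(cleanup, weakref.ref(c))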
+ atexit.register(vpp_atexit, weakref.ref(self)) + + # Register error handler + vpp_api.vac_set_error_handler(vac_error_handler) + + # Support legacy CFFI + # from_buffer supported from 1.8.0 + (major, minor, patch) = [int(s) for s in + cffi.__version__.split('.', 3)] + if major >= 1 and minor >= 8: + self._write = self._write_new_cffi + else: + self._write = self._write_legacy_cffi + + class ContextId(object): + """Thread-safe provider of unique context IDs.""" + def __init__(self): + self.context = 0 + self.lock = threading.Lock() + + def __call__(self): + """Get a new unique (or, at least, not recently used) context.""" + with self.lock: + self.context += 1 + return self.context + get_context = ContextId() + + @classmethod + def find_api_dir(cls): + """Attempt to find the best directory in which API definition + files may reside. If the value VPP_API_DIR exists in the environment + then it is first on the search list. If we're inside a recognized + location in a VPP source tree (src/scripts and src/vpp-api/python) + then entries from there to the likely locations in build-root are + added. Finally the location used by system packages is added. + + :returns: A single directory name, or None if no such directory + could be found. + """ + dirs = [] + + if 'VPP_API_DIR' in os.environ: + dirs.append(os.environ['VPP_API_DIR']) + + # perhaps we're in the 'src/scripts' or 'src/vpp-api/python' dir; + # in which case, plot a course to likely places in the src tree + import __main__ as main + if hasattr(main, '__file__'): + # get the path of the calling script + localdir = os.path.dirname(os.path.realpath(main.__file__)) + else: + # use cwd if there is no calling script + localdir = os.getcwd() + localdir_s = localdir.split(os.path.sep) + + def dmatch(dir): + """Match dir against right-hand components of the script dir""" + d = dir.split('/') # param 'dir' assumes a / separator + length = len(d) + return len(localdir_s) > length and localdir_s[-length:] == d + + def sdir(srcdir, variant): + """Build a path from srcdir to the staged API files of + 'variant' (typically '' or '_debug')""" + # Since 'core' and 'plugin' files are staged + # in separate directories, we target the parent dir. + return os.path.sep.join(( + srcdir, + 'build-root', + 'install-vpp%s-native' % variant, + 'vpp', + 'share', + 'vpp', + 'api', + )) + + srcdir = None + if dmatch('src/scripts'): + srcdir = os.path.sep.join(localdir_s[:-2]) + elif dmatch('src/vpp-api/python'): + srcdir = os.path.sep.join(localdir_s[:-3]) + elif dmatch('test'): + # we're apparently running tests + srcdir = os.path.sep.join(localdir_s[:-1]) + + if srcdir: + # we're in the source tree, try both the debug and release + # variants. 
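For a hypothetical source tree rooted at /home/user/vpp, the two appends below add the staged debug and release API directories, and the system location appended last completes the search order; the first directory that actually exists is returned:

    /home/user/vpp/build-root/install-vpp_debug-native/vpp/share/vpp/api
    /home/user/vpp/build-root/install-vpp-native/vpp/share/vpp/api
    /usr/share/vpp/api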
+ dirs.append(sdir(srcdir, '_debug')) + dirs.append(sdir(srcdir, '')) + + # Test for staged copies of the scripts + # For these, since we explicitly know if we're running a debug versus + # release variant, target only the relevant directory + if dmatch('build-root/install-vpp_debug-native/vpp/bin'): + srcdir = os.path.sep.join(localdir_s[:-4]) + dirs.append(sdir(srcdir, '_debug')) + if dmatch('build-root/install-vpp-native/vpp/bin'): + srcdir = os.path.sep.join(localdir_s[:-4]) + dirs.append(sdir(srcdir, '')) + + # finally, try the location system packages typically install into + dirs.append(os.path.sep.join(('', 'usr', 'share', 'vpp', 'api'))) + + # check the directories for existance; first one wins + for dir in dirs: + if os.path.isdir(dir): + return dir + + return None + + @classmethod + def find_api_files(cls, api_dir=None, patterns='*'): + """Find API definition files from the given directory tree with the + given pattern. If no directory is given then find_api_dir() is used + to locate one. If no pattern is given then all definition files found + in the directory tree are used. + + :param api_dir: A directory tree in which to locate API definition + files; subdirectories are descended into. + If this is None then find_api_dir() is called to discover it. + :param patterns: A list of patterns to use in each visited directory + when looking for files. + This can be a list/tuple object or a comma-separated string of + patterns. Each value in the list will have leading/trialing + whitespace stripped. + The pattern specifies the first part of the filename, '.api.json' + is appended. + The results are de-duplicated, thus overlapping patterns are fine. + If this is None it defaults to '*' meaning "all API files". + :returns: A list of file paths for the API files found. 
+ """ + if api_dir is None: + api_dir = cls.find_api_dir() + if api_dir is None: + raise RuntimeError("api_dir cannot be located") + + if isinstance(patterns, list) or isinstance(patterns, tuple): + patterns = [p.strip() + '.api.json' for p in patterns] + else: + patterns = [p.strip() + '.api.json' for p in patterns.split(",")] + + api_files = [] + for root, dirnames, files in os.walk(api_dir): + # iterate all given patterns and de-dup the result + files = set(sum([fnmatch.filter(files, p) for p in patterns], [])) + for filename in files: + api_files.append(os.path.join(root, filename)) + + return api_files + + def status(self): + """Debug function: report current VPP API status to stdout.""" + print('Connected') if self.connected else print('Not Connected') + print('Read API definitions from', ', '.join(self.apifiles)) + + def __struct(self, t, n=None, e=-1, vl=None): + """Create a packing structure for a message.""" + base_types = {'u8': 'B', + 'u16': 'H', + 'u32': 'I', + 'i32': 'i', + 'u64': 'Q', + 'f64': 'd', } + + if t in base_types: + if not vl: + if e > 0 and t == 'u8': + # Fixed byte array + s = struct.Struct('>' + str(e) + 's') + return s.size, s + if e > 0: + # Fixed array of base type + s = struct.Struct('>' + base_types[t]) + return s.size, [e, s] + elif e == 0: + # Old style variable array + s = struct.Struct('>' + base_types[t]) + return s.size, [-1, s] + else: + # Variable length array + if t == 'u8': + s = struct.Struct('>s') + return s.size, [vl, s] + else: + s = struct.Struct('>' + base_types[t]) + return s.size, [vl, s] + + s = struct.Struct('>' + base_types[t]) + return s.size, s + + if t in self.messages: + size = self.messages[t]['sizes'][0] + + # Return a list in case of array + if e > 0 and not vl: + return size, [e, lambda self, encode, buf, offset, args: ( + self.__struct_type(encode, self.messages[t], buf, offset, + args))] + if vl: + return size, [vl, lambda self, encode, buf, offset, args: ( + self.__struct_type(encode, self.messages[t], buf, offset, + args))] + elif e == 0: + # Old style VLA + raise NotImplementedError(1, + 'No support for compound types ' + t) + return size, lambda self, encode, buf, offset, args: ( + self.__struct_type(encode, self.messages[t], buf, offset, args) + ) + + raise ValueError(1, 'Invalid message type: ' + t) + + def __struct_type(self, encode, msgdef, buf, offset, kwargs): + """Get a message packer or unpacker.""" + if encode: + return self.__struct_type_encode(msgdef, buf, offset, kwargs) + else: + return self.__struct_type_decode(msgdef, buf, offset) + + def __struct_type_encode(self, msgdef, buf, offset, kwargs): + off = offset + size = 0 + + for k in kwargs: + if k not in msgdef['args']: + raise ValueError(1, 'Non existing argument [' + k + ']' + + ' used in call to: ' + + self.id_names[kwargs['_vl_msg_id']] + '()') + + for k, v in vpp_iterator(msgdef['args']): + off += size + if k in kwargs: + if type(v) is list: + if callable(v[1]): + e = kwargs[v[0]] if v[0] in kwargs else v[0] + if e != len(kwargs[k]): + raise (ValueError(1, + 'Input list length mismatch: ' + '%s (%s != %s)' % + (k, e, len(kwargs[k])))) + size = 0 + for i in range(e): + size += v[1](self, True, buf, off + size, + kwargs[k][i]) + else: + if v[0] in kwargs: + kwargslen = kwargs[v[0]] + if kwargslen != len(kwargs[k]): + raise ValueError(1, + 'Input list length mismatch:' + ' %s (%s != %s)' % + (k, kwargslen, + len(kwargs[k]))) + else: + kwargslen = len(kwargs[k]) + if v[1].size == 1: + buf[off:off + kwargslen] = bytearray(kwargs[k]) + size = kwargslen + 
else: + size = 0 + for i in kwargs[k]: + v[1].pack_into(buf, off + size, i) + size += v[1].size + else: + if callable(v): + size = v(self, True, buf, off, kwargs[k]) + else: + if type(kwargs[k]) is str and v.size < len(kwargs[k]): + raise ValueError(1, + 'Input list length mismatch: ' + '%s (%s < %s)' % + (k, v.size, len(kwargs[k]))) + v.pack_into(buf, off, kwargs[k]) + size = v.size + else: + size = v.size if not type(v) is list else 0 + + return off + size - offset + + def __getitem__(self, name): + if name in self.messages: + return self.messages[name] + return None + + def get_size(self, sizes, kwargs): + total_size = sizes[0] + for e in sizes[1]: + if e in kwargs and type(kwargs[e]) is list: + total_size += len(kwargs[e]) * sizes[1][e] + return total_size + + def encode(self, msgdef, kwargs): + # Make suitably large buffer + size = self.get_size(msgdef['sizes'], kwargs) + buf = bytearray(size) + offset = 0 + size = self.__struct_type(True, msgdef, buf, offset, kwargs) + return buf[:offset + size] + + def decode(self, msgdef, buf): + return self.__struct_type(False, msgdef, buf, 0, None)[1] + + def __struct_type_decode(self, msgdef, buf, offset): + res = [] + off = offset + size = 0 + for k, v in vpp_iterator(msgdef['args']): + off += size + if type(v) is list: + lst = [] + if callable(v[1]): # compound type + size = 0 + if v[0] in msgdef['args']: # vla + e = res[v[2]] + else: # fixed array + e = v[0] + res.append(lst) + for i in range(e): + (s, l) = v[1](self, False, buf, off + size, None) + lst.append(l) + size += s + continue + if v[1].size == 1: + if type(v[0]) is int: + size = len(buf) - off + else: + size = res[v[2]] + res.append(buf[off:off + size]) + else: + e = v[0] if type(v[0]) is int else res[v[2]] + if e == -1: + e = (len(buf) - off) / v[1].size + lst = [] + res.append(lst) + size = 0 + for i in range(e): + lst.append(v[1].unpack_from(buf, off + size)[0]) + size += v[1].size + else: + if callable(v): + size = 0 + (s, l) = v(self, False, buf, off, None) + res.append(l) + size += s + else: + res.append(v.unpack_from(buf, off)[0]) + size = v.size + + return off + size - offset, msgdef['return_tuple']._make(res) + + def ret_tup(self, name): + if name in self.messages and 'return_tuple' in self.messages[name]: + return self.messages[name]['return_tuple'] + return None + + def duplicate_check_ok(self, name, msgdef): + crc = None + for c in msgdef: + if type(c) is dict and 'crc' in c: + crc = c['crc'] + break + if crc: + # We can get duplicates because of imports + if crc == self.messages[name]['crc']: + return True + return False + + def add_message(self, name, msgdef, typeonly=False): + if name in self.messages: + if typeonly: + if self.duplicate_check_ok(name, msgdef): + return + raise ValueError('Duplicate message name: ' + name) + + args = collections.OrderedDict() + argtypes = collections.OrderedDict() + fields = [] + msg = {} + total_size = 0 + sizes = {} + for i, f in enumerate(msgdef): + if type(f) is dict and 'crc' in f: + msg['crc'] = f['crc'] + continue + field_type = f[0] + field_name = f[1] + if len(f) == 3 and f[2] == 0 and i != len(msgdef) - 2: + raise ValueError('Variable Length Array must be last: ' + name) + size, s = self.__struct(*f) + args[field_name] = s + if type(s) == list and type(s[0]) == int and \ + type(s[1]) == struct.Struct: + if s[0] < 0: + sizes[field_name] = size + else: + sizes[field_name] = size + total_size += s[0] * size + else: + sizes[field_name] = size + total_size += size + + argtypes[field_name] = field_type + if len(f) == 4: # Find 
offset to # elements field + idx = list(args.keys()).index(f[3]) - i + args[field_name].append(idx) + fields.append(field_name) + msg['return_tuple'] = collections.namedtuple(name, fields, + rename=True) + self.messages[name] = msg + self.messages[name]['args'] = args + self.messages[name]['argtypes'] = argtypes + self.messages[name]['typeonly'] = typeonly + self.messages[name]['sizes'] = [total_size, sizes] + return self.messages[name] + + def add_type(self, name, typedef): + return self.add_message('vl_api_' + name + '_t', typedef, + typeonly=True) + + def make_function(self, name, i, msgdef, multipart, async): + if (async): + def f(**kwargs): + return self._call_vpp_async(i, msgdef, **kwargs) + else: + def f(**kwargs): + return self._call_vpp(i, msgdef, multipart, **kwargs) + args = self.messages[name]['args'] + argtypes = self.messages[name]['argtypes'] + f.__name__ = str(name) + f.__doc__ = ", ".join(["%s %s" % + (argtypes[k], k) for k in args.keys()]) + return f + + @property + def api(self): + if not hasattr(self, "_api"): + raise Exception("Not connected, api definitions not available") + return self._api + + def _register_functions(self, async=False): + self.id_names = [None] * (self.vpp_dictionary_maxid + 1) + self.id_msgdef = [None] * (self.vpp_dictionary_maxid + 1) + self._api = Empty() + for name, msgdef in vpp_iterator(self.messages): + if self.messages[name]['typeonly']: + continue + crc = self.messages[name]['crc'] + n = name + '_' + crc[2:] + i = vpp_api.vac_get_msg_index(n.encode()) + if i > 0: + self.id_msgdef[i] = msgdef + self.id_names[i] = name + multipart = True if name.find('_dump') > 0 else False + f = self.make_function(name, i, msgdef, multipart, async) + setattr(self._api, name, FuncWrapper(f)) + else: + self.logger.debug( + 'No such message type or failed CRC checksum: %s', n) + + def _write_new_cffi(self, buf): + """Send a binary-packed message to VPP.""" + if not self.connected: + raise IOError(1, 'Not connected') + return vpp_api.vac_write(ffi.from_buffer(buf), len(buf)) + + def _write_legacy_cffi(self, buf): + """Send a binary-packed message to VPP.""" + if not self.connected: + raise IOError(1, 'Not connected') + return vpp_api.vac_write(bytes(buf), len(buf)) + + def _read(self): + if not self.connected: + raise IOError(1, 'Not connected') + mem = ffi.new("char **") + size = ffi.new("int *") + rv = vpp_api.vac_read(mem, size, self.read_timeout) + if rv: + raise IOError(rv, 'vac_read failed') + msg = bytes(ffi.buffer(mem[0], size[0])) + vpp_api.vac_free(mem[0]) + return msg + + def connect_internal(self, name, msg_handler, chroot_prefix, rx_qlen, + async): + pfx = chroot_prefix.encode() if chroot_prefix else ffi.NULL + rv = vpp_api.vac_connect(name.encode(), pfx, msg_handler, rx_qlen) + if rv != 0: + raise IOError(2, 'Connect failed') + self.connected = True + + self.vpp_dictionary_maxid = vpp_api.vac_msg_table_max_index() + self._register_functions(async=async) + + # Initialise control ping + crc = self.messages['control_ping']['crc'] + self.control_ping_index = vpp_api.vac_get_msg_index( + ('control_ping' + '_' + crc[2:]).encode()) + self.control_ping_msgdef = self.messages['control_ping'] + if self.async_thread: + self.event_thread = threading.Thread( + target=self.thread_msg_handler) + self.event_thread.daemon = True + self.event_thread.start() + return rv + + def connect(self, name, chroot_prefix=None, async=False, rx_qlen=32): + """Attach to VPP. + + name - the name of the client. 
+ chroot_prefix - if VPP is chroot'ed, the prefix of the jail + async - if true, messages are sent without waiting for a reply + rx_qlen - the length of the VPP message receive queue between + client and server. + """ + msg_handler = vac_callback_sync if not async else vac_callback_async + return self.connect_internal(name, msg_handler, chroot_prefix, rx_qlen, + async) + + def connect_sync(self, name, chroot_prefix=None, rx_qlen=32): + """Attach to VPP in synchronous mode. Application must poll for events. + + name - the name of the client. + chroot_prefix - if VPP is chroot'ed, the prefix of the jail + rx_qlen - the length of the VPP message receive queue between + client and server. + """ + + return self.connect_internal(name, ffi.NULL, chroot_prefix, rx_qlen, + async=False) + + def disconnect(self): + """Detach from VPP.""" + rv = vpp_api.vac_disconnect() + self.connected = False + self.message_queue.put("terminate event thread") + return rv + + def msg_handler_sync(self, msg): + """Process an incoming message from VPP in sync mode. + + The message may be a reply or it may be an async notification. + """ + r = self.decode_incoming_msg(msg) + if r is None: + return + + # If we have a context, then use the context to find any + # request waiting for a reply + context = 0 + if hasattr(r, 'context') and r.context > 0: + context = r.context + + if context == 0: + # No context -> async notification that we feed to the callback + self.message_queue.put_nowait(r) + else: + raise IOError(2, 'RPC reply message received in event handler') + + def decode_incoming_msg(self, msg): + if not msg: + self.logger.warning('vpp_api.read failed') + return + + i, ci = self.header.unpack_from(msg, 0) + if self.id_names[i] == 'rx_thread_exit': + return + + # + # Decode message and returns a tuple. + # + msgdef = self.id_msgdef[i] + if not msgdef: + raise IOError(2, 'Reply message undefined') + + r = self.decode(msgdef, msg) + + return r + + def msg_handler_async(self, msg): + """Process a message from VPP in async mode. + + In async mode, all messages are returned to the callback. + """ + r = self.decode_incoming_msg(msg) + if r is None: + return + + msgname = type(r).__name__ + + if self.event_callback: + self.event_callback(msgname, r) + + def _control_ping(self, context): + """Send a ping command.""" + self._call_vpp_async(self.control_ping_index, + self.control_ping_msgdef, + context=context) + + def _call_vpp(self, i, msgdef, multipart, **kwargs): + """Given a message, send the message and await a reply. + + msgdef - the message packing definition + i - the message type index + multipart - True if the message returns multiple + messages in return. + context - context number - chosen at random if not + supplied. + The remainder of the kwargs are the arguments to the API call. + + The return value is the message or message array containing + the response. It will raise an IOError exception if there was + no response within the timeout window. + """ + + if 'context' not in kwargs: + context = self.get_context() + kwargs['context'] = context + else: + context = kwargs['context'] + kwargs['_vl_msg_id'] = i + b = self.encode(msgdef, kwargs) + + vpp_api.vac_rx_suspend() + self._write(b) + + if multipart: + # Send a ping after the request - we use its response + # to detect that we have seen all results. + self._control_ping(context) + + # Block until we get a reply. 
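The read loop that follows matches replies to this request by context, re-queues unrelated messages for the event thread, and, for multipart (*_dump) requests, stops collecting once the control_ping reply arrives. None of that is visible to callers of the generated wrappers; a hedged usage sketch, where the exact message and field names depend on the .api.json definitions that were loaded:

    vpp = VPP()
    vpp.connect('example-client')             # client name is arbitrary

    rv = vpp.api.show_version()               # plain request: one reply namedtuple
    print(rv.version)

    for intf in vpp.api.sw_interface_dump():  # dump request: list of details
        print(intf.sw_if_index)

    vpp.disconnect()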
+ rl = [] + while (True): + msg = self._read() + if not msg: + raise IOError(2, 'VPP API client: read failed') + + r = self.decode_incoming_msg(msg) + msgname = type(r).__name__ + if context not in r or r.context == 0 or context != r.context: + self.message_queue.put_nowait(r) + continue + + if not multipart: + rl = r + break + if msgname == 'control_ping_reply': + break + + rl.append(r) + + vpp_api.vac_rx_resume() + + return rl + + def _call_vpp_async(self, i, msgdef, **kwargs): + """Given a message, send the message and await a reply. + + msgdef - the message packing definition + i - the message type index + context - context number - chosen at random if not + supplied. + The remainder of the kwargs are the arguments to the API call. + """ + if 'context' not in kwargs: + context = self.get_context() + kwargs['context'] = context + else: + context = kwargs['context'] + kwargs['_vl_msg_id'] = i + b = self.encode(msgdef, kwargs) + + self._write(b) + + def register_event_callback(self, callback): + """Register a callback for async messages. + + This will be called for async notifications in sync mode, + and all messages in async mode. In sync mode, replies to + requests will not come here. + + callback is a fn(msg_type_name, msg_type) that will be + called when a message comes in. While this function is + executing, note that (a) you are in a background thread and + may wish to use threading.Lock to protect your datastructures, + and (b) message processing from VPP will stop (so if you take + a long while about it you may provoke reply timeouts or cause + VPP to fill the RX buffer). Passing None will disable the + callback. + """ + self.event_callback = callback + + def thread_msg_handler(self): + """Python thread calling the user registerd message handler. + + This is to emulate the old style event callback scheme. Modern + clients should provide their own thread to poll the event + queue. + """ + while True: + r = self.message_queue.get() + if r == "terminate event thread": + break + msgname = type(r).__name__ + if self.event_callback: + self.event_callback(msgname, r) + + +# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 diff --git a/src/vpp-api/python/vpp_papi/__init__.py b/src/vpp-api/python/vpp_papi/__init__.py deleted file mode 100644 index f9afcf17f29..00000000000 --- a/src/vpp-api/python/vpp_papi/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .vpp_papi import * diff --git a/src/vpp-api/python/vpp_papi/vpp_papi.py b/src/vpp-api/python/vpp_papi/vpp_papi.py deleted file mode 100644 index 5ff8064d425..00000000000 --- a/src/vpp-api/python/vpp_papi/vpp_papi.py +++ /dev/null @@ -1,678 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (c) 2016 Cisco and/or its affiliates. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -from __future__ import print_function -import sys -import os -import logging -import collections -import struct -import json -import threading -import fnmatch -import weakref -import atexit -from cffi import FFI -import cffi -from vpp_serializer import VPPType, VPPEnumType, VPPUnionType, BaseTypes - -if sys.version[0] == '2': - import Queue as queue -else: - import queue as queue - -ffi = FFI() -ffi.cdef(""" -typedef void (*vac_callback_t)(unsigned char * data, int len); -typedef void (*vac_error_callback_t)(void *, unsigned char *, int); -int vac_connect(char * name, char * chroot_prefix, vac_callback_t cb, - int rx_qlen); -int vac_disconnect(void); -int vac_read(char **data, int *l, unsigned short timeout); -int vac_write(char *data, int len); -void vac_free(void * msg); - -int vac_get_msg_index(unsigned char * name); -int vac_msg_table_size(void); -int vac_msg_table_max_index(void); - -void vac_rx_suspend (void); -void vac_rx_resume (void); -void vac_set_error_handler(vac_error_callback_t); - """) - -# Barfs on failure, no need to check success. -vpp_api = ffi.dlopen('libvppapiclient.so') - - -def vpp_atexit(vpp_weakref): - """Clean up VPP connection on shutdown.""" - vpp_instance = vpp_weakref() - if vpp_instance.connected: - vpp_instance.logger.debug('Cleaning up VPP on exit') - vpp_instance.disconnect() - - -vpp_object = None - - -def vpp_iterator(d): - if sys.version[0] == '2': - return d.iteritems() - else: - return d.items() - - -@ffi.callback("void(unsigned char *, int)") -def vac_callback_sync(data, len): - vpp_object.msg_handler_sync(ffi.buffer(data, len)) - - -@ffi.callback("void(unsigned char *, int)") -def vac_callback_async(data, len): - vpp_object.msg_handler_async(ffi.buffer(data, len)) - - -@ffi.callback("void(void *, unsigned char *, int)") -def vac_error_handler(arg, msg, msg_len): - vpp_object.logger.warning("VPP API client:: %s", ffi.string(msg, msg_len)) - - -class Empty(object): - pass - - -class FuncWrapper(object): - def __init__(self, func): - self._func = func - self.__name__ = func.__name__ - - def __call__(self, **kwargs): - return self._func(**kwargs) - - -class VPPMessage(VPPType): - pass - -class VPP(): - """VPP interface. - - This class provides the APIs to VPP. The APIs are loaded - from provided .api.json files and makes functions accordingly. - These functions are documented in the VPP .api files, as they - are dynamically created. - - Additionally, VPP can send callback messages; this class - provides a means to register a callback function to receive - these messages in a background thread. 
- """ - - def process_json_file(self, apidef_file): - api = json.load(apidef_file) - types = {} - for t in api['enums']: - t[0] = 'vl_api_' + t[0] + '_t' - types[t[0]] = {'type': 'enum', 'data': t} - for t in api['unions']: - t[0] = 'vl_api_' + t[0] + '_t' - types[t[0]] = {'type': 'union', 'data': t} - for t in api['types']: - t[0] = 'vl_api_' + t[0] + '_t' - types[t[0]] = {'type': 'type', 'data': t} - - i = 0 - while True: - unresolved = {} - for k, v in types.items(): - t = v['data'] - if v['type'] == 'enum': - try: - VPPEnumType(t[0], t[1:]) - except ValueError: - unresolved[k] = v - elif v['type'] == 'union': - try: - VPPUnionType(t[0], t[1:]) - except ValueError: - unresolved[k] = v - elif v['type'] == 'type': - try: - VPPType(t[0], t[1:]) - except ValueError: - unresolved[k] = v - if len(unresolved) == 0: - break - if i > 3: - raise ValueError('Unresolved type definitions {}' - .format(unresolved)) - types = unresolved - i += 1 - - for m in api['messages']: - try: - self.messages[m[0]] = VPPMessage(m[0], m[1:]) - except NotImplementedError: - self.logger.error('Not implemented error for {}'.format(m[0])) - - def __init__(self, apifiles=None, testmode=False, async_thread=True, - logger=logging.getLogger('vpp_papi'), loglevel='debug', - read_timeout=0): - """Create a VPP API object. - - apifiles is a list of files containing API - descriptions that will be loaded - methods will be - dynamically created reflecting these APIs. If not - provided this will load the API files from VPP's - default install location. - - logger, if supplied, is the logging logger object to log to. - loglevel, if supplied, is the log level this logger is set - to report at (from the loglevels in the logging module). - """ - global vpp_object - vpp_object = self - - if logger is None: - logger = logging.getLogger(__name__) - if loglevel is not None: - logger.setLevel(loglevel) - self.logger = logger - - self.messages = {} - self.id_names = [] - self.id_msgdef = [] - self.connected = False - self.header = VPPType('header', [['u16', 'msgid'], - ['u32', 'client_index']]) - self.apifiles = [] - self.event_callback = None - self.message_queue = queue.Queue() - self.read_timeout = read_timeout - self.vpp_api = vpp_api - self.async_thread = async_thread - - if not apifiles: - # Pick up API definitions from default directory - try: - apifiles = self.find_api_files() - except RuntimeError: - # In test mode we don't care that we can't find the API files - if testmode: - apifiles = [] - else: - raise - - for file in apifiles: - with open(file) as apidef_file: - self.process_json_file(apidef_file) - - self.apifiles = apifiles - - # Basic sanity check - if len(self.messages) == 0 and not testmode: - raise ValueError(1, 'Missing JSON message definitions') - - # Make sure we allow VPP to clean up the message rings. 
- atexit.register(vpp_atexit, weakref.ref(self)) - - # Register error handler - if not testmode: - vpp_api.vac_set_error_handler(vac_error_handler) - - # Support legacy CFFI - # from_buffer supported from 1.8.0 - (major, minor, patch) = [int(s) for s in - cffi.__version__.split('.', 3)] - if major >= 1 and minor >= 8: - self._write = self._write_new_cffi - else: - self._write = self._write_legacy_cffi - - class ContextId(object): - """Thread-safe provider of unique context IDs.""" - def __init__(self): - self.context = 0 - self.lock = threading.Lock() - - def __call__(self): - """Get a new unique (or, at least, not recently used) context.""" - with self.lock: - self.context += 1 - return self.context - get_context = ContextId() - - @classmethod - def find_api_dir(cls): - """Attempt to find the best directory in which API definition - files may reside. If the value VPP_API_DIR exists in the environment - then it is first on the search list. If we're inside a recognized - location in a VPP source tree (src/scripts and src/vpp-api/python) - then entries from there to the likely locations in build-root are - added. Finally the location used by system packages is added. - - :returns: A single directory name, or None if no such directory - could be found. - """ - dirs = [] - - if 'VPP_API_DIR' in os.environ: - dirs.append(os.environ['VPP_API_DIR']) - - # perhaps we're in the 'src/scripts' or 'src/vpp-api/python' dir; - # in which case, plot a course to likely places in the src tree - import __main__ as main - if hasattr(main, '__file__'): - # get the path of the calling script - localdir = os.path.dirname(os.path.realpath(main.__file__)) - else: - # use cwd if there is no calling script - localdir = os.getcwd() - localdir_s = localdir.split(os.path.sep) - - def dmatch(dir): - """Match dir against right-hand components of the script dir""" - d = dir.split('/') # param 'dir' assumes a / separator - length = len(d) - return len(localdir_s) > length and localdir_s[-length:] == d - - def sdir(srcdir, variant): - """Build a path from srcdir to the staged API files of - 'variant' (typically '' or '_debug')""" - # Since 'core' and 'plugin' files are staged - # in separate directories, we target the parent dir. - return os.path.sep.join(( - srcdir, - 'build-root', - 'install-vpp%s-native' % variant, - 'vpp', - 'share', - 'vpp', - 'api', - )) - - srcdir = None - if dmatch('src/scripts'): - srcdir = os.path.sep.join(localdir_s[:-2]) - elif dmatch('src/vpp-api/python'): - srcdir = os.path.sep.join(localdir_s[:-3]) - elif dmatch('test'): - # we're apparently running tests - srcdir = os.path.sep.join(localdir_s[:-1]) - - if srcdir: - # we're in the source tree, try both the debug and release - # variants. 
- dirs.append(sdir(srcdir, '_debug')) - dirs.append(sdir(srcdir, '')) - - # Test for staged copies of the scripts - # For these, since we explicitly know if we're running a debug versus - # release variant, target only the relevant directory - if dmatch('build-root/install-vpp_debug-native/vpp/bin'): - srcdir = os.path.sep.join(localdir_s[:-4]) - dirs.append(sdir(srcdir, '_debug')) - if dmatch('build-root/install-vpp-native/vpp/bin'): - srcdir = os.path.sep.join(localdir_s[:-4]) - dirs.append(sdir(srcdir, '')) - - # finally, try the location system packages typically install into - dirs.append(os.path.sep.join(('', 'usr', 'share', 'vpp', 'api'))) - - # check the directories for existance; first one wins - for dir in dirs: - if os.path.isdir(dir): - return dir - - return None - - @classmethod - def find_api_files(cls, api_dir=None, patterns='*'): - """Find API definition files from the given directory tree with the - given pattern. If no directory is given then find_api_dir() is used - to locate one. If no pattern is given then all definition files found - in the directory tree are used. - - :param api_dir: A directory tree in which to locate API definition - files; subdirectories are descended into. - If this is None then find_api_dir() is called to discover it. - :param patterns: A list of patterns to use in each visited directory - when looking for files. - This can be a list/tuple object or a comma-separated string of - patterns. Each value in the list will have leading/trialing - whitespace stripped. - The pattern specifies the first part of the filename, '.api.json' - is appended. - The results are de-duplicated, thus overlapping patterns are fine. - If this is None it defaults to '*' meaning "all API files". - :returns: A list of file paths for the API files found. 
- """ - if api_dir is None: - api_dir = cls.find_api_dir() - if api_dir is None: - raise RuntimeError("api_dir cannot be located") - - if isinstance(patterns, list) or isinstance(patterns, tuple): - patterns = [p.strip() + '.api.json' for p in patterns] - else: - patterns = [p.strip() + '.api.json' for p in patterns.split(",")] - - api_files = [] - for root, dirnames, files in os.walk(api_dir): - # iterate all given patterns and de-dup the result - files = set(sum([fnmatch.filter(files, p) for p in patterns], [])) - for filename in files: - api_files.append(os.path.join(root, filename)) - - return api_files - - def status(self): - """Debug function: report current VPP API status to stdout.""" - print('Connected') if self.connected else print('Not Connected') - print('Read API definitions from', ', '.join(self.apifiles)) - - @property - def api(self): - if not hasattr(self, "_api"): - raise Exception("Not connected, api definitions not available") - return self._api - - def make_function(self, msg, i, multipart, async): - if (async): - def f(**kwargs): - return self._call_vpp_async(i, msg, **kwargs) - else: - def f(**kwargs): - return self._call_vpp(i, msg, multipart, **kwargs) - - f.__name__ = str(msg.name) - f.__doc__ = ", ".join(["%s %s" % - (msg.fieldtypes[j], k) for j, k in enumerate(msg.fields)]) - return f - - def _register_functions(self, async=False): - self.id_names = [None] * (self.vpp_dictionary_maxid + 1) - self.id_msgdef = [None] * (self.vpp_dictionary_maxid + 1) - self._api = Empty() - for name, msg in vpp_iterator(self.messages): - n = name + '_' + msg.crc[2:] - i = vpp_api.vac_get_msg_index(n.encode()) - if i > 0: - self.id_msgdef[i] = msg - self.id_names[i] = name - # TODO: Fix multipart (use services) - multipart = True if name.find('_dump') > 0 else False - f = self.make_function(msg, i, multipart, async) - setattr(self._api, name, FuncWrapper(f)) - else: - self.logger.debug( - 'No such message type or failed CRC checksum: %s', n) - - def _write_new_cffi(self, buf): - """Send a binary-packed message to VPP.""" - if not self.connected: - raise IOError(1, 'Not connected') - return vpp_api.vac_write(ffi.from_buffer(buf), len(buf)) - - def _write_legacy_cffi(self, buf): - """Send a binary-packed message to VPP.""" - if not self.connected: - raise IOError(1, 'Not connected') - return vpp_api.vac_write(bytes(buf), len(buf)) - - def _read(self): - if not self.connected: - raise IOError(1, 'Not connected') - mem = ffi.new("char **") - size = ffi.new("int *") - rv = vpp_api.vac_read(mem, size, self.read_timeout) - if rv: - raise IOError(rv, 'vac_read failed') - msg = bytes(ffi.buffer(mem[0], size[0])) - vpp_api.vac_free(mem[0]) - return msg - - def connect_internal(self, name, msg_handler, chroot_prefix, rx_qlen, - async): - pfx = chroot_prefix.encode() if chroot_prefix else ffi.NULL - rv = vpp_api.vac_connect(name.encode(), pfx, msg_handler, rx_qlen) - if rv != 0: - raise IOError(2, 'Connect failed') - self.connected = True - self.vpp_dictionary_maxid = vpp_api.vac_msg_table_max_index() - self._register_functions(async=async) - - # Initialise control ping - crc = self.messages['control_ping'].crc - self.control_ping_index = vpp_api.vac_get_msg_index( - ('control_ping' + '_' + crc[2:]).encode()) - self.control_ping_msgdef = self.messages['control_ping'] - if self.async_thread: - self.event_thread = threading.Thread( - target=self.thread_msg_handler) - self.event_thread.daemon = True - self.event_thread.start() - return rv - - def connect(self, name, chroot_prefix=None, 
async=False, rx_qlen=32): - """Attach to VPP. - - name - the name of the client. - chroot_prefix - if VPP is chroot'ed, the prefix of the jail - async - if true, messages are sent without waiting for a reply - rx_qlen - the length of the VPP message receive queue between - client and server. - """ - msg_handler = vac_callback_sync if not async else vac_callback_async - return self.connect_internal(name, msg_handler, chroot_prefix, rx_qlen, - async) - - def connect_sync(self, name, chroot_prefix=None, rx_qlen=32): - """Attach to VPP in synchronous mode. Application must poll for events. - - name - the name of the client. - chroot_prefix - if VPP is chroot'ed, the prefix of the jail - rx_qlen - the length of the VPP message receive queue between - client and server. - """ - - return self.connect_internal(name, ffi.NULL, chroot_prefix, rx_qlen, - async=False) - - def disconnect(self): - """Detach from VPP.""" - rv = vpp_api.vac_disconnect() - self.connected = False - self.message_queue.put("terminate event thread") - return rv - - def msg_handler_sync(self, msg): - """Process an incoming message from VPP in sync mode. - - The message may be a reply or it may be an async notification. - """ - r = self.decode_incoming_msg(msg) - if r is None: - return - - # If we have a context, then use the context to find any - # request waiting for a reply - context = 0 - if hasattr(r, 'context') and r.context > 0: - context = r.context - - if context == 0: - # No context -> async notification that we feed to the callback - self.message_queue.put_nowait(r) - else: - raise IOError(2, 'RPC reply message received in event handler') - - def decode_incoming_msg(self, msg): - if not msg: - self.logger.warning('vpp_api.read failed') - return - - i, ci = self.header.unpack(msg, 0) - if self.id_names[i] == 'rx_thread_exit': - return - - # - # Decode message and returns a tuple. - # - msgobj = self.id_msgdef[i] - if not msgobj: - raise IOError(2, 'Reply message undefined') - - r = msgobj.unpack(msg) - - return r - - def msg_handler_async(self, msg): - """Process a message from VPP in async mode. - - In async mode, all messages are returned to the callback. - """ - r = self.decode_incoming_msg(msg) - if r is None: - return - - msgname = type(r).__name__ - - if self.event_callback: - self.event_callback(msgname, r) - - def _control_ping(self, context): - """Send a ping command.""" - self._call_vpp_async(self.control_ping_index, - self.control_ping_msgdef, - context=context) - - def validate_args(self, msg, kwargs): - d = set(kwargs.keys()) - set(msg.field_by_name.keys()) - if d: - raise ValueError('Invalid argument {} to {}'.format(list(d), msg.name)) - - def _call_vpp(self, i, msg, multipart, **kwargs): - """Given a message, send the message and await a reply. - - msgdef - the message packing definition - i - the message type index - multipart - True if the message returns multiple - messages in return. - context - context number - chosen at random if not - supplied. - The remainder of the kwargs are the arguments to the API call. - - The return value is the message or message array containing - the response. It will raise an IOError exception if there was - no response within the timeout window. 
- """ - - if 'context' not in kwargs: - context = self.get_context() - kwargs['context'] = context - else: - context = kwargs['context'] - kwargs['_vl_msg_id'] = i - - self.validate_args(msg, kwargs) - b = msg.pack(kwargs) - vpp_api.vac_rx_suspend() - self._write(b) - - if multipart: - # Send a ping after the request - we use its response - # to detect that we have seen all results. - self._control_ping(context) - - # Block until we get a reply. - rl = [] - while (True): - msg = self._read() - if not msg: - raise IOError(2, 'VPP API client: read failed') - r = self.decode_incoming_msg(msg) - msgname = type(r).__name__ - if context not in r or r.context == 0 or context != r.context: - self.message_queue.put_nowait(r) - continue - - if not multipart: - rl = r - break - if msgname == 'control_ping_reply': - break - - rl.append(r) - - vpp_api.vac_rx_resume() - - return rl - - def _call_vpp_async(self, i, msg, **kwargs): - """Given a message, send the message and await a reply. - - msgdef - the message packing definition - i - the message type index - context - context number - chosen at random if not - supplied. - The remainder of the kwargs are the arguments to the API call. - """ - if 'context' not in kwargs: - context = self.get_context() - kwargs['context'] = context - else: - context = kwargs['context'] - kwargs['client_index'] = 0 - kwargs['_vl_msg_id'] = i - b = msg.pack(kwargs) - - self._write(b) - - def register_event_callback(self, callback): - """Register a callback for async messages. - - This will be called for async notifications in sync mode, - and all messages in async mode. In sync mode, replies to - requests will not come here. - - callback is a fn(msg_type_name, msg_type) that will be - called when a message comes in. While this function is - executing, note that (a) you are in a background thread and - may wish to use threading.Lock to protect your datastructures, - and (b) message processing from VPP will stop (so if you take - a long while about it you may provoke reply timeouts or cause - VPP to fill the RX buffer). Passing None will disable the - callback. - """ - self.event_callback = callback - - def thread_msg_handler(self): - """Python thread calling the user registerd message handler. - - This is to emulate the old style event callback scheme. Modern - clients should provide their own thread to poll the event - queue. - """ - while True: - r = self.message_queue.get() - if r == "terminate event thread": - break - msgname = type(r).__name__ - if self.event_callback: - self.event_callback(msgname, r) - - -# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 diff --git a/src/vpp-api/python/vpp_papi/vpp_serializer.py b/src/vpp-api/python/vpp_papi/vpp_serializer.py deleted file mode 100644 index 146a8f6919a..00000000000 --- a/src/vpp-api/python/vpp_papi/vpp_serializer.py +++ /dev/null @@ -1,332 +0,0 @@ -# -# Copyright (c) 2018 Cisco and/or its affiliates. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -import struct -import collections -from enum import IntEnum -import logging - -# -# Set log-level in application by doing e.g.: -# logger = logging.getLogger('vpp_serializer') -# logger.setLevel(logging.DEBUG) -# -logger = logging.getLogger(__name__) -FORMAT = "[%(filename)s:%(lineno)s - %(funcName)s() ] %(message)s" -logging.basicConfig(format=FORMAT) - - -class BaseTypes(): - def __init__(self, type, elements=0): - base_types = {'u8': '>B', - 'u16': '>H', - 'u32': '>I', - 'i32': '>i', - 'u64': '>Q', - 'f64': '>d', - 'header': '>HI'} - - if elements > 0 and type == 'u8': - self.packer = struct.Struct('>%ss' % elements) - else: - self.packer = struct.Struct(base_types[type]) - self.size = self.packer.size - logger.debug('Adding {} with format: {}' - .format(type, base_types[type])) - - def pack(self, data, kwargs=None): - logger.debug("Data: {} Format: {}".format(data, self.packer.format)) - return self.packer.pack(data) - - def unpack(self, data, offset, result=None): - logger.debug("@ {} Format: {}".format(offset, self.packer.format)) - return self.packer.unpack_from(data, offset)[0] - - -types = {} -types['u8'] = BaseTypes('u8') -types['u16'] = BaseTypes('u16') -types['u32'] = BaseTypes('u32') -types['i32'] = BaseTypes('i32') -types['u64'] = BaseTypes('u64') -types['f64'] = BaseTypes('f64') - - -class FixedList_u8(): - def __init__(self, name, field_type, num): - self.name = name - self.num = num - self.packer = BaseTypes(field_type, num) - self.size = self.packer.size - - def pack(self, list, kwargs): - logger.debug("Data: {}".format(list)) - - if len(list) > self.num: - raise ValueError('Fixed list length error for "{}", got: {}' - ' expected: {}' - .format(self.name, len(list), self.num)) - return self.packer.pack(list) - - def unpack(self, data, offset=0, result=None): - if len(data[offset:]) < self.num: - raise ValueError('Invalid array length for "{}" got {}' - ' expected {}' - .format(self.name, len(data), self.num)) - return self.packer.unpack(data, offset) - - -class FixedList(): - def __init__(self, name, field_type, num): - self.num = num - self.packer = types[field_type] - self.size = self.packer.size * num - - def pack(self, list, kwargs): - logger.debug("Data: {}".format(list)) - - if len(list) != self.num: - raise ValueError('Fixed list length error, got: {} expected: {}' - .format(len(list), self.num)) - b = bytes() - for e in list: - b += self.packer.pack(e) - return b - - def unpack(self, data, offset=0, result=None): - # Return a list of arguments - result = [] - for e in range(self.num): - x = self.packer.unpack(data, offset) - result.append(x) - offset += self.packer.size - return result - - -class VLAList(): - def __init__(self, name, field_type, len_field_name, index): - self.index = index - self.packer = types[field_type] - self.size = self.packer.size - self.length_field = len_field_name - - def pack(self, list, kwargs=None): - logger.debug("Data: {}".format(list)) - if len(list) != kwargs[self.length_field]: - raise ValueError('Variable length error, got: {} expected: {}' - .format(len(list), kwargs[self.length_field])) - b = bytes() - - # u8 array - if self.packer.size == 1: - p = BaseTypes('u8', len(list)) - return p.pack(list) - - for e in list: - b += self.packer.pack(e) - return b - - def unpack(self, data, offset=0, result=None): - logger.debug("Data: {} @ {} Result: {}" - .format(list, offset, result[self.index])) - # Return a list of arguments - - # u8 array - if self.packer.size == 1: - if result[self.index] == 0: - return b'' - p = 
BaseTypes('u8', result[self.index]) - r = p.unpack(data, offset) - return r - - r = [] - for e in range(result[self.index]): - x = self.packer.unpack(data, offset) - r.append(x) - offset += self.packer.size - return r - - -class VLAList_legacy(): - def __init__(self, name, field_type): - self.packer = types[field_type] - self.size = self.packer.size - - def pack(self, list, kwargs=None): - logger.debug("Data: {}".format(list)) - b = bytes() - for e in list: - b += self.packer.pack(e) - return b - - def unpack(self, data, offset=0, result=None): - # Return a list of arguments - if (len(data) - offset) % self.packer.size: - raise ValueError('Legacy Variable Length Array length mismatch.') - elements = int((len(data) - offset) / self.packer.size) - r = [] - logger.debug("Legacy VLA: {} elements of size {}" - .format(elements, self.packer.size)) - for e in range(elements): - x = self.packer.unpack(data, offset) - r.append(x) - offset += self.packer.size - return r - - -class VPPEnumType(): - def __init__(self, name, msgdef): - self.size = types['u32'].size - e_hash = {} - for f in msgdef: - if type(f) is dict and 'enumtype' in f: - if f['enumtype'] != 'u32': - raise NotImplementedError - continue - ename, evalue = f - e_hash[ename] = evalue - self.enum = IntEnum(name, e_hash) - types[name] = self - logger.debug('Adding enum {}'.format(name)) - - def __getattr__(self, name): - return self.enum[name] - - def pack(self, data, kwargs=None): - logger.debug("Data: {}".format(data)) - return types['u32'].pack(data, kwargs) - - def unpack(self, data, offset=0, result=None): - x = types['u32'].unpack(data, offset) - return self.enum(x) - - -class VPPUnionType(): - def __init__(self, name, msgdef): - self.name = name - self.size = 0 - self.maxindex = 0 - fields = [] - self.packers = collections.OrderedDict() - for i, f in enumerate(msgdef): - if type(f) is dict and 'crc' in f: - self.crc = f['crc'] - continue - f_type, f_name = f - if f_type not in types: - logger.debug('Unknown union type {}'.format(f_type)) - raise ValueError('Unknown message type {}'.format(f_type)) - fields.append(f_name) - size = types[f_type].size - self.packers[f_name] = types[f_type] - if size > self.size: - self.size = size - self.maxindex = i - - types[name] = self - self.tuple = collections.namedtuple(name, fields, rename=True) - logger.debug('Adding union {}'.format(name)) - - def pack(self, data, kwargs=None): - logger.debug("Data: {}".format(data)) - for k, v in data.items(): - logger.debug("Key: {} Value: {}".format(k, v)) - b = self.packers[k].pack(v, kwargs) - offset = self.size - self.packers[k].size - break - r = bytearray(self.size) - r[offset:] = b - return r - - def unpack(self, data, offset=0, result=None): - r = [] - for k, p in self.packers.items(): - union_offset = self.size - p.size - r.append(p.unpack(data, offset + union_offset)) - return self.tuple._make(r) - - -class VPPType(): - # Set everything up to be able to pack / unpack - def __init__(self, name, msgdef): - self.name = name - self.msgdef = msgdef - self.packers = [] - self.fields = [] - self.fieldtypes = [] - self.field_by_name = {} - size = 0 - for i, f in enumerate(msgdef): - if type(f) is dict and 'crc' in f: - self.crc = f['crc'] - continue - f_type, f_name = f[:2] - self.fields.append(f_name) - self.field_by_name[f_name] = None - self.fieldtypes.append(f_type) - if f_type not in types: - logger.debug('Unknown type {}'.format(f_type)) - raise ValueError('Unknown message type {}'.format(f_type)) - if len(f) == 3: # list - list_elements = f[2] - 
if list_elements == 0: - p = VLAList_legacy(f_name, f_type) - self.packers.append(p) - elif f_type == 'u8': - p = FixedList_u8(f_name, f_type, list_elements) - self.packers.append(p) - size += p.size - else: - p = FixedList(f_name, f_type, list_elements) - self.packers.append(p) - size += p.size - elif len(f) == 4: # Variable length list - # Find index of length field - length_index = self.fields.index(f[3]) - p = VLAList(f_name, f_type, f[3], length_index) - self.packers.append(p) - else: - self.packers.append(types[f_type]) - size += types[f_type].size - - self.size = size - self.tuple = collections.namedtuple(name, self.fields, rename=True) - types[name] = self - logger.debug('Adding type {}'.format(name)) - - def pack(self, data, kwargs=None): - if not kwargs: - kwargs = data - logger.debug("Data: {}".format(data)) - b = bytes() - for i, a in enumerate(self.fields): - if a not in data: - logger.debug("Argument {} not given, defaulting to 0" - .format(a)) - b += b'\x00' * self.packers[i].size - continue - b += self.packers[i].pack(data[a], kwargs) - return b - - def unpack(self, data, offset=0, result=None): - # Return a list of arguments - result = [] - for p in self.packers: - x = p.unpack(data, offset, result) - if type(x) is tuple and len(x) == 1: - x = x[0] - result.append(x) - offset += p.size - return self.tuple._make(result) diff --git a/test/test_papi.py b/test/test_papi.py index 81a8f48c290..e28352f9b9b 100644 --- a/test/test_papi.py +++ b/test/test_papi.py @@ -1,8 +1,6 @@ import binascii from framework import VppTestCase from vpp_papi import VPP -from socket import inet_pton, AF_INET, AF_INET6 - import json """ TestPAPI is a subclass of VPPTestCase classes. @@ -36,3 +34,453 @@ class TestPAPI(VppTestCase): node_name = 'X' * 100 self.assertRaises(ValueError, self.v.get_node_index, node_name=node_name) + + +class TestPAPIMessageParsing(VppTestCase): + """ PAPI Message parsing Test Case """ + + show_version_msg = '''["show_version", + ["u16", "_vl_msg_id"], + ["u32", "client_index"], + ["u32", "context"], + {"crc" : "0xf18f9480"} + ]''' + + ip_address_details_msg = '''["ip_address_details", + ["u16", "_vl_msg_id"], + ["u32", "client_index"], + ["u32", "context"], + ["u8", "ip", 16], + ["u8", "prefix_length"], + {"crc" : "0x87d522a1"} + ]''' + + cli_inband_msg = '''["cli_inband", + ["u16", "_vl_msg_id"], + ["u32", "client_index"], + ["u32", "context"], + ["u32", "length"], + ["u8", "cmd", 0, "length"], + {"crc" : "0x22345937"} + ]''' + + def test_adding_new_message_object(self): + """ Add new message object """ + p = json.loads(TestPAPIMessageParsing.show_version_msg) + msglist = VPP(testmode=json) + msgdef = msglist.add_message(p[0], p[1:]) + + # Verify that message can be retrieved + self.assertTrue(msglist['show_version']) + self.assertFalse(msglist['foobar']) + + # Test duplicate + self.assertRaises(ValueError, msglist.add_message, p[0], p[1:]) + + # Look at return tuple + self.assertTrue(msglist.ret_tup('show_version')) + + def test_adding_new_message_object_with_array(self): + """ New message with array """ + p = json.loads(TestPAPIMessageParsing.ip_address_details_msg) + msglist = VPP(testmode=True) + msglist.add_message(p[0], p[1:]) + + self.assertTrue(msglist['ip_address_details']) + + def test_message_to_bytes(self): + """ Message to byte encoding """ + msglist = VPP(testmode=True) + p = json.loads(TestPAPIMessageParsing.show_version_msg) + msgdef = msglist.add_message(p[0], p[1:]) + + # Give me a byte string for given message and given arguments + + b = 
msglist.encode(msgdef, {'_vl_msg_id': 50, 'context': 123}) + self.assertEqual(10, len(b)) + rv = msglist.decode(msgdef, b) + self.assertEqual(rv._0, 50) + self.assertEqual(rv.context, 123) + + p = json.loads(TestPAPIMessageParsing.ip_address_details_msg) + msgdef = msglist.add_message(p[0], p[1:]) + + # Give me a byte string for given message and given arguments + b = msglist.encode(msgdef, {'_vl_msg_id': 50, 'context': 123, + 'ip': b'\xf0\xf1\xf2', + 'prefix_length': 12}) + self.assertEqual(27, len(b)) + rv = msglist.decode(msgdef, b) + + self.assertEqual(rv.context, 123) + self.assertEqual(rv.ip, b'\xf0\xf1\xf2\x00\x00\x00' + + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00') + self.assertEqual(rv.prefix_length, 12) + + p = json.loads(TestPAPIMessageParsing.cli_inband_msg) + msgdef = msglist.add_message(p[0], p[1:]) + + # Give me a byte string for given message and given arguments + b = msglist.encode(msgdef, {'_vl_msg_id': 50, 'context': 123, + 'length': 20, + 'cmd': 'show version verbose'}) + self.assertEqual(34, len(b)) + rv = msglist.decode(msgdef, b) + self.assertEqual(rv._0, 50) + self.assertEqual(rv.context, 123) + self.assertEqual(rv.cmd.decode('ascii'), 'show version verbose') + + variable_array_16_msg = '''["variable_array_16", + ["u32", "length"], + ["u16", "list", 0, "length"] + ]''' + + p = json.loads(variable_array_16_msg) + msgdef = msglist.add_message(p[0], p[1:]) + + # Give me a byte string for given message and given arguments + b = msglist.encode(msgdef, {'list': [1, 2], 'length': 2}) + self.assertEqual(8, len(b)) + rv = msglist.decode(msgdef, b) + self.assertEqual(2, rv.length) + self.assertEqual([1, 2], rv.list) + + def test_add_new_types(self): + """ Add new types """ + counter_type = '''["ip4_fib_counter", + ["u32", "address"], + ["u8", "address_length"], + ["u64", "packets"], + ["u64", "bytes"], + {"crc" : "0xb2739495"} + ]''' + + with_type_msg = '''["with_type_msg", + ["u32", "length"], + ["u16", "list", 0, "length"], + ["vl_api_ip4_fib_counter_t", "counter"] + ]''' + + # Add new type + msglist = VPP(testmode=True) + p = json.loads(counter_type) + msglist.add_type(p[0], p[1:]) + p = json.loads(with_type_msg) + msgdef = msglist.add_message(p[0], p[1:]) + b = msglist.encode(msgdef, {'length': 2, 'list': [1, 2], + 'counter': {'address': 4, + 'address_length': 12, + 'packets': 1235, + 'bytes': 5678}}) + self.assertEqual(29, len(b)) + rv = msglist.decode(msgdef, b) + self.assertEqual(2, rv.length) + self.assertEqual(5678, rv.counter.bytes) + + def test_add_two_new_types(self): + """ Add new types 2 """ + mock_r1 = '''["mock_r1", + ["u32", "a1"], + {"crc" : "0xb2739495"} + ]''' + mock_r2 = '''["mock_r2", + ["u32", "a1"], + {"crc" : "0xb2739495"} + ]''' + + mock_msg = '''["mock_msg", + ["u32", "context"], + ["i32", "retval"], + ["vl_api_mock_r1_t", "r1"], + ["vl_api_mock_r2_t", "r2"], + {"crc" : "0xb2739495"} + ]''' + + # Add new type + msglist = VPP(testmode=True) + p = json.loads(mock_r1) + msglist.add_type(p[0], p[1:]) + p = json.loads(mock_r2) + msglist.add_type(p[0], p[1:]) + p = json.loads(mock_msg) + msgdef = msglist.add_message(p[0], p[1:]) + b = msglist.encode(msgdef, {'context': 2, 'retval': 0, + 'r1': {'a1': 4}, 'r2': {'a1': 12}}) + + self.assertEqual(16, len(b)) + rv = msglist.decode(msgdef, b) + self.assertEqual(4, rv.r1.a1) + + def test_nested_array_type(self): + """ Nested array type """ + bier_type = '''["bier_table_id", + ["u8", "bt_set"], + ["u8", "bt_sub_domain"], + ["u8", "bt_hdr_len_id"], + {"crc" : "0xb2739495"} + ]''' + fib_path3 = '''["fib_path3", + 
["u32", "sw_if_index"], + ["u8", "n_labels"], + ["u32", "label_stack", 0, "n_labels"], + {"crc" : "0xb2739495"} + ]''' + + bier_route_details = '''["bier_route_details", + ["u32", "client_index"], + ["vl_api_bier_table_id_t", "br_tbl_id"], + ["u32", "br_n_paths"], + ["vl_api_fib_path3_t", "br_paths", 0, "br_n_paths"], + {"crc" : "0xb2739495"} + ]''' + + # Add new type + msglist = VPP(testmode=True) + + p = json.loads(bier_type) + msglist.add_type(p[0], p[1:]) + p = json.loads(fib_path3) + msglist.add_type(p[0], p[1:]) + + p = json.loads(bier_route_details) + msgdef = msglist.add_message(p[0], p[1:]) + + bt_tbl_id = {'bt_set': 1, 'bt_sub_domain': 2, 'bt_hdr_len_id': 3} + fib_path = {'sw_if_index': 1, 'n_labels': 2, + 'label_stack': [123, 456]} + + b = msglist.encode(msgdef, {'client_index': 2, + 'br_tbl_id': bt_tbl_id, + 'br_n_paths': 2, + 'br_paths': [fib_path, fib_path]}) + self.assertEqual(37, len(b)) + rv = msglist.decode(msgdef, b) + self.assertEqual([123, 456], rv.br_paths[1].label_stack) + self.assertEqual(bt_tbl_id['bt_set'], rv.br_tbl_id.bt_set) + + def test_add_new_compound_type_with_array(self): + """ New compound type with array """ + counter_type = '''["ip4_fib_counter", + ["u32", "address"], + ["u8", "address_length"], + ["u64", "packets"], + ["u64", "bytes"], + {"crc" : "0xb2739495"} + ]''' + + with_type_msg = '''["with_type_msg", + ["u32", "length"], + ["u16", "list", 0, "length"], + ["vl_api_ip4_fib_counter_t", "counter", 2] + + ]''' + + # Add new type + msglist = VPP(testmode=True) + p = json.loads(counter_type) + msglist.add_type(p[0], p[1:]) + p = json.loads(with_type_msg) + msgdef = msglist.add_message(p[0], p[1:]) + b = msglist.encode(msgdef, {'length': 2, 'list': [1, 2], + 'counter': [{'address': 4, + 'address_length': 12, + 'packets': 1235, + 'bytes': 5678}, + {'address': 111, + 'address_length': 222, + 'packets': 333, + 'bytes': 444}]}) + self.assertEqual(50, len(b)) + rv = msglist.decode(msgdef, b) + self.assertEqual([1, 2], rv.list) + self.assertEqual(1235, rv.counter[0].packets) + + with_type_variable_msg = '''["with_type_variable_msg", + ["u32", "length"], + ["vl_api_ip4_fib_counter_t", "counter", 0, "length"] + + ]''' + + p = json.loads(with_type_variable_msg) + msgdef = msglist.add_message(p[0], p[1:]) + b = msglist.encode(msgdef, {'length': 2, + 'counter': [{'address': 4, + 'address_length': 12, + 'packets': 1235, + 'bytes': 5678}, + {'address': 111, + 'address_length': 222, + 'packets': 333, + 'bytes': 444}]}) + self.assertEqual(46, len(b)) + rv = msglist.decode(msgdef, b) + self.assertEqual(2, rv.length) + self.assertEqual(1235, rv.counter[0].packets) + self.assertEqual(333, rv.counter[1].packets) + + def test_simple_array(self): + """ Simple array """ + msglist = VPP(testmode=True) + + simple_byte_array = '''["simple_byte_array", + ["u32", "length"], + ["u8", "namecommand", 64] + + ]''' + p = json.loads(simple_byte_array) + msgdef = msglist.add_message(p[0], p[1:]) + b = msglist.encode(msgdef, {'length': 2, 'namecommand': 'foobar'}) + self.assertEqual(68, len(b)) + rv = msglist.decode(msgdef, b) + self.assertEqual(2, rv.length) + + simple_array = '''["simple_array", + ["u32", "length"], + ["u32", "list", 2] + + ]''' + p = json.loads(simple_array) + msgdef = msglist.add_message(p[0], p[1:]) + b = msglist.encode(msgdef, {'length': 2, 'list': [1, 2]}) + self.assertEqual(12, len(b)) + rv = msglist.decode(msgdef, b) + self.assertEqual(2, rv.length) + self.assertEqual([1, 2], rv.list) + + simple_variable_array = '''["simple_variable_array", + ["u32", "length"], 
+ ["u32", "list", 0, "length"] + + ]''' + p = json.loads(simple_variable_array) + msgdef = msglist.add_message(p[0], p[1:]) + b = msglist.encode(msgdef, {'length': 2, 'list': [1, 2]}) + self.assertEqual(12, len(b)) + rv = msglist.decode(msgdef, b) + self.assertEqual(2, rv.length) + self.assertEqual([1, 2], rv.list) + + simple_variable_byte_array = '''["simple_variable_byte_array", + ["u32", "length"], + ["u8", "list", 0, "length"] + ]''' + p = json.loads(simple_variable_byte_array) + msgdef = msglist.add_message(p[0], p[1:]) + b = msglist.encode(msgdef, {'length': 6, 'list': 'foobar'}) + self.assertEqual(10, len(b)) + rv = msglist.decode(msgdef, b) + self.assertEqual(6, rv.length) + self.assertEqual('foobar', rv.list) + + def test_old_vla_array(self): + """ Old style VLA array """ + msglist = VPP(testmode=True) + + # VLA + vla_byte_array = '''["vla_byte_array", + ["u32", "foobar"], + ["u32", "list", 2], + ["u32", "propercount"], + ["u8", "propermask", 0, "propercount"], + ["u8", "oldmask", 0], + {"crc" : "0xb2739495"} + ]''' + p = json.loads(vla_byte_array) + msgdef = msglist.add_message(p[0], p[1:]) + b = msglist.encode(msgdef, {'list': [123, 456], 'oldmask': b'foobar', + 'propercount': 2, + 'propermask': [8, 9]}) + self.assertEqual(24, len(b)) + rv = msglist.decode(msgdef, b) + self.assertEqual(b'foobar', rv.oldmask) + + def test_old_vla_array_not_last_member(self): + """ Old VLA array arbitrary placement """ + msglist = VPP(testmode=True) + + # VLA + vla_byte_array = '''["vla_byte_array", + ["u8", "oldmask", 0], + ["u32", "foobar"], + {"crc" : "0xb2739495"} + ]''' + p = json.loads(vla_byte_array) + self.assertRaises(ValueError, msglist.add_message, p[0], p[1:]) + + def test_old_vla_array_u32(self): + """ Old VLA u32 """ + msglist = VPP(testmode=True) + + # VLA + vla_byte_array = '''["vla_byte_array", + ["u32", "foobar"], + ["u32", "oldmask", 0], + {"crc" : "0xb2739495"} + ]''' + p = json.loads(vla_byte_array) + msgdef = msglist.add_message(p[0], p[1:]) + b = msglist.encode(msgdef, {'foobar': 123, + 'oldmask': [123, 456, 789]}) + self.assertEqual(16, len(b)) + rv = msglist.decode(msgdef, b) + self.assertEqual([123, 456, 789], rv.oldmask) + + def test_old_vla_array_compound(self): + """ Old VLA compound type """ + msglist = VPP(testmode=True) + + # VLA + counter_type = '''["ip4_fib_counter", + ["u32", "address"], + ["u8", "address_length"], + ["u64", "packets"], + ["u64", "bytes"], + {"crc" : "0xb2739495"} + ]''' + + vla_byte_array = '''["vla_byte_array", + ["vl_api_ip4_fib_counter_t", "counter", 0], + {"crc" : "0xb2739495"} + ]''' + + p = json.loads(counter_type) + msglist.add_type(p[0], p[1:]) + + p = json.loads(vla_byte_array) + with self.assertRaises(NotImplementedError): + msgdef = msglist.add_message(p[0], p[1:]) + + def test_array_count_not_previous(self): + """ VLA with aribtrary length field placement """ + msglist = VPP(testmode=True) + + # VLA + vla_byte_array = '''["vla_byte_array", + ["u32", "count"], + ["u32", "filler"], + ["u32", "lst", 0, "count"], + {"crc" : "0xb2739495"} + ]''' + + p = json.loads(vla_byte_array) + msgdef = msglist.add_message(p[0], p[1:]) + b = msglist.encode(msgdef, {'count': 3, 'lst': [1, 2, 3], + 'filler': 1}) + rv = msglist.decode(msgdef, b) + self.assertEqual(rv.lst, [1, 2, 3]) + + def test_argument_name(self): + """ Argument name """ + msglist = VPP(testmode=True) + + simple_name = '''["simple_name", + ["u32", "length"], + ["u8", "name"] + ]''' + p = json.loads(simple_name) + msgdef = msglist.add_message(p[0], p[1:]) + b = msglist.encode(msgdef, 
{'length': 6, 'name': 1}) + self.assertEqual(5, len(b)) + rv = msglist.decode(msgdef, b) + self.assertEqual(6, rv.length) + self.assertEqual(1, rv.name) diff --git a/test/vpp_papi_provider.py b/test/vpp_papi_provider.py index 81c8802944f..34010125e3a 100644 --- a/test/vpp_papi_provider.py +++ b/test/vpp_papi_provider.py @@ -70,7 +70,7 @@ class VppPapiProvider(object): for filename in fnmatch.filter(filenames, '*.api.json'): jsonfiles.append(os.path.join(root, filename)) - self.vpp = VPP(jsonfiles, logger=test_class.logger, read_timeout=5) + self.vpp = VPP(jsonfiles, logger=test_class.logger) self._events = deque() def __enter__(self): -- cgit 1.2.3-korg
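For reference, the pattern exercised by the restored test_papi.py above can be reproduced stand-alone. The sketch below is a minimal version of the encode/decode round trip from those tests; it assumes the reverted single-module vpp_papi.py is importable as vpp_papi and uses testmode=True, so no running VPP instance and no generated *.api.json files are required. The message definition is copied from the tests; everything else mirrors the calls the tests make.

import json
from vpp_papi import VPP

show_version_msg = '''["show_version",
    ["u16", "_vl_msg_id"],
    ["u32", "client_index"],
    ["u32", "context"],
    {"crc" : "0xf18f9480"}
]'''

# In test mode the VPP object only builds message packers; it does not
# connect to a VPP shared-memory segment.
vpp = VPP(testmode=True)

# Register the message definition (name plus field list) exactly as the
# tests do, then pack and unpack a message.
p = json.loads(show_version_msg)
msgdef = vpp.add_message(p[0], p[1:])

b = vpp.encode(msgdef, {'_vl_msg_id': 50, 'context': 123})
rv = vpp.decode(msgdef, b)
assert rv.context == 123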