author     Ole Troan <ot@cisco.com>            2019-07-30 15:38:13 +0200
committer  Dave Barach <openvpp@barachs.net>   2019-08-08 23:01:18 +0000
commit     edfe2c0079a756f5fb1108037c39450e3521c8bd
tree       224db0f0abe2ef2610ac111674c3885867f830fe
parent     c54235776c08ec1e10d80d8c91e6e45e2d2f6831
api: vppapitrace JSON/API trace converter
usage: vppapitrace.py [-h] [--debug] [--apidir APIDIR] {convert,replay} ...

optional arguments:
  -h, --help        show this help message and exit
  --debug           enable debug mode
  --apidir APIDIR   Location of JSON API definitions

subcommands:
  valid subcommands

  {convert,replay}  additional help
    convert         Convert API trace to JSON or Python and back
    replay          Replay messages to running VPP instance
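
The examples below assume a binary API trace captured on the VPP side.
With the standard api trace debug CLI that looks roughly like this (the
saved file ends up under /tmp/ on a default build; the exact path is an
assumption):

  vpp# api trace on
  ... exercise the binary API ...
  vpp# api trace save api.trace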
To convert an API trace file to JSON:
vppapitrace convert /tmp/api.trace trace.json
To convert an (edited) JSON file back to API trace for replay:
vppapitrace convert trace.json api-edited.trace
To generate a Python file that can be replayed:
vppapitrace convert /tmp/api.trace trace.py
vppapitrace convert trace.json trace.py
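
The generated Python file is a small standalone script with one vpp.api
call per traced request; a sketch of its shape is below (the show_version
call is only a placeholder for whatever messages the trace actually
contains):

  #!/usr/bin/env python3
  from vpp_papi import VPP, VppEnum
  vpp = VPP(use_socket=True)
  vpp.connect(name='vppapitrace')
  # one call per traced request; replies and filtered messages
  # (e.g. control_ping) are emitted as comments instead of calls
  rv = vpp.api.show_version(**{})
  print("RV:", rv)
  vpp.disconnect()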
To replay a trace file against a running VPP instance:
vppapitrace replay --socket /tmp/api.trace
Inside VPP itself, the converted trace file can be replayed with:
vpp# api trace replay api-edited.trace
This patch also modifies the API binary trace format to include the
message-id to message-name table, so that a trace can be decoded and
replayed even when message ids differ from those of the running VPP
instance.
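
The new layout is a header of nitems (u32), msgtbl_size (u32) and
wrapped (u8), all in network byte order, followed by the serialized
message-name/CRC table and then the traced messages. A minimal Python
sketch of walking that header, mirroring apitrace2json() in
vppapitrace.py (the /tmp/api.trace path is just an example):

  import struct

  with open('/tmp/api.trace', 'rb') as f:
      data = f.read()
  # u32 nitems, u32 msgtbl_size, u8 wrapped -- network byte order
  nitems, msgtbl_size, wrapped = struct.unpack_from(">IIB", data, 0)
  offset = 9
  # the message table starts with a u32 entry count; each entry is a
  # variable-length-encoded message id plus a length-prefixed "name_crc"
  nmsg = struct.unpack_from(">I", data, offset)[0]
  print(nitems, "messages,", nmsg, "table entries, wrapped:", wrapped)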
Change-Id: Ie6441efb53c1c93c9f778f6ae9c1758bccc8dd87
Type: refactor
Signed-off-by: Ole Troan <ot@cisco.com>
-rw-r--r--  MAINTAINERS                               |   5
l---------  src/tools/vppapitrace/vppapitrace         |   1
-rwxr-xr-x  src/tools/vppapitrace/vppapitrace.py      | 434
-rw-r--r--  src/vlibapi/api.h                         |   4
-rw-r--r--  src/vlibapi/api_shared.c                  |  36
-rw-r--r--  src/vlibmemory/socket_api.c               |  10
-rw-r--r--  src/vlibmemory/vlib_api.c                 |  23
-rwxr-xr-x  src/vlibmemory/vlib_api_cli.c             |  61
-rw-r--r--  src/vnet/ip/ip_api.c                      |   2
-rw-r--r--  src/vpp-api/python/vpp_papi/__init__.py   |   1
-rw-r--r--  src/vpp-api/python/vpp_papi/vpp_papi.py   | 293
11 files changed, 668 insertions, 202 deletions
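
A large part of the vpp_papi.py change below factors the .api.json
discovery and parsing out of VPPApiClient into a standalone
VPPApiJSONFiles class, so vppapitrace can load message definitions
without connecting to VPP. A minimal usage sketch (the
/usr/share/vpp/api path is just the usual system-package location):

  from vpp_papi import VPPApiJSONFiles

  messages, services = {}, {}
  # discover and parse every *.api.json definition under the given tree
  for path in VPPApiJSONFiles.find_api_files(api_dir='/usr/share/vpp/api'):
      with open(path) as f:
          m, s = VPPApiJSONFiles.process_json_file(f)
          messages.update(m)
          services.update(s)
  print('loaded', len(messages), 'messages and', len(services), 'services')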
diff --git a/MAINTAINERS b/MAINTAINERS
index 01882c5a3d7..cef160bb482 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -435,6 +435,11 @@ I: vppapigen
 M: Ole Troan <otroan@employees.org>
 F: src/tools/vppapigen/
 
+API trace tool
+I: vppapitrace
+M: Ole Troan <otroan@employees.org>
+F: src/tools/vppapitrace/
+
 Binary API Compiler for C and C++
 I: vapi
 M: Ole Troan <ot@cisco.com>
diff --git a/src/tools/vppapitrace/vppapitrace b/src/tools/vppapitrace/vppapitrace
new file mode 120000
index 00000000000..d0ece85a809
--- /dev/null
+++ b/src/tools/vppapitrace/vppapitrace
@@ -0,0 +1 @@
+vppapitrace.py
\ No newline at end of file diff --git a/src/tools/vppapitrace/vppapitrace.py b/src/tools/vppapitrace/vppapitrace.py new file mode 100755 index 00000000000..df07580714d --- /dev/null +++ b/src/tools/vppapitrace/vppapitrace.py @@ -0,0 +1,434 @@ +#!/usr/bin/env python3 + +# +# Copyright (c) 2019 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# +# Convert from VPP API trace to JSON. + +import argparse +import struct +import sys +import logging +import json +from ipaddress import * +from collections import namedtuple +from vpp_papi import MACAddress, VPPApiJSONFiles +import base64 +import os + + +def serialize_likely_small_unsigned_integer(x): + r = x + + # Low bit set means it fits into 1 byte. + if r < (1 << 7): + return struct.pack("B", 1 + 2 * r) + + # Low 2 bits 1 0 means it fits into 2 bytes. + r -= (1 << 7) + if r < (1 << 14): + return struct.pack("<H", 4 * r + 2) + + r -= (1 << 14) + if r < (1 << 29): + return struct.pack("<I", 8 * r + 4) + + return struct.pack("<BQ", 0, x) + + +def unserialize_likely_small_unsigned_integer(data, offset): + y = struct.unpack_from("B", data, offset)[0] + if y & 1: + return y // 2, 1 + r = 1 << 7 + if y & 2: + p = struct.unpack_from("B", data, offset + 1)[0] + r += (y // 4) + (p << 6) + return r, 2 + r += 1 << 14 + if y & 4: + (p1, p2, p3) = struct.unpack_from("BBB", data, offset+1) + r += ((y // 8) + (p1 << (5 + 8 * 0)) + + (p2 << (5 + 8 * 1)) + (p3 << (5 + 8 * 2))) + return r, 3 + return struct.unpack_from(">Q", data, offset+1)[0], 8 + + +def serialize_cstring(s): + bstring = s.encode('utf8') + l = len(bstring) + b = serialize_likely_small_unsigned_integer(l)) + b += struct.pack('{}s'.format(l), bstring) + return b + + +def unserialize_cstring(data, offset): + l, size = unserialize_likely_small_unsigned_integer(data, offset) + name = struct.unpack_from('{}s'.format(l), data, offset+size)[0] + return name.decode('utf8'), size + len(name) + + +def unserialize_msgtbl(data, offset): + msgtable_by_id = {} + msgtable_by_name = {} + i = 0 + nmsg = struct.unpack_from(">I", data, offset)[0] + o = 4 + while i < nmsg: + (msgid, size) = unserialize_likely_small_unsigned_integer( + data, offset + o) + o += size + (name, size) = unserialize_cstring(data, offset + o) + o += size + msgtable_by_id[msgid] = name + msgtable_by_name[name] = msgid + + i += 1 + return msgtable_by_id, msgtable_by_name, o + + +def serialize_msgtbl(messages): + offset = 0 + data = bytearray(100000) + nmsg = len(messages) + data = struct.pack(">I", nmsg) + + for k, v in messages.items(): + name = k + '_' + v.crc[2:] + data += serialize_likely_small_unsigned_integer(v._vl_msg_id) + data += serialize_cstring(name) + return data + + +def apitrace2json(messages, filename): + result = [] + with open(filename, 'rb') as file: + bytes_read = file.read() + # Read header + (nitems, msgtbl_size, wrapped) = struct.unpack_from(">IIB", + bytes_read, 0) + logging.debug('nitems: {} message table size: {} wrapped: {}' + .format(nitems, msgtbl_size, wrapped)) + if wrapped: + 
sys.stdout.write('Wrapped/incomplete trace, results may vary') + offset = 9 + + msgtbl_by_id, msgtbl_by_name, size = unserialize_msgtbl(bytes_read, + offset) + offset += size + + i = 0 + while i < nitems: + size = struct.unpack_from(">I", bytes_read, offset)[0] + offset += 4 + if size == 0: + break + msgid = struct.unpack_from(">H", bytes_read, offset)[0] + name = msgtbl_by_id[msgid] + n = name[:name.rfind("_")] + msgobj = messages[n] + if n + '_' + msgobj.crc[2:] != name: + sys.exit("CRC Mismatch between JSON API definition " + "and trace. {}".format(name)) + + x, s = msgobj.unpack(bytes_read[offset:offset+size]) + msgname = type(x).__name__ + offset += size + # Replace named tuple illegal _0 + y = x._asdict() + y.pop('_0') + result.append({'name': msgname, 'args': y}) + i += 1 + + file.close() + return result + + +def json2apitrace(messages, filename): + """Input JSON file and API message definition. Output API trace + bytestring.""" + + msgs = [] + with open(filename, 'r') as file: + msgs = json.load(file, object_hook=vpp_decode) + result = b'' + for m in msgs: + name = m['name'] + msgobj = messages[name] + m['args']['_vl_msg_id'] = messages[name]._vl_msg_id + b = msgobj.pack(m['args']) + + result += struct.pack('>I', len(b)) + result += b + return len(msgs), result + + +class VPPEncoder(json.JSONEncoder): + def default(self, o): + if type(o) is bytes: + return "base64:" + base64.b64encode(o).decode('utf-8') + # Let the base class default method raise the TypeError + return json.JSONEncoder.default(self, o) + + def encode(self, obj): + def hint_tuples(item): + if isinstance(item, tuple): + return hint_tuples(item._asdict()) + if isinstance(item, list): + return [hint_tuples(e) for e in item] + if isinstance(item, dict): + return {key: hint_tuples(value) for key, value in item.items()} + else: + return item + + return super(VPPEncoder, self).encode(hint_tuples(obj)) + + +def vpp_decode(obj): + for k, v in obj.items(): + if type(v) is str and v.startswith('base64:'): + s = v.lstrip('base64:') + obj[k] = base64.b64decode(v[7:]) + return obj + + +def vpp_encoder(obj): + if isinstance(obj, IPv6Network): + return str(obj) + if isinstance(obj, IPv4Network): + return str(obj) + if isinstance(obj, IPv6Address): + return str(obj) + if isinstance(obj, IPv4Address): + return str(obj) + if isinstance(obj, MACAddress): + return str(obj) + if type(obj) is bytes: + return "base64:" + base64.b64encode(obj).decode('ascii') + raise TypeError('Unknown object {} {}\n'.format(type(obj), obj)) + +message_filter = { + 'control_ping', + 'memclnt_create', + 'memclnt_delete', + 'get_first_msg_id', +} + +argument_filter = { + 'client_index', + 'context', +} + +def topython(messages, services): + import pprint + pp = pprint.PrettyPrinter() + + s = '''\ +#!/usr/bin/env python3 +from vpp_papi import VPP, VppEnum +vpp = VPP(use_socket=True) +vpp.connect(name='vppapitrace') +''' + + for m in messages: + if m['name'] not in services: + s += '# ignoring reply message: {}\n'.format(m['name']) + continue + if m['name'] in message_filter: + s += '# ignoring message {}\n'.format(m['name']) + continue + for k in argument_filter: + try: + m['args'].pop(k) + except KeyError: + pass + a = pp.pformat(m['args']) + s += 'rv = vpp.api.{}(**{})\n'.format(m['name'], a) + s += 'print("RV:", rv)\n' + s += 'vpp.disconnect()\n' + + return s + + +def init_api(apidir): + # Read API definitions + apifiles = VPPApiJSONFiles.find_api_files(api_dir=apidir) + messages = {} + services = {} + for file in apifiles: + with open(file) as apidef_file: 
+ m, s = VPPApiJSONFiles.process_json_file(apidef_file) + messages.update(m) + services.update(s) + return messages, services + + +def replaymsgs(vpp, msgs): + for m in msgs: + name = m['name'] + if name not in vpp.services: + continue + if name == 'control_ping': + continue + try: + m['args'].pop('client_index') + except KeyError: + pass + if m['args']['context'] == 0: + m['args']['context'] = 1 + f = vpp.get_function(name) + rv = f(**m['args']) + print('RV {}'.format(rv)) + + +def replay(args): + """Replay into running VPP instance""" + + from vpp_papi import VPP + + JSON = 1 + APITRACE = 2 + + filename, file_extension = os.path.splitext(args.input) + input_type = JSON if file_extension == '.json' else APITRACE + + vpp = VPP(use_socket=args.socket) + rv = vpp.connect(name='vppapireplay', chroot_prefix=args.shmprefix) + if rv != 0: + sys.exit('Cannot connect to VPP') + + if input_type == JSON: + with open(args.input, 'r') as file: + msgs = json.load(file, object_hook=vpp_decode) + else: + msgs = apitrace2json(messages, args.input) + + replaymsgs(vpp, msgs) + + vpp.disconnect() + + +def generate(args): + """Generate JSON""" + + JSON = 1 + APITRACE = 2 + PYTHON = 3 + + filename, file_extension = os.path.splitext(args.input) + input_type = JSON if file_extension == '.json' else APITRACE + + filename, file_extension = os.path.splitext(args.output) + if file_extension == '.json' or filename == '-': + output_type = JSON + elif file_extension == '.py': + output_type = PYTHON + else: + output_type = APITRACE + + if input_type == output_type: + sys.exit("error: Nothing to convert between") + + if input_type == JSON and output_type == APITRACE: + sys.exit("error: Input file must be JSON file: {}".format(args.input)) + + messages, services = init_api(args.apidir) + + if input_type == JSON and output_type == APITRACE: + i = 0 + for k, v in messages.items(): + v._vl_msg_id = i + i += 1 + + n, result = json2apitrace(messages, args.input) + print('API messages: {}'.format(n)) + header = struct.pack(">IIB", n, len(messages), 0) + + i = 0 + msgtbl = serialize_msgtbl(messages) + with open(args.output, 'wb') as outfile: + outfile.write(header) + outfile.write(msgtbl) + outfile.write(result) + + return + + if input_type == APITRACE: + result = apitrace2json(messages, args.input) + if output_type == PYTHON: + s = json.dumps(result, cls=VPPEncoder, default=vpp_encoder) + x = json.loads(s, object_hook=vpp_decode) + s = topython(x, services) + else: + s = json.dumps(result, cls=VPPEncoder, + default=vpp_encoder, indent=4 * ' ') + elif output_type == PYTHON: + with open(args.input, 'r') as file: + x = json.load(file, object_hook=vpp_decode) + s = topython(x, services) + else: + sys.exit('Input file must be API trace file: {}'.format(args.input)) + + if args.output == '-': + sys.stdout.write(s + '\n') + else: + print('Generating {} from API trace: {}' + .format(args.output, args.input)) + with open(args.output, 'w') as outfile: + outfile.write(s) + +def general(args): + return + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('--debug', action='store_true', + help='enable debug mode') + parser.add_argument('--apidir', + help='Location of JSON API definitions') + + parser.set_defaults(func=general) + subparsers = parser.add_subparsers(title='subcommands', + description='valid subcommands', + help='additional help') + + parser_convert = subparsers.add_parser('convert', + help='Convert API trace to JSON or Python and back') + parser_convert.add_argument('input', + help='Input file (API trace | 
JSON)') + parser_convert.add_argument('output', + help='Output file (Python | JSON | API trace)') + parser_convert.set_defaults(func=generate) + + + parser_replay = subparsers.add_parser('replay', + help='Replay messages to running VPP instance') + parser_replay.add_argument('input', help='Input file (API trace | JSON)') + parser_replay.add_argument('--socket', action='store_true', + help='use default socket to connect to VPP') + parser_replay.add_argument('--shmprefix', + help='connect to VPP on shared memory prefix') + parser_replay.set_defaults(func=replay) + + args = parser.parse_args() + + if args.debug: + logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) + + args.func(args) + + +main() diff --git a/src/vlibapi/api.h b/src/vlibapi/api.h index 7238a31f2f3..3eef0503310 100644 --- a/src/vlibapi/api.h +++ b/src/vlibapi/api.h @@ -30,9 +30,9 @@ /* *INDENT-OFF* */ typedef CLIB_PACKED ( struct { - u8 endian; - u8 wrapped; u32 nitems; + u32 msgtbl_size; + u8 wrapped; }) vl_api_trace_file_header_t; /* *INDENT-ON* */ diff --git a/src/vlibapi/api_shared.c b/src/vlibapi/api_shared.c index 355be35c677..ce7c4aec712 100644 --- a/src/vlibapi/api_shared.c +++ b/src/vlibapi/api_shared.c @@ -189,6 +189,29 @@ vl_msg_api_trace_free (api_main_t * am, vl_api_trace_which_t which) return 0; } +u8 * +vl_api_serialize_message_table (api_main_t * am, u8 * vector) +{ + serialize_main_t _sm, *sm = &_sm; + hash_pair_t *hp; + u32 nmsg = hash_elts (am->msg_index_by_name_and_crc); + + serialize_open_vector (sm, vector); + + /* serialize the count */ + serialize_integer (sm, nmsg, sizeof (u32)); + + /* *INDENT-OFF* */ + hash_foreach_pair (hp, am->msg_index_by_name_and_crc, + ({ + serialize_likely_small_unsigned_integer (sm, hp->value[0]); + serialize_cstring (sm, (char *) hp->key); + })); + /* *INDENT-ON* */ + + return serialize_close_vector (sm); +} + int vl_msg_api_trace_save (api_main_t * am, vl_api_trace_which_t which, FILE * fp) { @@ -223,15 +246,24 @@ vl_msg_api_trace_save (api_main_t * am, vl_api_trace_which_t which, FILE * fp) } /* Write the file header */ - fh.nitems = vec_len (tp->traces); - fh.endian = tp->endian; fh.wrapped = tp->wrapped; + fh.nitems = clib_host_to_net_u32 (vec_len (tp->traces)); + u8 *m = vl_api_serialize_message_table (am, 0); + clib_warning ("Message table length %d", vec_len (m)); + fh.msgtbl_size = clib_host_to_net_u32 (vec_len (m)); if (fwrite (&fh, sizeof (fh), 1, fp) != 1) { return (-10); } + /* Write the message table */ + if (fwrite (m, vec_len (m), 1, fp) != 1) + { + return (-14); + } + vec_free (m); + /* No-wrap case */ if (tp->wrapped == 0) { diff --git a/src/vlibmemory/socket_api.c b/src/vlibmemory/socket_api.c index 5aad8a9598f..868298ccc85 100644 --- a/src/vlibmemory/socket_api.c +++ b/src/vlibmemory/socket_api.c @@ -692,9 +692,9 @@ reply: } #define foreach_vlib_api_msg \ -_(SOCKCLNT_CREATE, sockclnt_create) \ -_(SOCKCLNT_DELETE, sockclnt_delete) \ -_(SOCK_INIT_SHM, sock_init_shm) + _(SOCKCLNT_CREATE, sockclnt_create, 1) \ + _(SOCKCLNT_DELETE, sockclnt_delete, 1) \ + _(SOCK_INIT_SHM, sock_init_shm, 1) clib_error_t * vl_sock_api_init (vlib_main_t * vm) @@ -710,13 +710,13 @@ vl_sock_api_init (vlib_main_t * vm) if (sm->socket_name == 0) return 0; -#define _(N,n) \ +#define _(N,n,t) \ vl_msg_api_set_handlers(VL_API_##N, #n, \ vl_api_##n##_t_handler, \ vl_noop_handler, \ vl_api_##n##_t_endian, \ vl_api_##n##_t_print, \ - sizeof(vl_api_##n##_t), 1); + sizeof(vl_api_##n##_t), t); foreach_vlib_api_msg; #undef _ diff --git a/src/vlibmemory/vlib_api.c 
b/src/vlibmemory/vlib_api.c index 73dd5bda33c..e1a6bd18d55 100644 --- a/src/vlibmemory/vlib_api.c +++ b/src/vlibmemory/vlib_api.c @@ -68,29 +68,6 @@ vl_api_trace_plugin_msg_ids_t_print (vl_api_trace_plugin_msg_ids_t * a, #include <vlibmemory/vl_memory_api_h.h> #undef vl_endianfun -u8 * -vl_api_serialize_message_table (api_main_t * am, u8 * vector) -{ - serialize_main_t _sm, *sm = &_sm; - hash_pair_t *hp; - u32 nmsg = hash_elts (am->msg_index_by_name_and_crc); - - serialize_open_vector (sm, vector); - - /* serialize the count */ - serialize_integer (sm, nmsg, sizeof (u32)); - - /* *INDENT-OFF* */ - hash_foreach_pair (hp, am->msg_index_by_name_and_crc, - ({ - serialize_likely_small_unsigned_integer (sm, hp->value[0]); - serialize_cstring (sm, (char *) hp->key); - })); - /* *INDENT-ON* */ - - return serialize_close_vector (sm); -} - static void vl_api_get_first_msg_id_t_handler (vl_api_get_first_msg_id_t * mp) { diff --git a/src/vlibmemory/vlib_api_cli.c b/src/vlibmemory/vlib_api_cli.c index b5fe151a2c0..0d5ce96fbdf 100755 --- a/src/vlibmemory/vlib_api_cli.c +++ b/src/vlibmemory/vlib_api_cli.c @@ -402,10 +402,9 @@ vl_msg_api_process_file (vlib_main_t * vm, u8 * filename, struct stat statb; size_t file_size; u8 *msg; - u8 endian_swap_needed = 0; api_main_t *am = &api_main; u8 *tmpbuf = 0; - u32 nitems; + u32 nitems, nitems_msgtbl; void **saved_print_handlers = 0; fd = open ((char *) filename, O_RDONLY); @@ -443,14 +442,7 @@ vl_msg_api_process_file (vlib_main_t * vm, u8 * filename, } close (fd); - if ((clib_arch_is_little_endian && hp->endian == VL_API_BIG_ENDIAN) - || (clib_arch_is_big_endian && hp->endian == VL_API_LITTLE_ENDIAN)) - endian_swap_needed = 1; - - if (endian_swap_needed) - nitems = ntohl (hp->nitems); - else - nitems = hp->nitems; + nitems = ntohl (hp->nitems); if (last_index == (u32) ~ 0) { @@ -473,9 +465,26 @@ vl_msg_api_process_file (vlib_main_t * vm, u8 * filename, saved_print_handlers = (void **) vec_dup (am->msg_print_handlers); vl_msg_api_custom_dump_configure (am); } + msg = (u8 *) (hp + 1); + u16 *msgid_vec = 0; + serialize_main_t _sm, *sm = &_sm; + u32 msgtbl_size = ntohl (hp->msgtbl_size); + u8 *name_and_crc; - msg = (u8 *) (hp + 1); + unserialize_open_data (sm, msg, msgtbl_size); + unserialize_integer (sm, &nitems_msgtbl, sizeof (u32)); + + for (i = 0; i < nitems_msgtbl; i++) + { + u16 msg_index = unserialize_likely_small_unsigned_integer (sm); + unserialize_cstring (sm, (char **) &name_and_crc); + u16 msg_index2 = vl_msg_api_get_msg_index (name_and_crc); + vec_validate (msgid_vec, msg_index); + msgid_vec[msg_index] = msg_index2; + } + + msg += msgtbl_size; for (i = 0; i < first_index; i++) { @@ -486,11 +495,9 @@ vl_msg_api_process_file (vlib_main_t * vm, u8 * filename, size = clib_host_to_net_u32 (*(u32 *) msg); msg += sizeof (u32); - if (clib_arch_is_little_endian) - msg_id = ntohs (*((u16 *) msg)); - else - msg_id = *((u16 *) msg); - + msg_id = ntohs (*((u16 *) msg)); + if (msg_id < vec_len (msgid_vec)) + msg_id = msgid_vec[msg_id]; cfgp = am->api_trace_cfg + msg_id; if (!cfgp) { @@ -507,7 +514,6 @@ vl_msg_api_process_file (vlib_main_t * vm, u8 * filename, for (; i <= last_index; i++) { trace_cfg_t *cfgp; - u16 *msg_idp; u16 msg_id; int size; @@ -517,10 +523,11 @@ vl_msg_api_process_file (vlib_main_t * vm, u8 * filename, size = clib_host_to_net_u32 (*(u32 *) msg); msg += sizeof (u32); - if (clib_arch_is_little_endian) - msg_id = ntohs (*((u16 *) msg)); - else - msg_id = *((u16 *) msg); + msg_id = ntohs (*((u16 *) msg)); + if (msg_id < vec_len (msgid_vec)) + { + 
msg_id = msgid_vec[msg_id]; + } cfgp = am->api_trace_cfg + msg_id; if (!cfgp) @@ -538,12 +545,10 @@ vl_msg_api_process_file (vlib_main_t * vm, u8 * filename, clib_memset (tmpbuf, 0xf, sizeof (uword)); /* - * Endian swap if needed. All msg data is supposed to be - * in network byte order. All msg handlers are supposed to - * know that. The generic message dumpers don't know that. - * One could fix apigen, I suppose. + * Endian swap if needed. All msg data is supposed to be in + * network byte order. */ - if ((which == DUMP && clib_arch_is_little_endian) || endian_swap_needed) + if ((which == DUMP && clib_arch_is_little_endian)) { void (*endian_fp) (void *); if (msg_id >= vec_len (am->msg_endian_handlers) @@ -562,7 +567,7 @@ vl_msg_api_process_file (vlib_main_t * vm, u8 * filename, /* msg_id always in network byte order */ if (clib_arch_is_little_endian) { - msg_idp = (u16 *) (tmpbuf + sizeof (uword)); + u16 *msg_idp = (u16 *) (tmpbuf + sizeof (uword)); *msg_idp = msg_id; } @@ -1051,7 +1056,7 @@ dump_api_table_file_command_fn (vlib_main_t * vm, item->crc = extract_crc (name_and_crc); item->which = 0; /* file */ } - serialize_close (sm); + unserialize_close (sm); /* Compare with the current image? */ if (compare_current) diff --git a/src/vnet/ip/ip_api.c b/src/vnet/ip/ip_api.c index bcb53888621..fd4c1521a22 100644 --- a/src/vnet/ip/ip_api.c +++ b/src/vnet/ip/ip_api.c @@ -2545,7 +2545,7 @@ vl_api_ip_probe_neighbor_t_handler (vl_api_ip_probe_neighbor_t * mp) BAD_SW_IF_INDEX_LABEL; - REPLY_MACRO (VL_API_PROXY_ARP_INTFC_ENABLE_DISABLE_REPLY); + REPLY_MACRO (VL_API_IP_PROBE_NEIGHBOR_REPLY); } static void diff --git a/src/vpp-api/python/vpp_papi/__init__.py b/src/vpp-api/python/vpp_papi/__init__.py index 957468a5baf..e1b77811aef 100644 --- a/src/vpp-api/python/vpp_papi/__init__.py +++ b/src/vpp-api/python/vpp_papi/__init__.py @@ -2,6 +2,7 @@ from .vpp_papi import FuncWrapper, VPP, VppApiDynamicMethodHolder # noqa: F401 from .vpp_papi import VppEnum, VppEnumType # noqa: F401 from .vpp_papi import VPPIOError, VPPRuntimeError, VPPValueError # noqa: F401 from .vpp_papi import VPPApiClient # noqa: F401 +from .vpp_papi import VPPApiJSONFiles # noqa: F401 from . macaddress import MACAddress, mac_pton, mac_ntop # noqa: F401 # sorted lexicographically diff --git a/src/vpp-api/python/vpp_papi/vpp_papi.py b/src/vpp-api/python/vpp_papi/vpp_papi.py index 818a55f52f3..b3f2a156939 100644 --- a/src/vpp-api/python/vpp_papi/vpp_papi.py +++ b/src/vpp-api/python/vpp_papi/vpp_papi.py @@ -112,29 +112,133 @@ class VPPRuntimeError(RuntimeError): class VPPValueError(ValueError): pass +class VPPApiJSONFiles(object): + @classmethod + def find_api_dir(cls, dirs): + """Attempt to find the best directory in which API definition + files may reside. If the value VPP_API_DIR exists in the environment + then it is first on the search list. If we're inside a recognized + location in a VPP source tree (src/scripts and src/vpp-api/python) + then entries from there to the likely locations in build-root are + added. Finally the location used by system packages is added. -class VPPApiClient(object): - """VPP interface. + :returns: A single directory name, or None if no such directory + could be found. + """ - This class provides the APIs to VPP. The APIs are loaded - from provided .api.json files and makes functions accordingly. - These functions are documented in the VPP .api files, as they - are dynamically created. 
+ # perhaps we're in the 'src/scripts' or 'src/vpp-api/python' dir; + # in which case, plot a course to likely places in the src tree + import __main__ as main + if hasattr(main, '__file__'): + # get the path of the calling script + localdir = os.path.dirname(os.path.realpath(main.__file__)) + else: + # use cwd if there is no calling script + localdir = os.getcwd() + localdir_s = localdir.split(os.path.sep) - Additionally, VPP can send callback messages; this class - provides a means to register a callback function to receive - these messages in a background thread. - """ - apidir = None - VPPApiError = VPPApiError - VPPRuntimeError = VPPRuntimeError - VPPValueError = VPPValueError - VPPNotImplementedError = VPPNotImplementedError - VPPIOError = VPPIOError + def dmatch(dir): + """Match dir against right-hand components of the script dir""" + d = dir.split('/') # param 'dir' assumes a / separator + length = len(d) + return len(localdir_s) > length and localdir_s[-length:] == d + + def sdir(srcdir, variant): + """Build a path from srcdir to the staged API files of + 'variant' (typically '' or '_debug')""" + # Since 'core' and 'plugin' files are staged + # in separate directories, we target the parent dir. + return os.path.sep.join(( + srcdir, + 'build-root', + 'install-vpp%s-native' % variant, + 'vpp', + 'share', + 'vpp', + 'api', + )) + + srcdir = None + if dmatch('src/scripts'): + srcdir = os.path.sep.join(localdir_s[:-2]) + elif dmatch('src/vpp-api/python'): + srcdir = os.path.sep.join(localdir_s[:-3]) + elif dmatch('test'): + # we're apparently running tests + srcdir = os.path.sep.join(localdir_s[:-1]) + + if srcdir: + # we're in the source tree, try both the debug and release + # variants. + dirs.append(sdir(srcdir, '_debug')) + dirs.append(sdir(srcdir, '')) + + # Test for staged copies of the scripts + # For these, since we explicitly know if we're running a debug versus + # release variant, target only the relevant directory + if dmatch('build-root/install-vpp_debug-native/vpp/bin'): + srcdir = os.path.sep.join(localdir_s[:-4]) + dirs.append(sdir(srcdir, '_debug')) + if dmatch('build-root/install-vpp-native/vpp/bin'): + srcdir = os.path.sep.join(localdir_s[:-4]) + dirs.append(sdir(srcdir, '')) + + # finally, try the location system packages typically install into + dirs.append(os.path.sep.join(('', 'usr', 'share', 'vpp', 'api'))) + + # check the directories for existence; first one wins + for dir in dirs: + if os.path.isdir(dir): + return dir + + return None + + @classmethod + def find_api_files(cls, api_dir=None, patterns='*'): + """Find API definition files from the given directory tree with the + given pattern. If no directory is given then find_api_dir() is used + to locate one. If no pattern is given then all definition files found + in the directory tree are used. + + :param api_dir: A directory tree in which to locate API definition + files; subdirectories are descended into. + If this is None then find_api_dir() is called to discover it. + :param patterns: A list of patterns to use in each visited directory + when looking for files. + This can be a list/tuple object or a comma-separated string of + patterns. Each value in the list will have leading/trialing + whitespace stripped. + The pattern specifies the first part of the filename, '.api.json' + is appended. + The results are de-duplicated, thus overlapping patterns are fine. + If this is None it defaults to '*' meaning "all API files". + :returns: A list of file paths for the API files found. 
+ """ + if api_dir is None: + api_dir = cls.find_api_dir([]) + if api_dir is None: + raise VPPApiError("api_dir cannot be located") + + if isinstance(patterns, list) or isinstance(patterns, tuple): + patterns = [p.strip() + '.api.json' for p in patterns] + else: + patterns = [p.strip() + '.api.json' for p in patterns.split(",")] + + api_files = [] + for root, dirnames, files in os.walk(api_dir): + # iterate all given patterns and de-dup the result + files = set(sum([fnmatch.filter(files, p) for p in patterns], [])) + for filename in files: + api_files.append(os.path.join(root, filename)) + return api_files + + @classmethod def process_json_file(self, apidef_file): api = json.load(apidef_file) types = {} + services = {} + messages = {} for t in api['enums']: t[0] = 'vl_api_' + t[0] + '_t' types[t[0]] = {'type': 'enum', 'data': t} @@ -146,7 +250,7 @@ class VPPApiClient(object): types[t[0]] = {'type': 'type', 'data': t} for t, v in api['aliases'].items(): types['vl_api_' + t + '_t'] = {'type': 'alias', 'data': v} - self.services.update(api['services']) + services.update(api['services']) i = 0 while True: @@ -184,9 +288,31 @@ class VPPApiClient(object): for m in api['messages']: try: - self.messages[m[0]] = VPPMessage(m[0], m[1:]) + messages[m[0]] = VPPMessage(m[0], m[1:]) except VPPNotImplementedError: + ### OLE FIXME self.logger.error('Not implemented error for {}'.format(m[0])) + return messages, services + +class VPPApiClient(object): + """VPP interface. + + This class provides the APIs to VPP. The APIs are loaded + from provided .api.json files and makes functions accordingly. + These functions are documented in the VPP .api files, as they + are dynamically created. + + Additionally, VPP can send callback messages; this class + provides a means to register a callback function to receive + these messages in a background thread. + """ + apidir = None + VPPApiError = VPPApiError + VPPRuntimeError = VPPRuntimeError + VPPValueError = VPPValueError + VPPNotImplementedError = VPPNotImplementedError + VPPIOError = VPPIOError + def __init__(self, apifiles=None, testmode=False, async_thread=True, logger=None, loglevel=None, @@ -236,7 +362,7 @@ class VPPApiClient(object): if not apifiles: # Pick up API definitions from default directory try: - apifiles = self.find_api_files() + apifiles = VPPApiJSONFiles.find_api_files(self.apidir) except RuntimeError: # In test mode we don't care that we can't find the API files if testmode: @@ -246,7 +372,9 @@ class VPPApiClient(object): for file in apifiles: with open(file) as apidef_file: - self.process_json_file(apidef_file) + m, s = VPPApiJSONFiles.process_json_file(apidef_file) + self.messages.update(m) + self.services.update(s) self.apifiles = apifiles @@ -259,6 +387,10 @@ class VPPApiClient(object): # Make sure we allow VPP to clean up the message rings. atexit.register(vpp_atexit, weakref.ref(self)) + def get_function(self, name): + return getattr(self._api, name) + + class ContextId(object): """Multiprocessing-safe provider of unique context IDs.""" def __init__(self): @@ -275,127 +407,6 @@ class VPPApiClient(object): def get_type(self, name): return vpp_get_type(name) - @classmethod - def find_api_dir(cls): - """Attempt to find the best directory in which API definition - files may reside. If the value VPP_API_DIR exists in the environment - then it is first on the search list. If we're inside a recognized - location in a VPP source tree (src/scripts and src/vpp-api/python) - then entries from there to the likely locations in build-root are - added. 
Finally the location used by system packages is added. - - :returns: A single directory name, or None if no such directory - could be found. - """ - dirs = [cls.apidir] if cls.apidir else [] - - # perhaps we're in the 'src/scripts' or 'src/vpp-api/python' dir; - # in which case, plot a course to likely places in the src tree - import __main__ as main - if hasattr(main, '__file__'): - # get the path of the calling script - localdir = os.path.dirname(os.path.realpath(main.__file__)) - else: - # use cwd if there is no calling script - localdir = os.getcwd() - localdir_s = localdir.split(os.path.sep) - - def dmatch(dir): - """Match dir against right-hand components of the script dir""" - d = dir.split('/') # param 'dir' assumes a / separator - length = len(d) - return len(localdir_s) > length and localdir_s[-length:] == d - - def sdir(srcdir, variant): - """Build a path from srcdir to the staged API files of - 'variant' (typically '' or '_debug')""" - # Since 'core' and 'plugin' files are staged - # in separate directories, we target the parent dir. - return os.path.sep.join(( - srcdir, - 'build-root', - 'install-vpp%s-native' % variant, - 'vpp', - 'share', - 'vpp', - 'api', - )) - - srcdir = None - if dmatch('src/scripts'): - srcdir = os.path.sep.join(localdir_s[:-2]) - elif dmatch('src/vpp-api/python'): - srcdir = os.path.sep.join(localdir_s[:-3]) - elif dmatch('test'): - # we're apparently running tests - srcdir = os.path.sep.join(localdir_s[:-1]) - - if srcdir: - # we're in the source tree, try both the debug and release - # variants. - dirs.append(sdir(srcdir, '_debug')) - dirs.append(sdir(srcdir, '')) - - # Test for staged copies of the scripts - # For these, since we explicitly know if we're running a debug versus - # release variant, target only the relevant directory - if dmatch('build-root/install-vpp_debug-native/vpp/bin'): - srcdir = os.path.sep.join(localdir_s[:-4]) - dirs.append(sdir(srcdir, '_debug')) - if dmatch('build-root/install-vpp-native/vpp/bin'): - srcdir = os.path.sep.join(localdir_s[:-4]) - dirs.append(sdir(srcdir, '')) - - # finally, try the location system packages typically install into - dirs.append(os.path.sep.join(('', 'usr', 'share', 'vpp', 'api'))) - - # check the directories for existence; first one wins - for dir in dirs: - if os.path.isdir(dir): - return dir - - return None - - @classmethod - def find_api_files(cls, api_dir=None, patterns='*'): - """Find API definition files from the given directory tree with the - given pattern. If no directory is given then find_api_dir() is used - to locate one. If no pattern is given then all definition files found - in the directory tree are used. - - :param api_dir: A directory tree in which to locate API definition - files; subdirectories are descended into. - If this is None then find_api_dir() is called to discover it. - :param patterns: A list of patterns to use in each visited directory - when looking for files. - This can be a list/tuple object or a comma-separated string of - patterns. Each value in the list will have leading/trialing - whitespace stripped. - The pattern specifies the first part of the filename, '.api.json' - is appended. - The results are de-duplicated, thus overlapping patterns are fine. - If this is None it defaults to '*' meaning "all API files". - :returns: A list of file paths for the API files found. 
- """ - if api_dir is None: - api_dir = cls.find_api_dir() - if api_dir is None: - raise VPPApiError("api_dir cannot be located") - - if isinstance(patterns, list) or isinstance(patterns, tuple): - patterns = [p.strip() + '.api.json' for p in patterns] - else: - patterns = [p.strip() + '.api.json' for p in patterns.split(",")] - - api_files = [] - for root, dirnames, files in os.walk(api_dir): - # iterate all given patterns and de-dup the result - files = set(sum([fnmatch.filter(files, p) for p in patterns], [])) - for filename in files: - api_files.append(os.path.join(root, filename)) - - return api_files - @property def api(self): if not hasattr(self, "_api"): |