diff options
author | Florin Coras <fcoras@cisco.com> | 2021-09-14 18:54:45 -0700 |
---|---|---|
committer | Ole Troan <otroan@employees.org> | 2021-09-27 07:04:51 +0000 |
commit | 248210c6ca1716ee2c5e6c974051218930fa4c26 (patch) | |
tree | 5c4e8e1c53800e8ec43da7a8dbc4fbe73e5c9672 /src/vlibmemory | |
parent | 1b6c7932a8feb419aae73a00a6784d7c110decdc (diff) |
misc: move part of vpe apis to vlibmemory
VPE apis are actually vlib apis. This moves those that are not tightly
coupled with vapi to vlib_api
Type: refactor
Signed-off-by: Florin Coras <fcoras@cisco.com>
Change-Id: I456a64ce49a0cdeff4a0931c6ea513cb639f683e
Signed-off-by: Ole Troan <ot@cisco.com>
Diffstat (limited to 'src/vlibmemory')
-rw-r--r-- | src/vlibmemory/CMakeLists.txt | 8 | ||||
-rw-r--r-- | src/vlibmemory/memclnt_api.c | 719 | ||||
-rw-r--r-- | src/vlibmemory/vlib.api | 250 | ||||
-rw-r--r-- | src/vlibmemory/vlib_api.c | 828 | ||||
-rw-r--r-- | src/vlibmemory/vlibapi_test.c | 470 |
5 files changed, 1674 insertions, 601 deletions
diff --git a/src/vlibmemory/CMakeLists.txt b/src/vlibmemory/CMakeLists.txt index b48ff7b5766..456cba9baeb 100644 --- a/src/vlibmemory/CMakeLists.txt +++ b/src/vlibmemory/CMakeLists.txt @@ -18,8 +18,9 @@ add_vpp_library (vlibmemory memory_client.c socket_client.c socket_api.c - vlib_api.c + memclnt_api.c vlib_api_cli.c + vlib_api.c ../vlibapi/api_shared.c ../vlibapi/node_serialize.c @@ -35,6 +36,7 @@ add_vpp_library (vlibmemory API_FILES memclnt.api + vlib.api LINK_LIBRARIES vppinfra svm vlib ) @@ -51,3 +53,7 @@ add_vpp_library (vlibmemoryclient LINK_LIBRARIES vppinfra svm ) add_dependencies(vlibmemoryclient vlibmemory_api_headers) + +add_vat_test_library(vlib + vlibapi_test.c +) diff --git a/src/vlibmemory/memclnt_api.c b/src/vlibmemory/memclnt_api.c new file mode 100644 index 00000000000..5ebc31f71dc --- /dev/null +++ b/src/vlibmemory/memclnt_api.c @@ -0,0 +1,719 @@ +/* + *------------------------------------------------------------------ + * memclnt_api.c VLIB API implementation + * + * Copyright (c) 2009 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *------------------------------------------------------------------ + */ + +#include <fcntl.h> +#include <pthread.h> +#include <vppinfra/vec.h> +#include <vppinfra/hash.h> +#include <vppinfra/pool.h> +#include <vppinfra/format.h> +#include <vppinfra/byte_order.h> +#include <vppinfra/elog.h> +#include <vlib/vlib.h> +#include <vlib/unix/unix.h> +#include <vlibapi/api.h> +#include <vlibmemory/api.h> + +/** + * @file + * @brief Binary API messaging via shared memory + * Low-level, primary provisioning interface + */ +/*? %%clicmd:group_label Binary API CLI %% ?*/ +/*? %%syscfg:group_label Binary API configuration %% ?*/ + +#define TRACE_VLIB_MEMORY_QUEUE 0 + +#include <vlibmemory/vl_memory_msg_enum.h> /* enumerate all vlib messages */ + +#define vl_typedefs /* define message structures */ +#include <vlibmemory/vl_memory_api_h.h> +#undef vl_typedefs + +/* instantiate all the print functions we know about */ +#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__) +#define vl_printfun +#include <vlibmemory/vl_memory_api_h.h> +#undef vl_printfun + +static inline void * +vl_api_trace_plugin_msg_ids_t_print (vl_api_trace_plugin_msg_ids_t *a, + void *handle) +{ + vl_print (handle, "vl_api_trace_plugin_msg_ids: %s first %u last %u\n", + a->plugin_name, clib_host_to_net_u16 (a->first_msg_id), + clib_host_to_net_u16 (a->last_msg_id)); + return handle; +} + +/* instantiate all the endian swap functions we know about */ +#define vl_endianfun +#include <vlibmemory/vl_memory_api_h.h> +#undef vl_endianfun + +static void +vl_api_get_first_msg_id_t_handler (vl_api_get_first_msg_id_t *mp) +{ + vl_api_get_first_msg_id_reply_t *rmp; + vl_api_registration_t *regp; + uword *p; + api_main_t *am = vlibapi_get_main (); + vl_api_msg_range_t *rp; + u8 name[64]; + u16 first_msg_id = ~0; + int rv = -7; /* VNET_API_ERROR_INVALID_VALUE */ + + regp = vl_api_client_index_to_registration (mp->client_index); + if (!regp) + return; + + if (am->msg_range_by_name == 0) + goto out; + strncpy 
((char *) name, (char *) mp->name, ARRAY_LEN (name)); + name[ARRAY_LEN (name) - 1] = '\0'; + p = hash_get_mem (am->msg_range_by_name, name); + if (p == 0) + goto out; + + rp = vec_elt_at_index (am->msg_ranges, p[0]); + first_msg_id = rp->first_msg_id; + rv = 0; + +out: + rmp = vl_msg_api_alloc (sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_GET_FIRST_MSG_ID_REPLY); + rmp->context = mp->context; + rmp->retval = ntohl (rv); + rmp->first_msg_id = ntohs (first_msg_id); + vl_api_send_msg (regp, (u8 *) rmp); +} + +void +vl_api_api_versions_t_handler (vl_api_api_versions_t *mp) +{ + api_main_t *am = vlibapi_get_main (); + vl_api_api_versions_reply_t *rmp; + vl_api_registration_t *reg; + u32 nmsg = vec_len (am->api_version_list); + int msg_size = sizeof (*rmp) + sizeof (rmp->api_versions[0]) * nmsg; + int i; + + reg = vl_api_client_index_to_registration (mp->client_index); + if (!reg) + return; + + rmp = vl_msg_api_alloc (msg_size); + clib_memset (rmp, 0, msg_size); + rmp->_vl_msg_id = ntohs (VL_API_API_VERSIONS_REPLY); + + /* fill in the message */ + rmp->context = mp->context; + rmp->count = htonl (nmsg); + + for (i = 0; i < nmsg; ++i) + { + api_version_t *vl = &am->api_version_list[i]; + rmp->api_versions[i].major = htonl (vl->major); + rmp->api_versions[i].minor = htonl (vl->minor); + rmp->api_versions[i].patch = htonl (vl->patch); + strncpy ((char *) rmp->api_versions[i].name, vl->name, + ARRAY_LEN (rmp->api_versions[i].name)); + rmp->api_versions[i].name[ARRAY_LEN (rmp->api_versions[i].name) - 1] = + '\0'; + } + + vl_api_send_msg (reg, (u8 *) rmp); +} + +#define foreach_vlib_api_msg \ + _ (GET_FIRST_MSG_ID, get_first_msg_id) \ + _ (API_VERSIONS, api_versions) + +/* + * vl_api_init + */ +static int +vlib_api_init (void) +{ + vl_msg_api_msg_config_t cfg; + vl_msg_api_msg_config_t *c = &cfg; + + clib_memset (c, 0, sizeof (*c)); + +#define _(N, n) \ + do \ + { \ + c->id = VL_API_##N; \ + c->name = #n; \ + c->handler = vl_api_##n##_t_handler; \ + c->cleanup = 
vl_noop_handler; \ + c->endian = vl_api_##n##_t_endian; \ + c->print = vl_api_##n##_t_print; \ + c->size = sizeof (vl_api_##n##_t); \ + c->traced = 1; /* trace, so these msgs print */ \ + c->replay = 0; /* don't replay client create/delete msgs */ \ + c->message_bounce = 0; /* don't bounce this message */ \ + vl_msg_api_config (c); \ + } \ + while (0); + + foreach_vlib_api_msg; +#undef _ + + return 0; +} + +u64 vector_rate_histogram[SLEEP_N_BUCKETS]; + +/* + * Callback to send ourselves a plugin numbering-space trace msg + */ +static void +send_one_plugin_msg_ids_msg (u8 *name, u16 first_msg_id, u16 last_msg_id) +{ + vl_api_trace_plugin_msg_ids_t *mp; + api_main_t *am = vlibapi_get_main (); + vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr; + svm_queue_t *q; + + mp = vl_msg_api_alloc_as_if_client (sizeof (*mp)); + clib_memset (mp, 0, sizeof (*mp)); + + mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_TRACE_PLUGIN_MSG_IDS); + strncpy ((char *) mp->plugin_name, (char *) name, + sizeof (mp->plugin_name) - 1); + mp->first_msg_id = clib_host_to_net_u16 (first_msg_id); + mp->last_msg_id = clib_host_to_net_u16 (last_msg_id); + + q = shmem_hdr->vl_input_queue; + + vl_msg_api_send_shmem (q, (u8 *) &mp); +} + +void +vl_api_save_msg_table (void) +{ + u8 *serialized_message_table; + api_main_t *am = vlibapi_get_main (); + u8 *chroot_file; + int fd, rv; + + /* + * Snapshoot the api message table. 
+ */ + if (strstr ((char *) am->save_msg_table_filename, "..") || + index ((char *) am->save_msg_table_filename, '/')) + { + clib_warning ("illegal save-message-table filename '%s'", + am->save_msg_table_filename); + return; + } + + chroot_file = format (0, "/tmp/%s%c", am->save_msg_table_filename, 0); + + fd = creat ((char *) chroot_file, 0644); + + if (fd < 0) + { + clib_unix_warning ("creat"); + return; + } + + serialized_message_table = vl_api_serialize_message_table (am, 0); + + rv = + write (fd, serialized_message_table, vec_len (serialized_message_table)); + + if (rv != vec_len (serialized_message_table)) + clib_unix_warning ("write"); + + rv = close (fd); + if (rv < 0) + clib_unix_warning ("close"); + + vec_free (chroot_file); + vec_free (serialized_message_table); +} + +clib_error_t *vat_builtin_main_init (vlib_main_t *vm) __attribute__ ((weak)); +clib_error_t * +vat_builtin_main_init (vlib_main_t *vm) +{ + return 0; +} + +static uword +vl_api_clnt_process (vlib_main_t *vm, vlib_node_runtime_t *node, + vlib_frame_t *f) +{ + vlib_global_main_t *vgm = vlib_get_global_main (); + int private_segment_rotor = 0, i, rv; + vl_socket_args_for_process_t *a; + vl_shmem_hdr_t *shm; + svm_queue_t *q; + clib_error_t *e; + api_main_t *am = vlibapi_get_main (); + f64 dead_client_scan_time; + f64 sleep_time, start_time; + f64 vector_rate; + clib_error_t *error; + uword event_type; + uword *event_data = 0; + f64 now; + + if ((error = vl_sock_api_init (vm))) + { + clib_error_report (error); + clib_warning ("socksvr_api_init failed, quitting..."); + return 0; + } + + if ((rv = vlib_api_init ()) < 0) + { + clib_warning ("vlib_api_init returned %d, quitting...", rv); + return 0; + } + + shm = am->shmem_hdr; + q = shm->vl_input_queue; + + e = vlib_call_init_exit_functions (vm, &vgm->api_init_function_registrations, + 1 /* call_once */, 1 /* is_global */); + if (e) + clib_error_report (e); + + e = vat_builtin_main_init (vm); + if (e) + clib_error_report (e); + + sleep_time = 
10.0; + dead_client_scan_time = vlib_time_now (vm) + 10.0; + + /* + * Send plugin message range messages for each plugin we loaded + */ + for (i = 0; i < vec_len (am->msg_ranges); i++) + { + vl_api_msg_range_t *rp = am->msg_ranges + i; + send_one_plugin_msg_ids_msg (rp->name, rp->first_msg_id, + rp->last_msg_id); + } + + /* + * Save the api message table snapshot, if configured + */ + if (am->save_msg_table_filename) + vl_api_save_msg_table (); + + /* $$$ pay attention to frame size, control CPU usage */ + while (1) + { + /* + * There's a reason for checking the queue before + * sleeping. If the vlib application crashes, it's entirely + * possible for a client to enqueue a connect request + * during the process restart interval. + * + * Unless some force of physics causes the new incarnation + * of the application to process the request, the client will + * sit and wait for Godot... + */ + vector_rate = (f64) vlib_last_vectors_per_main_loop (vm); + start_time = vlib_time_now (vm); + while (1) + { + if (vl_mem_api_handle_rpc (vm, node) || + vl_mem_api_handle_msg_main (vm, node)) + { + vm->api_queue_nonempty = 0; + VL_MEM_API_LOG_Q_LEN ("q-underflow: len %d", 0); + sleep_time = 20.0; + break; + } + + /* Allow no more than 10us without a pause */ + if (vlib_time_now (vm) > start_time + 10e-6) + { + int index = SLEEP_400_US; + if (vector_rate > 40.0) + sleep_time = 400e-6; + else if (vector_rate > 20.0) + { + index = SLEEP_200_US; + sleep_time = 200e-6; + } + else if (vector_rate >= 1.0) + { + index = SLEEP_100_US; + sleep_time = 100e-6; + } + else + { + index = SLEEP_10_US; + sleep_time = 10e-6; + } + vector_rate_histogram[index] += 1; + break; + } + } + + /* + * see if we have any private api shared-memory segments + * If so, push required context variables, and process + * a message. 
+ */ + if (PREDICT_FALSE (vec_len (am->vlib_private_rps))) + { + if (private_segment_rotor >= vec_len (am->vlib_private_rps)) + private_segment_rotor = 0; + vl_mem_api_handle_msg_private (vm, node, private_segment_rotor++); + } + + vlib_process_wait_for_event_or_clock (vm, sleep_time); + vec_reset_length (event_data); + event_type = vlib_process_get_events (vm, &event_data); + now = vlib_time_now (vm); + + switch (event_type) + { + case QUEUE_SIGNAL_EVENT: + vm->queue_signal_pending = 0; + VL_MEM_API_LOG_Q_LEN ("q-awake: len %d", q->cursize); + + break; + case SOCKET_READ_EVENT: + for (i = 0; i < vec_len (event_data); i++) + { + vl_api_registration_t *regp; + + a = pool_elt_at_index (socket_main.process_args, event_data[i]); + regp = vl_socket_get_registration (a->reg_index); + if (regp) + { + vl_socket_process_api_msg (regp, (i8 *) a->data); + a = pool_elt_at_index (socket_main.process_args, + event_data[i]); + } + vec_free (a->data); + pool_put (socket_main.process_args, a); + } + break; + + /* Timeout... */ + case -1: + break; + + default: + clib_warning ("unknown event type %d", event_type); + break; + } + + if (now > dead_client_scan_time) + { + vl_mem_api_dead_client_scan (am, shm, now); + dead_client_scan_time = vlib_time_now (vm) + 10.0; + } + } + + return 0; +} + +VLIB_REGISTER_NODE (vl_api_clnt_node) = { + .function = vl_api_clnt_process, + .type = VLIB_NODE_TYPE_PROCESS, + .name = "api-rx-from-ring", + .state = VLIB_NODE_STATE_DISABLED, + .process_log2_n_stack_bytes = 18, +}; + +void +vl_mem_api_enable_disable (vlib_main_t *vm, int enable) +{ + vlib_node_set_state ( + vm, vl_api_clnt_node.index, + (enable ? 
VLIB_NODE_STATE_POLLING : VLIB_NODE_STATE_DISABLED)); +} + +static uword +api_rx_from_node (vlib_main_t *vm, vlib_node_runtime_t *node, + vlib_frame_t *frame) +{ + uword n_packets = frame->n_vectors; + uword n_left_from; + u32 *from; + static u8 *long_msg; + + vec_validate (long_msg, 4095); + n_left_from = frame->n_vectors; + from = vlib_frame_vector_args (frame); + + while (n_left_from > 0) + { + u32 bi0; + vlib_buffer_t *b0; + void *msg; + uword msg_len; + + bi0 = from[0]; + b0 = vlib_get_buffer (vm, bi0); + from += 1; + n_left_from -= 1; + + msg = b0->data + b0->current_data; + msg_len = b0->current_length; + if (b0->flags & VLIB_BUFFER_NEXT_PRESENT) + { + ASSERT (long_msg != 0); + _vec_len (long_msg) = 0; + vec_add (long_msg, msg, msg_len); + while (b0->flags & VLIB_BUFFER_NEXT_PRESENT) + { + b0 = vlib_get_buffer (vm, b0->next_buffer); + msg = b0->data + b0->current_data; + msg_len = b0->current_length; + vec_add (long_msg, msg, msg_len); + } + msg = long_msg; + } + vl_msg_api_handler_no_trace_no_free (msg); + } + + /* Free what we've been given. 
*/ + vlib_buffer_free (vm, vlib_frame_vector_args (frame), n_packets); + + return n_packets; +} + +VLIB_REGISTER_NODE (api_rx_from_node_node, static) = { + .function = api_rx_from_node, + .type = VLIB_NODE_TYPE_INTERNAL, + .vector_size = 4, + .name = "api-rx-from-node", +}; + +static void +vl_api_rpc_call_t_handler (vl_api_rpc_call_t *mp) +{ + vl_api_rpc_call_reply_t *rmp; + int (*fp) (void *); + i32 rv = 0; + vlib_main_t *vm = vlib_get_main (); + + if (mp->function == 0) + { + rv = -1; + clib_warning ("rpc NULL function pointer"); + } + + else + { + if (mp->need_barrier_sync) + vlib_worker_thread_barrier_sync (vm); + + fp = uword_to_pointer (mp->function, int (*) (void *)); + rv = fp (mp->data); + + if (mp->need_barrier_sync) + vlib_worker_thread_barrier_release (vm); + } + + if (mp->send_reply) + { + svm_queue_t *q = vl_api_client_index_to_input_queue (mp->client_index); + if (q) + { + rmp = vl_msg_api_alloc_as_if_client (sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_RPC_CALL_REPLY); + rmp->context = mp->context; + rmp->retval = rv; + vl_msg_api_send_shmem (q, (u8 *) &rmp); + } + } + if (mp->multicast) + { + clib_warning ("multicast not yet implemented..."); + } +} + +static void +vl_api_rpc_call_reply_t_handler (vl_api_rpc_call_reply_t *mp) +{ + clib_warning ("unimplemented"); +} + +void +vl_api_send_pending_rpc_requests (vlib_main_t *vm) +{ + vlib_main_t *vm_global = vlib_get_first_main (); + + ASSERT (vm != vm_global); + + clib_spinlock_lock_if_init (&vm_global->pending_rpc_lock); + vec_append (vm_global->pending_rpc_requests, vm->pending_rpc_requests); + vec_reset_length (vm->pending_rpc_requests); + clib_spinlock_unlock_if_init (&vm_global->pending_rpc_lock); +} + +always_inline void +vl_api_rpc_call_main_thread_inline (void *fp, u8 *data, u32 data_length, + u8 force_rpc) +{ + vl_api_rpc_call_t *mp; + vlib_main_t *vm_global = vlib_get_first_main (); + vlib_main_t *vm = vlib_get_main (); + + /* Main thread and not a forced RPC: call the function directly 
*/ + if ((force_rpc == 0) && (vlib_get_thread_index () == 0)) + { + void (*call_fp) (void *); + + vlib_worker_thread_barrier_sync (vm); + + call_fp = fp; + call_fp (data); + + vlib_worker_thread_barrier_release (vm); + return; + } + + /* Otherwise, actually do an RPC */ + mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + data_length); + + clib_memset (mp, 0, sizeof (*mp)); + clib_memcpy_fast (mp->data, data, data_length); + mp->_vl_msg_id = ntohs (VL_API_RPC_CALL); + mp->function = pointer_to_uword (fp); + mp->need_barrier_sync = 1; + + /* Add to the pending vector. Thread 0 requires locking. */ + if (vm == vm_global) + clib_spinlock_lock_if_init (&vm_global->pending_rpc_lock); + vec_add1 (vm->pending_rpc_requests, (uword) mp); + if (vm == vm_global) + clib_spinlock_unlock_if_init (&vm_global->pending_rpc_lock); +} + +/* + * Check if called from worker threads. + * If so, make rpc call of fp through shmem. + * Otherwise, call fp directly + */ +void +vl_api_rpc_call_main_thread (void *fp, u8 *data, u32 data_length) +{ + vl_api_rpc_call_main_thread_inline (fp, data, data_length, /*force_rpc */ + 0); +} + +/* + * Always make rpc call of fp through shmem, useful for calling from threads + * not setup as worker threads, such as DPDK callback thread + */ +void +vl_api_force_rpc_call_main_thread (void *fp, u8 *data, u32 data_length) +{ + vl_api_rpc_call_main_thread_inline (fp, data, data_length, /*force_rpc */ + 1); +} + +static void +vl_api_trace_plugin_msg_ids_t_handler (vl_api_trace_plugin_msg_ids_t *mp) +{ + api_main_t *am = vlibapi_get_main (); + vl_api_msg_range_t *rp; + uword *p; + + /* Noop (except for tracing) during normal operation */ + if (am->replay_in_progress == 0) + return; + + p = hash_get_mem (am->msg_range_by_name, mp->plugin_name); + if (p == 0) + { + clib_warning ("WARNING: traced plugin '%s' not in current image", + mp->plugin_name); + return; + } + + rp = vec_elt_at_index (am->msg_ranges, p[0]); + if (rp->first_msg_id != clib_net_to_host_u16 
(mp->first_msg_id)) + { + clib_warning ("WARNING: traced plugin '%s' first message id %d not %d", + mp->plugin_name, clib_net_to_host_u16 (mp->first_msg_id), + rp->first_msg_id); + } + + if (rp->last_msg_id != clib_net_to_host_u16 (mp->last_msg_id)) + { + clib_warning ("WARNING: traced plugin '%s' last message id %d not %d", + mp->plugin_name, clib_net_to_host_u16 (mp->last_msg_id), + rp->last_msg_id); + } +} + +#define foreach_rpc_api_msg \ + _ (RPC_CALL, rpc_call) \ + _ (RPC_CALL_REPLY, rpc_call_reply) + +#define foreach_plugin_trace_msg _ (TRACE_PLUGIN_MSG_IDS, trace_plugin_msg_ids) + +/* + * Set the rpc callback at our earliest possible convenience. + * This avoids ordering issues between thread_init() -> start_workers and + * an init function which we could define here. If we ever intend to use + * vlib all by itself, we can't create a link-time dependency on + * an init function here and a typical "call foo_init first" + * guitar lick. + */ + +extern void *rpc_call_main_thread_cb_fn; + +static clib_error_t * +rpc_api_hookup (vlib_main_t *vm) +{ + api_main_t *am = vlibapi_get_main (); +#define _(N, n) \ + vl_msg_api_set_handlers ( \ + VL_API_##N, #n, vl_api_##n##_t_handler, vl_noop_handler, vl_noop_handler, \ + vl_api_##n##_t_print, sizeof (vl_api_##n##_t), 0 /* do not trace */); + foreach_rpc_api_msg; +#undef _ + +#define _(N, n) \ + vl_msg_api_set_handlers ( \ + VL_API_##N, #n, vl_api_##n##_t_handler, vl_noop_handler, vl_noop_handler, \ + vl_api_##n##_t_print, sizeof (vl_api_##n##_t), 1 /* do trace */); + foreach_plugin_trace_msg; +#undef _ + + /* No reason to halt the parade to create a trace record... 
*/ + am->is_mp_safe[VL_API_TRACE_PLUGIN_MSG_IDS] = 1; + rpc_call_main_thread_cb_fn = vl_api_rpc_call_main_thread; + return 0; +} + +VLIB_API_INIT_FUNCTION (rpc_api_hookup); + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vlibmemory/vlib.api b/src/vlibmemory/vlib.api new file mode 100644 index 00000000000..5e8ba47ba7d --- /dev/null +++ b/src/vlibmemory/vlib.api @@ -0,0 +1,250 @@ +/* + * Copyright (c) 2021 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +option version = "1.0.0"; + +/** \brief Process a vpe parser cli string request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param cmd_in_shmem - pointer to cli command string +*/ +define cli +{ + u32 client_index; + u32 context; + u64 cmd_in_shmem; +}; +define cli_inband +{ + u32 client_index; + u32 context; + string cmd[]; +}; + +/** \brief vpe parser cli string response + @param context - sender context, to match reply w/ request + @param retval - return code for request + @param reply_in_shmem - Reply string from cli processing if any +*/ +define cli_reply +{ + u32 context; + i32 retval; + u64 reply_in_shmem; +}; +define cli_inband_reply +{ + u32 context; + i32 retval; + string reply[]; +}; + +/** \brief Get node index using name request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param node_name[] - name of the node +*/ +define get_node_index +{ + u32 client_index; + u32 context; + string node_name[64]; +}; + +/** \brief Get node index using name request + @param context - sender context, to match reply w/ request + @param retval - return code for the request + @param node_index - index of the desired node if found, else ~0 +*/ +define get_node_index_reply +{ + u32 context; + i32 retval; + u32 node_index; +}; + +/** \brief Set the next node for a given node request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param node_name[] - node to add the next node to + @param next_name[] - node to add as the next node +*/ +define add_node_next +{ + u32 client_index; + u32 context; + string node_name[64]; + string next_name[64]; +}; + +/** \brief IP Set the next node for a given node response + @param context - sender context, to match reply w/ request + @param retval - return code for the add next node request + @param 
next_index - the index of the next node if success, else ~0 +*/ +define add_node_next_reply +{ + u32 context; + i32 retval; + u32 next_index; +}; + +/** \brief show_threads display the information about vpp + threads running on system along with their process id, + cpu id, physical core and cpu socket. +*/ +define show_threads +{ + u32 client_index; + u32 context; +}; + +/** \brief thread data + @param id - thread index + @param name - thread name i.e. vpp_main or vpp_wk_0 + @param type - thread type i.e. workers or stats + @param pid - thread Process Id + @param cpu_id - thread pinned to cpu. + "CPUs or Logical cores are the number of physical cores times + the number of threads that can run on each core through + the use of hyperthreading." (from unix.stackexchange.com) + @param core - thread pinned to actual physical core. + @param cpu_socket - thread is running on which cpu socket. +*/ +typedef thread_data +{ + u32 id; + string name[64]; + string type[64]; + u32 pid; + u32 cpu_id; + u32 core; + u32 cpu_socket; +}; + +/** \brief show_threads_reply + @param context - returned sender context, to match reply w/ request + @param retval - return code + @param count - number of threads in thread_data array + @param thread_data - array of thread data +*/ +define show_threads_reply +{ + u32 context; + i32 retval; + u32 count; + vl_api_thread_data_t thread_data[count]; +}; + +define get_node_graph +{ + u32 client_index; + u32 context; +}; + +/** \brief get_node_graph_reply + @param context - returned sender context, to match reply w/ request + @param retval - return code + @param reply_in_shmem - result from vlib_node_serialize, in shared + memory. Process with vlib_node_unserialize, remember to switch + heaps and free the result. 
+*/ + +define get_node_graph_reply +{ + u32 context; + i32 retval; + u64 reply_in_shmem; +}; + +/** \brief Query relative index via node names + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param node_name - name of node to find relative index from + @param next_name - next node from node_name to find relative index of +*/ +define get_next_index +{ + u32 client_index; + u32 context; + string node_name[64]; + string next_name[64]; +}; + +/** \brief Reply for get next node index + @param context - sender context which was passed in the request + @param retval - return value + @param next_index - index of the next_node +*/ +define get_next_index_reply +{ + u32 context; + i32 retval; + u32 next_index; +}; + +/** \brief f64 types are not standardized across the wire. Sense wire format in each direction by sending the f64 value 1.0. + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param f64_one - The constant of 1.0. If you send a different value, expect an rv=VNET_API_ERROR_API_ENDIAN_FAILED. +*/ +define get_f64_endian_value +{ + u32 client_index; + u32 context; + f64 f64_one [default=1.0]; +}; + +/** \brief get_f64_endian_value reply message + @param context - sender context which was passed in the request + @param retval - return value - VNET_API_ERROR_API_ENDIAN_FAILED if f64_one != 1.0 + @param f64_one_result - The value of 'f64 1.0' +*/ +define get_f64_endian_value_reply +{ + u32 context; + u32 retval; + f64 f64_one_result; +}; + +/** \brief Verify f64 wire format by sending a value and receiving the value + 1.0 + @param client_index - opaque cookie to identify the sender. + @param context - sender context, to match reply w/ request. + @param f64_value - The value you want to test. Default: 1.0. 
+*/ +define get_f64_increment_by_one +{ + u32 client_index; + u32 context; + f64 f64_value [default=1.0]; +}; + +/** \brief get_f64_increment_by_one reply + @param client_index - opaque cookie to identify the sender. + @param context - sender context, to match reply w/ request. + @param f64_value - The input f64_value incremented by 1.0. +*/ +define get_f64_increment_by_one_reply +{ + u32 context; + u32 retval; + f64 f64_value; +}; + +/* + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */
\ No newline at end of file diff --git a/src/vlibmemory/vlib_api.c b/src/vlibmemory/vlib_api.c index f9ed891f90a..e5d77eb5bf6 100644 --- a/src/vlibmemory/vlib_api.c +++ b/src/vlibmemory/vlib_api.c @@ -1,8 +1,7 @@ /* - *------------------------------------------------------------------ * vlib_api.c VLIB API implementation * - * Copyright (c) 2009 Cisco and/or its affiliates. + * Copyright (c) 2021 Cisco and/or its affiliates. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: @@ -14,710 +13,339 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - *------------------------------------------------------------------ */ -#include <fcntl.h> -#include <pthread.h> -#include <vppinfra/vec.h> -#include <vppinfra/hash.h> -#include <vppinfra/pool.h> -#include <vppinfra/format.h> -#include <vppinfra/byte_order.h> -#include <vppinfra/elog.h> -#include <vlib/vlib.h> -#include <vlib/unix/unix.h> #include <vlibapi/api.h> #include <vlibmemory/api.h> +#include <vnet/api_errno.h> -/** - * @file - * @brief Binary API messaging via shared memory - * Low-level, primary provisioning interface - */ -/*? %%clicmd:group_label Binary API CLI %% ?*/ -/*? %%syscfg:group_label Binary API configuration %% ?*/ - -#define TRACE_VLIB_MEMORY_QUEUE 0 - -#include <vlibmemory/vl_memory_msg_enum.h> /* enumerate all vlib messages */ +#include <vlibmemory/vlib.api_enum.h> +#include <vlibmemory/vlib.api_types.h> -#define vl_typedefs /* define message structures */ -#include <vlibmemory/vl_memory_api_h.h> -#undef vl_typedefs +u16 msg_id_base; +#define REPLY_MSG_ID_BASE msg_id_base +#include <vlibapi/api_helper_macros.h> -/* instantiate all the print functions we know about */ -#define vl_print(handle, ...) 
vlib_cli_output (handle, __VA_ARGS__) -#define vl_printfun -#include <vlibmemory/vl_memory_api_h.h> -#undef vl_printfun - -static inline void * -vl_api_trace_plugin_msg_ids_t_print (vl_api_trace_plugin_msg_ids_t * a, - void *handle) +static void +shmem_cli_output (uword arg, u8 *buffer, uword buffer_bytes) { - vl_print (handle, "vl_api_trace_plugin_msg_ids: %s first %u last %u\n", - a->plugin_name, - clib_host_to_net_u16 (a->first_msg_id), - clib_host_to_net_u16 (a->last_msg_id)); - return handle; -} + u8 **shmem_vecp = (u8 **) arg; + u8 *shmem_vec; + void *oldheap; + u32 offset; -/* instantiate all the endian swap functions we know about */ -#define vl_endianfun -#include <vlibmemory/vl_memory_api_h.h> -#undef vl_endianfun + shmem_vec = *shmem_vecp; -static void -vl_api_get_first_msg_id_t_handler (vl_api_get_first_msg_id_t * mp) -{ - vl_api_get_first_msg_id_reply_t *rmp; - vl_api_registration_t *regp; - uword *p; - api_main_t *am = vlibapi_get_main (); - vl_api_msg_range_t *rp; - u8 name[64]; - u16 first_msg_id = ~0; - int rv = -7; /* VNET_API_ERROR_INVALID_VALUE */ + offset = vec_len (shmem_vec); - regp = vl_api_client_index_to_registration (mp->client_index); - if (!regp) - return; + oldheap = vl_msg_push_heap (); - if (am->msg_range_by_name == 0) - goto out; - strncpy ((char *) name, (char *) mp->name, ARRAY_LEN (name)); - name[ARRAY_LEN (name) - 1] = '\0'; - p = hash_get_mem (am->msg_range_by_name, name); - if (p == 0) - goto out; + vec_validate (shmem_vec, offset + buffer_bytes - 1); - rp = vec_elt_at_index (am->msg_ranges, p[0]); - first_msg_id = rp->first_msg_id; - rv = 0; + clib_memcpy (shmem_vec + offset, buffer, buffer_bytes); -out: - rmp = vl_msg_api_alloc (sizeof (*rmp)); - rmp->_vl_msg_id = ntohs (VL_API_GET_FIRST_MSG_ID_REPLY); - rmp->context = mp->context; - rmp->retval = ntohl (rv); - rmp->first_msg_id = ntohs (first_msg_id); - vl_api_send_msg (regp, (u8 *) rmp); + vl_msg_pop_heap (oldheap); + + *shmem_vecp = shmem_vec; } -void 
-vl_api_api_versions_t_handler (vl_api_api_versions_t * mp) +static void +vl_api_cli_t_handler (vl_api_cli_t *mp) { - api_main_t *am = vlibapi_get_main (); - vl_api_api_versions_reply_t *rmp; + vl_api_cli_reply_t *rp; vl_api_registration_t *reg; - u32 nmsg = vec_len (am->api_version_list); - int msg_size = sizeof (*rmp) + sizeof (rmp->api_versions[0]) * nmsg; - int i; + vlib_main_t *vm = vlib_get_main (); + unformat_input_t input; + u8 *shmem_vec = 0; + void *oldheap; reg = vl_api_client_index_to_registration (mp->client_index); if (!reg) return; + ; - rmp = vl_msg_api_alloc (msg_size); - clib_memset (rmp, 0, msg_size); - rmp->_vl_msg_id = ntohs (VL_API_API_VERSIONS_REPLY); - - /* fill in the message */ - rmp->context = mp->context; - rmp->count = htonl (nmsg); + rp = vl_msg_api_alloc (sizeof (*rp)); + rp->_vl_msg_id = ntohs (VL_API_CLI_REPLY + REPLY_MSG_ID_BASE); + rp->context = mp->context; - for (i = 0; i < nmsg; ++i) - { - api_version_t *vl = &am->api_version_list[i]; - rmp->api_versions[i].major = htonl (vl->major); - rmp->api_versions[i].minor = htonl (vl->minor); - rmp->api_versions[i].patch = htonl (vl->patch); - strncpy ((char *) rmp->api_versions[i].name, vl->name, - ARRAY_LEN (rmp->api_versions[i].name)); - rmp->api_versions[i].name[ARRAY_LEN (rmp->api_versions[i].name) - 1] = - '\0'; - } + unformat_init_vector (&input, (u8 *) (uword) mp->cmd_in_shmem); - vl_api_send_msg (reg, (u8 *) rmp); -} + vlib_cli_input (vm, &input, shmem_cli_output, (uword) &shmem_vec); -#define foreach_vlib_api_msg \ -_(GET_FIRST_MSG_ID, get_first_msg_id) \ -_(API_VERSIONS, api_versions) + oldheap = vl_msg_push_heap (); + vec_add1 (shmem_vec, 0); + vl_msg_pop_heap (oldheap); -/* - * vl_api_init - */ -static int -vlib_api_init (void) -{ - vl_msg_api_msg_config_t cfg; - vl_msg_api_msg_config_t *c = &cfg; - - clib_memset (c, 0, sizeof (*c)); - -#define _(N,n) do { \ - c->id = VL_API_##N; \ - c->name = #n; \ - c->handler = vl_api_##n##_t_handler; \ - c->cleanup = vl_noop_handler; \ - 
c->endian = vl_api_##n##_t_endian; \ - c->print = vl_api_##n##_t_print; \ - c->size = sizeof(vl_api_##n##_t); \ - c->traced = 1; /* trace, so these msgs print */ \ - c->replay = 0; /* don't replay client create/delete msgs */ \ - c->message_bounce = 0; /* don't bounce this message */ \ - vl_msg_api_config(c);} while (0); - - foreach_vlib_api_msg; -#undef _ + rp->reply_in_shmem = (uword) shmem_vec; - return 0; + vl_api_send_msg (reg, (u8 *) rp); } -u64 vector_rate_histogram[SLEEP_N_BUCKETS]; - -/* - * Callback to send ourselves a plugin numbering-space trace msg - */ static void -send_one_plugin_msg_ids_msg (u8 * name, u16 first_msg_id, u16 last_msg_id) +inband_cli_output (uword arg, u8 *buffer, uword buffer_bytes) { - vl_api_trace_plugin_msg_ids_t *mp; - api_main_t *am = vlibapi_get_main (); - vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr; - svm_queue_t *q; + u8 **mem_vecp = (u8 **) arg; + u8 *mem_vec = *mem_vecp; + u32 offset = vec_len (mem_vec); - mp = vl_msg_api_alloc_as_if_client (sizeof (*mp)); - clib_memset (mp, 0, sizeof (*mp)); - - mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_TRACE_PLUGIN_MSG_IDS); - strncpy ((char *) mp->plugin_name, (char *) name, - sizeof (mp->plugin_name) - 1); - mp->first_msg_id = clib_host_to_net_u16 (first_msg_id); - mp->last_msg_id = clib_host_to_net_u16 (last_msg_id); - - q = shmem_hdr->vl_input_queue; - - vl_msg_api_send_shmem (q, (u8 *) & mp); + vec_validate (mem_vec, offset + buffer_bytes - 1); + clib_memcpy (mem_vec + offset, buffer, buffer_bytes); + *mem_vecp = mem_vec; } -void -vl_api_save_msg_table (void) +static void +vl_api_cli_inband_t_handler (vl_api_cli_inband_t *mp) { - u8 *serialized_message_table; - api_main_t *am = vlibapi_get_main (); - u8 *chroot_file; - int fd, rv; - - /* - * Snapshoot the api message table. 
- */ - if (strstr ((char *) am->save_msg_table_filename, "..") - || index ((char *) am->save_msg_table_filename, '/')) - { - clib_warning ("illegal save-message-table filename '%s'", - am->save_msg_table_filename); - return; - } - - chroot_file = format (0, "/tmp/%s%c", am->save_msg_table_filename, 0); - - fd = creat ((char *) chroot_file, 0644); + vl_api_cli_inband_reply_t *rmp; + int rv = 0; + vlib_main_t *vm = vlib_get_main (); + unformat_input_t input; + u8 *out_vec = 0; + u8 *cmd_vec = 0; - if (fd < 0) + if (vl_msg_api_get_msg_length (mp) < + vl_api_string_len (&mp->cmd) + sizeof (*mp)) { - clib_unix_warning ("creat"); - return; + rv = -1; + goto error; } - serialized_message_table = vl_api_serialize_message_table (am, 0); + cmd_vec = vl_api_from_api_to_new_vec (mp, &mp->cmd); - rv = write (fd, serialized_message_table, - vec_len (serialized_message_table)); + unformat_init_string (&input, (char *) cmd_vec, + vl_api_string_len (&mp->cmd)); + rv = vlib_cli_input (vm, &input, inband_cli_output, (uword) &out_vec); + unformat_free (&input); - if (rv != vec_len (serialized_message_table)) - clib_unix_warning ("write"); - - rv = close (fd); - if (rv < 0) - clib_unix_warning ("close"); - - vec_free (chroot_file); - vec_free (serialized_message_table); +error: + REPLY_MACRO3 (VL_API_CLI_INBAND_REPLY, vec_len (out_vec), + ({ vl_api_vec_to_api_string (out_vec, &rmp->reply); })); + vec_free (out_vec); + vec_free (cmd_vec); } -clib_error_t *vat_builtin_main_init (vlib_main_t * vm) __attribute__ ((weak)); -clib_error_t * -vat_builtin_main_init (vlib_main_t * vm) -{ - return 0; -} - -static uword -vl_api_clnt_process (vlib_main_t * vm, vlib_node_runtime_t * node, - vlib_frame_t * f) +static void +vl_api_get_node_index_t_handler (vl_api_get_node_index_t *mp) { - vlib_global_main_t *vgm = vlib_get_global_main (); - int private_segment_rotor = 0, i, rv; - vl_socket_args_for_process_t *a; - vl_shmem_hdr_t *shm; - svm_queue_t *q; - clib_error_t *e; - api_main_t *am = 
vlibapi_get_main (); - f64 dead_client_scan_time; - f64 sleep_time, start_time; - f64 vector_rate; - clib_error_t *error; - uword event_type; - uword *event_data = 0; - f64 now; - - if ((error = vl_sock_api_init (vm))) - { - clib_error_report (error); - clib_warning ("socksvr_api_init failed, quitting..."); - return 0; - } + vlib_main_t *vm = vlib_get_main (); + vl_api_get_node_index_reply_t *rmp; + vlib_node_t *n; + int rv = 0; + u32 node_index = ~0; - if ((rv = vlib_api_init ()) < 0) - { - clib_warning ("vlib_api_init returned %d, quitting...", rv); - return 0; - } + n = vlib_get_node_by_name (vm, mp->node_name); - shm = am->shmem_hdr; - q = shm->vl_input_queue; + if (n == 0) + rv = VNET_API_ERROR_NO_SUCH_NODE; + else + node_index = n->index; - e = vlib_call_init_exit_functions (vm, &vgm->api_init_function_registrations, - 1 /* call_once */, 1 /* is_global */); - if (e) - clib_error_report (e); + REPLY_MACRO2 (VL_API_GET_NODE_INDEX_REPLY, + ({ rmp->node_index = htonl (node_index); })); +} - e = vat_builtin_main_init (vm); - if (e) - clib_error_report (e); +static void +vl_api_add_node_next_t_handler (vl_api_add_node_next_t *mp) +{ + vlib_main_t *vm = vlib_get_main (); + vl_api_add_node_next_reply_t *rmp; + vlib_node_t *n, *next; + int rv = 0; + u32 next_index = ~0; - sleep_time = 10.0; - dead_client_scan_time = vlib_time_now (vm) + 10.0; + n = vlib_get_node_by_name (vm, mp->node_name); - /* - * Send plugin message range messages for each plugin we loaded - */ - for (i = 0; i < vec_len (am->msg_ranges); i++) + if (n == 0) { - vl_api_msg_range_t *rp = am->msg_ranges + i; - send_one_plugin_msg_ids_msg (rp->name, rp->first_msg_id, - rp->last_msg_id); + rv = VNET_API_ERROR_NO_SUCH_NODE; + goto out; } - /* - * Save the api message table snapshot, if configured - */ - if (am->save_msg_table_filename) - vl_api_save_msg_table (); + next = vlib_get_node_by_name (vm, mp->next_name); - /* $$$ pay attention to frame size, control CPU usage */ - while (1) - { - /* - * There's 
a reason for checking the queue before - * sleeping. If the vlib application crashes, it's entirely - * possible for a client to enqueue a connect request - * during the process restart interval. - * - * Unless some force of physics causes the new incarnation - * of the application to process the request, the client will - * sit and wait for Godot... - */ - vector_rate = (f64) vlib_last_vectors_per_main_loop (vm); - start_time = vlib_time_now (vm); - while (1) - { - if (vl_mem_api_handle_rpc (vm, node) - || vl_mem_api_handle_msg_main (vm, node)) - { - vm->api_queue_nonempty = 0; - VL_MEM_API_LOG_Q_LEN ("q-underflow: len %d", 0); - sleep_time = 20.0; - break; - } - - /* Allow no more than 10us without a pause */ - if (vlib_time_now (vm) > start_time + 10e-6) - { - int index = SLEEP_400_US; - if (vector_rate > 40.0) - sleep_time = 400e-6; - else if (vector_rate > 20.0) - { - index = SLEEP_200_US; - sleep_time = 200e-6; - } - else if (vector_rate >= 1.0) - { - index = SLEEP_100_US; - sleep_time = 100e-6; - } - else - { - index = SLEEP_10_US; - sleep_time = 10e-6; - } - vector_rate_histogram[index] += 1; - break; - } - } - - /* - * see if we have any private api shared-memory segments - * If so, push required context variables, and process - * a message. 
- */ - if (PREDICT_FALSE (vec_len (am->vlib_private_rps))) - { - if (private_segment_rotor >= vec_len (am->vlib_private_rps)) - private_segment_rotor = 0; - vl_mem_api_handle_msg_private (vm, node, private_segment_rotor++); - } - - vlib_process_wait_for_event_or_clock (vm, sleep_time); - vec_reset_length (event_data); - event_type = vlib_process_get_events (vm, &event_data); - now = vlib_time_now (vm); - - switch (event_type) - { - case QUEUE_SIGNAL_EVENT: - vm->queue_signal_pending = 0; - VL_MEM_API_LOG_Q_LEN ("q-awake: len %d", q->cursize); - - break; - case SOCKET_READ_EVENT: - for (i = 0; i < vec_len (event_data); i++) - { - vl_api_registration_t *regp; - - a = pool_elt_at_index (socket_main.process_args, event_data[i]); - regp = vl_socket_get_registration (a->reg_index); - if (regp) - { - vl_socket_process_api_msg (regp, (i8 *) a->data); - a = pool_elt_at_index (socket_main.process_args, - event_data[i]); - } - vec_free (a->data); - pool_put (socket_main.process_args, a); - } - break; - - /* Timeout... */ - case -1: - break; - - default: - clib_warning ("unknown event type %d", event_type); - break; - } - - if (now > dead_client_scan_time) - { - vl_mem_api_dead_client_scan (am, shm, now); - dead_client_scan_time = vlib_time_now (vm) + 10.0; - } - } + if (next == 0) + rv = VNET_API_ERROR_NO_SUCH_NODE2; + else + next_index = vlib_node_add_next (vm, n->index, next->index); - return 0; -} -/* *INDENT-OFF* */ -VLIB_REGISTER_NODE (vl_api_clnt_node) = -{ - .function = vl_api_clnt_process, - .type = VLIB_NODE_TYPE_PROCESS, - .name = "api-rx-from-ring", - .state = VLIB_NODE_STATE_DISABLED, - .process_log2_n_stack_bytes = 18, -}; -/* *INDENT-ON* */ - -void -vl_mem_api_enable_disable (vlib_main_t * vm, int enable) -{ - vlib_node_set_state (vm, vl_api_clnt_node.index, - (enable - ? 
VLIB_NODE_STATE_POLLING - : VLIB_NODE_STATE_DISABLED)); +out: + REPLY_MACRO2 (VL_API_ADD_NODE_NEXT_REPLY, + ({ rmp->next_index = htonl (next_index); })); } -static uword -api_rx_from_node (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * frame) +static void +get_thread_data (vl_api_thread_data_t *td, int index) { - uword n_packets = frame->n_vectors; - uword n_left_from; - u32 *from; - static u8 *long_msg; - - vec_validate (long_msg, 4095); - n_left_from = frame->n_vectors; - from = vlib_frame_vector_args (frame); - - while (n_left_from > 0) - { - u32 bi0; - vlib_buffer_t *b0; - void *msg; - uword msg_len; - - bi0 = from[0]; - b0 = vlib_get_buffer (vm, bi0); - from += 1; - n_left_from -= 1; - - msg = b0->data + b0->current_data; - msg_len = b0->current_length; - if (b0->flags & VLIB_BUFFER_NEXT_PRESENT) - { - ASSERT (long_msg != 0); - _vec_len (long_msg) = 0; - vec_add (long_msg, msg, msg_len); - while (b0->flags & VLIB_BUFFER_NEXT_PRESENT) - { - b0 = vlib_get_buffer (vm, b0->next_buffer); - msg = b0->data + b0->current_data; - msg_len = b0->current_length; - vec_add (long_msg, msg, msg_len); - } - msg = long_msg; - } - vl_msg_api_handler_no_trace_no_free (msg); - } - - /* Free what we've been given. 
*/ - vlib_buffer_free (vm, vlib_frame_vector_args (frame), n_packets); - - return n_packets; + vlib_worker_thread_t *w = vlib_worker_threads + index; + td->id = htonl (index); + if (w->name) + strncpy ((char *) td->name, (char *) w->name, ARRAY_LEN (td->name) - 1); + if (w->registration) + strncpy ((char *) td->type, (char *) w->registration->name, + ARRAY_LEN (td->type) - 1); + td->pid = htonl (w->lwp); + td->cpu_id = htonl (w->cpu_id); + td->core = htonl (w->core_id); + td->cpu_socket = htonl (w->numa_id); } -/* *INDENT-OFF* */ -VLIB_REGISTER_NODE (api_rx_from_node_node,static) = { - .function = api_rx_from_node, - .type = VLIB_NODE_TYPE_INTERNAL, - .vector_size = 4, - .name = "api-rx-from-node", -}; -/* *INDENT-ON* */ - static void -vl_api_rpc_call_t_handler (vl_api_rpc_call_t * mp) +vl_api_show_threads_t_handler (vl_api_show_threads_t *mp) { - vl_api_rpc_call_reply_t *rmp; - int (*fp) (void *); - i32 rv = 0; - vlib_main_t *vm = vlib_get_main (); - - if (mp->function == 0) - { - rv = -1; - clib_warning ("rpc NULL function pointer"); - } + int count = 0; - else - { - if (mp->need_barrier_sync) - vlib_worker_thread_barrier_sync (vm); +#if !defined(__powerpc64__) + vl_api_registration_t *reg; + vl_api_show_threads_reply_t *rmp; + vl_api_thread_data_t *td; + int i, msg_size = 0; + count = vec_len (vlib_worker_threads); + if (!count) + return; - fp = uword_to_pointer (mp->function, int (*)(void *)); - rv = fp (mp->data); + msg_size = sizeof (*rmp) + sizeof (rmp->thread_data[0]) * count; + reg = vl_api_client_index_to_registration (mp->client_index); + if (!reg) + return; - if (mp->need_barrier_sync) - vlib_worker_thread_barrier_release (vm); - } + rmp = vl_msg_api_alloc (msg_size); + clib_memset (rmp, 0, msg_size); + rmp->_vl_msg_id = htons (VL_API_SHOW_THREADS_REPLY + REPLY_MSG_ID_BASE); + rmp->context = mp->context; + rmp->count = htonl (count); + td = rmp->thread_data; - if (mp->send_reply) + for (i = 0; i < count; i++) { - svm_queue_t *q = 
vl_api_client_index_to_input_queue (mp->client_index); - if (q) - { - rmp = vl_msg_api_alloc_as_if_client (sizeof (*rmp)); - rmp->_vl_msg_id = ntohs (VL_API_RPC_CALL_REPLY); - rmp->context = mp->context; - rmp->retval = rv; - vl_msg_api_send_shmem (q, (u8 *) & rmp); - } + get_thread_data (&td[i], i); } - if (mp->multicast) - { - clib_warning ("multicast not yet implemented..."); - } -} -static void -vl_api_rpc_call_reply_t_handler (vl_api_rpc_call_reply_t * mp) -{ - clib_warning ("unimplemented"); -} - -void -vl_api_send_pending_rpc_requests (vlib_main_t * vm) -{ - vlib_main_t *vm_global = vlib_get_first_main (); - - ASSERT (vm != vm_global); + vl_api_send_msg (reg, (u8 *) rmp); +#else - clib_spinlock_lock_if_init (&vm_global->pending_rpc_lock); - vec_append (vm_global->pending_rpc_requests, vm->pending_rpc_requests); - vec_reset_length (vm->pending_rpc_requests); - clib_spinlock_unlock_if_init (&vm_global->pending_rpc_lock); + /* unimplemented support */ + rv = -9; + clib_warning ("power pc does not support show threads api"); + REPLY_MACRO2 (VL_API_SHOW_THREADS_REPLY, ({ rmp->count = htonl (count); })); +#endif } -always_inline void -vl_api_rpc_call_main_thread_inline (void *fp, u8 * data, u32 data_length, - u8 force_rpc) +static void +vl_api_get_node_graph_t_handler (vl_api_get_node_graph_t *mp) { - vl_api_rpc_call_t *mp; - vlib_main_t *vm_global = vlib_get_first_main (); + int rv = 0; + u8 *vector = 0; vlib_main_t *vm = vlib_get_main (); + void *oldheap; + vl_api_get_node_graph_reply_t *rmp; + static vlib_node_t ***node_dups; + static vlib_main_t **stat_vms; - /* Main thread and not a forced RPC: call the function directly */ - if ((force_rpc == 0) && (vlib_get_thread_index () == 0)) - { - void (*call_fp) (void *); - - vlib_worker_thread_barrier_sync (vm); + oldheap = vl_msg_push_heap (); - call_fp = fp; - call_fp (data); + /* + * Keep the number of memcpy ops to a minimum (e.g. 1). 
+ */ + vec_validate (vector, 16384); + vec_reset_length (vector); - vlib_worker_thread_barrier_release (vm); - return; - } + vlib_node_get_nodes (vm, 0 /* main threads */, 0 /* include stats */, + 1 /* barrier sync */, &node_dups, &stat_vms); + vector = vlib_node_serialize (vm, node_dups, vector, 1 /* include nexts */, + 1 /* include stats */); - /* Otherwise, actually do an RPC */ - mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + data_length); - - clib_memset (mp, 0, sizeof (*mp)); - clib_memcpy_fast (mp->data, data, data_length); - mp->_vl_msg_id = ntohs (VL_API_RPC_CALL); - mp->function = pointer_to_uword (fp); - mp->need_barrier_sync = 1; - - /* Add to the pending vector. Thread 0 requires locking. */ - if (vm == vm_global) - clib_spinlock_lock_if_init (&vm_global->pending_rpc_lock); - vec_add1 (vm->pending_rpc_requests, (uword) mp); - if (vm == vm_global) - clib_spinlock_unlock_if_init (&vm_global->pending_rpc_lock); -} + vl_msg_pop_heap (oldheap); -/* - * Check if called from worker threads. - * If so, make rpc call of fp through shmem. 
- * Otherwise, call fp directly - */ -void -vl_api_rpc_call_main_thread (void *fp, u8 * data, u32 data_length) -{ - vl_api_rpc_call_main_thread_inline (fp, data, data_length, /*force_rpc */ - 0); -} - -/* - * Always make rpc call of fp through shmem, useful for calling from threads - * not setup as worker threads, such as DPDK callback thread - */ -void -vl_api_force_rpc_call_main_thread (void *fp, u8 * data, u32 data_length) -{ - vl_api_rpc_call_main_thread_inline (fp, data, data_length, /*force_rpc */ - 1); + REPLY_MACRO2 (VL_API_GET_NODE_GRAPH_REPLY, + ({ rmp->reply_in_shmem = (uword) vector; })); } static void -vl_api_trace_plugin_msg_ids_t_handler (vl_api_trace_plugin_msg_ids_t * mp) +vl_api_get_next_index_t_handler (vl_api_get_next_index_t *mp) { - api_main_t *am = vlibapi_get_main (); - vl_api_msg_range_t *rp; + vlib_main_t *vm = vlib_get_main (); + vl_api_get_next_index_reply_t *rmp; + vlib_node_t *node, *next_node; + int rv = 0; + u32 next_node_index = ~0, next_index = ~0; uword *p; - /* Noop (except for tracing) during normal operation */ - if (am->replay_in_progress == 0) - return; + node = vlib_get_node_by_name (vm, mp->node_name); - p = hash_get_mem (am->msg_range_by_name, mp->plugin_name); - if (p == 0) + if (node == 0) { - clib_warning ("WARNING: traced plugin '%s' not in current image", - mp->plugin_name); - return; + rv = VNET_API_ERROR_NO_SUCH_NODE; + goto out; } - rp = vec_elt_at_index (am->msg_ranges, p[0]); - if (rp->first_msg_id != clib_net_to_host_u16 (mp->first_msg_id)) + next_node = vlib_get_node_by_name (vm, mp->next_name); + + if (next_node == 0) { - clib_warning ("WARNING: traced plugin '%s' first message id %d not %d", - mp->plugin_name, clib_net_to_host_u16 (mp->first_msg_id), - rp->first_msg_id); + rv = VNET_API_ERROR_NO_SUCH_NODE2; + goto out; } + else + next_node_index = next_node->index; - if (rp->last_msg_id != clib_net_to_host_u16 (mp->last_msg_id)) + p = hash_get (node->next_slot_by_node, next_node_index); + + if (p == 0) { - 
clib_warning ("WARNING: traced plugin '%s' last message id %d not %d", - mp->plugin_name, clib_net_to_host_u16 (mp->last_msg_id), - rp->last_msg_id); + rv = VNET_API_ERROR_NO_SUCH_ENTRY; + goto out; } -} + else + next_index = p[0]; -#define foreach_rpc_api_msg \ -_(RPC_CALL,rpc_call) \ -_(RPC_CALL_REPLY,rpc_call_reply) +out: + REPLY_MACRO2 (VL_API_GET_NEXT_INDEX_REPLY, + ({ rmp->next_index = htonl (next_index); })); +} -#define foreach_plugin_trace_msg \ -_(TRACE_PLUGIN_MSG_IDS,trace_plugin_msg_ids) +static void +vl_api_get_f64_endian_value_t_handler (vl_api_get_f64_endian_value_t *mp) +{ + int rv = 0; + f64 one = 1.0; + vl_api_get_f64_endian_value_reply_t *rmp; + if (1.0 != clib_net_to_host_f64 (mp->f64_one)) + rv = VNET_API_ERROR_API_ENDIAN_FAILED; + + REPLY_MACRO2 (VL_API_GET_F64_ENDIAN_VALUE_REPLY, + ({ rmp->f64_one_result = clib_host_to_net_f64 (one); })); +} -/* - * Set the rpc callback at our earliest possible convenience. - * This avoids ordering issues between thread_init() -> start_workers and - * an init function which we could define here. If we ever intend to use - * vlib all by itself, we can't create a link-time dependency on - * an init function here and a typical "call foo_init first" - * guitar lick. 
- */ +static void +vl_api_get_f64_increment_by_one_t_handler ( + vl_api_get_f64_increment_by_one_t *mp) +{ + int rv = 0; + vl_api_get_f64_increment_by_one_reply_t *rmp; -extern void *rpc_call_main_thread_cb_fn; + REPLY_MACRO2 (VL_API_GET_F64_INCREMENT_BY_ONE_REPLY, ({ + rmp->f64_value = clib_host_to_net_f64 ( + clib_net_to_host_f64 (mp->f64_value) + 1.0); + })); +} +#include <vlibmemory/vlib.api.c> static clib_error_t * -rpc_api_hookup (vlib_main_t * vm) +vlib_apis_hookup (vlib_main_t *vm) { api_main_t *am = vlibapi_get_main (); -#define _(N,n) \ - vl_msg_api_set_handlers(VL_API_##N, #n, \ - vl_api_##n##_t_handler, \ - vl_noop_handler, \ - vl_noop_handler, \ - vl_api_##n##_t_print, \ - sizeof(vl_api_##n##_t), 0 /* do not trace */); - foreach_rpc_api_msg; -#undef _ - -#define _(N,n) \ - vl_msg_api_set_handlers(VL_API_##N, #n, \ - vl_api_##n##_t_handler, \ - vl_noop_handler, \ - vl_noop_handler, \ - vl_api_##n##_t_print, \ - sizeof(vl_api_##n##_t), 1 /* do trace */); - foreach_plugin_trace_msg; -#undef _ - - /* No reason to halt the parade to create a trace record... */ - am->is_mp_safe[VL_API_TRACE_PLUGIN_MSG_IDS] = 1; - rpc_call_main_thread_cb_fn = vl_api_rpc_call_main_thread; + + /* + * Set up the (msg_name, crc, message-id) table + */ + msg_id_base = setup_message_id_table (); + + am->is_mp_safe[VL_API_GET_NODE_GRAPH] = 1; + return 0; } -VLIB_API_INIT_FUNCTION (rpc_api_hookup); +VLIB_API_INIT_FUNCTION (vlib_apis_hookup); /* * fd.io coding-style-patch-verification: ON diff --git a/src/vlibmemory/vlibapi_test.c b/src/vlibmemory/vlibapi_test.c new file mode 100644 index 00000000000..820096ab80d --- /dev/null +++ b/src/vlibmemory/vlibapi_test.c @@ -0,0 +1,470 @@ +/* + *------------------------------------------------------------------ + * Copyright (c) 2021 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + *------------------------------------------------------------------ + */ + +#include <vat/vat.h> +#include <vlibapi/api.h> +#include <vlibmemory/api.h> +#include <vppinfra/error.h> + +#include <vpp/api/types.h> +#include <vnet/mpls/packet.h> +#include <vnet/ip/ip_types_api.h> + +typedef struct +{ + u16 msg_id_base; + vat_main_t *vat_main; +} vlib_test_main_t; +vlib_test_main_t vlib_test_main; + +#define __plugin_msg_base vlib_test_main.msg_id_base +#include <vlibapi/vat_helper_macros.h> + +/* Declare message IDs */ +#include <vlibmemory/vlib.api_enum.h> +#include <vlibmemory/vlib.api_types.h> + +static void +vl_api_cli_reply_t_handler (vl_api_cli_reply_t *mp) +{ + vat_main_t *vam = &vat_main; + i32 retval = ntohl (mp->retval); + + vam->retval = retval; + vam->shmem_result = uword_to_pointer (mp->reply_in_shmem, u8 *); + vam->result_ready = 1; +} + +static void +vl_api_cli_inband_reply_t_handler (vl_api_cli_inband_reply_t *mp) +{ + vat_main_t *vam = &vat_main; + i32 retval = ntohl (mp->retval); + + vec_reset_length (vam->cmd_reply); + + vam->retval = retval; + if (retval == 0) + vam->cmd_reply = vl_api_from_api_to_new_vec (mp, &mp->reply); + vam->result_ready = 1; +} + +static void +vl_api_get_node_index_reply_t_handler (vl_api_get_node_index_reply_t *mp) +{ + vat_main_t *vam = &vat_main; + i32 retval = ntohl (mp->retval); + if (vam->async_mode) + { + vam->async_errors += (retval < 0); + } + else + { + vam->retval = retval; + if (retval == 0) + errmsg ("node index %d", ntohl (mp->node_index)); + vam->result_ready = 1; + } +} + +static void 
+vl_api_get_next_index_reply_t_handler (vl_api_get_next_index_reply_t *mp) +{ + vat_main_t *vam = &vat_main; + i32 retval = ntohl (mp->retval); + if (vam->async_mode) + { + vam->async_errors += (retval < 0); + } + else + { + vam->retval = retval; + if (retval == 0) + errmsg ("next node index %d", ntohl (mp->next_index)); + vam->result_ready = 1; + } +} + +static void +vl_api_add_node_next_reply_t_handler (vl_api_add_node_next_reply_t *mp) +{ + vat_main_t *vam = &vat_main; + i32 retval = ntohl (mp->retval); + if (vam->async_mode) + { + vam->async_errors += (retval < 0); + } + else + { + vam->retval = retval; + if (retval == 0) + errmsg ("next index %d", ntohl (mp->next_index)); + vam->result_ready = 1; + } +} + +static void +vl_api_get_f64_endian_value_reply_t_handler ( + vl_api_get_f64_endian_value_reply_t *mp) +{ + // not yet implemented +} + +static void +vl_api_get_f64_increment_by_one_reply_t_handler ( + vl_api_get_f64_increment_by_one_reply_t *mp) +{ + // not yet implemented +} + +static int +api_get_f64_endian_value (vat_main_t *vam) +{ + // not yet implemented + return -1; +} + +static int +api_get_f64_increment_by_one (vat_main_t *vam) +{ + // not yet implemented + return -1; +} + +/* + * Pass CLI buffers directly in the CLI_INBAND API message, + * instead of an additional shared memory area. + */ +static int +exec_inband (vat_main_t *vam) +{ + vl_api_cli_inband_t *mp; + unformat_input_t *i = vam->input; + int ret; + + if (vec_len (i->buffer) == 0) + return -1; + + if (vam->exec_mode == 0 && unformat (i, "mode")) + { + vam->exec_mode = 1; + return 0; + } + if (vam->exec_mode == 1 && (unformat (i, "exit") || unformat (i, "quit"))) + { + vam->exec_mode = 0; + return 0; + } + + /* + * In order for the CLI command to work, it + * must be a vector ending in \n, not a C-string ending + * in \n\0. 
+ */ + M2 (CLI_INBAND, mp, vec_len (vam->input->buffer)); + vl_api_vec_to_api_string (vam->input->buffer, &mp->cmd); + + S (mp); + W (ret); + /* json responses may or may not include a useful reply... */ + if (vec_len (vam->cmd_reply)) + print (vam->ofp, "%v", (char *) (vam->cmd_reply)); + return ret; +} +static int +api_cli_inband (vat_main_t *vam) +{ + return exec_inband (vam); +} + +int +exec (vat_main_t *vam) +{ + return exec_inband (vam); +} + +static int +api_cli (vat_main_t *vam) +{ + return exec (vam); +} + +static int +api_get_node_index (vat_main_t *vam) +{ + unformat_input_t *i = vam->input; + vl_api_get_node_index_t *mp; + u8 *name = 0; + int ret; + + while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT) + { + if (unformat (i, "node %s", &name)) + ; + else + break; + } + if (name == 0) + { + errmsg ("node name required"); + return -99; + } + if (vec_len (name) >= ARRAY_LEN (mp->node_name)) + { + errmsg ("node name too long, max %d", ARRAY_LEN (mp->node_name)); + return -99; + } + + M (GET_NODE_INDEX, mp); + clib_memcpy (mp->node_name, name, vec_len (name)); + vec_free (name); + + S (mp); + W (ret); + return ret; +} + +static int +api_get_next_index (vat_main_t *vam) +{ + unformat_input_t *i = vam->input; + vl_api_get_next_index_t *mp; + u8 *node_name = 0, *next_node_name = 0; + int ret; + + while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT) + { + if (unformat (i, "node-name %s", &node_name)) + ; + else if (unformat (i, "next-node-name %s", &next_node_name)) + break; + } + + if (node_name == 0) + { + errmsg ("node name required"); + return -99; + } + if (vec_len (node_name) >= ARRAY_LEN (mp->node_name)) + { + errmsg ("node name too long, max %d", ARRAY_LEN (mp->node_name)); + return -99; + } + + if (next_node_name == 0) + { + errmsg ("next node name required"); + return -99; + } + if (vec_len (next_node_name) >= ARRAY_LEN (mp->next_name)) + { + errmsg ("next node name too long, max %d", ARRAY_LEN (mp->next_name)); + return -99; + } + + M 
(GET_NEXT_INDEX, mp); + clib_memcpy (mp->node_name, node_name, vec_len (node_name)); + clib_memcpy (mp->next_name, next_node_name, vec_len (next_node_name)); + vec_free (node_name); + vec_free (next_node_name); + + S (mp); + W (ret); + return ret; +} + +static int +api_add_node_next (vat_main_t *vam) +{ + unformat_input_t *i = vam->input; + vl_api_add_node_next_t *mp; + u8 *name = 0; + u8 *next = 0; + int ret; + + while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT) + { + if (unformat (i, "node %s", &name)) + ; + else if (unformat (i, "next %s", &next)) + ; + else + break; + } + if (name == 0) + { + errmsg ("node name required"); + return -99; + } + if (vec_len (name) >= ARRAY_LEN (mp->node_name)) + { + errmsg ("node name too long, max %d", ARRAY_LEN (mp->node_name)); + return -99; + } + if (next == 0) + { + errmsg ("next node required"); + return -99; + } + if (vec_len (next) >= ARRAY_LEN (mp->next_name)) + { + errmsg ("next name too long, max %d", ARRAY_LEN (mp->next_name)); + return -99; + } + + M (ADD_NODE_NEXT, mp); + clib_memcpy (mp->node_name, name, vec_len (name)); + clib_memcpy (mp->next_name, next, vec_len (next)); + vec_free (name); + vec_free (next); + + S (mp); + W (ret); + return ret; +} + +static void +vl_api_show_threads_reply_t_handler (vl_api_show_threads_reply_t *mp) +{ + vat_main_t *vam = &vat_main; + i32 retval = ntohl (mp->retval); + int i, count = 0; + + if (retval >= 0) + count = ntohl (mp->count); + + for (i = 0; i < count; i++) + print (vam->ofp, "\n%-2d %-11s %-11s %-5d %-6d %-4d %-6d", + ntohl (mp->thread_data[i].id), mp->thread_data[i].name, + mp->thread_data[i].type, ntohl (mp->thread_data[i].pid), + ntohl (mp->thread_data[i].cpu_id), ntohl (mp->thread_data[i].core), + ntohl (mp->thread_data[i].cpu_socket)); + + vam->retval = retval; + vam->result_ready = 1; +} + +static int +api_show_threads (vat_main_t *vam) +{ + vl_api_show_threads_t *mp; + int ret; + + print (vam->ofp, "\n%-2s %-11s %-11s %-5s %-6s %-4s %-6s", "ID", "Name", + 
"Type", "LWP", "cpu_id", "Core", "Socket"); + + M (SHOW_THREADS, mp); + + S (mp); + W (ret); + return ret; +} + +static void +vl_api_get_node_graph_reply_t_handler (vl_api_get_node_graph_reply_t *mp) +{ + vat_main_t *vam = &vat_main; + i32 retval = ntohl (mp->retval); + u8 *pvt_copy, *reply; + void *oldheap; + vlib_node_t *node; + int i; + + if (vam->async_mode) + { + vam->async_errors += (retval < 0); + } + else + { + vam->retval = retval; + vam->result_ready = 1; + } + + /* "Should never happen..." */ + if (retval != 0) + return; + + reply = uword_to_pointer (mp->reply_in_shmem, u8 *); + pvt_copy = vec_dup (reply); + + /* Toss the shared-memory original... */ + oldheap = vl_msg_push_heap (); + + vec_free (reply); + + vl_msg_pop_heap (oldheap); + + if (vam->graph_nodes) + { + hash_free (vam->graph_node_index_by_name); + + for (i = 0; i < vec_len (vam->graph_nodes[0]); i++) + { + node = vam->graph_nodes[0][i]; + vec_free (node->name); + vec_free (node->next_nodes); + vec_free (node); + } + vec_free (vam->graph_nodes[0]); + vec_free (vam->graph_nodes); + } + + vam->graph_node_index_by_name = hash_create_string (0, sizeof (uword)); + vam->graph_nodes = vlib_node_unserialize (pvt_copy); + vec_free (pvt_copy); + + for (i = 0; i < vec_len (vam->graph_nodes[0]); i++) + { + node = vam->graph_nodes[0][i]; + hash_set_mem (vam->graph_node_index_by_name, node->name, i); + } +} + +static int +api_get_node_graph (vat_main_t *vam) +{ + vl_api_get_node_graph_t *mp; + int ret; + + M (GET_NODE_GRAPH, mp); + + /* send it... 
*/ + S (mp); + /* Wait for the reply */ + W (ret); + return ret; +} + +#define VL_API_LOCAL_SETUP_MESSAGE_ID_TABLE local_setup_message_id_table +static void +local_setup_message_id_table (vat_main_t *vam) +{ + /* Add exec as an alias for cli_inband */ + hash_set_mem (vam->function_by_name, "exec", api_cli_inband); + hash_set_mem (vam->help_by_name, "exec", + "usage: exec <vpe-debug-CLI-command>"); +} + +#include <vlibmemory/vlib.api_test.c> + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ |