author     Ole Troan <ot@cisco.com>   2022-01-27 16:25:43 +0100
committer  Ole Troan <ot@cisco.com>   2022-05-02 16:26:24 +0200
commit     2ca88ff97884ec9ed20a853b13cee6d86f9c9d0f (patch)
tree       65061fc95f2ad79580a367ec4e11bcbcf380dcc9 /src
parent     6a2868734c2f96186b6bfb705969a5daa702ebb6 (diff)
vapi: support api clients within vpp process
Add vapi_connect_from_vpp() and vapi_disconnect_from_vpp() calls to allow
API clients from within the VPP process. Add a new memclnt_create version
that gives the user a knob to enable or disable dead-client scans
(keepalive).

Type: feature
Change-Id: Id0b7bb89308db3a3aed2d3fcbedf4e1282dcd03f
Signed-off-by: Ole Troan <ot@cisco.com>
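For illustration, a minimal sketch of an in-process client using the new
calls, condensed from the api_test.c plugin added below. Per the vapi.h
remarks in this patch, vapi_connect_from_vpp() must run on its own pthread;
calling it from the VPP main thread deadlocks.

    static void *
    api_client_thread_fn (void *arg)
    {
      vapi_ctx_t ctx;

      if (vapi_ctx_alloc (&ctx) != VAPI_OK)
        return 0;

      /* up to 64 outstanding requests, response queue of 32, keepalives on */
      if (vapi_connect_from_vpp (ctx, "apifromplugin", 64, 32,
                                 VAPI_MODE_BLOCKING, true) != VAPI_OK)
        {
          vapi_ctx_free (ctx);
          return 0;
        }

      /* ... issue requests here, e.g. a show_version call ... */

      vapi_disconnect_from_vpp (ctx);
      vapi_ctx_free (ctx);
      return 0;
    }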
Diffstat (limited to 'src')
-rw-r--r--  src/plugins/unittest/CMakeLists.txt  |   4
-rw-r--r--  src/plugins/unittest/api_test.c      | 101
-rw-r--r--  src/vlibapi/api_common.h             |   2
-rw-r--r--  src/vlibmemory/memclnt.api           |  16
-rw-r--r--  src/vlibmemory/memory_api.c          |  89
-rw-r--r--  src/vnet/interface_api.c             |  28
-rw-r--r--  src/vpp-api/vapi/vapi.c              | 423
-rw-r--r--  src/vpp-api/vapi/vapi.h              |  48
8 files changed, 653 insertions, 58 deletions
diff --git a/src/plugins/unittest/CMakeLists.txt b/src/plugins/unittest/CMakeLists.txt
index 34e47fa24f4..faf55dfec2d 100644
--- a/src/plugins/unittest/CMakeLists.txt
+++ b/src/plugins/unittest/CMakeLists.txt
@@ -15,9 +15,10 @@ set(chacha20_poly1305)
if (OPENSSL_VERSION VERSION_GREATER_EQUAL 1.1.0)
set(chacha20_poly1305 crypto/chacha20_poly1305.c)
endif()
-
+include_directories(${CMAKE_SOURCE_DIR}/vpp-api ${CMAKE_CURRENT_BINARY_DIR}/../../vpp-api)
add_vpp_plugin(unittest
SOURCES
+ api_test.c
api_fuzz_test.c
bier_test.c
bihash_test.c
@@ -60,4 +61,5 @@ add_vpp_plugin(unittest
COMPONENT
vpp-plugin-devtools
+ LINK_LIBRARIES vapiclient
)
diff --git a/src/plugins/unittest/api_test.c b/src/plugins/unittest/api_test.c
new file mode 100644
index 00000000000..4bed50c2969
--- /dev/null
+++ b/src/plugins/unittest/api_test.c
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2022 Cisco Systems, Inc.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/plugin/plugin.h>
+
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+#include <vpp/app/version.h>
+#include <stdbool.h>
+#include <vapi/vapi.h>
+
+#include <vapi/memclnt.api.vapi.h>
+#include <vapi/vlib.api.vapi.h>
+#include <vapi/vpe.api.vapi.h>
+
+/*
+ * Example of how to call the VPP binary API from an internal API client,
+ * using the VAPI C language binding.
+ */
+
+DEFINE_VAPI_MSG_IDS_VPE_API_JSON;
+
+/*
+ * Connect a VPP binary API client to the VPP API
+ */
+static vapi_ctx_t
+connect_to_vpp (void)
+{
+ vapi_ctx_t ctx;
+ if (vapi_ctx_alloc (&ctx) != VAPI_OK)
+ {
+ clib_warning ("ctx_alloc failed");
+ return 0;
+ }
+ if (vapi_connect_from_vpp (ctx, "apifromplugin", 64, 32, VAPI_MODE_BLOCKING,
+ true) != VAPI_OK)
+ {
+ clib_warning ("vapi_connect failed");
+ return 0;
+ }
+ return ctx;
+}
+
+/*
+ * Gets called when the show_version_reply message is received
+ */
+vapi_error_e
+show_version_cb (vapi_ctx_t ctx, void *caller_ctx, vapi_error_e rv,
+ bool is_last, vapi_payload_show_version_reply *p)
+{
+ if (rv != VAPI_OK)
+ clib_warning ("Return value: %d", rv);
+ fformat (
+ stdout,
+ "show_version_reply: program: `%s', version: `%s', build directory: "
+ "`%s', build date: `%s'\n",
+ p->program, p->version, p->build_directory, p->build_date);
+ return VAPI_OK;
+}
+
+static void *
+api_show_version_blocking_fn (void *args)
+{
+ vapi_ctx_t ctx;
+
+ if ((ctx = connect_to_vpp ()) == 0)
+ return clib_error_return (0, "API connection failed");
+
+ int called;
+ vapi_msg_show_version *sv = vapi_alloc_show_version (ctx);
+ vapi_error_e vapi_rv = vapi_show_version (ctx, sv, show_version_cb, &called);
+ if (vapi_rv != VAPI_OK)
+ clib_warning ("call failed");
+
+ vapi_disconnect_from_vpp (ctx);
+ vapi_ctx_free (ctx);
+
+ return 0;
+}
+
+static clib_error_t *
+test_api_test_command_fn (vlib_main_t *vm, unformat_input_t *input,
+ vlib_cli_command_t *cmd)
+{
+ /* Run call in a pthread */
+ pthread_t thread;
+ int rv = pthread_create (&thread, NULL, api_show_version_blocking_fn, 0);
+ if (rv)
+ {
+ return clib_error_return (0, "API client thread create failed");
+ }
+ return 0;
+}
+
+VLIB_CLI_COMMAND (test_api_command, static) = {
+ .path = "test api internal",
+ .short_help = "test internal api client",
+ .function = test_api_test_command_fn,
+};
diff --git a/src/vlibapi/api_common.h b/src/vlibapi/api_common.h
index 4c48b003e1e..491ecb8eaef 100644
--- a/src/vlibapi/api_common.h
+++ b/src/vlibapi/api_common.h
@@ -75,6 +75,8 @@ typedef struct vl_api_registration_
/* socket client only */
u32 server_handle; /**< Socket client only: server handle */
u32 server_index; /**< Socket client only: server index */
+
+ bool keepalive; /**< Dead client scan */
} vl_api_registration_t;
#define VL_API_INVALID_FI ((u32)~0)
diff --git a/src/vlibmemory/memclnt.api b/src/vlibmemory/memclnt.api
index bd999b51dd0..0532f1734bf 100644
--- a/src/vlibmemory/memclnt.api
+++ b/src/vlibmemory/memclnt.api
@@ -232,3 +232,19 @@ define control_ping_reply
u32 vpe_pid;
};
+define memclnt_create_v2 {
+ u32 context; /* opaque value to be returned in the reply */
+ i32 ctx_quota; /* requested punt context quota */
+ u64 input_queue; /* client's queue */
+ string name[64]; /* for show, find by name, whatever */
+ u32 api_versions[8]; /* client-server pairs use as desired */
+ bool keepalive[default=true]; /* dead client scan keepalives */
+};
+
+define memclnt_create_v2_reply {
+ u32 context; /* opaque value from the create request */
+ i32 response; /* Non-negative = success */
+ u64 handle; /* handle by which vlib knows this client */
+ u32 index; /* index, used e.g. by API trace replay */
+ u64 message_table; /* serialized message table in shmem */
+};
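For context, a condensed sketch of how the vapi.c changes later in this
patch populate and send this request (queue allocation and shared-memory
header checks elided):

    vl_api_memclnt_create_v2_t *mp;

    /* allocate the request on the shared-memory API heap */
    mp = vl_msg_api_alloc_as_if_client (sizeof (*mp));
    clib_memset (mp, 0, sizeof (*mp));
    mp->_vl_msg_id = ntohs (VL_API_MEMCLNT_CREATE_V2);
    mp->ctx_quota = 0;
    mp->input_queue = (uword) vl_input_queue; /* client's reply queue */
    strncpy ((char *) mp->name, "apifromplugin", sizeof (mp->name) - 1);
    mp->keepalive = false; /* opt this client out of dead-client scans */

    vl_msg_api_send_shmem (shmem_hdr->vl_input_queue, (u8 *) &mp);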
diff --git a/src/vlibmemory/memory_api.c b/src/vlibmemory/memory_api.c
index a16110f2fa1..b1d250b8a5f 100644
--- a/src/vlibmemory/memory_api.c
+++ b/src/vlibmemory/memory_api.c
@@ -192,6 +192,7 @@ vl_api_memclnt_create_t_handler (vl_api_memclnt_create_t * mp)
regp->name = format (0, "%s", mp->name);
vec_add1 (regp->name, 0);
+ regp->keepalive = true;
if (am->serialized_message_table_in_shmem == 0)
am->serialized_message_table_in_shmem =
@@ -218,6 +219,87 @@ vl_api_memclnt_create_t_handler (vl_api_memclnt_create_t * mp)
}
void
+vl_api_memclnt_create_v2_t_handler (vl_api_memclnt_create_v2_t *mp)
+{
+ vl_api_registration_t **regpp;
+ vl_api_registration_t *regp;
+ vl_api_memclnt_create_v2_reply_t *rp;
+ svm_queue_t *q;
+ int rv = 0;
+ void *oldheap;
+ api_main_t *am = vlibapi_get_main ();
+ u8 *msg_table;
+
+ /*
+ * This is tortured. Maintain a vlib-address-space private
+ * pool of client registrations. We use the shared-memory virtual
+ * address of client structure as a handle, to allow direct
+ * manipulation of context quota vbls from the client library.
+ *
+ * This scheme causes trouble w/ API message trace replay, since
+ * some random VA from clib_mem_alloc() certainly won't
+ * occur in the Linux sim. The (very) few places
+ * that care need to use the pool index.
+ *
+ * Putting the registration object(s) into a pool in shared memory and
+ * using the pool index as a handle seems like a great idea.
+ * Unfortunately, each and every reference to that pool would need
+ * to be protected by a mutex:
+ *
+ * Client VLIB
+ * ------ ----
+ * convert pool index to
+ * pointer.
+ * <deschedule>
+ * expand pool
+ * <deschedule>
+ * kaboom!
+ */
+
+ pool_get (am->vl_clients, regpp);
+
+ oldheap = vl_msg_push_heap ();
+ *regpp = clib_mem_alloc (sizeof (vl_api_registration_t));
+
+ regp = *regpp;
+ clib_memset (regp, 0, sizeof (*regp));
+ regp->registration_type = REGISTRATION_TYPE_SHMEM;
+ regp->vl_api_registration_pool_index = regpp - am->vl_clients;
+ regp->vlib_rp = am->vlib_rp;
+ regp->shmem_hdr = am->shmem_hdr;
+ regp->clib_file_index = am->shmem_hdr->clib_file_index;
+
+ q = regp->vl_input_queue = (svm_queue_t *) (uword) mp->input_queue;
+ VL_MSG_API_SVM_QUEUE_UNPOISON (q);
+
+ regp->name = format (0, "%s", mp->name);
+ vec_add1 (regp->name, 0);
+ regp->keepalive = mp->keepalive;
+
+ if (am->serialized_message_table_in_shmem == 0)
+ am->serialized_message_table_in_shmem =
+ vl_api_serialize_message_table (am, 0);
+
+ if (am->vlib_rp != am->vlib_primary_rp)
+ msg_table = vl_api_serialize_message_table (am, 0);
+ else
+ msg_table = am->serialized_message_table_in_shmem;
+
+ vl_msg_pop_heap (oldheap);
+
+ rp = vl_msg_api_alloc (sizeof (*rp));
+ rp->_vl_msg_id = ntohs (VL_API_MEMCLNT_CREATE_V2_REPLY);
+ rp->handle = (uword) regp;
+ rp->index = vl_msg_api_handle_from_index_and_epoch (
+ regp->vl_api_registration_pool_index, am->shmem_hdr->application_restarts);
+ rp->context = mp->context;
+ rp->response = ntohl (rv);
+ rp->message_table = pointer_to_uword (msg_table);
+
+ vl_msg_api_send_shmem (q, (u8 *) &rp);
+}
+
+void
vl_api_call_reaper_functions (u32 client_index)
{
clib_error_t *error = 0;
@@ -399,6 +481,7 @@ vl_api_memclnt_keepalive_t_handler (vl_api_memclnt_keepalive_t * mp)
#define foreach_vlib_api_msg \
_ (MEMCLNT_CREATE, memclnt_create, 0) \
+ _ (MEMCLNT_CREATE_V2, memclnt_create_v2, 0) \
_ (MEMCLNT_DELETE, memclnt_delete, 0) \
_ (MEMCLNT_KEEPALIVE, memclnt_keepalive, 0) \
_ (MEMCLNT_KEEPALIVE_REPLY, memclnt_keepalive_reply, 0)
@@ -578,8 +661,10 @@ vl_mem_api_dead_client_scan (api_main_t * am, vl_shmem_hdr_t * shm, f64 now)
/* *INDENT-OFF* */
pool_foreach (regpp, am->vl_clients) {
+ if (!(*regpp)->keepalive)
+ continue;
vl_mem_send_client_keepalive_w_reg (am, now, regpp, &dead_indices,
- &confused_indices);
+ &confused_indices);
}
/* *INDENT-ON* */
@@ -944,7 +1029,7 @@ vl_api_client_index_to_input_queue (u32 index)
static clib_error_t *
setup_memclnt_exit (vlib_main_t * vm)
{
- atexit (vl_unmap_shmem);
+ atexit (vl_unmap_shmem_client);
return 0;
}
diff --git a/src/vnet/interface_api.c b/src/vnet/interface_api.c
index 00a1ce3a599..29f98f7851f 100644
--- a/src/vnet/interface_api.c
+++ b/src/vnet/interface_api.c
@@ -1624,23 +1624,27 @@ interface_api_hookup (vlib_main_t * vm)
{
api_main_t *am = vlibapi_get_main ();
+ /*
+ * Set up the (msg_name, crc, message-id) table
+ */
+ REPLY_MSG_ID_BASE = setup_message_id_table ();
+
/* Mark these APIs as mp safe */
- am->is_mp_safe[VL_API_SW_INTERFACE_DUMP] = 1;
- am->is_mp_safe[VL_API_SW_INTERFACE_DETAILS] = 1;
- am->is_mp_safe[VL_API_SW_INTERFACE_TAG_ADD_DEL] = 1;
- am->is_mp_safe[VL_API_SW_INTERFACE_SET_INTERFACE_NAME] = 1;
+ am->is_mp_safe[REPLY_MSG_ID_BASE + VL_API_SW_INTERFACE_DUMP] = 1;
+ am->is_mp_safe[REPLY_MSG_ID_BASE + VL_API_SW_INTERFACE_DETAILS] = 1;
+ am->is_mp_safe[REPLY_MSG_ID_BASE + VL_API_SW_INTERFACE_TAG_ADD_DEL] = 1;
+ am->is_mp_safe[REPLY_MSG_ID_BASE + VL_API_SW_INTERFACE_SET_INTERFACE_NAME] =
+ 1;
/* Do not replay VL_API_SW_INTERFACE_DUMP messages */
- am->api_trace_cfg[VL_API_SW_INTERFACE_DUMP].replay_enable = 0;
+ am->api_trace_cfg[REPLY_MSG_ID_BASE + VL_API_SW_INTERFACE_DUMP]
+ .replay_enable = 0;
/* Mark these APIs as autoendian */
- am->is_autoendian[VL_API_SW_INTERFACE_SET_TX_PLACEMENT] = 1;
- am->is_autoendian[VL_API_SW_INTERFACE_TX_PLACEMENT_GET] = 1;
-
- /*
- * Set up the (msg_name, crc, message-id) table
- */
- REPLY_MSG_ID_BASE = setup_message_id_table ();
+ am->is_autoendian[REPLY_MSG_ID_BASE + VL_API_SW_INTERFACE_SET_TX_PLACEMENT] =
+ 1;
+ am->is_autoendian[REPLY_MSG_ID_BASE + VL_API_SW_INTERFACE_TX_PLACEMENT_GET] =
+ 1;
return 0;
}
diff --git a/src/vpp-api/vapi/vapi.c b/src/vpp-api/vapi/vapi.c
index ca46f8d3b84..7808bec8521 100644
--- a/src/vpp-api/vapi/vapi.c
+++ b/src/vpp-api/vapi/vapi.c
@@ -30,10 +30,18 @@
#include <vlib/vlib.h>
#include <vlibapi/api_common.h>
#include <vlibmemory/memory_client.h>
+#include <vlibmemory/memory_api.h>
+#include <vlibmemory/api.h>
#include <vapi/memclnt.api.vapi.h>
#include <vapi/vlib.api.vapi.h>
+#include <vlibmemory/vl_memory_msg_enum.h>
+
+#define vl_typedefs /* define message structures */
+#include <vlibmemory/vl_memory_api_h.h>
+#undef vl_typedefs
+
/* we need to use control pings for some stuff and because we're forced to put
* the code in headers, we need a way to be able to grab the ids of these
* messages - so declare them here as extern */
@@ -89,6 +97,11 @@ struct vapi_ctx_s
bool connected;
bool handle_keepalives;
pthread_mutex_t requests_mutex;
+
+ svm_queue_t *vl_input_queue;
+ u32 my_client_index;
+ /** client message index hash table */
+ uword *msg_index_by_name_and_crc;
};
u32
@@ -221,7 +234,7 @@ vapi_msg_alloc (vapi_ctx_t ctx, size_t size)
{
return NULL;
}
- void *rv = vl_msg_api_alloc_or_null (size);
+ void *rv = vl_msg_api_alloc_as_if_client_or_null (size);
if (rv)
{
clib_memset (rv, 0, size);
@@ -302,13 +315,174 @@ vapi_is_msg_available (vapi_ctx_t ctx, vapi_msg_id_t id)
return vapi_lookup_vl_msg_id (ctx, id) != UINT16_MAX;
}
+/* Cut and paste to avoid adding a dependency on the client library */
+__clib_nosanitize_addr static void
+VL_API_VEC_UNPOISON (const void *v)
+{
+ const vec_header_t *vh = &((vec_header_t *) v)[-1];
+ clib_mem_unpoison (vh, sizeof (*vh) + vec_len (v));
+}
+
+static void
+vapi_api_name_and_crc_free (vapi_ctx_t ctx)
+{
+ int i;
+ u8 **keys = 0;
+ hash_pair_t *hp;
+
+ if (!ctx->msg_index_by_name_and_crc)
+ return;
+ hash_foreach_pair (hp, ctx->msg_index_by_name_and_crc,
+ ({ vec_add1 (keys, (u8 *) hp->key); }));
+ for (i = 0; i < vec_len (keys); i++)
+ vec_free (keys[i]);
+ vec_free (keys);
+ hash_free (ctx->msg_index_by_name_and_crc);
+}
+
+static void
+vapi_memclnt_create_v2_reply_t_handler (vapi_ctx_t ctx,
+ vl_api_memclnt_create_v2_reply_t *mp)
+{
+ serialize_main_t _sm, *sm = &_sm;
+ u8 *tblv;
+ u32 nmsgs;
+ int i;
+ u8 *name_and_crc;
+ u32 msg_index;
+
+ ctx->my_client_index = mp->index;
+
+ /* Clean out any previous hash table (unlikely) */
+ vapi_api_name_and_crc_free (ctx);
+
+ ctx->msg_index_by_name_and_crc = hash_create_string (0, sizeof (uword));
+
+ /* Recreate the vnet-side API message handler table */
+ tblv = uword_to_pointer (mp->message_table, u8 *);
+ unserialize_open_data (sm, tblv, vec_len (tblv));
+ unserialize_integer (sm, &nmsgs, sizeof (u32));
+
+ VL_API_VEC_UNPOISON (tblv);
+
+ for (i = 0; i < nmsgs; i++)
+ {
+ msg_index = unserialize_likely_small_unsigned_integer (sm);
+ unserialize_cstring (sm, (char **) &name_and_crc);
+ hash_set_mem (ctx->msg_index_by_name_and_crc, name_and_crc, msg_index);
+ }
+}
+
+static void
+vapi_memclnt_delete_reply_t_handler (vapi_ctx_t ctx,
+ vl_api_memclnt_delete_reply_t *mp)
+{
+ void *oldheap;
+ oldheap = vl_msg_push_heap ();
+ svm_queue_free (ctx->vl_input_queue);
+ vl_msg_pop_heap (oldheap);
+
+ ctx->my_client_index = ~0;
+ ctx->vl_input_queue = 0;
+}
+
+int
+vapi_client_connect (vapi_ctx_t ctx, const char *name, int ctx_quota,
+ int input_queue_size, bool keepalive)
+{
+ vl_api_memclnt_create_v2_t *mp;
+ vl_api_memclnt_create_v2_reply_t *rp;
+ svm_queue_t *vl_input_queue;
+ vl_shmem_hdr_t *shmem_hdr;
+ int rv = 0;
+ void *oldheap;
+ api_main_t *am = vlibapi_get_main ();
+
+ shmem_hdr = am->shmem_hdr;
+
+ if (shmem_hdr == 0 || shmem_hdr->vl_input_queue == 0)
+ {
+ clib_warning ("shmem_hdr / input queue NULL");
+ return -1;
+ }
+
+ clib_mem_unpoison (shmem_hdr, sizeof (*shmem_hdr));
+ VL_MSG_API_SVM_QUEUE_UNPOISON (shmem_hdr->vl_input_queue);
+
+ oldheap = vl_msg_push_heap ();
+ vl_input_queue =
+ svm_queue_alloc_and_init (input_queue_size, sizeof (uword), getpid ());
+ vl_msg_pop_heap (oldheap);
+
+ ctx->my_client_index = ~0;
+ ctx->vl_input_queue = vl_input_queue;
+
+ mp = vl_msg_api_alloc_as_if_client (sizeof (vl_api_memclnt_create_v2_t));
+ clib_memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_MEMCLNT_CREATE_V2);
+ mp->ctx_quota = ctx_quota;
+ mp->input_queue = (uword) vl_input_queue;
+ strncpy ((char *) mp->name, name, sizeof (mp->name) - 1);
+ mp->keepalive = keepalive;
+
+ vl_msg_api_send_shmem (shmem_hdr->vl_input_queue, (u8 *) &mp);
+
+ while (1)
+ {
+ int qstatus;
+ struct timespec ts, tsrem;
+ int i;
+
+ /* Wait up to 10 seconds */
+ for (i = 0; i < 1000; i++)
+ {
+ qstatus =
+ svm_queue_sub (vl_input_queue, (u8 *) &rp, SVM_Q_NOWAIT, 0);
+ if (qstatus == 0)
+ goto read_one_msg;
+ ts.tv_sec = 0;
+ ts.tv_nsec = 10000 * 1000; /* 10 ms */
+ while (nanosleep (&ts, &tsrem) < 0)
+ ts = tsrem;
+ }
+ /* Timeout... */
+ return -1;
+
+ read_one_msg:
+ VL_MSG_API_UNPOISON (rp);
+ if (ntohs (rp->_vl_msg_id) != VL_API_MEMCLNT_CREATE_V2_REPLY)
+ {
+ clib_warning ("unexpected reply: id %d", ntohs (rp->_vl_msg_id));
+ continue;
+ }
+ rv = clib_net_to_host_u32 (rp->response);
+ vapi_memclnt_create_v2_reply_t_handler (ctx, rp);
+ break;
+ }
+ return (rv);
+}
+
+u32
+vapi_api_get_msg_index (vapi_ctx_t ctx, u8 *name_and_crc)
+{
+ uword *p;
+
+ if (ctx->msg_index_by_name_and_crc)
+ {
+ p = hash_get_mem (ctx->msg_index_by_name_and_crc, name_and_crc);
+ if (p)
+ return p[0];
+ }
+ return ~0;
+}
+
vapi_error_e
-vapi_connect (vapi_ctx_t ctx, const char *name,
- const char *chroot_prefix,
- int max_outstanding_requests,
- int response_queue_size, vapi_mode_e mode,
- bool handle_keepalives)
+vapi_connect (vapi_ctx_t ctx, const char *name, const char *chroot_prefix,
+ int max_outstanding_requests, int response_queue_size,
+ vapi_mode_e mode, bool handle_keepalives)
{
+ int rv;
+
if (response_queue_size <= 0 || max_outstanding_requests <= 0)
{
return VAPI_EINVAL;
@@ -317,6 +491,7 @@ vapi_connect (vapi_ctx_t ctx, const char *name,
{
return VAPI_ENOMEM;
}
+
ctx->requests_size = max_outstanding_requests;
const size_t size = ctx->requests_size * sizeof (*ctx->requests);
void *tmp = realloc (ctx->requests, size);
@@ -328,6 +503,7 @@ vapi_connect (vapi_ctx_t ctx, const char *name,
clib_memset (ctx->requests, 0, size);
/* coverity[MISSING_LOCK] - 177211 requests_mutex is not needed here */
ctx->requests_start = ctx->requests_count = 0;
+
if (chroot_prefix)
{
VAPI_DBG ("set memory root path `%s'", chroot_prefix);
@@ -335,12 +511,13 @@ vapi_connect (vapi_ctx_t ctx, const char *name,
}
static char api_map[] = "/vpe-api";
VAPI_DBG ("client api map `%s'", api_map);
- if ((vl_client_api_map (api_map)) < 0)
+ if ((rv = vl_map_shmem (api_map, 0 /* is_vlib */)) < 0)
{
return VAPI_EMAP_FAIL;
}
VAPI_DBG ("connect client `%s'", name);
- if (vl_client_connect ((char *) name, 0, response_queue_size) < 0)
+ if (vapi_client_connect (ctx, (char *) name, 0, response_queue_size, true) <
+ 0)
{
vl_client_api_unmap ();
return VAPI_ECON_FAIL;
@@ -348,14 +525,15 @@ vapi_connect (vapi_ctx_t ctx, const char *name,
#if VAPI_DEBUG_CONNECT
VAPI_DBG ("start probing messages");
#endif
- int rv;
+
int i;
for (i = 0; i < __vapi_metadata.count; ++i)
{
vapi_message_desc_t *m = __vapi_metadata.msgs[i];
u8 scratch[m->name_with_crc_len + 1];
memcpy (scratch, m->name_with_crc, m->name_with_crc_len + 1);
- u32 id = vl_msg_api_get_msg_index (scratch);
+ u32 id = vapi_api_get_msg_index (ctx, scratch);
+
if (VAPI_INVALID_MSG_ID != id)
{
if (id > UINT16_MAX)
@@ -367,10 +545,9 @@ vapi_connect (vapi_ctx_t ctx, const char *name,
}
if (id > ctx->vl_msg_id_max)
{
- vapi_msg_id_t *tmp = realloc (ctx->vl_msg_id_to_vapi_msg_t,
- sizeof
- (*ctx->vl_msg_id_to_vapi_msg_t) *
- (id + 1));
+ vapi_msg_id_t *tmp =
+ realloc (ctx->vl_msg_id_to_vapi_msg_t,
+ sizeof (*ctx->vl_msg_id_to_vapi_msg_t) * (id + 1));
if (!tmp)
{
rv = VAPI_ENOMEM;
@@ -398,8 +575,8 @@ vapi_connect (vapi_ctx_t ctx, const char *name,
if (!vapi_is_msg_available (ctx, vapi_msg_id_control_ping) ||
!vapi_is_msg_available (ctx, vapi_msg_id_control_ping_reply))
{
- VAPI_ERR
- ("control ping or control ping reply not available, cannot connect");
+ VAPI_ERR (
+ "control ping or control ping reply not available, cannot connect");
rv = VAPI_EINCOMPATIBLE;
goto fail;
}
@@ -420,6 +597,157 @@ fail:
return rv;
}
+/*
+ * API client running in the same process as VPP
+ */
+vapi_error_e
+vapi_connect_from_vpp (vapi_ctx_t ctx, const char *name,
+ int max_outstanding_requests, int response_queue_size,
+ vapi_mode_e mode, bool handle_keepalives)
+{
+ int rv;
+
+ if (response_queue_size <= 0 || max_outstanding_requests <= 0)
+ {
+ return VAPI_EINVAL;
+ }
+
+ ctx->requests_size = max_outstanding_requests;
+ const size_t size = ctx->requests_size * sizeof (*ctx->requests);
+ void *tmp = realloc (ctx->requests, size);
+ if (!tmp)
+ {
+ return VAPI_ENOMEM;
+ }
+ ctx->requests = tmp;
+ clib_memset (ctx->requests, 0, size);
+ /* coverity[MISSING_LOCK] - 177211 requests_mutex is not needed here */
+ ctx->requests_start = ctx->requests_count = 0;
+
+ VAPI_DBG ("connect client `%s'", name);
+ if (vapi_client_connect (ctx, (char *) name, 0, response_queue_size,
+ handle_keepalives) < 0)
+ {
+ return VAPI_ECON_FAIL;
+ }
+
+ int i;
+ for (i = 0; i < __vapi_metadata.count; ++i)
+ {
+ vapi_message_desc_t *m = __vapi_metadata.msgs[i];
+ u8 scratch[m->name_with_crc_len + 1];
+ memcpy (scratch, m->name_with_crc, m->name_with_crc_len + 1);
+ u32 id = vapi_api_get_msg_index (ctx, scratch);
+ if (VAPI_INVALID_MSG_ID != id)
+ {
+ if (id > UINT16_MAX)
+ {
+ VAPI_ERR ("Returned vl_msg_id `%u' > UINT16MAX `%u'!", id,
+ UINT16_MAX);
+ rv = VAPI_EINVAL;
+ goto fail;
+ }
+ if (id > ctx->vl_msg_id_max)
+ {
+ vapi_msg_id_t *tmp =
+ realloc (ctx->vl_msg_id_to_vapi_msg_t,
+ sizeof (*ctx->vl_msg_id_to_vapi_msg_t) * (id + 1));
+ if (!tmp)
+ {
+ rv = VAPI_ENOMEM;
+ goto fail;
+ }
+ ctx->vl_msg_id_to_vapi_msg_t = tmp;
+ ctx->vl_msg_id_max = id;
+ }
+ ctx->vl_msg_id_to_vapi_msg_t[id] = m->id;
+ ctx->vapi_msg_id_t_to_vl_msg_id[m->id] = id;
+ }
+ else
+ {
+ ctx->vapi_msg_id_t_to_vl_msg_id[m->id] = UINT16_MAX;
+ VAPI_DBG ("Message `%s' not available", m->name_with_crc);
+ }
+ }
+ if (!vapi_is_msg_available (ctx, vapi_msg_id_control_ping) ||
+ !vapi_is_msg_available (ctx, vapi_msg_id_control_ping_reply))
+ {
+ VAPI_ERR (
+ "control ping or control ping reply not available, cannot connect");
+ rv = VAPI_EINCOMPATIBLE;
+ goto fail;
+ }
+ ctx->mode = mode;
+ ctx->connected = true;
+ if (vapi_is_msg_available (ctx, vapi_msg_id_memclnt_keepalive))
+ {
+ ctx->handle_keepalives = handle_keepalives;
+ }
+ else
+ {
+ ctx->handle_keepalives = false;
+ }
+ return VAPI_OK;
+fail:
+ vl_client_disconnect ();
+ return rv;
+}
+
+vapi_error_e
+vapi_disconnect_from_vpp (vapi_ctx_t ctx)
+{
+ if (!ctx->connected)
+ {
+ return VAPI_EINVAL;
+ }
+ vl_api_memclnt_delete_reply_t *rp;
+ svm_queue_t *vl_input_queue;
+ time_t begin;
+ vl_input_queue = ctx->vl_input_queue;
+ vl_client_send_disconnect (0 /* wait for reply */);
+
+ /*
+ * Have to be careful here, in case the client is disconnecting
+ * because e.g. the vlib process died, or is unresponsive.
+ */
+ begin = time (0);
+ vapi_error_e rv = VAPI_OK;
+ while (1)
+ {
+ time_t now;
+
+ now = time (0);
+
+ if (now >= (begin + 2))
+ {
+ clib_warning ("peer unresponsive, give up");
+ ctx->my_client_index = ~0;
+ rv = VAPI_ENORESP;
+ goto fail;
+ }
+ if (svm_queue_sub (vl_input_queue, (u8 *) &rp, SVM_Q_NOWAIT, 0) < 0)
+ continue;
+
+ VL_MSG_API_UNPOISON (rp);
+
+ /* drain the queue */
+ if (ntohs (rp->_vl_msg_id) != VL_API_MEMCLNT_DELETE_REPLY)
+ {
+ clib_warning ("queue drain: %d", ntohs (rp->_vl_msg_id));
+ vl_msg_api_free (rp);
+ continue;
+ }
+ vapi_memclnt_delete_reply_t_handler (
+ ctx, (void *) rp /*, ntohl (msgbuf->data_len)*/);
+ break;
+ }
+fail:
+ vapi_api_name_and_crc_free (ctx);
+
+ ctx->connected = false;
+ return rv;
+}
+
vapi_error_e
vapi_disconnect (vapi_ctx_t ctx)
{
@@ -427,13 +755,57 @@ vapi_disconnect (vapi_ctx_t ctx)
{
return VAPI_EINVAL;
}
- vl_client_disconnect ();
+
+ vl_api_memclnt_delete_reply_t *rp;
+ svm_queue_t *vl_input_queue;
+ time_t begin;
+ vl_input_queue = ctx->vl_input_queue;
+ vl_client_send_disconnect (0 /* wait for reply */);
+
+ /*
+ * Have to be careful here, in case the client is disconnecting
+ * because e.g. the vlib process died, or is unresponsive.
+ */
+ begin = time (0);
+ vapi_error_e rv = VAPI_OK;
+ while (1)
+ {
+ time_t now;
+
+ now = time (0);
+
+ if (now >= (begin + 2))
+ {
+ clib_warning ("peer unresponsive, give up");
+ ctx->my_client_index = ~0;
+ rv = VAPI_ENORESP;
+ goto fail;
+ }
+ if (svm_queue_sub (vl_input_queue, (u8 *) &rp, SVM_Q_NOWAIT, 0) < 0)
+ continue;
+
+ VL_MSG_API_UNPOISON (rp);
+
+ /* drain the queue */
+ if (ntohs (rp->_vl_msg_id) != VL_API_MEMCLNT_DELETE_REPLY)
+ {
+ clib_warning ("queue drain: %d", ntohs (rp->_vl_msg_id));
+ vl_msg_api_free (rp);
+ continue;
+ }
+ vapi_memclnt_delete_reply_t_handler (
+ ctx, (void *) rp /*, ntohl (msgbuf->data_len)*/);
+ break;
+ }
+fail:
+ vapi_api_name_and_crc_free (ctx);
+
vl_client_api_unmap ();
#if VAPI_DEBUG_ALLOC
vapi_to_be_freed_validate ();
#endif
ctx->connected = false;
- return VAPI_OK;
+ return rv;
}
vapi_error_e
@@ -541,15 +913,10 @@ vapi_recv (vapi_ctx_t ctx, void **msg, size_t * msg_size,
return VAPI_EINVAL;
}
vapi_error_e rv = VAPI_OK;
- api_main_t *am = vlibapi_get_main ();
uword data;
- if (am->our_pid == 0)
- {
- return VAPI_EINVAL;
- }
+ svm_queue_t *q = ctx->vl_input_queue;
- svm_queue_t *q = am->vl_input_queue;
again:
VAPI_DBG ("doing shm queue sub");
@@ -610,7 +977,6 @@ again:
vapi_msg_memclnt_keepalive_reply_hton (reply);
while (VAPI_EAGAIN == vapi_send (ctx, reply));
vapi_msg_free (ctx, *msg);
- VAPI_DBG ("autohandled memclnt_keepalive");
goto again;
}
}
@@ -689,9 +1055,8 @@ vapi_dispatch_response (vapi_ctx_t ctx, vapi_msg_id_t id,
}
if (payload_offset != -1)
{
- rv =
- ctx->requests[tmp].callback (ctx, ctx->requests[tmp].callback_ctx,
- VAPI_OK, is_last, payload);
+ rv = ctx->requests[tmp].callback (
+ ctx, ctx->requests[tmp].callback_ctx, VAPI_OK, is_last, payload);
}
else
{
@@ -870,7 +1235,7 @@ vapi_lookup_vl_msg_id (vapi_ctx_t ctx, vapi_msg_id_t id)
int
vapi_get_client_index (vapi_ctx_t ctx)
{
- return vlibapi_get_main ()->my_client_index;
+ return ctx->my_client_index;
}
bool
diff --git a/src/vpp-api/vapi/vapi.h b/src/vpp-api/vapi/vapi.h
index 08d016b0dd7..46666293e4b 100644
--- a/src/vpp-api/vapi/vapi.h
+++ b/src/vpp-api/vapi/vapi.h
@@ -44,7 +44,7 @@ extern "C"
* process). It's not recommended to mix the higher and lower level APIs. Due
* to version issues, the higher-level APIs are not part of the shared library.
*/
- typedef struct vapi_ctx_s *vapi_ctx_t;
+typedef struct vapi_ctx_s *vapi_ctx_t;
/**
* @brief allocate vapi message of given size
@@ -56,7 +56,7 @@ extern "C"
*
* @return pointer to message or NULL if out of memory
*/
- void *vapi_msg_alloc (vapi_ctx_t ctx, size_t size);
+void *vapi_msg_alloc (vapi_ctx_t ctx, size_t size);
/**
* @brief free a vapi message
@@ -66,7 +66,7 @@ extern "C"
* @param ctx opaque vapi context
* @param msg message to be freed
*/
- void vapi_msg_free (vapi_ctx_t ctx, void *msg);
+void vapi_msg_free (vapi_ctx_t ctx, void *msg);
/**
* @brief allocate vapi context
@@ -75,18 +75,18 @@ extern "C"
*
* @return VAPI_OK on success, other error code on error
*/
- vapi_error_e vapi_ctx_alloc (vapi_ctx_t * result);
+vapi_error_e vapi_ctx_alloc (vapi_ctx_t *result);
/**
* @brief free vapi context
*/
- void vapi_ctx_free (vapi_ctx_t ctx);
+void vapi_ctx_free (vapi_ctx_t ctx);
/**
* @brief check if message identified by it's message id is known by the vpp to
* which the connection is open
*/
- bool vapi_is_msg_available (vapi_ctx_t ctx, vapi_msg_id_t type);
+bool vapi_is_msg_available (vapi_ctx_t ctx, vapi_msg_id_t type);
/**
* @brief connect to vpp
@@ -101,11 +101,30 @@ extern "C"
*
* @return VAPI_OK on success, other error code on error
*/
- vapi_error_e vapi_connect (vapi_ctx_t ctx, const char *name,
- const char *chroot_prefix,
- int max_outstanding_requests,
- int response_queue_size, vapi_mode_e mode,
- bool handle_keepalives);
+vapi_error_e vapi_connect (vapi_ctx_t ctx, const char *name,
+ const char *chroot_prefix,
+ int max_outstanding_requests,
+ int response_queue_size, vapi_mode_e mode,
+ bool handle_keepalives);
+
+/**
+ * @brief connect to vpp from a client in same process
+ * @remark This MUST be called from a separate thread. If called
+ * from the main thread, it will deadlock.
+ *
+ * @param ctx opaque vapi context, must be allocated using vapi_ctx_alloc first
+ * @param name application name
+ * @param max_outstanding_requests max number of outstanding requests queued
+ * @param response_queue_size size of the response queue
+ * @param mode mode of operation - blocking or nonblocking
+ * @param handle_keepalives - if true, automatically handle memclnt_keepalive
+ *
+ * @return VAPI_OK on success, other error code on error
+ */
+vapi_error_e vapi_connect_from_vpp (vapi_ctx_t ctx, const char *name,
+ int max_outstanding_requests,
+ int response_queue_size, vapi_mode_e mode,
+ bool handle_keepalives);
/**
* @brief disconnect from vpp
@@ -114,7 +133,8 @@ extern "C"
*
* @return VAPI_OK on success, other error code on error
*/
- vapi_error_e vapi_disconnect (vapi_ctx_t ctx);
+vapi_error_e vapi_disconnect (vapi_ctx_t ctx);
+vapi_error_e vapi_disconnect_from_vpp (vapi_ctx_t ctx);
/**
* @brief get event file descriptor
@@ -127,7 +147,7 @@ extern "C"
*
* @return VAPI_OK on success, other error code on error
*/
- vapi_error_e vapi_get_fd (vapi_ctx_t ctx, int *fd);
+vapi_error_e vapi_get_fd (vapi_ctx_t ctx, int *fd);
/**
* @brief low-level api for sending messages to vpp
@@ -140,7 +160,7 @@ extern "C"
*
* @return VAPI_OK on success, other error code on error
*/
- vapi_error_e vapi_send (vapi_ctx_t ctx, void *msg);
+vapi_error_e vapi_send (vapi_ctx_t ctx, void *msg);
/**
* @brief low-level api for atomically sending two messages to vpp - either