author    Dave Barach <dbarach@cisco.com>  2017-12-15 12:22:57 -0500
committer John Lo <loj@cisco.com>          2017-12-15 22:32:11 +0000
commit    2877eee189993dbd1b9a5c3d22499930a4768786
tree      7c106163b2595a06441c329a2781537929296bc9 /src/vlibmemory/memory_vlib.c
parent    891f0a1caa795ed7799f0c6faa1cb91b4669d4da
VPP-1102: fix dangling references in RPC handling
Queue RPC calls and send them from the main dispatch loop. As things
stood, if the vpp main input queue filled, worker threads could enter a
barrier-sync spin-wait in the middle of processing a frame. If thread 0
decided to recreate worker thread data structures, the worker thread(s)
could easily crash. Legislate the problem out of existence by enqueueing
RPC messages only from the main dispatch loop. At that point, doing a
barrier-sync wait is perfectly OK.

Change-Id: I18da3e44bb1f29a63fe5f30cf11de732ecfd5bf7
Signed-off-by: Dave Barach <dave@barachs.net>
Diffstat (limited to 'src/vlibmemory/memory_vlib.c')
-rw-r--r--  src/vlibmemory/memory_vlib.c  66
1 file changed, 39 insertions(+), 27 deletions(-)
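For orientation before the diff: the fix has worker threads append RPC
messages to vm->pending_rpc_requests and lets thread 0 flush that vector
once per pass of its dispatch loop, where blocking or barrier-syncing is
safe. Below is a minimal sketch of the flush point, assuming an
illustrative loop shape (the real dispatch loop lives in src/vlib/main.c
and is considerably more involved):

/* Sketch only: not the actual vlib dispatch loop. */
static void
main_dispatch_loop_sketch (vlib_main_t * vm)
{
  while (1)
    {
      /* Thread 0 only: flush RPCs queued by workers since the last
         pass. A barrier sync here cannot strand a worker mid-frame. */
      if (PREDICT_FALSE (vec_len (vm->pending_rpc_requests) > 0))
        vl_api_send_pending_rpc_requests (vm);

      /* ... poll input nodes and dispatch frames ... */
    }
}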
diff --git a/src/vlibmemory/memory_vlib.c b/src/vlibmemory/memory_vlib.c
index 7ae7867f05e..1c099ed0a3e 100644
--- a/src/vlibmemory/memory_vlib.c
+++ b/src/vlibmemory/memory_vlib.c
@@ -1839,19 +1839,51 @@ vl_api_rpc_call_reply_t_handler (vl_api_rpc_call_reply_t * mp)
clib_warning ("unimplemented");
}
+void
+vl_api_send_pending_rpc_requests (vlib_main_t * vm)
+{
+ api_main_t *am = &api_main;
+ vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
+ unix_shared_memory_queue_t *q;
+ int i;
+
+ /*
+ * Use the "normal" control-plane mechanism for the main thread.
+ * Well, almost. If the main input queue is full, we cannot
+ * block; otherwise, we can expect a barrier-sync timeout.
+ */
+ q = shmem_hdr->vl_input_queue;
+
+ for (i = 0; i < vec_len (vm->pending_rpc_requests); i++)
+ {
+ while (pthread_mutex_trylock (&q->mutex))
+ vlib_worker_thread_barrier_check ();
+
+ while (PREDICT_FALSE (unix_shared_memory_queue_is_full (q)))
+ {
+ pthread_mutex_unlock (&q->mutex);
+ vlib_worker_thread_barrier_check ();
+ while (pthread_mutex_trylock (&q->mutex))
+ vlib_worker_thread_barrier_check ();
+ }
+
+ vl_msg_api_send_shmem_nolock (q, (u8 *) (vm->pending_rpc_requests + i));
+
+ pthread_mutex_unlock (&q->mutex);
+ }
+ _vec_len (vm->pending_rpc_requests) = 0;
+}
+
always_inline void
vl_api_rpc_call_main_thread_inline (void *fp, u8 * data, u32 data_length,
u8 force_rpc)
{
vl_api_rpc_call_t *mp;
- api_main_t *am = &api_main;
- vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
- unix_shared_memory_queue_t *q;
+ vlib_main_t *vm = vlib_get_main ();
- /* Main thread: call the function directly */
+ /* Main thread and not a forced RPC: call the function directly */
if ((force_rpc == 0) && (vlib_get_thread_index () == 0))
{
- vlib_main_t *vm = vlib_get_main ();
void (*call_fp) (void *);
vlib_worker_thread_barrier_sync (vm);
@@ -1863,7 +1895,7 @@ vl_api_rpc_call_main_thread_inline (void *fp, u8 * data, u32 data_length,
return;
}
- /* Any other thread, actually do an RPC call... */
+ /* Otherwise, actually do an RPC */
mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + data_length);
memset (mp, 0, sizeof (*mp));
@@ -1872,27 +1904,7 @@ vl_api_rpc_call_main_thread_inline (void *fp, u8 * data, u32 data_length,
mp->function = pointer_to_uword (fp);
mp->need_barrier_sync = 1;
- /*
- * Use the "normal" control-plane mechanism for the main thread.
- * Well, almost. if the main input queue is full, we cannot
- * block. Otherwise, we can expect a barrier sync timeout.
- */
- q = shmem_hdr->vl_input_queue;
-
- while (pthread_mutex_trylock (&q->mutex))
- vlib_worker_thread_barrier_check ();
-
- while (PREDICT_FALSE (unix_shared_memory_queue_is_full (q)))
- {
- pthread_mutex_unlock (&q->mutex);
- vlib_worker_thread_barrier_check ();
- while (pthread_mutex_trylock (&q->mutex))
- vlib_worker_thread_barrier_check ();
- }
-
- vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
-
- pthread_mutex_unlock (&q->mutex);
+ vec_add1 (vm->pending_rpc_requests, (uword) mp);
}
/*
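For context on the caller side, workers reach this path through the
vl_api_rpc_call_main_thread() wrapper around the inline above; after this
change the call merely appends to vm->pending_rpc_requests rather than
touching the shared-memory queue directly. A hedged usage sketch follows:
the handler and argument struct are hypothetical, and data is copied into
the allocated API message, so a stack variable is fine.

/* Hypothetical argument struct and handler, for illustration only. */
typedef struct { u32 sw_if_index; } demo_rpc_args_t;

static void
demo_rpc_handler (void *arg)
{
  demo_rpc_args_t *a = (demo_rpc_args_t *) arg;
  /* Runs on thread 0 with need_barrier_sync = 1, so it may safely
     rebuild per-worker data structures. */
  clib_warning ("rpc: sw_if_index %u", a->sw_if_index);
}

/* On a worker thread: queue the call; it is sent later by the main
   dispatch loop via vl_api_send_pending_rpc_requests(). */
demo_rpc_args_t args = { .sw_if_index = 1 };
vl_api_rpc_call_main_thread (demo_rpc_handler, (u8 *) &args, sizeof (args));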