author      Dave Barach <dbarach@cisco.com>             2017-12-15 12:22:57 -0500
committer   John Lo <loj@cisco.com>                      2017-12-15 22:32:11 +0000
commit      2877eee189993dbd1b9a5c3d22499930a4768786 (patch)
tree        7c106163b2595a06441c329a2781537929296bc9
parent      891f0a1caa795ed7799f0c6faa1cb91b4669d4da (diff)
VPP-1102: fix dangling references in RPC handling
Queue RPC calls and send them from the main dispatch loop. As things stood,
if the vpp main input queue filled, worker threads could enter a
barrier-sync spin-wait in the middle of processing a frame. If thread
0 decided to recreate worker thread data structures, the worker thread(s)
could easily crash.
Legislate the problem out of existence by enqueueing RPC messages only
from the main dispatch loop. At that point, doing a barrier-sync wait
is perfectly OK.
Change-Id: I18da3e44bb1f29a63fe5f30cf11de732ecfd5bf7
Signed-off-by: Dave Barach <dave@barachs.net>
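
To make the fix concrete, here is a minimal standalone sketch of the queue-and-flush pattern the patch adopts: work requested in the middle of a frame is only recorded, and the top of the dispatch loop, where blocking or barrier-syncing is safe, actually dispatches it. The names below (pending_rpc_t, request_rpc, flush_pending_rpcs) are illustrative only and are not VPP APIs; the real code uses a per-thread vm->pending_rpc_requests vector and vl_api_send_pending_rpc_requests(), as shown in the diff that follows.

/*
 * Minimal standalone sketch of the queue-and-flush pattern (not VPP code).
 * A single thread is shown; in VPP each thread keeps its own
 * vm->pending_rpc_requests vector and flushes it from its own dispatch loop.
 */
#include <stdio.h>

typedef void (*rpc_fn_t) (void *arg);

typedef struct
{
  rpc_fn_t fn;
  void *arg;
} pending_rpc_t;

/* Per-thread vector of deferred requests (fixed size here for brevity;
   VPP uses a growable vector via vec_add1 / _vec_len). */
static pending_rpc_t pending[64];
static int n_pending;

/* May be called in the middle of frame processing: it only records the
   request, so it can never block or spin-wait at a barrier. */
static void
request_rpc (rpc_fn_t fn, void *arg)
{
  if (n_pending < 64)
    {
      pending[n_pending].fn = fn;
      pending[n_pending].arg = arg;
      n_pending++;
    }
}

/* Called only at the top of the dispatch loop, where waiting on a full
   input queue (or at a barrier) is safe. */
static void
flush_pending_rpcs (void)
{
  int i;

  for (i = 0; i < n_pending; i++)
    pending[i].fn (pending[i].arg);
  n_pending = 0;
}

static void
hello (void *arg)
{
  printf ("deferred rpc: %s\n", (char *) arg);
}

int
main (void)
{
  request_rpc (hello, "queued during a frame, sent from the loop");
  flush_pending_rpcs ();	/* one dispatch-loop iteration */
  return 0;
}
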
-rw-r--r--   src/vlib/main.c               | 13
-rw-r--r--   src/vlib/main.h               |  3
-rw-r--r--   src/vlib/threads.c            |  3
-rw-r--r--   src/vlibmemory/api_common.h   |  1
-rw-r--r--   src/vlibmemory/memory_vlib.c  | 66
5 files changed, 59 insertions, 27 deletions
diff --git a/src/vlib/main.c b/src/vlib/main.c
index fd9937fe3e6..d32ca7b8ea2 100644
--- a/src/vlib/main.c
+++ b/src/vlib/main.c
@@ -1423,6 +1423,13 @@ dispatch_suspended_process (vlib_main_t * vm,
   return t;
 }
 
+void vl_api_send_pending_rpc_requests (vlib_main_t *) __attribute__ ((weak));
+void
+vl_api_send_pending_rpc_requests (vlib_main_t * vm)
+{
+}
+
+
 static_always_inline void
 vlib_main_or_worker_loop (vlib_main_t * vm, int is_main)
 {
@@ -1475,6 +1482,9 @@ vlib_main_or_worker_loop (vlib_main_t * vm, int is_main)
     {
       vlib_node_runtime_t *n;
 
+      if (PREDICT_FALSE (_vec_len (vm->pending_rpc_requests) > 0))
+        vl_api_send_pending_rpc_requests (vm);
+
       if (!is_main)
         {
           vlib_worker_thread_barrier_check ();
@@ -1742,6 +1752,9 @@ vlib_main (vlib_main_t * volatile vm, unformat_input_t * input)
                          10e-6 /* timer period 10us */ ,
                          ~0 /* max expirations per call */ );
 
+  vec_validate (vm->pending_rpc_requests, 0);
+  _vec_len (vm->pending_rpc_requests) = 0;
+
   switch (clib_setjmp (&vm->main_loop_exit, VLIB_MAIN_LOOP_EXIT_NONE))
     {
     case VLIB_MAIN_LOOP_EXIT_NONE:
diff --git a/src/vlib/main.h b/src/vlib/main.h
index 4288d6f0388..28412e823a9 100644
--- a/src/vlib/main.h
+++ b/src/vlib/main.h
@@ -210,6 +210,9 @@ typedef struct vlib_main_t
   /* Earliest barrier can be closed again */
   f64 barrier_no_close_before;
 
+  /* Vector of pending RPC requests */
+  uword *pending_rpc_requests;
+
 } vlib_main_t;
 
 /* Global main structure. */
diff --git a/src/vlib/threads.c b/src/vlib/threads.c
index be8daa64c0a..3edf1ebbc62 100644
--- a/src/vlib/threads.c
+++ b/src/vlib/threads.c
@@ -830,6 +830,9 @@ start_workers (vlib_main_t * vm)
           vm_clone->mbuf_alloc_list = 0;
           vm_clone->init_functions_called =
             hash_create (0, /* value bytes */ 0);
+          vm_clone->pending_rpc_requests = 0;
+          vec_validate (vm_clone->pending_rpc_requests, 0);
+          _vec_len (vm_clone->pending_rpc_requests) = 0;
           memset (&vm_clone->random_buffer, 0,
                   sizeof (vm_clone->random_buffer));
diff --git a/src/vlibmemory/api_common.h b/src/vlibmemory/api_common.h
index 63a7e5e4188..2080a4bb18e 100644
--- a/src/vlibmemory/api_common.h
+++ b/src/vlibmemory/api_common.h
@@ -132,6 +132,7 @@ u32 vl_api_memclnt_create_internal (char *, unix_shared_memory_queue_t *);
 void vl_init_shmem (svm_region_t * vlib_rp, int is_vlib,
                     int is_private_region);
 void vl_client_install_client_message_handlers (void);
+void vl_api_send_pending_rpc_requests (vlib_main_t * vm);
 
 /* API messages over sockets */
diff --git a/src/vlibmemory/memory_vlib.c b/src/vlibmemory/memory_vlib.c
index 7ae7867f05e..1c099ed0a3e 100644
--- a/src/vlibmemory/memory_vlib.c
+++ b/src/vlibmemory/memory_vlib.c
@@ -1839,19 +1839,51 @@ vl_api_rpc_call_reply_t_handler (vl_api_rpc_call_reply_t * mp)
   clib_warning ("unimplemented");
 }
 
+void
+vl_api_send_pending_rpc_requests (vlib_main_t * vm)
+{
+  api_main_t *am = &api_main;
+  vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
+  unix_shared_memory_queue_t *q;
+  int i;
+
+  /*
+   * Use the "normal" control-plane mechanism for the main thread.
+   * Well, almost. if the main input queue is full, we cannot
+   * block. Otherwise, we can expect a barrier sync timeout.
+   */
+  q = shmem_hdr->vl_input_queue;
+
+  for (i = 0; i < vec_len (vm->pending_rpc_requests); i++)
+    {
+      while (pthread_mutex_trylock (&q->mutex))
+        vlib_worker_thread_barrier_check ();
+
+      while (PREDICT_FALSE (unix_shared_memory_queue_is_full (q)))
+        {
+          pthread_mutex_unlock (&q->mutex);
+          vlib_worker_thread_barrier_check ();
+          while (pthread_mutex_trylock (&q->mutex))
+            vlib_worker_thread_barrier_check ();
+        }
+
+      vl_msg_api_send_shmem_nolock (q, (u8 *) (vm->pending_rpc_requests + i));
+
+      pthread_mutex_unlock (&q->mutex);
+    }
+  _vec_len (vm->pending_rpc_requests) = 0;
+}
+
 always_inline void
 vl_api_rpc_call_main_thread_inline (void *fp, u8 * data, u32 data_length,
                                     u8 force_rpc)
 {
   vl_api_rpc_call_t *mp;
-  api_main_t *am = &api_main;
-  vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
-  unix_shared_memory_queue_t *q;
+  vlib_main_t *vm = vlib_get_main ();
 
-  /* Main thread: call the function directly */
+  /* Main thread and not a forced RPC: call the function directly */
   if ((force_rpc == 0) && (vlib_get_thread_index () == 0))
     {
-      vlib_main_t *vm = vlib_get_main ();
       void (*call_fp) (void *);
 
       vlib_worker_thread_barrier_sync (vm);
@@ -1863,7 +1895,7 @@ vl_api_rpc_call_main_thread_inline (void *fp, u8 * data, u32 data_length,
       return;
     }
 
-  /* Any other thread, actually do an RPC call... */
+  /* Otherwise, actually do an RPC */
   mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + data_length);
 
   memset (mp, 0, sizeof (*mp));
@@ -1872,27 +1904,7 @@ vl_api_rpc_call_main_thread_inline (void *fp, u8 * data, u32 data_length,
   mp->function = pointer_to_uword (fp);
   mp->need_barrier_sync = 1;
 
-  /*
-   * Use the "normal" control-plane mechanism for the main thread.
-   * Well, almost. if the main input queue is full, we cannot
-   * block. Otherwise, we can expect a barrier sync timeout.
-   */
-  q = shmem_hdr->vl_input_queue;
-
-  while (pthread_mutex_trylock (&q->mutex))
-    vlib_worker_thread_barrier_check ();
-
-  while (PREDICT_FALSE (unix_shared_memory_queue_is_full (q)))
-    {
-      pthread_mutex_unlock (&q->mutex);
-      vlib_worker_thread_barrier_check ();
-      while (pthread_mutex_trylock (&q->mutex))
-        vlib_worker_thread_barrier_check ();
-    }
-
-  vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
-
-  pthread_mutex_unlock (&q->mutex);
+  vec_add1 (vm->pending_rpc_requests, (uword) mp);
 }
 
 /*
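
For reference, a hedged usage sketch of the unchanged caller-side API: a worker thread hands work to the main thread with vl_api_rpc_call_main_thread(), and with this patch the resulting message is only queued on the calling thread and sent later from its dispatch loop. Everything here other than vl_api_rpc_call_main_thread() itself (the argument struct, callback, helper names, and the assumed prototype void vl_api_rpc_call_main_thread (void *fp, u8 * data, u32 data_length)) is hypothetical and assumes a VPP source tree of this era.

/*
 * Hedged usage sketch, not part of this patch: deferring work from a
 * worker thread to the main thread via the RPC mechanism.
 */
#include <vlib/vlib.h>
#include <vlibmemory/api.h>

typedef struct
{
  u32 sw_if_index;
  u32 packets;
} my_rpc_args_t;

/* Runs on the main thread, under barrier sync (the RPC message sets
   need_barrier_sync = 1); 'arg' points at a copy of the caller's blob. */
static void
my_rpc_callback (void *arg)
{
  my_rpc_args_t *a = arg;

  clib_warning ("sw_if_index %u: %u packets", a->sw_if_index, a->packets);
}

static void
worker_report (u32 sw_if_index, u32 packets)
{
  my_rpc_args_t args = {.sw_if_index = sw_if_index,.packets = packets };

  /* The data blob is copied into the allocated API message, so a stack
     variable is fine.  With this patch the message is merely queued on the
     calling thread and sent later from its main dispatch loop. */
  vl_api_rpc_call_main_thread (my_rpc_callback, (u8 *) & args, sizeof (args));
}
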