diff options
author | Florin Coras <fcoras@cisco.com> | 2018-11-26 17:01:36 -0800 |
---|---|---|
committer | Damjan Marion <dmarion@me.com> | 2018-11-29 11:27:18 +0000 |
commit | 47c40e2d944c9a29677d0542103207ba8372b66a (patch) | |
tree | e43cf5361213d119df53fb16c15cf0dca550f2d4 /src/vcl/vcl_private.c | |
parent | 743ee3e12531ec3c6ba2e2add694dde3361b0e03 (diff) |
vcl: basic support for apps that fork
- intercept fork and register a new worker with vpp
- share sessions between parent and forked child
- keep binary api state per worker
Change-Id: Ib177517d661724fa042bd2d98d18e777056352a2
Signed-off-by: Florin Coras <fcoras@cisco.com>
Diffstat (limited to 'src/vcl/vcl_private.c')
-rw-r--r-- | src/vcl/vcl_private.c | 164 |
1 file changed, 151 insertions, 13 deletions
diff --git a/src/vcl/vcl_private.c b/src/vcl/vcl_private.c index d159a49a89c..86dccfe1cc9 100644 --- a/src/vcl/vcl_private.c +++ b/src/vcl/vcl_private.c @@ -220,12 +220,12 @@ vcl_worker_free (vcl_worker_t * wrk) pool_put (vcm->workers, wrk); } -static void -vcl_worker_cleanup (void *arg) +void +vcl_worker_cleanup (void) { vcl_worker_t *wrk = vcl_worker_get_current (); - VDBG (0, "cleaning up worker %u", wrk->wrk_index); + clib_spinlock_lock (&vcm->workers_lock); vcl_send_app_worker_add_del (0 /* is_add */ ); close (wrk->mqs_epfd); hash_free (wrk->session_index_by_vpp_handles); @@ -235,6 +235,14 @@ vcl_worker_cleanup (void *arg) vec_free (wrk->mq_msg_vector); vcl_set_worker_index (~0); vcl_worker_free (wrk); + clib_spinlock_unlock (&vcm->workers_lock); + VDBG (0, "cleaned up worker %u", wrk->wrk_index); +} + +static void +vcl_worker_cleanup_cb (void *arg) +{ + vcl_worker_cleanup (); } vcl_worker_t * @@ -255,6 +263,8 @@ vcl_worker_alloc_and_init () clib_spinlock_lock (&vcm->workers_lock); wrk = vcl_worker_alloc (); vcl_set_worker_index (wrk->wrk_index); + wrk->thread_id = pthread_self (); + wrk->current_pid = getpid (); wrk->mqs_epfd = -1; if (vcm->cfg.use_mq_eventfd) @@ -263,7 +273,7 @@ vcl_worker_alloc_and_init () if (wrk->mqs_epfd < 0) { clib_unix_warning ("epoll_create() returned"); - return 0; + goto done; } } @@ -276,32 +286,160 @@ vcl_worker_alloc_and_init () vec_reset_length (wrk->mq_msg_vector); vec_validate (wrk->unhandled_evts_vector, 128); vec_reset_length (wrk->unhandled_evts_vector); + clib_spinlock_unlock (&vcm->workers_lock); - if (wrk->wrk_index == 0) - { - clib_spinlock_unlock (&vcm->workers_lock); - return wrk; - } +done: + return wrk; +} + +int +vcl_worker_register_with_vpp (void) +{ + vcl_worker_t *wrk = vcl_worker_get_current (); + + clib_spinlock_lock (&vcm->workers_lock); vcm->app_state = STATE_APP_ADDING_WORKER; vcl_send_app_worker_add_del (1 /* is_add */ ); if (vcl_wait_for_app_state_change (STATE_APP_READY)) { clib_warning ("failed to add 
worker to vpp"); - return 0; + return -1; } - if (pthread_key_create (&vcl_worker_stop_key, vcl_worker_cleanup)) + if (pthread_key_create (&vcl_worker_stop_key, vcl_worker_cleanup_cb)) clib_warning ("failed to add pthread cleanup function"); if (pthread_setspecific (vcl_worker_stop_key, &wrk->thread_id)) clib_warning ("failed to setup key value"); - wrk->thread_id = pthread_self (); clib_spinlock_unlock (&vcm->workers_lock); VDBG (0, "added worker %u", wrk->wrk_index); + return 0; +} - return wrk; +int +vcl_worker_set_bapi (void) +{ + vcl_worker_t *wrk = vcl_worker_get_current (); + int i; + + /* Find the first worker with the same pid */ + for (i = 0; i < vec_len (vcm->workers); i++) + { + if (i == wrk->wrk_index) + continue; + if (vcm->workers[i].current_pid == wrk->current_pid) + { + wrk->vl_input_queue = vcm->workers[i].vl_input_queue; + wrk->my_client_index = vcm->workers[i].my_client_index; + return 0; + } + } + return -1; +} + +vcl_shared_session_t * +vcl_shared_session_alloc (void) +{ + vcl_shared_session_t *ss; + pool_get (vcm->shared_sessions, ss); + memset (ss, 0, sizeof (*ss)); + ss->ss_index = ss - vcm->shared_sessions; + return ss; +} + +vcl_shared_session_t * +vcl_shared_session_get (u32 ss_index) +{ + if (pool_is_free_index (vcm->shared_sessions, ss_index)) + return 0; + return pool_elt_at_index (vcm->shared_sessions, ss_index); +} + +void +vcl_shared_session_free (vcl_shared_session_t * ss) +{ + pool_put (vcm->shared_sessions, ss); +} + +void +vcl_worker_share_session (vcl_worker_t * parent, vcl_worker_t * wrk, + vcl_session_t * new_s) +{ + vcl_shared_session_t *ss; + vcl_session_t *s; + + s = vcl_session_get (parent, new_s->session_index); + if (s->shared_index == ~0) + { + ss = vcl_shared_session_alloc (); + vec_add1 (ss->workers, parent->wrk_index); + s->shared_index = ss->ss_index; + } + else + { + ss = vcl_shared_session_get (s->shared_index); + } + new_s->shared_index = ss->ss_index; + vec_add1 (ss->workers, wrk->wrk_index); +} + +int 
+vcl_worker_unshare_session (vcl_worker_t * wrk, vcl_session_t * s) +{ + vcl_shared_session_t *ss; + int i; + + ss = vcl_shared_session_get (s->shared_index); + for (i = 0; i < vec_len (ss->workers); i++) + { + if (ss->workers[i] == wrk->wrk_index) + { + vec_del1 (ss->workers, i); + break; + } + } + + if (vec_len (ss->workers) == 0) + { + vcl_shared_session_free (ss); + return 1; + } + + return 0; +} + +void +vcl_worker_share_sessions (u32 parent_wrk_index) +{ + vcl_worker_t *parent_wrk, *wrk; + vcl_session_t *new_s; + + parent_wrk = vcl_worker_get (parent_wrk_index); + if (!parent_wrk->sessions) + return; + + wrk = vcl_worker_get_current (); + wrk->sessions = pool_dup (parent_wrk->sessions); + wrk->session_index_by_vpp_handles = + hash_dup (parent_wrk->session_index_by_vpp_handles); + + /* *INDENT-OFF* */ + pool_foreach (new_s, wrk->sessions, ({ + vcl_worker_share_session (parent_wrk, wrk, new_s); + })); + /* *INDENT-ON* */ +} + +int +vcl_session_get_refcnt (vcl_session_t * s) +{ + vcl_shared_session_t *ss; + ss = vcl_shared_session_get (s->shared_index); + if (ss) + return vec_len (ss->workers); + return 0; } /* |