path: root/src/vcl/vppcom.c
author    Florin Coras <fcoras@cisco.com>    2019-01-02 19:31:22 -0800
committer Dave Barach <openvpp@barachs.net>  2019-01-05 21:53:16 +0000
commit    30e79c2e388a98160a3660f4f03103890c9b1b7c (patch)
tree      0b108f43d95e28304924cc6e1d43900b3046c8de /src/vcl/vppcom.c
parent    3c1cf2c1716f436e5da4a106dd2b9a3df5d3a4a3 (diff)
vcl/session: add api for changing session app worker
In case of multi-process apps, after forking, the parent may decide to close part or all of the sessions it shares with the child. Because those sessions have fifos allocated in the parent's segment manager, they must be moved to the child's segment manager.

Change-Id: I85b4c8c8545005724023ee14043647719cef61dd
Signed-off-by: Florin Coras <fcoras@cisco.com>
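As a usage illustration (not part of this commit), a minimal sketch of a forking app that exercises this path: assuming the standard vppcom calls and eliding bind/connect and error handling, the child keeps using a handle inherited from the parent once the atfork handlers in this patch run the worker-update exchange.

/* Hedged sketch, not from this commit: a multi-process VCL app. After
 * fork(), the atfork handlers added below (vcl_app_pre_fork /
 * vcl_app_fork_child_handler) register the child as a new worker and
 * trigger the WORKER_UPDATE exchange that moves the session's fifos
 * into the child's segment manager. */
#include <unistd.h>
#include <vcl/vppcom.h>

int
main (void)
{
  char buf[64];
  int sh;

  vppcom_app_create ("forking-app");
  sh = vppcom_session_create (VPPCOM_PROTO_TCP, 0 /* is_nonblocking */);
  /* ... connect or accept the session in the parent ... */

  if (fork () == 0)
    {
      /* Child: the inherited handle stays usable because its fifos
       * were re-allocated in this worker's segment manager. */
      vppcom_session_read (sh, buf, sizeof (buf));
      _exit (0);
    }

  /* Parent may now close its side without invalidating the child's. */
  vppcom_session_close (sh);
  vppcom_app_destroy ();
  return 0;
}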
Diffstat (limited to 'src/vcl/vppcom.c')
-rw-r--r--  src/vcl/vppcom.c  206
1 file changed, 171 insertions(+), 35 deletions(-)
diff --git a/src/vcl/vppcom.c b/src/vcl/vppcom.c
index 70afdce7832..6a1bf1cfcc7 100644
--- a/src/vcl/vppcom.c
+++ b/src/vcl/vppcom.c
@@ -44,6 +44,22 @@ vcl_wait_for_segment (u64 segment_handle)
return 1;
}
+static inline int
+vcl_mq_dequeue_batch (vcl_worker_t * wrk, svm_msg_q_t * mq)
+{
+ svm_msg_q_msg_t *msg;
+ u32 n_msgs;
+ int i;
+
+ n_msgs = svm_msg_q_size (mq);
+ for (i = 0; i < n_msgs; i++)
+ {
+ vec_add2 (wrk->mq_msg_vector, msg, 1);
+ svm_msg_q_sub_w_lock (mq, msg);
+ }
+ return n_msgs;
+}
+
const char *
vppcom_session_state_str (session_state_t state)
{
@@ -175,15 +191,6 @@ format_ip46_address (u8 * s, va_list * args)
*/
-static svm_msg_q_t *
-vcl_session_vpp_evt_q (vcl_worker_t * wrk, vcl_session_t * s)
-{
- if (vcl_session_is_ct (s))
- return wrk->vpp_event_queues[0];
- else
- return wrk->vpp_event_queues[s->vpp_thread_index];
-}
-
static void
vcl_send_session_accepted_reply (svm_msg_q_t * mq, u32 context,
session_handle_t handle, int retval)
@@ -227,6 +234,24 @@ vcl_send_session_reset_reply (svm_msg_q_t * mq, u32 context,
app_send_ctrl_evt_to_vpp (mq, app_evt);
}
+void
+vcl_send_session_worker_update (vcl_worker_t * wrk, vcl_session_t * s,
+ u32 wrk_index)
+{
+ app_session_evt_t _app_evt, *app_evt = &_app_evt;
+ session_worker_update_msg_t *mp;
+ svm_msg_q_t *mq;
+
+ mq = vcl_session_vpp_evt_q (wrk, s);
+ app_alloc_ctrl_evt_to_vpp (mq, app_evt, SESSION_CTRL_EVT_WORKER_UPDATE);
+ mp = (session_worker_update_msg_t *) app_evt->evt->data;
+ mp->client_index = wrk->my_client_index;
+ mp->handle = s->vpp_handle;
+ mp->req_wrk_index = wrk->vpp_wrk_index;
+ mp->wrk_index = wrk_index;
+ app_send_ctrl_evt_to_vpp (mq, app_evt);
+}
+
static u32
vcl_session_accepted_handler (vcl_worker_t * wrk, session_accepted_msg_t * mp)
{
@@ -540,7 +565,6 @@ vcl_session_disconnected_handler (vcl_worker_t * wrk,
/* Caught a disconnect before actually accepting the session */
if (session->session_state == STATE_LISTEN)
{
-
if (!vcl_flag_accepted_session (session, msg->handle,
VCL_ACCEPTED_F_CLOSED))
VDBG (0, "session was not accepted!");
@@ -551,6 +575,59 @@ vcl_session_disconnected_handler (vcl_worker_t * wrk,
return session;
}
+static void
+vcl_session_req_worker_update_handler (vcl_worker_t * wrk, void *data)
+{
+ session_req_worker_update_msg_t *msg;
+ vcl_session_t *s;
+
+ msg = (session_req_worker_update_msg_t *) data;
+ s = vcl_session_get_w_vpp_handle (wrk, msg->session_handle);
+ if (!s)
+ return;
+
+ vec_add1 (wrk->pending_session_wrk_updates, s->session_index);
+}
+
+static void
+vcl_session_worker_update_reply_handler (vcl_worker_t * wrk, void *data)
+{
+ session_worker_update_reply_msg_t *msg;
+ vcl_session_t *s;
+
+ msg = (session_worker_update_reply_msg_t *) data;
+ s = vcl_session_get_w_vpp_handle (wrk, msg->handle);
+ if (!s)
+ {
+ VDBG (0, "unknown handle 0x%llx", msg->handle);
+ return;
+ }
+ if (vcl_wait_for_segment (msg->segment_handle))
+ {
+ clib_warning ("segment for session %u couldn't be mounted!",
+ s->session_index);
+ return;
+ }
+ s->rx_fifo = uword_to_pointer (msg->rx_fifo, svm_fifo_t *);
+ s->tx_fifo = uword_to_pointer (msg->tx_fifo, svm_fifo_t *);
+
+ s->rx_fifo->client_session_index = s->session_index;
+ s->tx_fifo->client_session_index = s->session_index;
+ s->rx_fifo->client_thread_index = wrk->wrk_index;
+ s->tx_fifo->client_thread_index = wrk->wrk_index;
+ s->session_state = STATE_UPDATED;
+
+ if (s->shared_index != VCL_INVALID_SESSION_INDEX)
+ {
+ vcl_shared_session_t *ss;
+ ss = vcl_shared_session_get (s->shared_index);
+ if (vec_len (ss->workers) > 1)
+ VDBG (0, "workers need to be updated");
+ }
+ VDBG (0, "session %u[0x%llx] moved to worker %u", s->session_index,
+ s->vpp_handle, wrk->wrk_index);
+}
+
static int
vcl_handle_mq_event (vcl_worker_t * wrk, session_event_t * e)
{
@@ -587,13 +664,19 @@ vcl_handle_mq_event (vcl_worker_t * wrk, session_event_t * e)
case SESSION_CTRL_EVT_BOUND:
vcl_session_bound_handler (wrk, (session_bound_msg_t *) e->data);
break;
+ case SESSION_CTRL_EVT_REQ_WORKER_UPDATE:
+ vcl_session_req_worker_update_handler (wrk, e->data);
+ break;
+ case SESSION_CTRL_EVT_WORKER_UPDATE_REPLY:
+ vcl_session_worker_update_reply_handler (wrk, e->data);
+ break;
default:
clib_warning ("unhandled %u", e->event_type);
}
return VPPCOM_OK;
}
-static inline int
+static int
vppcom_wait_for_session_state_change (u32 session_index,
session_state_t state,
f64 wait_for_time)
@@ -638,6 +721,52 @@ vppcom_wait_for_session_state_change (u32 session_index,
return VPPCOM_ETIMEDOUT;
}
+static void
+vcl_handle_pending_wrk_updates (vcl_worker_t * wrk)
+{
+ session_state_t state;
+ vcl_session_t *s;
+ u32 *sip;
+
+ if (PREDICT_TRUE (vec_len (wrk->pending_session_wrk_updates) == 0))
+ return;
+
+ vec_foreach (sip, wrk->pending_session_wrk_updates)
+ {
+ s = vcl_session_get (wrk, *sip);
+ vcl_send_session_worker_update (wrk, s, wrk->wrk_index);
+ state = s->session_state;
+ vppcom_wait_for_session_state_change (s->session_index, STATE_UPDATED, 5);
+ s->session_state = state;
+ }
+ vec_reset_length (wrk->pending_session_wrk_updates);
+}
+
+static void
+vcl_flush_mq_events (void)
+{
+ vcl_worker_t *wrk = vcl_worker_get_current ();
+ svm_msg_q_msg_t *msg;
+ session_event_t *e;
+ svm_msg_q_t *mq;
+ int i;
+
+ mq = wrk->app_event_queue;
+ svm_msg_q_lock (mq);
+ vcl_mq_dequeue_batch (wrk, mq);
+ svm_msg_q_unlock (mq);
+
+ for (i = 0; i < vec_len (wrk->mq_msg_vector); i++)
+ {
+ msg = vec_elt_at_index (wrk->mq_msg_vector, i);
+ e = svm_msg_q_msg_data (mq, msg);
+ vcl_handle_mq_event (wrk, e);
+ svm_msg_q_free_msg (mq, msg);
+ }
+ vec_reset_length (wrk->mq_msg_vector);
+ vcl_handle_pending_wrk_updates (wrk);
+}
+
static int
vppcom_app_session_enable (void)
{
@@ -845,13 +974,14 @@ static void
vcl_app_pre_fork (void)
{
vcl_incercept_sigchld ();
+ vcl_flush_mq_events ();
}
static void
vcl_app_fork_child_handler (void)
{
+ vcl_worker_t *parent_wrk, *wrk;
int rv, parent_wrk_index;
- vcl_worker_t *parent_wrk;
u8 *child_name;
parent_wrk_index = vcl_get_worker_index ();
@@ -884,6 +1014,8 @@ vcl_app_fork_child_handler (void)
*/
vcl_worker_register_with_vpp ();
parent_wrk = vcl_worker_get (parent_wrk_index);
+ wrk = vcl_worker_get_current ();
+ wrk->vpp_event_queues = vec_dup (parent_wrk->vpp_event_queues);
vcl_worker_share_sessions (parent_wrk);
parent_wrk->forked_child = vcl_get_worker_index ();
@@ -1097,7 +1229,11 @@ vppcom_session_close (uint32_t session_handle)
}
if (!do_disconnect)
- goto cleanup;
+ {
+ VDBG (0, "session handle %u [0x%llx] disconnect skipped",
+ session_handle, vpp_handle);
+ goto cleanup;
+ }
if (state & STATE_LISTEN)
{
@@ -1143,10 +1279,7 @@ cleanup:
vcl_ct_registration_unlock (wrk);
}
- if (vpp_handle != ~0)
- {
- vcl_session_table_del_vpp_handle (wrk, vpp_handle);
- }
+ vcl_session_table_del_vpp_handle (wrk, vpp_handle);
vcl_session_free (wrk, session);
VDBG (0, "session handle %u [0x%llx] removed", session_handle, vpp_handle);
@@ -1948,22 +2081,6 @@ vppcom_session_write_ready (vcl_session_t * session)
return svm_fifo_max_enqueue (session->tx_fifo);
}
-static inline int
-vcl_mq_dequeue_batch (vcl_worker_t * wrk, svm_msg_q_t * mq)
-{
- svm_msg_q_msg_t *msg;
- u32 n_msgs;
- int i;
-
- n_msgs = svm_msg_q_size (mq);
- for (i = 0; i < n_msgs; i++)
- {
- vec_add2 (wrk->mq_msg_vector, msg, 1);
- svm_msg_q_sub_w_lock (mq, msg);
- }
- return n_msgs;
-}
-
#define vcl_fifo_rx_evt_valid_or_break(_fifo) \
if (PREDICT_FALSE (svm_fifo_is_empty (_fifo))) \
{ \
@@ -2067,6 +2184,12 @@ vcl_select_handle_mq_event (vcl_worker_t * wrk, session_event_t * e,
*bits_set += 1;
}
break;
+ case SESSION_CTRL_EVT_WORKER_UPDATE_REPLY:
+ vcl_session_worker_update_reply_handler (wrk, e->data);
+ break;
+ case SESSION_CTRL_EVT_REQ_WORKER_UPDATE:
+ vcl_session_req_worker_update_handler (wrk, e->data);
+ break;
default:
clib_warning ("unhandled: %u", e->event_type);
break;
@@ -2122,6 +2245,7 @@ vcl_select_handle_mq (vcl_worker_t * wrk, svm_msg_q_t * mq,
svm_msg_q_free_msg (mq, msg);
}
vec_reset_length (wrk->mq_msg_vector);
+ vcl_handle_pending_wrk_updates (wrk);
return *bits_set;
}
@@ -2676,6 +2800,12 @@ vcl_epoll_wait_handle_mq_event (vcl_worker_t * wrk, session_event_t * e,
session_evt_data = session->vep.ev.data.u64;
session_events = session->vep.ev.events;
break;
+ case SESSION_CTRL_EVT_REQ_WORKER_UPDATE:
+ vcl_session_req_worker_update_handler (wrk, e->data);
+ break;
+ case SESSION_CTRL_EVT_WORKER_UPDATE_REPLY:
+ vcl_session_worker_update_reply_handler (wrk, e->data);
+ break;
default:
VDBG (0, "unhandled: %u", e->event_type);
break;
@@ -2741,7 +2871,7 @@ handle_dequeued:
svm_msg_q_free_msg (mq, msg);
}
vec_reset_length (wrk->mq_msg_vector);
-
+ vcl_handle_pending_wrk_updates (wrk);
return *num_ev;
}
@@ -3580,12 +3710,18 @@ vppcom_mq_epoll_fd (void)
}
int
-vppcom_session_index (uint32_t session_handle)
+vppcom_session_index (vcl_session_handle_t session_handle)
{
return session_handle & 0xFFFFFF;
}
int
+vppcom_session_worker (vcl_session_handle_t session_handle)
+{
+ return session_handle >> 24;
+}
+
+int
vppcom_session_handle (uint32_t session_index)
{
return (vcl_get_worker_index () << 24) | session_index;
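For reference, the 8/24-bit handle split these accessors rely on round-trips cleanly. A small self-contained check (assumption: worker index < 256, session index < 2^24):

/* Sketch of the handle layout used by vppcom_session_handle /
 * vppcom_session_index / vppcom_session_worker: worker index in the
 * top 8 bits, session index in the low 24. */
#include <assert.h>
#include <stdint.h>

static uint32_t
encode_handle (uint32_t wrk_index, uint32_t session_index)
{
  return (wrk_index << 24) | (session_index & 0xFFFFFF);
}

int
main (void)
{
  uint32_t h = encode_handle (2, 42);
  assert ((h & 0xFFFFFF) == 42);	/* matches vppcom_session_index */
  assert ((h >> 24) == 2);		/* matches vppcom_session_worker */
  return 0;
}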