aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorFilip Tehlar <ftehlar@cisco.com>2022-02-02 17:38:20 +0000
committerFlorin Coras <florin.coras@gmail.com>2022-02-17 17:38:47 +0000
commit8ccc6b350703d3390633636d2b1c2f578f37cb21 (patch)
tree412975bfc85ed2857e7e2bddced676e76f0c97af
parent0cbc4bd272ac7e24208f741c4fb1fcb1ce5e0e5d (diff)
vcl: add support for reconnect
Supported only when eventfd option is enabled. Type: feature Change-Id: Ic9d6e38604e978f7bc8e54d74fe9b8f3fc53622d Signed-off-by: Filip Tehlar <ftehlar@cisco.com>
-rw-r--r--src/plugins/hs_apps/vcl/vcl_test_server.c11
-rw-r--r--src/vcl/vcl_private.c70
-rw-r--r--src/vcl/vcl_private.h2
-rw-r--r--src/vcl/vcl_sapi.c4
-rw-r--r--src/vcl/vppcom.c52
5 files changed, 133 insertions, 6 deletions
diff --git a/src/plugins/hs_apps/vcl/vcl_test_server.c b/src/plugins/hs_apps/vcl/vcl_test_server.c
index 2abb9924e70..1b3b759bfcd 100644
--- a/src/plugins/hs_apps/vcl/vcl_test_server.c
+++ b/src/plugins/hs_apps/vcl/vcl_test_server.c
@@ -564,6 +564,7 @@ vts_handle_ctrl_cfg (vcl_test_server_worker_t *wrk, vcl_test_cfg_t *rx_cfg,
wrk->nfds--;
if (wrk->nfds)
vts_wrk_cleanup_all (wrk);
+ vcl_server_main.ctrl = 0;
break;
default:
@@ -677,13 +678,13 @@ vts_worker_loop (void *arg)
*/
if (ep_evts[i].events & (EPOLLHUP | EPOLLRDHUP))
{
- vts_session_cleanup (conn);
- wrk->nfds--;
- if (!wrk->nfds)
+ if (conn == vsm->ctrl)
{
- vtinf ("All client connections closed\n");
- goto done;
+ vtinf ("ctrl session went away");
+ vsm->ctrl = 0;
}
+ vts_session_cleanup (conn);
+ wrk->nfds--;
continue;
}
diff --git a/src/vcl/vcl_private.c b/src/vcl/vcl_private.c
index d12d2d31a8f..b6ef815b53d 100644
--- a/src/vcl/vcl_private.c
+++ b/src/vcl/vcl_private.c
@@ -38,6 +38,25 @@ vcl_mq_evt_conn_get (vcl_worker_t * wrk, u32 mq_conn_idx)
return pool_elt_at_index (wrk->mq_evt_conns, mq_conn_idx);
}
+/* Add unix socket to epoll.
+ * Used only to get a notification on socket close
+ * We can't use eventfd because we don't get notifications on those fds
+ */
+static int
+vcl_mq_epoll_add_api_sock (vcl_worker_t *wrk)
+{
+ clib_socket_t *cs = &wrk->app_api_sock;
+ struct epoll_event e = { 0 };
+ int rv;
+
+ e.data.u32 = ~0;
+ rv = epoll_ctl (wrk->mqs_epfd, EPOLL_CTL_ADD, cs->fd, &e);
+ if (rv != EEXIST && rv < 0)
+ return -1;
+
+ return 0;
+}
+
int
vcl_mq_epoll_add_evfd (vcl_worker_t * wrk, svm_msg_q_t * mq)
{
@@ -64,6 +83,12 @@ vcl_mq_epoll_add_evfd (vcl_worker_t * wrk, svm_msg_q_t * mq)
return -1;
}
+ if (vcl_mq_epoll_add_api_sock (wrk))
+ {
+ VDBG (0, "failed to add mq socket to mq epoll fd");
+ return -1;
+ }
+
return mqc_index;
}
@@ -158,6 +183,33 @@ vcl_worker_cleanup_cb (void *arg)
VDBG (0, "cleaned up worker %u", wrk_index);
}
+void
+vcl_worker_detach_sessions (vcl_worker_t *wrk)
+{
+ session_event_t *e;
+ vcl_session_t *s;
+
+ close (wrk->app_api_sock.fd);
+ pool_foreach (s, wrk->sessions)
+ {
+ if (s->session_state == VCL_STATE_LISTEN)
+ {
+ s->session_state = VCL_STATE_LISTEN_NO_MQ;
+ continue;
+ }
+ if (s->flags & VCL_SESSION_F_IS_VEP)
+ continue;
+
+ s->session_state = VCL_STATE_DETACHED;
+ vec_add2 (wrk->unhandled_evts_vector, e, 1);
+ e->event_type = SESSION_CTRL_EVT_DISCONNECTED;
+ e->session_index = s->session_index;
+ e->postponed = 1;
+ }
+
+ vcl_segment_detach_all ();
+}
+
vcl_worker_t *
vcl_worker_alloc_and_init ()
{
@@ -410,6 +462,24 @@ vcl_segment_detach (u64 segment_handle)
VDBG (0, "detached segment %u handle %u", segment_index, segment_handle);
}
+void
+vcl_segment_detach_all ()
+{
+ u64 *segs = 0, *seg, key;
+ u32 val;
+
+ clib_rwlock_reader_lock (&vcm->segment_table_lock);
+
+ hash_foreach (key, val, vcm->segment_table, ({ vec_add1 (segs, key); }));
+
+ clib_rwlock_reader_unlock (&vcm->segment_table_lock);
+
+ vec_foreach (seg, segs)
+ vcl_segment_detach (seg[0]);
+
+ vec_free (segs);
+}
+
int
vcl_segment_attach_session (uword segment_handle, uword rxf_offset,
uword txf_offset, uword mq_offset, u32 mq_index,
diff --git a/src/vcl/vcl_private.h b/src/vcl/vcl_private.h
index f163de20125..e4f1c149b2d 100644
--- a/src/vcl/vcl_private.h
+++ b/src/vcl/vcl_private.h
@@ -716,6 +716,7 @@ int vcl_send_worker_rpc (u32 dst_wrk_index, void *data, u32 data_len);
int vcl_segment_attach (u64 segment_handle, char *name,
ssvm_segment_type_t type, int fd);
void vcl_segment_detach (u64 segment_handle);
+void vcl_segment_detach_all ();
void vcl_send_session_unlisten (vcl_worker_t * wrk, vcl_session_t * s);
int vcl_segment_attach_session (uword segment_handle, uword rxf_offset,
@@ -729,6 +730,7 @@ svm_fifo_chunk_t *vcl_segment_alloc_chunk (uword segment_handle,
uword *offset);
int vcl_session_share_fifos (vcl_session_t *s, svm_fifo_t *rxf,
svm_fifo_t *txf);
+void vcl_worker_detach_sessions (vcl_worker_t *wrk);
/*
* VCL Binary API
diff --git a/src/vcl/vcl_sapi.c b/src/vcl/vcl_sapi.c
index 981257ede8d..870a634db8c 100644
--- a/src/vcl/vcl_sapi.c
+++ b/src/vcl/vcl_sapi.c
@@ -30,7 +30,9 @@ vcl_api_connect_app_socket (vcl_worker_t * wrk)
if ((err = clib_socket_init (cs)))
{
- clib_error_report (err);
+ /* don't report the error to avoid a flood of error messages during
+ * reconnect */
+ clib_error_free (err);
rv = -1;
goto done;
}
diff --git a/src/vcl/vppcom.c b/src/vcl/vppcom.c
index 4a6c46b3f3b..0e81749224e 100644
--- a/src/vcl/vppcom.c
+++ b/src/vcl/vppcom.c
@@ -1257,6 +1257,33 @@ vcl_api_attach (void)
}
static void
+vcl_api_retry_attach (vcl_worker_t *wrk)
+{
+ vcl_session_t *s;
+
+ if (vcl_api_attach ())
+ return;
+
+ /* Treat listeners as configuration that needs to be re-added to vpp */
+ pool_foreach (s, wrk->sessions)
+ {
+ if (s->flags & VCL_SESSION_F_IS_VEP)
+ continue;
+ if (s->session_state == VCL_STATE_LISTEN_NO_MQ)
+ vppcom_session_listen (vcl_session_handle (s), 10);
+ else
+ VDBG (0, "internal error: unexpected state %d", s->session_state);
+ }
+}
+
+static void
+vcl_api_handle_disconnect (vcl_worker_t *wrk)
+{
+ wrk->api_client_handle = ~0;
+ vcl_worker_detach_sessions (wrk);
+}
+
+static void
vcl_api_detach (vcl_worker_t * wrk)
{
vcl_send_app_detach (wrk);
@@ -2468,11 +2495,23 @@ vppcom_select_eventfd (vcl_worker_t * wrk, int n_bits,
int n_mq_evts, i;
u64 buf;
+ if (PREDICT_FALSE (wrk->api_client_handle == ~0))
+ {
+ vcl_api_retry_attach (wrk);
+ return 0;
+ }
+
vec_validate (wrk->mq_events, pool_elts (wrk->mq_evt_conns));
n_mq_evts = epoll_wait (wrk->mqs_epfd, wrk->mq_events,
vec_len (wrk->mq_events), time_to_wait);
for (i = 0; i < n_mq_evts; i++)
{
+ if (PREDICT_FALSE (wrk->mq_events[i].data.u32 == ~0))
+ {
+ vcl_api_handle_disconnect (wrk);
+ continue;
+ }
+
mqc = vcl_mq_evt_conn_get (wrk, wrk->mq_events[i].data.u32);
n_read = read (mqc->mq_fd, &buf, sizeof (buf));
vcl_select_handle_mq (wrk, mqc->mq, n_bits, read_map, write_map,
@@ -3189,6 +3228,12 @@ vppcom_epoll_wait_eventfd (vcl_worker_t *wrk, struct epoll_event *events,
double end = -1;
u64 buf;
+ if (PREDICT_FALSE (wrk->api_client_handle == ~0))
+ {
+ vcl_api_retry_attach (wrk);
+ return n_evts;
+ }
+
vec_validate (wrk->mq_events, pool_elts (wrk->mq_evt_conns));
if (!n_evts)
{
@@ -3208,6 +3253,13 @@ vppcom_epoll_wait_eventfd (vcl_worker_t *wrk, struct epoll_event *events,
for (i = 0; i < n_mq_evts; i++)
{
+ if (PREDICT_FALSE (wrk->mq_events[i].data.u32 == ~0))
+ {
+ /* api socket was closed */
+ vcl_api_handle_disconnect (wrk);
+ continue;
+ }
+
mqc = vcl_mq_evt_conn_get (wrk, wrk->mq_events[i].data.u32);
n_read = read (mqc->mq_fd, &buf, sizeof (buf));
vcl_epoll_wait_handle_mq (wrk, mqc->mq, events, maxevents, 0,