path: root/src/plugins/unittest/session_test.c
author    Florin Coras <fcoras@cisco.com>      2022-12-22 15:03:44 -0800
committer Dave Barach <vpp@barachs.net>        2023-08-09 18:45:26 +0000
commit    0242d30fc717aeacb758281dad8e5b2e56bf6709 (patch)
tree      eb7addb00bbe78061fa58442a6e9bdbd7f3e181c /src/plugins/unittest/session_test.c
parent    6d733a93b2eb9c16196ee17d5cdc77db21589571 (diff)
session: async rx event notifications
Move from synchronous flushing of io and ctrl events from transports to
applications to an async model via a new session_input input node that runs
in interrupt mode. Events are coalesced per application worker. On the one
hand, this minimizes message queue locking churn. On the other, it opens the
possibility for further optimizations of event message generation, obviates
the need for rx rescheduling rpcs and is a first step towards a fully async
data/io rx path.

Type: improvement

Signed-off-by: Florin Coras <fcoras@cisco.com>
Change-Id: Id6bebcb65fc9feef8aa02ddf1af6d9ba6f6745ce
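The session_input node itself is added elsewhere in this change, not in this
test file. As a rough sketch only, with illustrative names and an empty body
standing in for the real event flush logic, a VLIB input node running in
interrupt mode is registered and woken roughly like this:

#include <vlib/vlib.h>

/* Illustrative only: a node registered with VLIB_NODE_STATE_INTERRUPT is not
 * polled continuously; it runs only after a producer marks it pending. The
 * actual session_input node added by this change lives in the session layer
 * and flushes the per-app-worker coalesced events to the applications. */
static uword
example_rx_notify_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
                           vlib_frame_t *frame)
{
  /* Drain events coalesced since the last wakeup and notify app workers */
  return 0;
}

VLIB_REGISTER_NODE (example_rx_notify_node) = {
  .function = example_rx_notify_node_fn,
  .name = "example-rx-notify",
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_INTERRUPT,
};

/* Producer side: after queueing events for an app worker, request a run of
 * the node on that worker's thread. */
static void
example_wake_rx_notify (vlib_main_t *vm, u32 node_index)
{
  vlib_node_set_interrupt_pending (vm, node_index);
}

Running in interrupt mode keeps the notification path off the polling hot
loop while still delivering events on the owning worker thread.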
Diffstat (limited to 'src/plugins/unittest/session_test.c')
-rw-r--r--  src/plugins/unittest/session_test.c | 70
1 file changed, 69 insertions(+), 1 deletion(-)
diff --git a/src/plugins/unittest/session_test.c b/src/plugins/unittest/session_test.c
index c4e41c34dd0..70b3b32a2e4 100644
--- a/src/plugins/unittest/session_test.c
+++ b/src/plugins/unittest/session_test.c
@@ -1771,6 +1771,74 @@ wait_for_event (svm_msg_q_t * mq, int fd, int epfd, u8 use_eventfd)
}
}
+/* Used to be part of application_worker.c prior to adding support for
+ * async rx
+ */
+static int
+test_mq_try_lock_and_alloc_msg (svm_msg_q_t *mq, session_mq_rings_e ring,
+ svm_msg_q_msg_t *msg)
+{
+ int rv, n_try = 0;
+
+ while (n_try < 75)
+ {
+ rv = svm_msg_q_lock_and_alloc_msg_w_ring (mq, ring, SVM_Q_NOWAIT, msg);
+ if (!rv)
+ return 0;
+ /*
+ * Break the loop if mq is full, usually this is because the
+ * app has crashed or is hanging on somewhere.
+ */
+ if (rv != -1)
+ break;
+ n_try += 1;
+ usleep (1);
+ }
+
+ return -1;
+}
+
+/* Used to be part of application_worker.c prior to adding support for
+ * async rx and was used for delivering io events over mq
+ * NB: removed handling of mq congestion
+ */
+static inline int
+test_app_send_io_evt_rx (app_worker_t *app_wrk, session_t *s)
+{
+ svm_msg_q_msg_t _mq_msg = { 0 }, *mq_msg = &_mq_msg;
+ session_event_t *evt;
+ svm_msg_q_t *mq;
+ u32 app_session;
+ int rv;
+
+ if (app_worker_application_is_builtin (app_wrk))
+ return app_worker_rx_notify (app_wrk, s);
+
+ if (svm_fifo_has_event (s->rx_fifo))
+ return 0;
+
+ app_session = s->rx_fifo->shr->client_session_index;
+ mq = app_wrk->event_queue;
+
+ rv = test_mq_try_lock_and_alloc_msg (mq, SESSION_MQ_IO_EVT_RING, mq_msg);
+
+ if (PREDICT_FALSE (rv))
+ {
+ clib_warning ("failed to alloc mq message");
+ return -1;
+ }
+
+ evt = svm_msg_q_msg_data (mq, mq_msg);
+ evt->event_type = SESSION_IO_EVT_RX;
+ evt->session_index = app_session;
+
+ (void) svm_fifo_set_event (s->rx_fifo);
+
+ svm_msg_q_add_and_unlock (mq, mq_msg);
+
+ return 0;
+}
+
static int
session_test_mq_speed (vlib_main_t * vm, unformat_input_t * input)
{
@@ -1885,7 +1953,7 @@ session_test_mq_speed (vlib_main_t * vm, unformat_input_t * input)
{
while (svm_fifo_has_event (rx_fifo))
;
- app_worker_lock_and_send_event (app_wrk, &s, SESSION_IO_EVT_RX);
+ test_app_send_io_evt_rx (app_wrk, &s);
}
}
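
The sender loop above busy-waits on svm_fifo_has_event() until the consumer
has cleared the fifo's event flag before posting the next notification. For
context, here is a minimal sketch of that consumer side, which is not part of
this diff; the helper name is made up and it assumes the svm message queue
consumer calls svm_msg_q_sub()/svm_msg_q_free_msg() and svm_fifo_unset_event():

#include <vnet/session/session.h>

/* Illustrative consumer: dequeue one io event from the app worker's message
 * queue and re-arm rx notifications for the session's fifo. */
static int
example_drain_one_rx_evt (svm_msg_q_t *mq, svm_fifo_t *rx_fifo)
{
  svm_msg_q_msg_t msg;
  session_event_t *evt;

  /* Block until the producer enqueues a message */
  if (svm_msg_q_sub (mq, &msg, SVM_Q_WAIT, 0))
    return -1;

  evt = svm_msg_q_msg_data (mq, &msg);
  if (evt->event_type == SESSION_IO_EVT_RX)
    {
      /* Dequeue data for evt->session_index from its rx fifo here, then
       * clear the event flag so the producer can post a new notification */
      svm_fifo_unset_event (rx_fifo);
    }

  svm_msg_q_free_msg (mq, &msg);
  return 0;
}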