diff options
author | Florin Coras <fcoras@cisco.com> | 2022-12-22 15:03:44 -0800 |
---|---|---|
committer | Dave Barach <vpp@barachs.net> | 2023-08-09 18:45:26 +0000 |
commit | 0242d30fc717aeacb758281dad8e5b2e56bf6709 (patch) | |
tree | eb7addb00bbe78061fa58442a6e9bdbd7f3e181c /src/svm/message_queue.c | |
parent | 6d733a93b2eb9c16196ee17d5cdc77db21589571 (diff) |
session: async rx event notifications
Move from synchronous flushing of io and ctrl events from transports to
applications to an async model via a new session_input input node that
runs in interrupt mode. Events are coalesced per application worker.
On the one hand, this helps by minimizing message queue locking churn.
And on the other, it opens the possibility for further optimizations of
event message generation, obviates the need for rx rescheduling RPCs and is
a first step towards a fully async data/io rx path.
Type: improvement
Signed-off-by: Florin Coras <fcoras@cisco.com>
Change-Id: Id6bebcb65fc9feef8aa02ddf1af6d9ba6f6745ce
Diffstat (limited to 'src/svm/message_queue.c')
-rw-r--r-- | src/svm/message_queue.c | 10 |
1 file changed, 5 insertions, 5 deletions
diff --git a/src/svm/message_queue.c b/src/svm/message_queue.c
index 2880645b427..ab0d230b1f0 100644
--- a/src/svm/message_queue.c
+++ b/src/svm/message_queue.c
@@ -340,15 +340,15 @@ svm_msq_q_msg_is_valid (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
   return (dist1 < dist2);
 }
 
-static void
-svm_msg_q_add_raw (svm_msg_q_t *mq, u8 *elem)
+void
+svm_msg_q_add_raw (svm_msg_q_t *mq, svm_msg_q_msg_t *msg)
 {
   svm_msg_q_shared_queue_t *sq = mq->q.shr;
   i8 *tailp;
   u32 sz;
 
   tailp = (i8 *) (&sq->data[0] + sq->elsize * sq->tail);
-  clib_memcpy_fast (tailp, elem, sq->elsize);
+  clib_memcpy_fast (tailp, msg, sq->elsize);
 
   sq->tail = (sq->tail + 1) % sq->maxsize;
 
@@ -381,7 +381,7 @@ svm_msg_q_add (svm_msg_q_t * mq, svm_msg_q_msg_t * msg, int nowait)
       svm_msg_q_wait_prod (mq);
     }
 
-  svm_msg_q_add_raw (mq, (u8 *) msg);
+  svm_msg_q_add_raw (mq, msg);
 
   svm_msg_q_unlock (mq);
 
@@ -392,7 +392,7 @@ void
 svm_msg_q_add_and_unlock (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
 {
   ASSERT (svm_msq_q_msg_is_valid (mq, msg));
-  svm_msg_q_add_raw (mq, (u8 *) msg);
+  svm_msg_q_add_raw (mq, msg);
   svm_msg_q_unlock (mq);
 }