summaryrefslogtreecommitdiffstats
path: root/src/svm
diff options
context:
space:
mode:
authorFlorin Coras <fcoras@cisco.com>2021-01-25 20:31:27 -0800
committerDave Barach <openvpp@barachs.net>2021-02-05 17:28:34 +0000
commit5398dfb2592d525018997a991a4f7bfde515adc4 (patch)
treed986e2bac410e8b630a0d5907b729550a28838da /src/svm
parent2b97f597c6705809201ce6a6846d46c47c0933ba (diff)
session svm: non blocking mq
Avoid synchronizing producers and the consumer. Instead, only use mutex or spinlock (if eventfds are configured) to synchronize producers. Type: improvement Signed-off-by: Florin Coras <fcoras@cisco.com> Change-Id: Ie2aafbdc2e07fced5d5e46ee2df6b30a186faa2f
Diffstat (limited to 'src/svm')
-rw-r--r--src/svm/message_queue.c177
-rw-r--r--src/svm/message_queue.h78
2 files changed, 181 insertions, 74 deletions
diff --git a/src/svm/message_queue.c b/src/svm/message_queue.c
index fdf9293b18c..b423826cb83 100644
--- a/src/svm/message_queue.c
+++ b/src/svm/message_queue.c
@@ -163,21 +163,36 @@ svm_msg_q_attach (svm_msg_q_t *mq, void *smq_base)
offset = sizeof (*ring) + ring->nitems * ring->elsize;
ring = (void *) ((u8 *) ring + offset);
}
+ clib_spinlock_init (&mq->q.lock);
}
void
svm_msg_q_free (svm_msg_q_t * mq)
{
clib_mem_free (mq->q.shr);
+ clib_spinlock_free (&mq->q.lock);
clib_mem_free (mq);
}
static void
-svm_msg_q_send_signal (svm_msg_q_t *mq)
+svm_msg_q_send_signal (svm_msg_q_t *mq, u8 is_consumer)
{
if (mq->q.evtfd == -1)
{
+ if (is_consumer)
+ {
+ int rv = pthread_mutex_lock (&mq->q.shr->mutex);
+ if (PREDICT_FALSE (rv == EOWNERDEAD))
+ {
+ rv = pthread_mutex_consistent (&mq->q.shr->mutex);
+ return;
+ }
+ }
+
(void) pthread_cond_broadcast (&mq->q.shr->condvar);
+
+ if (is_consumer)
+ pthread_mutex_unlock (&mq->q.shr->mutex);
}
else
{
@@ -232,7 +247,7 @@ svm_msg_q_lock_and_alloc_msg_w_ring (svm_msg_q_t * mq, u32 ring_index,
svm_msg_q_lock (mq);
while (svm_msg_q_is_full (mq)
|| svm_msg_q_ring_is_full (mq, ring_index))
- svm_msg_q_wait (mq);
+ svm_msg_q_wait (mq, SVM_MQ_WAIT_FULL);
*msg = svm_msg_q_alloc_msg_w_ring (mq, ring_index);
}
return 0;
@@ -253,7 +268,7 @@ svm_msg_q_alloc_msg (svm_msg_q_t * mq, u32 nbytes)
msg.ring_index = ring - mq->rings;
msg.elt_index = sr->tail;
sr->tail = (sr->tail + 1) % ring->nitems;
- clib_atomic_fetch_add_rel (&sr->cursize, 1);
+ clib_atomic_fetch_add_relax (&sr->cursize, 1);
break;
}
return msg;
@@ -271,7 +286,7 @@ svm_msg_q_free_msg (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
{
svm_msg_q_ring_shared_t *sr;
svm_msg_q_ring_t *ring;
- int need_signal;
+ u32 need_signal;
ASSERT (vec_len (mq->rings) > msg->ring_index);
ring = svm_msg_q_ring_inline (mq, msg->ring_index);
@@ -282,16 +297,17 @@ svm_msg_q_free_msg (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
}
else
{
- clib_warning ("message out of order");
+ clib_warning ("message out of order: elt %u head %u ring %u",
+ msg->elt_index, sr->head, msg->ring_index);
/* for now, expect messages to be processed in order */
ASSERT (0);
}
- need_signal = sr->cursize == ring->nitems;
- clib_atomic_fetch_sub_rel (&sr->cursize, 1);
+ need_signal = clib_atomic_load_relax_n (&sr->cursize) == ring->nitems;
+ clib_atomic_fetch_sub_relax (&sr->cursize, 1);
if (PREDICT_FALSE (need_signal))
- svm_msg_q_send_signal (mq);
+ svm_msg_q_send_signal (mq, 1 /* is consumer */);
}
static int
@@ -331,7 +347,7 @@ svm_msg_q_add_raw (svm_msg_q_t *mq, u8 *elem)
sz = clib_atomic_fetch_add_rel (&sq->cursize, 1);
if (!sz)
- svm_msg_q_send_signal (mq);
+ svm_msg_q_send_signal (mq, 0 /* is consumer */);
}
int
@@ -355,7 +371,7 @@ svm_msg_q_add (svm_msg_q_t * mq, svm_msg_q_msg_t * msg, int nowait)
if (nowait)
return (-2);
while (svm_msg_q_is_full (mq))
- svm_msg_q_wait (mq);
+ svm_msg_q_wait (mq, SVM_MQ_WAIT_FULL);
}
svm_msg_q_add_raw (mq, (u8 *) msg);
@@ -373,8 +389,8 @@ svm_msg_q_add_and_unlock (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
svm_msg_q_unlock (mq);
}
-static int
-svm_msg_q_sub_raw (svm_msg_q_t *mq, u8 *elem)
+int
+svm_msg_q_sub_raw (svm_msg_q_t *mq, svm_msg_q_msg_t *elem)
{
svm_msg_q_shared_queue_t *sq = mq->q.shr;
i8 *headp;
@@ -387,69 +403,76 @@ svm_msg_q_sub_raw (svm_msg_q_t *mq, u8 *elem)
sq->head = (sq->head + 1) % sq->maxsize;
- sz = clib_atomic_fetch_sub_rel (&sq->cursize, 1);
+ sz = clib_atomic_fetch_sub_relax (&sq->cursize, 1);
if (PREDICT_FALSE (sz == sq->maxsize))
- svm_msg_q_send_signal (mq);
+ svm_msg_q_send_signal (mq, 1 /* is consumer */);
return 0;
}
int
-svm_msg_q_sub (svm_msg_q_t * mq, svm_msg_q_msg_t * msg,
- svm_q_conditional_wait_t cond, u32 time)
+svm_msg_q_sub_raw_batch (svm_msg_q_t *mq, svm_msg_q_msg_t *msg_buf, u32 n_msgs)
{
- int rc = 0;
+ svm_msg_q_shared_queue_t *sq = mq->q.shr;
+ u32 sz, to_deq;
+ i8 *headp;
- if (cond == SVM_Q_NOWAIT)
+ sz = svm_msg_q_size (mq);
+ ASSERT (sz);
+ to_deq = clib_min (sz, n_msgs);
+
+ headp = (i8 *) (&sq->data[0] + sq->elsize * sq->head);
+
+ if (sq->head + to_deq < sq->maxsize)
{
- /* zero on success */
- if (svm_msg_q_try_lock (mq))
- {
- return (-1);
- }
+ clib_memcpy_fast (msg_buf, headp, sq->elsize * to_deq);
+ sq->head += to_deq;
}
else
- svm_msg_q_lock (mq);
+ {
+ u32 first_batch = sq->maxsize - sq->head;
+ clib_memcpy_fast (msg_buf, headp, sq->elsize * first_batch);
+ clib_memcpy_fast (msg_buf + first_batch, sq->data,
+ sq->elsize * (to_deq - first_batch));
+ sq->head = (sq->head + to_deq) % sq->maxsize;
+ }
+
+ clib_atomic_fetch_sub_relax (&sq->cursize, to_deq);
+ if (PREDICT_FALSE (sz == sq->maxsize))
+ svm_msg_q_send_signal (mq, 1 /* is consumer */);
+
+ return to_deq;
+}
+
+int
+svm_msg_q_sub (svm_msg_q_t *mq, svm_msg_q_msg_t *msg,
+ svm_q_conditional_wait_t cond, u32 time)
+{
+ int rc = 0;
- if (PREDICT_FALSE (svm_msg_q_is_empty (mq)))
+ if (svm_msg_q_is_empty (mq))
{
if (cond == SVM_Q_NOWAIT)
{
- svm_msg_q_unlock (mq);
return (-2);
}
else if (cond == SVM_Q_TIMEDWAIT)
{
- while (svm_msg_q_is_empty (mq) && rc == 0)
- rc = svm_msg_q_timedwait (mq, time);
-
- if (rc == ETIMEDOUT)
- {
- svm_msg_q_unlock (mq);
- return ETIMEDOUT;
- }
+ if ((rc = svm_msg_q_timedwait (mq, time)))
+ return rc;
}
else
{
- while (svm_msg_q_is_empty (mq))
- svm_msg_q_wait (mq);
+ svm_msg_q_wait (mq, SVM_MQ_WAIT_EMPTY);
}
}
- svm_msg_q_sub_raw (mq, (u8 *) msg);
-
- svm_msg_q_unlock (mq);
+ svm_msg_q_sub_raw (mq, msg);
return 0;
}
void
-svm_msg_q_sub_w_lock (svm_msg_q_t *mq, svm_msg_q_msg_t *msg)
-{
- svm_msg_q_sub_raw (mq, (u8 *) msg);
-}
-
-void
svm_msg_q_set_eventfd (svm_msg_q_t *mq, int fd)
{
mq->q.evtfd = fd;
@@ -465,29 +488,46 @@ svm_msg_q_alloc_eventfd (svm_msg_q_t *mq)
return 0;
}
-void
-svm_msg_q_wait (svm_msg_q_t *mq)
+int
+svm_msg_q_wait (svm_msg_q_t *mq, svm_msg_q_wait_type_t type)
{
+ u8 (*fn) (svm_msg_q_t *);
+ int rv;
+
+ fn = (type == SVM_MQ_WAIT_EMPTY) ? svm_msg_q_is_empty : svm_msg_q_is_full;
+
if (mq->q.evtfd == -1)
{
- pthread_cond_wait (&mq->q.shr->condvar, &mq->q.shr->mutex);
+ rv = pthread_mutex_lock (&mq->q.shr->mutex);
+ if (PREDICT_FALSE (rv == EOWNERDEAD))
+ {
+ rv = pthread_mutex_consistent (&mq->q.shr->mutex);
+ return rv;
+ }
+
+ while (fn (mq))
+ pthread_cond_wait (&mq->q.shr->condvar, &mq->q.shr->mutex);
+
+ pthread_mutex_unlock (&mq->q.shr->mutex);
}
else
{
u64 buf;
- int rv;
- svm_msg_q_unlock (mq);
- while ((rv = read (mq->q.evtfd, &buf, sizeof (buf))) < 0)
+ while (fn (mq))
{
- if (errno != EAGAIN)
+ while ((rv = read (mq->q.evtfd, &buf, sizeof (buf))) < 0)
{
- clib_unix_warning ("read error");
- return;
+ if (errno != EAGAIN)
+ {
+ clib_unix_warning ("read error");
+ return rv;
+ }
}
}
- svm_msg_q_lock (mq);
}
+
+ return 0;
}
int
@@ -495,11 +535,32 @@ svm_msg_q_timedwait (svm_msg_q_t *mq, double timeout)
{
if (mq->q.evtfd == -1)
{
+ svm_msg_q_shared_queue_t *sq = mq->q.shr;
struct timespec ts;
+ u32 sz;
+ int rv;
+
+ rv = pthread_mutex_lock (&sq->mutex);
+ if (PREDICT_FALSE (rv == EOWNERDEAD))
+ {
+ rv = pthread_mutex_consistent (&sq->mutex);
+ return rv;
+ }
+
+ /* check if we're still in a signalable state after grabbing lock */
+ sz = svm_msg_q_size (mq);
+ if (sz != 0 && sz != sq->maxsize)
+ {
+ pthread_mutex_unlock (&sq->mutex);
+ return 0;
+ }
+
ts.tv_sec = unix_time_now () + (u32) timeout;
ts.tv_nsec = (timeout - (u32) timeout) * 1e9;
- return pthread_cond_timedwait (&mq->q.shr->condvar, &mq->q.shr->mutex,
- &ts);
+ rv = pthread_cond_timedwait (&sq->condvar, &sq->mutex, &ts);
+
+ pthread_mutex_unlock (&sq->mutex);
+ return rv;
}
else
{
@@ -512,11 +573,9 @@ svm_msg_q_timedwait (svm_msg_q_t *mq, double timeout)
setsockopt (mq->q.evtfd, SOL_SOCKET, SO_RCVTIMEO, (const char *) &tv,
sizeof tv);
- svm_msg_q_unlock (mq);
rv = read (mq->q.evtfd, &buf, sizeof (buf));
if (rv < 0)
clib_warning ("read %u", errno);
- svm_msg_q_lock (mq);
return rv < 0 ? errno : 0;
}
diff --git a/src/svm/message_queue.h b/src/svm/message_queue.h
index 7716c6724d8..1ef773d9f0a 100644
--- a/src/svm/message_queue.h
+++ b/src/svm/message_queue.h
@@ -22,6 +22,7 @@
#include <vppinfra/clib.h>
#include <vppinfra/error.h>
+#include <vppinfra/lock.h>
#include <svm/queue.h>
typedef struct svm_msg_q_shr_queue_
@@ -41,6 +42,7 @@ typedef struct svm_msg_q_queue_
{
svm_msg_q_shared_queue_t *shr; /**< pointer to shared queue */
int evtfd; /**< producer/consumer eventfd */
+ clib_spinlock_t lock; /**< private lock for multi-producer */
} svm_msg_q_queue_t;
typedef struct svm_msg_q_ring_shared_
@@ -99,6 +101,13 @@ typedef union
} svm_msg_q_msg_t;
#define SVM_MQ_INVALID_MSG { .as_u64 = ~0 }
+
+typedef enum svm_msg_q_wait_type_
+{
+ SVM_MQ_WAIT_EMPTY,
+ SVM_MQ_WAIT_FULL
+} svm_msg_q_wait_type_t;
+
/**
* Allocate message queue
*
@@ -206,6 +215,7 @@ void svm_msg_q_add_and_unlock (svm_msg_q_t * mq, svm_msg_q_msg_t * msg);
* Consumer dequeue one message from queue
*
* This returns the message pointing to the data in the message rings.
+ * Should only be used in single consumer scenarios as no locks are grabbed.
* The consumer is expected to call @ref svm_msg_q_free_msg once it
* finishes processing/copies the message data.
*
@@ -219,18 +229,34 @@ int svm_msg_q_sub (svm_msg_q_t * mq, svm_msg_q_msg_t * msg,
svm_q_conditional_wait_t cond, u32 time);
/**
- * Consumer dequeue one message from queue with mutex held
+ * Consumer dequeue one message from queue
*
- * Returns the message pointing to the data in the message rings under the
- * assumption that the message queue lock is already held. The consumer is
- * expected to call @ref svm_msg_q_free_msg once it finishes
+ * Returns the message pointing to the data in the message rings. Should only
+ * be used in single consumer scenarios as no locks are grabbed. The consumer
+ * is expected to call @ref svm_msg_q_free_msg once it finishes
* processing/copies the message data.
*
* @param mq message queue
* @param msg pointer to structure where message is to be received
* @return success status
*/
-void svm_msg_q_sub_w_lock (svm_msg_q_t * mq, svm_msg_q_msg_t * msg);
+int svm_msg_q_sub_raw (svm_msg_q_t *mq, svm_msg_q_msg_t *elem);
+
+/**
+ * Consumer dequeue multiple messages from queue
+ *
+ * Returns the message pointing to the data in the message rings. Should only
+ * be used in single consumer scenarios as no locks are grabbed. The consumer
+ * is expected to call @ref svm_msg_q_free_msg once it finishes
+ * processing/copies the message data.
+ *
+ * @param mq message queue
+ * @param msg_buf pointer to array where messages are to be received
+ * @param n_msgs length of msg_buf array
+ * @return number of messages dequeued
+ */
+int svm_msg_q_sub_raw_batch (svm_msg_q_t *mq, svm_msg_q_msg_t *msg_buf,
+ u32 n_msgs);
/**
* Get data for message in queue
@@ -321,10 +347,17 @@ svm_msg_q_msg_is_invalid (svm_msg_q_msg_t * msg)
static inline int
svm_msg_q_try_lock (svm_msg_q_t * mq)
{
- int rv = pthread_mutex_trylock (&mq->q.shr->mutex);
- if (PREDICT_FALSE (rv == EOWNERDEAD))
- rv = pthread_mutex_consistent (&mq->q.shr->mutex);
- return rv;
+ if (mq->q.evtfd == -1)
+ {
+ int rv = pthread_mutex_trylock (&mq->q.shr->mutex);
+ if (PREDICT_FALSE (rv == EOWNERDEAD))
+ rv = pthread_mutex_consistent (&mq->q.shr->mutex);
+ return rv;
+ }
+ else
+ {
+ return !clib_spinlock_trylock (&mq->q.lock);
+ }
}
/**
@@ -333,10 +366,18 @@ svm_msg_q_try_lock (svm_msg_q_t * mq)
static inline int
svm_msg_q_lock (svm_msg_q_t * mq)
{
- int rv = pthread_mutex_lock (&mq->q.shr->mutex);
- if (PREDICT_FALSE (rv == EOWNERDEAD))
- rv = pthread_mutex_consistent (&mq->q.shr->mutex);
- return rv;
+ if (mq->q.evtfd == -1)
+ {
+ int rv = pthread_mutex_lock (&mq->q.shr->mutex);
+ if (PREDICT_FALSE (rv == EOWNERDEAD))
+ rv = pthread_mutex_consistent (&mq->q.shr->mutex);
+ return rv;
+ }
+ else
+ {
+ clib_spinlock_lock (&mq->q.lock);
+ return 0;
+ }
}
/**
@@ -345,7 +386,14 @@ svm_msg_q_lock (svm_msg_q_t * mq)
static inline void
svm_msg_q_unlock (svm_msg_q_t * mq)
{
- pthread_mutex_unlock (&mq->q.shr->mutex);
+ if (mq->q.evtfd == -1)
+ {
+ pthread_mutex_unlock (&mq->q.shr->mutex);
+ }
+ else
+ {
+ clib_spinlock_unlock (&mq->q.lock);
+ }
}
/**
@@ -354,7 +402,7 @@ svm_msg_q_unlock (svm_msg_q_t * mq)
* Must be called with mutex held. The queue only works non-blocking
* with eventfds, so handle blocking calls as an exception here.
*/
-void svm_msg_q_wait (svm_msg_q_t *mq);
+int svm_msg_q_wait (svm_msg_q_t *mq, svm_msg_q_wait_type_t type);
/**
* Timed wait for message queue event