author    Florin Coras <fcoras@cisco.com>          2018-07-04 04:15:05 -0700
committer Damjan Marion <dmarion@me.com>           2018-07-17 09:02:17 +0000
commit    3c2fed5145d9e40a9ecd178c2866c813eddc6203 (patch)
tree      7ff2408f3b1c4a52fb6d7cd091508de1ce950e5f /src/svm
parent    5da96a77a84ae5414debbc46d390464d51010113 (diff)
session: use msg queue for events
Change-Id: I3c58367eec2243fe19b75be78a175c5261863e9e
Signed-off-by: Florin Coras <fcoras@cisco.com>
Diffstat (limited to 'src/svm')
-rw-r--r--  src/svm/message_queue.c          | 102
-rw-r--r--  src/svm/message_queue.h          | 157
-rw-r--r--  src/svm/queue.c                  |  18
-rw-r--r--  src/svm/queue.h                  |   8
-rw-r--r--  src/svm/svm_fifo.h               |   6
-rw-r--r--  src/svm/test_svm_message_queue.c |   4
6 files changed, 255 insertions(+), 40 deletions(-)
diff --git a/src/svm/message_queue.c b/src/svm/message_queue.c
index 4f3e7642740..89411143c12 100644
--- a/src/svm/message_queue.c
+++ b/src/svm/message_queue.c
@@ -16,6 +16,25 @@
#include <svm/message_queue.h>
#include <vppinfra/mem.h>
+static inline svm_msg_q_ring_t *
+svm_msg_q_ring_inline (svm_msg_q_t * mq, u32 ring_index)
+{
+ return vec_elt_at_index (mq->rings, ring_index);
+}
+
+svm_msg_q_ring_t *
+svm_msg_q_ring (svm_msg_q_t * mq, u32 ring_index)
+{
+ return svm_msg_q_ring_inline (mq, ring_index);
+}
+
+static inline void *
+svm_msg_q_ring_data (svm_msg_q_ring_t * ring, u32 elt_index)
+{
+ ASSERT (elt_index < ring->nitems);
+ return (ring->data + elt_index * ring->elsize);
+}
+
svm_msg_q_t *
svm_msg_q_alloc (svm_msg_q_cfg_t * cfg)
{
@@ -63,6 +82,53 @@ svm_msg_q_free (svm_msg_q_t * mq)
}
svm_msg_q_msg_t
+svm_msg_q_alloc_msg_w_ring (svm_msg_q_t * mq, u32 ring_index)
+{
+ svm_msg_q_msg_t msg = {.as_u64 = ~0 };
+ svm_msg_q_ring_t *ring = svm_msg_q_ring_inline (mq, ring_index);
+
+ ASSERT (ring->cursize != ring->nitems);
+ msg.ring_index = ring - mq->rings;
+ msg.elt_index = ring->tail;
+ ring->tail = (ring->tail + 1) % ring->nitems;
+ __sync_fetch_and_add (&ring->cursize, 1);
+ return msg;
+}
+
+int
+svm_msg_q_lock_and_alloc_msg_w_ring (svm_msg_q_t * mq, u32 ring_index,
+ u8 noblock, svm_msg_q_msg_t * msg)
+{
+ if (noblock)
+ {
+ if (svm_msg_q_try_lock (mq))
+ return -1;
+ if (PREDICT_FALSE (svm_msg_q_ring_is_full (mq, ring_index)))
+ {
+ svm_msg_q_unlock (mq);
+ return -2;
+ }
+ *msg = svm_msg_q_alloc_msg_w_ring (mq, ring_index);
+ if (PREDICT_FALSE (svm_msg_q_msg_is_invalid (msg)))
+ {
+ svm_msg_q_unlock (mq);
+ return -2;
+ }
+ }
+ else
+ {
+ svm_msg_q_lock (mq);
+ *msg = svm_msg_q_alloc_msg_w_ring (mq, ring_index);
+ while (svm_msg_q_msg_is_invalid (msg))
+ {
+ svm_msg_q_wait (mq);
+ *msg = svm_msg_q_alloc_msg_w_ring (mq, ring_index);
+ }
+ }
+ return 0;
+}
+
+svm_msg_q_msg_t
svm_msg_q_alloc_msg (svm_msg_q_t * mq, u32 nbytes)
{
svm_msg_q_msg_t msg = {.as_u64 = ~0 };
@@ -81,23 +147,10 @@ svm_msg_q_alloc_msg (svm_msg_q_t * mq, u32 nbytes)
return msg;
}
-static inline svm_msg_q_ring_t *
-svm_msg_q_get_ring (svm_msg_q_t * mq, u32 ring_index)
-{
- return vec_elt_at_index (mq->rings, ring_index);
-}
-
-static inline void *
-svm_msg_q_ring_data (svm_msg_q_ring_t * ring, u32 elt_index)
-{
- ASSERT (elt_index < ring->nitems);
- return (ring->data + elt_index * ring->elsize);
-}
-
void *
svm_msg_q_msg_data (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
{
- svm_msg_q_ring_t *ring = svm_msg_q_get_ring (mq, msg->ring_index);
+ svm_msg_q_ring_t *ring = svm_msg_q_ring_inline (mq, msg->ring_index);
return svm_msg_q_ring_data (ring, msg->elt_index);
}
@@ -131,7 +184,7 @@ svm_msq_q_msg_is_valid (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
return 0;
ring = &mq->rings[msg->ring_index];
- dist1 = ((ring->nitems + msg->ring_index) - ring->head) % ring->nitems;
+ dist1 = ((ring->nitems + msg->elt_index) - ring->head) % ring->nitems;
if (ring->tail == ring->head)
dist2 = (ring->cursize == 0) ? 0 : ring->nitems;
else
@@ -140,10 +193,17 @@ svm_msq_q_msg_is_valid (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
}
int
-svm_msg_q_add (svm_msg_q_t * mq, svm_msg_q_msg_t msg, int nowait)
+svm_msg_q_add (svm_msg_q_t * mq, svm_msg_q_msg_t * msg, int nowait)
+{
+ ASSERT (svm_msq_q_msg_is_valid (mq, msg));
+ return svm_queue_add (mq->q, (u8 *) msg, nowait);
+}
+
+void
+svm_msg_q_add_w_lock (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
{
- ASSERT (svm_msq_q_msg_is_valid (mq, &msg));
- return svm_queue_add (mq->q, (u8 *) & msg, nowait);
+ ASSERT (svm_msq_q_msg_is_valid (mq, msg));
+ svm_queue_add_raw (mq->q, (u8 *) msg);
}
int
@@ -153,6 +213,12 @@ svm_msg_q_sub (svm_msg_q_t * mq, svm_msg_q_msg_t * msg,
return svm_queue_sub (mq->q, (u8 *) msg, cond, time);
}
+void
+svm_msg_q_sub_w_lock (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
+{
+ svm_queue_sub_raw (mq->q, (u8 *) msg);
+}
+
/*
* fd.io coding-style-patch-verification: ON
*
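The new locked allocation path above is meant to be paired with svm_msg_q_add_w_lock() and an explicit unlock. A minimal producer-side sketch of that pairing, assuming a queue already created with svm_msg_q_alloc() whose ring 0 carries u32-sized elements; the helper name producer_send_u32 is illustrative, not part of the patch:

#include <svm/message_queue.h>

/* Illustrative producer: allocate a slot on ring 0 under the queue lock,
 * write a u32 payload, enqueue while still holding the mutex, unlock. */
static int
producer_send_u32 (svm_msg_q_t * mq, u32 value)
{
  svm_msg_q_msg_t msg;

  /* Grab the lock and a ring slot; fail instead of blocking if full */
  if (svm_msg_q_lock_and_alloc_msg_w_ring (mq, 0 /* ring_index */,
					   1 /* noblock */, &msg))
    return -1;

  /* Fill in the message data on the ring */
  *(u32 *) svm_msg_q_msg_data (mq, &msg) = value;

  /* Enqueue with the mutex held, then release it */
  svm_msg_q_add_w_lock (mq, &msg);
  svm_msg_q_unlock (mq);
  return 0;
}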
diff --git a/src/svm/message_queue.h b/src/svm/message_queue.h
index 5ec8547016e..708a03d716e 100644
--- a/src/svm/message_queue.h
+++ b/src/svm/message_queue.h
@@ -21,14 +21,15 @@
#define SRC_SVM_MESSAGE_QUEUE_H_
#include <vppinfra/clib.h>
+#include <vppinfra/error.h>
#include <svm/queue.h>
typedef struct svm_msg_q_ring_
{
volatile u32 cursize; /**< current size of the ring */
u32 nitems; /**< max size of the ring */
- u32 head; /**< current head (for dequeue) */
- u32 tail; /**< current tail (for enqueue) */
+ volatile u32 head; /**< current head (for dequeue) */
+ volatile u32 tail; /**< current tail (for enqueue) */
u32 elsize; /**< size of an element */
u8 *data; /**< chunk of memory for msg data */
} svm_msg_q_ring_t;
@@ -64,6 +65,7 @@ typedef union
u64 as_u64;
} svm_msg_q_msg_t;
+#define SVM_MQ_INVALID_MSG { .as_u64 = ~0 }
/**
* Allocate message queue
*
@@ -98,6 +100,36 @@ void svm_msg_q_free (svm_msg_q_t * mq);
svm_msg_q_msg_t svm_msg_q_alloc_msg (svm_msg_q_t * mq, u32 nbytes);
/**
+ * Allocate message buffer on ring
+ *
+ * Message is allocated on the requested ring. The caller MUST check that
+ * the ring is not full.
+ *
+ * @param mq message queue
+ * @param ring_index ring on which the allocation should occur
+ * @return message structure pointing to the ring and position
+ * allocated
+ */
+svm_msg_q_msg_t svm_msg_q_alloc_msg_w_ring (svm_msg_q_t * mq, u32 ring_index);
+
+/**
+ * Lock message queue and allocate message buffer on ring
+ *
+ * This should be used when multiple writers/readers are expected to
+ * compete for the rings/queue. Message should be enqueued by calling
+ * @ref svm_msg_q_add_w_lock and the caller MUST unlock the queue once
+ * the message is enqueued.
+ *
+ * @param mq message queue
+ * @param ring_index ring on which the allocation should occur
+ * @param noblock flag that indicates if request should block
+ * @param msg pointer to message to be filled in
+ * @return 0 on success, negative number otherwise
+ */
+int svm_msg_q_lock_and_alloc_msg_w_ring (svm_msg_q_t * mq, u32 ring_index,
+ u8 noblock, svm_msg_q_msg_t * msg);
+
+/**
* Free message buffer
*
* Marks message buffer on ring as free.
@@ -106,6 +138,7 @@ svm_msg_q_msg_t svm_msg_q_alloc_msg (svm_msg_q_t * mq, u32 nbytes);
* @param msg message to be freed
*/
void svm_msg_q_free_msg (svm_msg_q_t * mq, svm_msg_q_msg_t * msg);
+
/**
* Producer enqueue one message to queue
*
@@ -117,7 +150,20 @@ void svm_msg_q_free_msg (svm_msg_q_t * mq, svm_msg_q_msg_t * msg);
* @param nowait flag to indicate if request is blocking or not
* @return success status
*/
-int svm_msg_q_add (svm_msg_q_t * mq, svm_msg_q_msg_t msg, int nowait);
+int svm_msg_q_add (svm_msg_q_t * mq, svm_msg_q_msg_t * msg, int nowait);
+
+/**
+ * Producer enqueue one message to queue with mutex held
+ *
+ * Prior to calling this, the producer should've obtained a message buffer
+ * from one of the rings by calling @ref svm_msg_q_alloc_msg. It assumes
+ * the queue mutex is held.
+ *
+ * @param mq message queue
+ * @param msg message (pointer to ring position) to be enqueued
+ */
+void svm_msg_q_add_w_lock (svm_msg_q_t * mq, svm_msg_q_msg_t * msg);
/**
* Consumer dequeue one message from queue
@@ -129,13 +175,28 @@ int svm_msg_q_add (svm_msg_q_t * mq, svm_msg_q_msg_t msg, int nowait);
* @param mq message queue
* @param msg pointer to structure where message is to be received
* @param cond flag that indicates if request should block or not
+ * @param time time to wait if condition is SVM_Q_TIMEDWAIT
* @return success status
*/
int svm_msg_q_sub (svm_msg_q_t * mq, svm_msg_q_msg_t * msg,
svm_q_conditional_wait_t cond, u32 time);
/**
- * Get data for message in queu
+ * Consumer dequeue one message from queue with mutex held
+ *
+ * Returns the message pointing to the data in the message rings under the
+ * assumption that the message queue lock is already held. The consumer is
+ * expected to call @ref svm_msg_q_free_msg once it finishes
+ * processing/copies the message data.
+ *
+ * @param mq message queue
+ * @param msg pointer to structure where message is to be received
+ */
+void svm_msg_q_sub_w_lock (svm_msg_q_t * mq, svm_msg_q_msg_t * msg);
+
+/**
+ * Get data for message in queue
*
* @param mq message queue
* @param msg message for which the data is requested
@@ -143,6 +204,94 @@ int svm_msg_q_sub (svm_msg_q_t * mq, svm_msg_q_msg_t * msg,
*/
void *svm_msg_q_msg_data (svm_msg_q_t * mq, svm_msg_q_msg_t * msg);
+/**
+ * Get message queue ring
+ *
+ * @param mq message queue
+ * @param ring_index index of ring
+ * @return pointer to ring
+ */
+svm_msg_q_ring_t *svm_msg_q_ring (svm_msg_q_t * mq, u32 ring_index);
+
+/**
+ * Check if message queue is full
+ */
+static inline u8
+svm_msg_q_is_full (svm_msg_q_t * mq)
+{
+ return (mq->q->cursize == mq->q->maxsize);
+}
+
+static inline u8
+svm_msg_q_ring_is_full (svm_msg_q_t * mq, u32 ring_index)
+{
+ ASSERT (ring_index < vec_len (mq->rings));
+ return (mq->rings[ring_index].cursize == mq->rings[ring_index].nitems);
+}
+
+/**
+ * Check if message queue is empty
+ */
+static inline u8
+svm_msg_q_is_empty (svm_msg_q_t * mq)
+{
+ return (mq->q->cursize == 0);
+}
+
+/**
+ * Check length of message queue
+ */
+static inline u32
+svm_msg_q_size (svm_msg_q_t * mq)
+{
+ return mq->q->cursize;
+}
+
+/**
+ * Check if message is invalid
+ */
+static inline u8
+svm_msg_q_msg_is_invalid (svm_msg_q_msg_t * msg)
+{
+ return (msg->as_u64 == (u64) ~ 0);
+}
+
+/**
+ * Try locking message queue
+ */
+static inline int
+svm_msg_q_try_lock (svm_msg_q_t * mq)
+{
+ return pthread_mutex_trylock (&mq->q->mutex);
+}
+
+/**
+ * Lock, or block trying, the message queue
+ */
+static inline int
+svm_msg_q_lock (svm_msg_q_t * mq)
+{
+ return pthread_mutex_lock (&mq->q->mutex);
+}
+
+static inline void
+svm_msg_q_wait (svm_msg_q_t * mq)
+{
+ pthread_cond_wait (&mq->q->condvar, &mq->q->mutex);
+}
+
+/**
+ * Unlock message queue
+ */
+static inline void
+svm_msg_q_unlock (svm_msg_q_t * mq)
+{
+ /* The other side of the connection is not polling */
+ if (mq->q->cursize < (mq->q->maxsize / 8))
+ (void) pthread_cond_broadcast (&mq->q->condvar);
+ pthread_mutex_unlock (&mq->q->mutex);
+}
+
#endif /* SRC_SVM_MESSAGE_QUEUE_H_ */
/*
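On the consumer side, the header's new inline helpers are intended to be combined roughly as follows: take the mutex, drain with svm_msg_q_sub_w_lock(), free each ring slot once the payload has been read, then unlock. A sketch under those assumptions; consumer_drain and the u32 payload are illustrative, not part of the patch:

#include <svm/message_queue.h>

/* Illustrative consumer: hold the mutex while draining, free each ring
 * slot after copying out the payload. process_u32() stands in for the
 * application's handling of the message. */
static void
consumer_drain (svm_msg_q_t * mq, void (*process_u32) (u32))
{
  svm_msg_q_msg_t msg;
  u32 value;

  svm_msg_q_lock (mq);
  while (!svm_msg_q_is_empty (mq))
    {
      svm_msg_q_sub_w_lock (mq, &msg);
      value = *(u32 *) svm_msg_q_msg_data (mq, &msg);
      svm_msg_q_free_msg (mq, &msg);
      process_u32 (value);
    }
  svm_msg_q_unlock (mq);
}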
diff --git a/src/svm/queue.c b/src/svm/queue.c
index 96e40fc2aec..8e18f5832e3 100644
--- a/src/svm/queue.c
+++ b/src/svm/queue.c
@@ -154,26 +154,16 @@ svm_queue_add_nolock (svm_queue_t * q, u8 * elem)
return 0;
}
-int
+void
svm_queue_add_raw (svm_queue_t * q, u8 * elem)
{
i8 *tailp;
- if (PREDICT_FALSE (q->cursize == q->maxsize))
- {
- while (q->cursize == q->maxsize)
- ;
- }
-
tailp = (i8 *) (&q->data[0] + q->elsize * q->tail);
clib_memcpy (tailp, elem, q->elsize);
- q->tail++;
+ q->tail = (q->tail + 1) % q->maxsize;
q->cursize++;
-
- if (q->tail == q->maxsize)
- q->tail = 0;
- return 0;
}
@@ -414,11 +404,9 @@ svm_queue_sub_raw (svm_queue_t * q, u8 * elem)
headp = (i8 *) (&q->data[0] + q->elsize * q->head);
clib_memcpy (elem, headp, q->elsize);
- q->head++;
+ q->head = (q->head + 1) % q->maxsize;
q->cursize--;
- if (q->head == q->maxsize)
- q->head = 0;
return 0;
}
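With the busy-wait removed, svm_queue_add_raw() now assumes the caller already holds the queue mutex and has verified there is space; the tail simply wraps with modulo arithmetic. A small sketch of that caller contract, assuming the existing svm_queue_lock()/svm_queue_unlock() helpers; enqueue_raw_checked is illustrative, not part of the patch:

#include <svm/queue.h>

/* Illustrative caller contract for the reworked raw enqueue: take the
 * mutex, check for space, then add; no spinning inside the queue code. */
static int
enqueue_raw_checked (svm_queue_t * q, u8 * elem)
{
  svm_queue_lock (q);
  if (q->cursize == q->maxsize)
    {
      svm_queue_unlock (q);
      return -1;		/* full: let the caller decide how to retry */
    }
  svm_queue_add_raw (q, elem);
  svm_queue_unlock (q);
  return 0;
}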
diff --git a/src/svm/queue.h b/src/svm/queue.h
index 856c17237d1..68a63d769b6 100644
--- a/src/svm/queue.h
+++ b/src/svm/queue.h
@@ -69,7 +69,13 @@ void svm_queue_unlock (svm_queue_t * q);
int svm_queue_is_full (svm_queue_t * q);
int svm_queue_add_nolock (svm_queue_t * q, u8 * elem);
int svm_queue_sub_raw (svm_queue_t * q, u8 * elem);
-int svm_queue_add_raw (svm_queue_t * q, u8 * elem);
+
+/**
+ * Add element to queue with mutex held
+ * @param q queue
+ * @param elem pointer element data to add
+ */
+void svm_queue_add_raw (svm_queue_t * q, u8 * elem);
/*
* DEPRECATED please use svm_queue_t instead
diff --git a/src/svm/svm_fifo.h b/src/svm/svm_fifo.h
index 0d5a08b86ae..40182901db5 100644
--- a/src/svm/svm_fifo.h
+++ b/src/svm/svm_fifo.h
@@ -107,6 +107,12 @@ svm_fifo_max_dequeue (svm_fifo_t * f)
return f->cursize;
}
+static inline int
+svm_fifo_is_full (svm_fifo_t * f)
+{
+ return (f->cursize == f->nitems);
+}
+
static inline u32
svm_fifo_max_enqueue (svm_fifo_t * f)
{
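The new svm_fifo_is_full() helper gives producers a cheap guard before attempting an enqueue. An illustrative use, assuming the existing svm_fifo_enqueue_nowait() path; fifo_try_send is a hypothetical helper, not part of the patch:

#include <svm/svm_fifo.h>

/* Hypothetical helper: only attempt the copy when the fifo has room. */
static int
fifo_try_send (svm_fifo_t * f, u8 * data, u32 len)
{
  if (svm_fifo_is_full (f))
    return -1;			/* no space; back off and retry later */
  return svm_fifo_enqueue_nowait (f, len, data);
}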
diff --git a/src/svm/test_svm_message_queue.c b/src/svm/test_svm_message_queue.c
index 69ffd131ac2..758163ffeab 100644
--- a/src/svm/test_svm_message_queue.c
+++ b/src/svm/test_svm_message_queue.c
@@ -88,9 +88,9 @@ test1 (int verbose)
test1_error ("failed: msg alloc3");
*(u32 *)svm_msg_q_msg_data (mq, &msg2) = 123;
- svm_msg_q_add (mq, msg2, SVM_Q_NOWAIT);
+ svm_msg_q_add (mq, &msg2, SVM_Q_NOWAIT);
for (i = 0; i < 12; i++)
- svm_msg_q_add (mq, msg[i], SVM_Q_NOWAIT);
+ svm_msg_q_add (mq, &msg[i], SVM_Q_NOWAIT);
if (svm_msg_q_sub (mq, &msg2, SVM_Q_NOWAIT, 0))
test1_error ("failed: dequeue1");