author     Florin Coras <fcoras@cisco.com>          2018-09-11 00:10:41 -0700
committer  Marco Varlese <marco.varlese@suse.de>    2018-09-12 09:13:16 +0000
commit     41c9e04be0ca3a081926045e78dc969dab563532 (patch)
tree       2b0e666792415ebd8fc3fed5847652321231e4a8 /src/svm
parent     ffb14b9554afa1e58c3657e0c91dda3135008274 (diff)
vcl: improve read and fifo event handling
Change-Id: Ic1c51818b8aa8dbd164e70bb3b7471868e5af6f6
Signed-off-by: Florin Coras <fcoras@cisco.com>
Diffstat (limited to 'src/svm')
-rw-r--r--  src/svm/message_queue.c   4
-rw-r--r--  src/svm/svm_fifo.h       10
2 files changed, 9 insertions, 5 deletions
diff --git a/src/svm/message_queue.c b/src/svm/message_queue.c
index d6a77e783e3..a73a56d8044 100644
--- a/src/svm/message_queue.c
+++ b/src/svm/message_queue.c
@@ -173,8 +173,7 @@ svm_msg_q_free_msg (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
 {
   svm_msg_q_ring_t *ring;
 
-  if (vec_len (mq->rings) <= msg->ring_index)
-    return;
+  ASSERT (vec_len (mq->rings) > msg->ring_index);
   ring = &mq->rings[msg->ring_index];
   if (msg->elt_index == ring->head)
     {
@@ -182,6 +181,7 @@ svm_msg_q_free_msg (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
     }
   else
     {
+      clib_warning ("message out of order");
       /* for now, expect messages to be processed in order */
       ASSERT (0);
     }
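
The message_queue.c hunk above turns a silently ignored bad ring index into an assertion and warns when a message is freed out of order. The sketch below is not the VPP API (the ring_t type and the ring_alloc/ring_free helpers are hypothetical); it only illustrates the invariant those assertions protect: slots in a ring-backed pool must be released in allocation order, because freeing only ever advances the head.

/* Minimal sketch, assuming a ring of fixed-size slots that must be
 * freed in allocation order.  Freeing the slot at the ring head
 * advances the head; freeing any other slot is treated as a
 * programming error, mirroring the ASSERTs added above. */
#include <assert.h>
#include <stdio.h>

#define RING_SLOTS 8

typedef struct
{
  unsigned head;		/* oldest allocated slot */
  unsigned cursize;		/* number of allocated slots */
  char data[RING_SLOTS][64];
} ring_t;

static unsigned
ring_alloc (ring_t * r)
{
  assert (r->cursize < RING_SLOTS);
  unsigned slot = (r->head + r->cursize) % RING_SLOTS;
  r->cursize++;
  return slot;
}

static void
ring_free (ring_t * r, unsigned slot)
{
  /* Like svm_msg_q_free_msg after this patch: a bogus index or an
   * out-of-order free is asserted instead of silently ignored. */
  assert (slot < RING_SLOTS);
  assert (slot == r->head);	/* messages processed in order */
  r->head = (r->head + 1) % RING_SLOTS;
  r->cursize--;
}

int
main (void)
{
  ring_t r = { 0 };
  unsigned a = ring_alloc (&r), b = ring_alloc (&r);
  ring_free (&r, a);		/* ok: a is the head */
  ring_free (&r, b);		/* ok: b is now the head */
  printf ("freed %u and %u in order\n", a, b);
  return 0;
}
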
diff --git a/src/svm/svm_fifo.h b/src/svm/svm_fifo.h
index 40242614829..a8aea00996e 100644
--- a/src/svm/svm_fifo.h
+++ b/src/svm/svm_fifo.h
@@ -140,22 +140,26 @@ svm_fifo_has_ooo_data (svm_fifo_t * f)
 /**
  * Sets fifo event flag.
  *
+ * Also acts as a release barrier.
+ *
  * @return 1 if flag was not set.
  */
 always_inline u8
 svm_fifo_set_event (svm_fifo_t * f)
 {
-  /* Probably doesn't need to be atomic. Still, better avoid surprises */
-  return __sync_lock_test_and_set (&f->has_event, 1) == 0;
+  // return __sync_lock_test_and_set (&f->has_event, 1) == 0;
+  // return __sync_bool_compare_and_swap (&f->has_event, 0, 1);
+  return !__atomic_exchange_n (&f->has_event, 1, __ATOMIC_RELEASE);
 }
 
 /**
  * Unsets fifo event flag.
+ *
+ * Also acts as a release barrier.
  */
 always_inline void
 svm_fifo_unset_event (svm_fifo_t * f)
 {
-  /* Probably doesn't need to be atomic. Still, better avoid surprises */
   __sync_lock_release (&f->has_event);
 }
 
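
The svm_fifo.h hunk replaces __sync_lock_test_and_set with __atomic_exchange_n using __ATOMIC_RELEASE, so setting the event flag also publishes any fifo data written before it. The standalone sketch below (the producer thread, payload variable, and pthread scaffolding are illustrative, not part of VPP) shows the release/acquire pairing this relies on: the producer writes data and then raises the flag with the same builtin used in the diff, and a consumer that observes the flag with an acquire load can safely read that data.

/* Minimal sketch of the set/unset pattern above, outside of VPP.
 * Build with: gcc -O2 -pthread example.c */
#include <pthread.h>
#include <stdio.h>

static int payload;		/* stands in for fifo contents */
static unsigned char has_event;	/* event flag, as in svm_fifo_t */

static int
set_event (void)
{
  /* Returns 1 if the flag was not already set, as in svm_fifo_set_event.
   * The release exchange orders the payload store before the flag. */
  return !__atomic_exchange_n (&has_event, 1, __ATOMIC_RELEASE);
}

static void
unset_event (void)
{
  /* Clears the flag with release semantics, as in svm_fifo_unset_event. */
  __sync_lock_release (&has_event);
}

static void *
producer (void *arg)
{
  payload = 42;			/* write the data first ... */
  if (set_event ())		/* ... then publish the event */
    fprintf (stderr, "producer: event raised\n");
  return 0;
}

int
main (void)
{
  pthread_t t;
  pthread_create (&t, 0, producer, 0);

  /* Consumer side: wait for the flag with an acquire load; once it is
   * observed, the payload written before the release is visible. */
  while (!__atomic_load_n (&has_event, __ATOMIC_ACQUIRE))
    ;
  printf ("consumer: payload = %d\n", payload);
  unset_event ();

  pthread_join (t, 0);
  return 0;
}
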