Diffstat (limited to 'lib/librte_ring/rte_ring_c11_mem.h')
-rw-r--r--  lib/librte_ring/rte_ring_c11_mem.h  24  ++++++++++++++++++------
1 file changed, 18 insertions(+), 6 deletions(-)
diff --git a/lib/librte_ring/rte_ring_c11_mem.h b/lib/librte_ring/rte_ring_c11_mem.h
index 94df3c4a..7bc74a4c 100644
--- a/lib/librte_ring/rte_ring_c11_mem.h
+++ b/lib/librte_ring/rte_ring_c11_mem.h
@@ -57,23 +57,27 @@ __rte_ring_move_prod_head(struct rte_ring *r, unsigned int is_sp,
uint32_t *free_entries)
{
const uint32_t capacity = r->capacity;
+ uint32_t cons_tail;
unsigned int max = n;
int success;
+ *old_head = __atomic_load_n(&r->prod.head, __ATOMIC_ACQUIRE);
do {
/* Reset n to the initial burst count */
n = max;
- *old_head = __atomic_load_n(&r->prod.head,
- __ATOMIC_ACQUIRE);
+ /* this load-acquire synchronizes with the store-release
+ * of ht->tail in update_tail.
+ */
+ cons_tail = __atomic_load_n(&r->cons.tail,
+ __ATOMIC_ACQUIRE);
- /*
- * The subtraction is done between two unsigned 32bits value
+ /* The subtraction is done between two unsigned 32-bit values
* (the result is always modulo 32 bits even if we have
* *old_head > cons_tail). So 'free_entries' is always between 0
* and capacity (which is < size).
*/
- *free_entries = (capacity + r->cons.tail - *old_head);
+ *free_entries = (capacity + cons_tail - *old_head);
/* check that we have enough room in ring */
if (unlikely(n > *free_entries))
@@ -87,6 +91,7 @@ __rte_ring_move_prod_head(struct rte_ring *r, unsigned int is_sp,
if (is_sp)
r->prod.head = *new_head, success = 1;
else
+ /* on failure, *old_head is updated */
success = __atomic_compare_exchange_n(&r->prod.head,
old_head, *new_head,
0, __ATOMIC_ACQUIRE,
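
Why the initial load of prod.head can move out of the do/while loop: on failure,
__atomic_compare_exchange_n writes the value it actually observed in r->prod.head
back into *old_head, so every retry already starts from a fresh head value and the
per-iteration reload was redundant. Below is a minimal standalone sketch of the same
retry idiom, not DPDK code; 'head' and 'move_head' are hypothetical stand-ins for
r->prod.head and __rte_ring_move_prod_head.

#include <stdint.h>

/* Sketch only: CAS retry loop in the style of __rte_ring_move_prod_head. */
static uint32_t
move_head(uint32_t *head, uint32_t n)
{
	uint32_t old, new;
	int success;

	/* One load before the loop is enough ... */
	old = __atomic_load_n(head, __ATOMIC_ACQUIRE);
	do {
		new = old + n;
		/* ... because a failed CAS stores the value it observed
		 * into 'old' before the next iteration, exactly as the
		 * patch's comment notes for *old_head.
		 */
		success = __atomic_compare_exchange_n(head, &old, new,
				0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
	} while (!success);

	return old;
}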
@@ -125,13 +130,19 @@ __rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
uint32_t *entries)
{
unsigned int max = n;
+ uint32_t prod_tail;
int success;
/* move cons.head atomically */
+ *old_head = __atomic_load_n(&r->cons.head, __ATOMIC_ACQUIRE);
do {
/* Restore n as it may change every loop */
n = max;
- *old_head = __atomic_load_n(&r->cons.head,
- __ATOMIC_ACQUIRE);
+
+ /* this load-acquire synchronizes with the store-release
+ * of ht->tail in update_tail.
+ */
+ prod_tail = __atomic_load_n(&r->prod.tail,
+ __ATOMIC_ACQUIRE);
/* The subtraction is done between two unsigned 32-bit values
@@ -139,7 +150,7 @@ __rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
* (the result is always modulo 32 bits even if we have
* cons_head > prod_tail). So 'entries' is always between 0
* and size(ring)-1.
*/
- *entries = (r->prod.tail - *old_head);
+ *entries = (prod_tail - *old_head);
/* Set the actual entries for dequeue */
if (n > *entries)
@@ -152,6 +163,7 @@ __rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
if (is_sc)
r->cons.head = *new_head, success = 1;
else
+ /* on failure, *old_head is updated */
success = __atomic_compare_exchange_n(&r->cons.head,
old_head, *new_head,
0, __ATOMIC_ACQUIRE,
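
For reference, the release half that these load-acquires pair with is the
store-release on ht->tail performed in update_tail, as the added comments note.
Below is a minimal, self-contained sketch of that acquire/release pairing; the
names 'slot' and 'tail' are hypothetical stand-ins for a ring entry and ht->tail.

#include <stdint.h>

static uint32_t slot;	/* stands in for a ring entry */
static uint32_t tail;	/* stands in for ht->tail */

/* Producer: write the data, then publish it with a store-release,
 * mirroring what update_tail does after an enqueue.
 */
static void
publish(uint32_t val)
{
	slot = val;
	__atomic_store_n(&tail, 1, __ATOMIC_RELEASE);
}

/* Consumer: load-acquire 'tail', like the patched
 * __rte_ring_move_cons_head does for r->prod.tail. If it observes
 * the released value, the earlier write to 'slot' is visible too.
 */
static int
consume(uint32_t *out)
{
	if (__atomic_load_n(&tail, __ATOMIC_ACQUIRE) == 0)
		return 0;	/* nothing published yet */
	*out = slot;		/* ordered after the acquire */
	return 1;
}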