author    Christian Ehrhardt <christian.ehrhardt@canonical.com>  2017-05-16 14:51:32 +0200
committer Christian Ehrhardt <christian.ehrhardt@canonical.com>  2017-05-16 16:20:45 +0200
commit    7595afa4d30097c1177b69257118d8ad89a539be (patch)
tree      4bfeadc905c977e45e54a90c42330553b8942e4e /lib/librte_ring/rte_ring.h
parent    ce3d555e43e3795b5d9507fcfc76b7a0a92fd0d6 (diff)

Imported Upstream version 17.05

Change-Id: Id1e419c5a214e4a18739663b91f0f9a549f1fdc6
Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
Diffstat (limited to 'lib/librte_ring/rte_ring.h')
-rw-r--r--  lib/librte_ring/rte_ring.h  809
1 file changed, 324 insertions(+), 485 deletions(-)
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 32b8c8d2..97f025a1 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -106,38 +106,30 @@ extern "C" {
enum rte_ring_queue_behavior {
RTE_RING_QUEUE_FIXED = 0, /* Enq/Deq a fixed number of items from a ring */
- RTE_RING_QUEUE_VARIABLE /* Enq/Deq as many items a possible from ring */
+ RTE_RING_QUEUE_VARIABLE /* Enq/Deq as many items as possible from ring */
};
-#ifdef RTE_LIBRTE_RING_DEBUG
-/**
- * A structure that stores the ring statistics (per-lcore).
- */
-struct rte_ring_debug_stats {
- uint64_t enq_success_bulk; /**< Successful enqueues number. */
- uint64_t enq_success_objs; /**< Objects successfully enqueued. */
- uint64_t enq_quota_bulk; /**< Successful enqueues above watermark. */
- uint64_t enq_quota_objs; /**< Objects enqueued above watermark. */
- uint64_t enq_fail_bulk; /**< Failed enqueues number. */
- uint64_t enq_fail_objs; /**< Objects that failed to be enqueued. */
- uint64_t deq_success_bulk; /**< Successful dequeues number. */
- uint64_t deq_success_objs; /**< Objects successfully dequeued. */
- uint64_t deq_fail_bulk; /**< Failed dequeues number. */
- uint64_t deq_fail_objs; /**< Objects that failed to be dequeued. */
-} __rte_cache_aligned;
-#endif
-
#define RTE_RING_MZ_PREFIX "RG_"
/**< The maximum length of a ring name. */
#define RTE_RING_NAMESIZE (RTE_MEMZONE_NAMESIZE - \
sizeof(RTE_RING_MZ_PREFIX) + 1)
-#ifndef RTE_RING_PAUSE_REP_COUNT
-#define RTE_RING_PAUSE_REP_COUNT 0 /**< Yield after pause num of times, no yield
- * if RTE_RING_PAUSE_REP not defined. */
+struct rte_memzone; /* forward declaration, so as not to require memzone.h */
+
+#if RTE_CACHE_LINE_SIZE < 128
+#define PROD_ALIGN (RTE_CACHE_LINE_SIZE * 2)
+#define CONS_ALIGN (RTE_CACHE_LINE_SIZE * 2)
+#else
+#define PROD_ALIGN RTE_CACHE_LINE_SIZE
+#define CONS_ALIGN RTE_CACHE_LINE_SIZE
#endif
-struct rte_memzone; /* forward declaration, so as not to require memzone.h */
+/* structure to hold a pair of head/tail values and other metadata */
+struct rte_ring_headtail {
+ volatile uint32_t head; /**< Prod/consumer head. */
+ volatile uint32_t tail; /**< Prod/consumer tail. */
+ uint32_t single; /**< True if single prod/cons */
+};
/**
* An RTE ring structure.
@@ -155,68 +147,29 @@ struct rte_ring {
* compatibility requirements, it could be changed to RTE_RING_NAMESIZE
* next time the ABI changes
*/
- char name[RTE_MEMZONE_NAMESIZE]; /**< Name of the ring. */
- int flags; /**< Flags supplied at creation. */
+ char name[RTE_MEMZONE_NAMESIZE] __rte_cache_aligned; /**< Name of the ring. */
+ int flags; /**< Flags supplied at creation. */
const struct rte_memzone *memzone;
/**< Memzone, if any, containing the rte_ring */
+ uint32_t size; /**< Size of ring. */
+ uint32_t mask; /**< Mask (size-1) of ring. */
/** Ring producer status. */
- struct prod {
- uint32_t watermark; /**< Maximum items before EDQUOT. */
- uint32_t sp_enqueue; /**< True, if single producer. */
- uint32_t size; /**< Size of ring. */
- uint32_t mask; /**< Mask (size-1) of ring. */
- volatile uint32_t head; /**< Producer head. */
- volatile uint32_t tail; /**< Producer tail. */
- } prod __rte_cache_aligned;
+ struct rte_ring_headtail prod __rte_aligned(PROD_ALIGN);
/** Ring consumer status. */
- struct cons {
- uint32_t sc_dequeue; /**< True, if single consumer. */
- uint32_t size; /**< Size of the ring. */
- uint32_t mask; /**< Mask (size-1) of ring. */
- volatile uint32_t head; /**< Consumer head. */
- volatile uint32_t tail; /**< Consumer tail. */
-#ifdef RTE_RING_SPLIT_PROD_CONS
- } cons __rte_cache_aligned;
-#else
- } cons;
-#endif
-
-#ifdef RTE_LIBRTE_RING_DEBUG
- struct rte_ring_debug_stats stats[RTE_MAX_LCORE];
-#endif
-
- void *ring[] __rte_cache_aligned; /**< Memory space of ring starts here.
- * not volatile so need to be careful
- * about compiler re-ordering */
+ struct rte_ring_headtail cons __rte_aligned(CONS_ALIGN);
};
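With the flexible array member gone, the object storage is assumed to begin immediately after the cache-aligned header, which is why the worker code further down passes &r[1] as the ring start. A minimal sketch of the slot addressing under that layout assumption (ring_slot is a hypothetical helper, not part of the patch):

#include <stdint.h>

/* Hypothetical helper: locate slot 'pos' in the power-of-2 ring.
 * The mask (size - 1) implements the modulo wrap without a division. */
static inline void **
ring_slot(struct rte_ring *r, uint32_t pos)
{
	void **ring = (void **)&r[1]; /* storage starts right after the header */
	return &ring[pos & r->mask];
}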
#define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". */
#define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
-#define RTE_RING_QUOT_EXCEED (1 << 31) /**< Quota exceed for burst ops */
#define RTE_RING_SZ_MASK (unsigned)(0x0fffffff) /**< Ring size mask */
-/**
- * @internal When debug is enabled, store ring statistics.
- * @param r
- * A pointer to the ring.
- * @param name
- * The name of the statistics field to increment in the ring.
- * @param n
- * The number to add to the object-oriented statistics.
- */
-#ifdef RTE_LIBRTE_RING_DEBUG
-#define __RING_STAT_ADD(r, name, n) do { \
- unsigned __lcore_id = rte_lcore_id(); \
- if (__lcore_id < RTE_MAX_LCORE) { \
- r->stats[__lcore_id].name##_objs += n; \
- r->stats[__lcore_id].name##_bulk += 1; \
- } \
- } while(0)
-#else
-#define __RING_STAT_ADD(r, name, n) do {} while(0)
-#endif
+/* @internal defines for passing to the enqueue/dequeue worker functions */
+#define __IS_SP 1
+#define __IS_MP 0
+#define __IS_SC 1
+#define __IS_MC 0
/**
* Calculate the memory size needed for a ring
@@ -321,26 +274,6 @@ struct rte_ring *rte_ring_create(const char *name, unsigned count,
void rte_ring_free(struct rte_ring *r);
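A brief usage sketch for the create/free pair; the count must be a power of two in this version, and error handling is trimmed for brevity:

#include <rte_ring.h>
#include <rte_lcore.h>

static int
ring_demo(void)
{
	struct rte_ring *r = rte_ring_create("example_ring", 1024,
			rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (r == NULL)
		return -1; /* rte_errno holds the failure cause */
	/* ... enqueue/dequeue traffic goes here ... */
	rte_ring_free(r);
	return 0;
}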
/**
- * Change the high water mark.
- *
- * If *count* is 0, water marking is disabled. Otherwise, it is set to the
- * *count* value. The *count* value must be greater than 0 and less
- * than the ring size.
- *
- * This function can be called at any time (not necessarily at
- * initialization).
- *
- * @param r
- * A pointer to the ring structure.
- * @param count
- * The new water mark value.
- * @return
- * - 0: Success; water mark changed.
- * - -EINVAL: Invalid water mark value.
- */
-int rte_ring_set_water_mark(struct rte_ring *r, unsigned count);
-
-/**
* Dump the status of the ring to a file.
*
* @param f
@@ -353,171 +286,147 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
/* the actual enqueue of pointers on the ring.
* Placed here since identical code needed in both
* single and multi producer enqueue functions */
-#define ENQUEUE_PTRS() do { \
- const uint32_t size = r->prod.size; \
- uint32_t idx = prod_head & mask; \
+#define ENQUEUE_PTRS(r, ring_start, prod_head, obj_table, n, obj_type) do { \
+ unsigned int i; \
+ const uint32_t size = (r)->size; \
+ uint32_t idx = prod_head & (r)->mask; \
+ obj_type *ring = (obj_type *)ring_start; \
if (likely(idx + n < size)) { \
for (i = 0; i < (n & ((~(unsigned)0x3))); i+=4, idx+=4) { \
- r->ring[idx] = obj_table[i]; \
- r->ring[idx+1] = obj_table[i+1]; \
- r->ring[idx+2] = obj_table[i+2]; \
- r->ring[idx+3] = obj_table[i+3]; \
+ ring[idx] = obj_table[i]; \
+ ring[idx+1] = obj_table[i+1]; \
+ ring[idx+2] = obj_table[i+2]; \
+ ring[idx+3] = obj_table[i+3]; \
} \
switch (n & 0x3) { \
- case 3: r->ring[idx++] = obj_table[i++]; \
- case 2: r->ring[idx++] = obj_table[i++]; \
- case 1: r->ring[idx++] = obj_table[i++]; \
+ case 3: \
+ ring[idx++] = obj_table[i++]; /* fallthrough */ \
+ case 2: \
+ ring[idx++] = obj_table[i++]; /* fallthrough */ \
+ case 1: \
+ ring[idx++] = obj_table[i++]; \
} \
} else { \
for (i = 0; idx < size; i++, idx++)\
- r->ring[idx] = obj_table[i]; \
+ ring[idx] = obj_table[i]; \
for (idx = 0; i < n; i++, idx++) \
- r->ring[idx] = obj_table[i]; \
+ ring[idx] = obj_table[i]; \
} \
-} while(0)
+} while (0)
/* the actual copy of pointers on the ring to obj_table.
* Placed here since identical code needed in both
* single and multi consumer dequeue functions */
-#define DEQUEUE_PTRS() do { \
- uint32_t idx = cons_head & mask; \
- const uint32_t size = r->cons.size; \
+#define DEQUEUE_PTRS(r, ring_start, cons_head, obj_table, n, obj_type) do { \
+ unsigned int i; \
+ uint32_t idx = cons_head & (r)->mask; \
+ const uint32_t size = (r)->size; \
+ obj_type *ring = (obj_type *)ring_start; \
if (likely(idx + n < size)) { \
for (i = 0; i < (n & (~(unsigned)0x3)); i+=4, idx+=4) {\
- obj_table[i] = r->ring[idx]; \
- obj_table[i+1] = r->ring[idx+1]; \
- obj_table[i+2] = r->ring[idx+2]; \
- obj_table[i+3] = r->ring[idx+3]; \
+ obj_table[i] = ring[idx]; \
+ obj_table[i+1] = ring[idx+1]; \
+ obj_table[i+2] = ring[idx+2]; \
+ obj_table[i+3] = ring[idx+3]; \
} \
switch (n & 0x3) { \
- case 3: obj_table[i++] = r->ring[idx++]; \
- case 2: obj_table[i++] = r->ring[idx++]; \
- case 1: obj_table[i++] = r->ring[idx++]; \
+ case 3: \
+ obj_table[i++] = ring[idx++]; /* fallthrough */ \
+ case 2: \
+ obj_table[i++] = ring[idx++]; /* fallthrough */ \
+ case 1: \
+ obj_table[i++] = ring[idx++]; \
} \
} else { \
for (i = 0; idx < size; i++, idx++) \
- obj_table[i] = r->ring[idx]; \
+ obj_table[i] = ring[idx]; \
for (idx = 0; i < n; i++, idx++) \
- obj_table[i] = r->ring[idx]; \
+ obj_table[i] = ring[idx]; \
} \
} while (0)
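The 4-way unrolling and the fallthrough switch in these macros are performance details; the underlying copy is simply split in two when it crosses the end of the array. An equivalent, un-unrolled sketch of the dequeue-side copy (illustrative only, not part of the patch):

#include <stdint.h>

static inline void
copy_out(void *const *ring, uint32_t size, uint32_t mask,
		uint32_t cons_head, void **obj_table, unsigned int n)
{
	uint32_t idx = cons_head & mask;
	unsigned int i;

	if (idx + n < size) {          /* no wrap: one straight run */
		for (i = 0; i < n; i++)
			obj_table[i] = ring[idx + i];
	} else {                       /* wrap: copy to end, then from start */
		for (i = 0; idx < size; i++, idx++)
			obj_table[i] = ring[idx];
		for (idx = 0; i < n; i++, idx++)
			obj_table[i] = ring[idx];
	}
}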
+static inline __attribute__((always_inline)) void
+update_tail(struct rte_ring_headtail *ht, uint32_t old_val, uint32_t new_val,
+ uint32_t single)
+{
+ /*
+ * If there are other enqueues/dequeues in progress that preceded us,
+ * we need to wait for them to complete
+ */
+ if (!single)
+ while (unlikely(ht->tail != old_val))
+ rte_pause();
+
+ ht->tail = new_val;
+}
+
/**
- * @internal Enqueue several objects on the ring (multi-producers safe).
- *
- * This function uses a "compare and set" instruction to move the
- * producer index atomically.
+ * @internal This function updates the producer head for enqueue
*
* @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects).
+ * A pointer to the ring structure
+ * @param is_sp
+ * Indicates whether multi-producer path is needed or not
* @param n
- * The number of objects to add in the ring from the obj_table.
+ * The number of elements we will want to enqueue, i.e. how far should the
+ * head be moved
* @param behavior
* RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Enqueue as many items a possible from ring
+ * RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring
+ * @param old_head
+ * Returns head value as it was before the move, i.e. where enqueue starts
+ * @param new_head
+ * Returns the current/new head value i.e. where enqueue finishes
+ * @param free_entries
+ * Returns the amount of free space in the ring BEFORE head was moved
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects enqueue.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
- * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects enqueued.
+ * Actual number of objects enqueued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline int __attribute__((always_inline))
-__rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
- unsigned n, enum rte_ring_queue_behavior behavior)
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ uint32_t *old_head, uint32_t *new_head,
+ uint32_t *free_entries)
{
- uint32_t prod_head, prod_next;
- uint32_t cons_tail, free_entries;
- const unsigned max = n;
+ const uint32_t mask = r->mask;
+ unsigned int max = n;
int success;
- unsigned i, rep = 0;
- uint32_t mask = r->prod.mask;
- int ret;
-
- /* Avoid the unnecessary cmpset operation below, which is also
- * potentially harmful when n equals 0. */
- if (n == 0)
- return 0;
- /* move prod.head atomically */
do {
/* Reset n to the initial burst count */
n = max;
- prod_head = r->prod.head;
- cons_tail = r->cons.tail;
+ *old_head = r->prod.head;
+ const uint32_t cons_tail = r->cons.tail;
/* The subtraction is done between two unsigned 32bits value
* (the result is always modulo 32 bits even if we have
- * prod_head > cons_tail). So 'free_entries' is always between 0
+ * *old_head > cons_tail). So 'free_entries' is always between 0
* and size(ring)-1. */
- free_entries = (mask + cons_tail - prod_head);
+ *free_entries = (mask + cons_tail - *old_head);
/* check that we have enough room in ring */
- if (unlikely(n > free_entries)) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, enq_fail, n);
- return -ENOBUFS;
- }
- else {
- /* No free entry available */
- if (unlikely(free_entries == 0)) {
- __RING_STAT_ADD(r, enq_fail, n);
- return 0;
- }
-
- n = free_entries;
- }
- }
-
- prod_next = prod_head + n;
- success = rte_atomic32_cmpset(&r->prod.head, prod_head,
- prod_next);
+ if (unlikely(n > *free_entries))
+ n = (behavior == RTE_RING_QUEUE_FIXED) ?
+ 0 : *free_entries;
+
+ if (n == 0)
+ return 0;
+
+ *new_head = *old_head + n;
+ if (is_sp)
+ r->prod.head = *new_head, success = 1;
+ else
+ success = rte_atomic32_cmpset(&r->prod.head,
+ *old_head, *new_head);
} while (unlikely(success == 0));
-
- /* write entries in ring */
- ENQUEUE_PTRS();
- rte_smp_wmb();
-
- /* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
- ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
- (int)(n | RTE_RING_QUOT_EXCEED);
- __RING_STAT_ADD(r, enq_quota, n);
- }
- else {
- ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
- __RING_STAT_ADD(r, enq_success, n);
- }
-
- /*
- * If there are other enqueues in progress that preceded us,
- * we need to wait for them to complete
- */
- while (unlikely(r->prod.tail != prod_head)) {
- rte_pause();
-
- /* Set RTE_RING_PAUSE_REP_COUNT to avoid spin too long waiting
- * for other thread finish. It gives pre-empted thread a chance
- * to proceed and finish with ring dequeue operation. */
- if (RTE_RING_PAUSE_REP_COUNT &&
- ++rep == RTE_RING_PAUSE_REP_COUNT) {
- rep = 0;
- sched_yield();
- }
- }
- r->prod.tail = prod_next;
- return ret;
+ return n;
}
/**
- * @internal Enqueue several objects on a ring (NOT multi-producers safe).
+ * @internal Enqueue several objects on the ring
*
- * @param r
+ * @param r
* A pointer to the ring structure.
* @param obj_table
* A pointer to a table of void * pointers (objects).
@@ -525,242 +434,142 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
* The number of objects to add in the ring from the obj_table.
* @param behavior
* RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Enqueue as many items a possible from ring
+ * RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring
+ * @param is_sp
+ * Indicates whether to use single producer or multi-producer head update
+ * @param free_space
+ * returns the amount of space after the enqueue operation has finished
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects enqueue.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
- * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects enqueued.
+ * Actual number of objects enqueued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline int __attribute__((always_inline))
-__rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
- unsigned n, enum rte_ring_queue_behavior behavior)
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ int is_sp, unsigned int *free_space)
{
- uint32_t prod_head, cons_tail;
- uint32_t prod_next, free_entries;
- unsigned i;
- uint32_t mask = r->prod.mask;
- int ret;
-
- prod_head = r->prod.head;
- cons_tail = r->cons.tail;
- /* The subtraction is done between two unsigned 32bits value
- * (the result is always modulo 32 bits even if we have
- * prod_head > cons_tail). So 'free_entries' is always between 0
- * and size(ring)-1. */
- free_entries = mask + cons_tail - prod_head;
-
- /* check that we have enough room in ring */
- if (unlikely(n > free_entries)) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, enq_fail, n);
- return -ENOBUFS;
- }
- else {
- /* No free entry available */
- if (unlikely(free_entries == 0)) {
- __RING_STAT_ADD(r, enq_fail, n);
- return 0;
- }
-
- n = free_entries;
- }
- }
-
- prod_next = prod_head + n;
- r->prod.head = prod_next;
-
- /* write entries in ring */
- ENQUEUE_PTRS();
+ uint32_t prod_head, prod_next;
+ uint32_t free_entries;
+
+ n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
+ &prod_head, &prod_next, &free_entries);
+ if (n == 0)
+ goto end;
+
+ ENQUEUE_PTRS(r, &r[1], prod_head, obj_table, n, void *);
rte_smp_wmb();
- /* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
- ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
- (int)(n | RTE_RING_QUOT_EXCEED);
- __RING_STAT_ADD(r, enq_quota, n);
- }
- else {
- ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
- __RING_STAT_ADD(r, enq_success, n);
- }
-
- r->prod.tail = prod_next;
- return ret;
+ update_tail(&r->prod, prod_head, prod_next, is_sp);
+end:
+ if (free_space != NULL)
+ *free_space = free_entries - n;
+ return n;
}
/**
- * @internal Dequeue several objects from a ring (multi-consumers safe). When
- * the request objects are more than the available objects, only dequeue the
- * actual number of objects
- *
- * This function uses a "compare and set" instruction to move the
- * consumer index atomically.
+ * @internal This function updates the consumer head for dequeue
*
* @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects) that will be filled.
+ * A pointer to the ring structure
+ * @param is_sc
+ * Indicates whether multi-consumer path is needed or not
* @param n
- * The number of objects to dequeue from the ring to the obj_table.
+ * The number of elements we will want to dequeue, i.e. how far should the
+ * head be moved
* @param behavior
* RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Dequeue as many items a possible from ring
+ * RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
+ * @param old_head
+ * Returns head value as it was before the move, i.e. where dequeue starts
+ * @param new_head
+ * Returns the current/new head value i.e. where dequeue finishes
+ * @param entries
+ * Returns the number of entries in the ring BEFORE head was moved
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects dequeued.
+ * - Actual number of objects dequeued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-
-static inline int __attribute__((always_inline))
-__rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
- unsigned n, enum rte_ring_queue_behavior behavior)
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ uint32_t *old_head, uint32_t *new_head,
+ uint32_t *entries)
{
- uint32_t cons_head, prod_tail;
- uint32_t cons_next, entries;
- const unsigned max = n;
+ unsigned int max = n;
int success;
- unsigned i, rep = 0;
- uint32_t mask = r->prod.mask;
-
- /* Avoid the unnecessary cmpset operation below, which is also
- * potentially harmful when n equals 0. */
- if (n == 0)
- return 0;
/* move cons.head atomically */
do {
/* Restore n as it may change every loop */
n = max;
- cons_head = r->cons.head;
- prod_tail = r->prod.tail;
+ *old_head = r->cons.head;
+ const uint32_t prod_tail = r->prod.tail;
/* The subtraction is done between two unsigned 32bits value
* (the result is always modulo 32 bits even if we have
* cons_head > prod_tail). So 'entries' is always between 0
* and size(ring)-1. */
- entries = (prod_tail - cons_head);
+ *entries = (prod_tail - *old_head);
/* Set the actual entries for dequeue */
- if (n > entries) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, deq_fail, n);
- return -ENOENT;
- }
- else {
- if (unlikely(entries == 0)){
- __RING_STAT_ADD(r, deq_fail, n);
- return 0;
- }
-
- n = entries;
- }
- }
-
- cons_next = cons_head + n;
- success = rte_atomic32_cmpset(&r->cons.head, cons_head,
- cons_next);
+ if (n > *entries)
+ n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : *entries;
+
+ if (unlikely(n == 0))
+ return 0;
+
+ *new_head = *old_head + n;
+ if (is_sc)
+ r->cons.head = *new_head, success = 1;
+ else
+ success = rte_atomic32_cmpset(&r->cons.head, *old_head,
+ *new_head);
} while (unlikely(success == 0));
-
- /* copy in table */
- DEQUEUE_PTRS();
- rte_smp_rmb();
-
- /*
- * If there are other dequeues in progress that preceded us,
- * we need to wait for them to complete
- */
- while (unlikely(r->cons.tail != cons_head)) {
- rte_pause();
-
- /* Set RTE_RING_PAUSE_REP_COUNT to avoid spin too long waiting
- * for other thread finish. It gives pre-empted thread a chance
- * to proceed and finish with ring dequeue operation. */
- if (RTE_RING_PAUSE_REP_COUNT &&
- ++rep == RTE_RING_PAUSE_REP_COUNT) {
- rep = 0;
- sched_yield();
- }
- }
- __RING_STAT_ADD(r, deq_success, n);
- r->cons.tail = cons_next;
-
- return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
+ return n;
}
/**
- * @internal Dequeue several objects from a ring (NOT multi-consumers safe).
- * When the request objects are more than the available objects, only dequeue
- * the actual number of objects
+ * @internal Dequeue several objects from the ring
*
* @param r
* A pointer to the ring structure.
* @param obj_table
- * A pointer to a table of void * pointers (objects) that will be filled.
+ * A pointer to a table of void * pointers (objects).
* @param n
- * The number of objects to dequeue from the ring to the obj_table.
+ * The number of objects to pull from the ring.
* @param behavior
* RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Dequeue as many items a possible from ring
+ * RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
+ * @param is_sc
+ * Indicates whether to use single consumer or multi-consumer head update
+ * @param available
+ * returns the number of remaining ring entries after the dequeue has finished
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects dequeued.
+ * - Actual number of objects dequeued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline int __attribute__((always_inline))
-__rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
- unsigned n, enum rte_ring_queue_behavior behavior)
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ int is_sc, unsigned int *available)
{
- uint32_t cons_head, prod_tail;
- uint32_t cons_next, entries;
- unsigned i;
- uint32_t mask = r->prod.mask;
-
- cons_head = r->cons.head;
- prod_tail = r->prod.tail;
- /* The subtraction is done between two unsigned 32bits value
- * (the result is always modulo 32 bits even if we have
- * cons_head > prod_tail). So 'entries' is always between 0
- * and size(ring)-1. */
- entries = prod_tail - cons_head;
-
- if (n > entries) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, deq_fail, n);
- return -ENOENT;
- }
- else {
- if (unlikely(entries == 0)){
- __RING_STAT_ADD(r, deq_fail, n);
- return 0;
- }
-
- n = entries;
- }
- }
-
- cons_next = cons_head + n;
- r->cons.head = cons_next;
-
- /* copy in table */
- DEQUEUE_PTRS();
+ uint32_t cons_head, cons_next;
+ uint32_t entries;
+
+ n = __rte_ring_move_cons_head(r, is_sc, n, behavior,
+ &cons_head, &cons_next, &entries);
+ if (n == 0)
+ goto end;
+
+ DEQUEUE_PTRS(r, &r[1], cons_head, obj_table, n, void *);
rte_smp_rmb();
- __RING_STAT_ADD(r, deq_success, n);
- r->cons.tail = cons_next;
- return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
+ update_tail(&r->cons, cons_head, cons_next, is_sc);
+
+end:
+ if (available != NULL)
+ *available = entries - n;
+ return n;
}
/**
@@ -775,17 +584,18 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
* A pointer to a table of void * pointers (objects).
* @param n
* The number of objects to add in the ring from the obj_table.
+ * @param free_space
+ * if non-NULL, returns the amount of space in the ring after the
+ * enqueue operation has finished.
* @return
- * - 0: Success; objects enqueue.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
- * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
+ * The number of objects enqueued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ __IS_MP, free_space);
}
/**
@@ -797,17 +607,18 @@ rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* A pointer to a table of void * pointers (objects).
* @param n
* The number of objects to add in the ring from the obj_table.
+ * @param free_space
+ * if non-NULL, returns the amount of space in the ring after the
+ * enqueue operation has finished.
* @return
- * - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
- * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
+ * The number of objects enqueued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ __IS_SP, free_space);
}
/**
@@ -823,20 +634,18 @@ rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* A pointer to a table of void * pointers (objects).
* @param n
* The number of objects to add in the ring from the obj_table.
+ * @param free_space
+ * if non-NULL, returns the amount of space in the ring after the
+ * enqueue operation has finished.
* @return
- * - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
- * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
+ * The number of objects enqueued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- if (r->prod.sp_enqueue)
- return rte_ring_sp_enqueue_bulk(r, obj_table, n);
- else
- return rte_ring_mp_enqueue_bulk(r, obj_table, n);
+ return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ r->prod.single, free_space);
}
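Since the bulk enqueue now returns a count (0 or n) rather than 0/-ENOBUFS, pre-17.05 callers that tested the return value against 0 for success must invert the check. A migration sketch, assuming r is a valid ring and objs holds 32 object pointers:

void *objs[32];

/* pre-17.05: success was (rte_ring_enqueue_bulk(r, objs, 32) == 0) */
if (rte_ring_enqueue_bulk(r, objs, 32, NULL) == 32) {
	/* success: all 32 pointers are now in the ring */
} else {
	/* ring lacked 32 free slots: nothing was enqueued */
}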
/**
@@ -851,14 +660,12 @@ rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* A pointer to the object to be added.
* @return
* - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_mp_enqueue_bulk(r, &obj, 1);
+ return rte_ring_mp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
}
/**
@@ -870,14 +677,12 @@ rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
* A pointer to the object to be added.
* @return
* - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_sp_enqueue_bulk(r, &obj, 1);
+ return rte_ring_sp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
}
/**
@@ -893,17 +698,12 @@ rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
* A pointer to the object to be added.
* @return
* - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
rte_ring_enqueue(struct rte_ring *r, void *obj)
{
- if (r->prod.sp_enqueue)
- return rte_ring_sp_enqueue(r, obj);
- else
- return rte_ring_mp_enqueue(r, obj);
+ return rte_ring_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
}
/**
@@ -918,15 +718,18 @@ rte_ring_enqueue(struct rte_ring *r, void *obj)
* A pointer to a table of void * pointers (objects) that will be filled.
* @param n
* The number of objects to dequeue from the ring to the obj_table.
+ * @param available
+ * If non-NULL, returns the number of remaining ring entries after the
+ * dequeue has finished.
* @return
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
+ * The number of objects dequeued, either 0 or n
*/
-static inline int __attribute__((always_inline))
-rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
+static inline unsigned int __attribute__((always_inline))
+rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
- return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ __IS_MC, available);
}
/**
@@ -939,15 +742,18 @@ rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
* @param n
* The number of objects to dequeue from the ring to the obj_table,
* must be strictly positive.
+ * @param available
+ * If non-NULL, returns the number of remaining ring entries after the
+ * dequeue has finished.
* @return
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
+ * The number of objects dequeued, either 0 or n
*/
-static inline int __attribute__((always_inline))
-rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
+static inline unsigned int __attribute__((always_inline))
+rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
- return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ __IS_SC, available);
}
/**
@@ -963,18 +769,18 @@ rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
* A pointer to a table of void * pointers (objects) that will be filled.
* @param n
* The number of objects to dequeue from the ring to the obj_table.
+ * @param available
+ * If non-NULL, returns the number of remaining ring entries after the
+ * dequeue has finished.
* @return
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue, no object is
- * dequeued.
+ * The number of objects dequeued, either 0 or n
*/
-static inline int __attribute__((always_inline))
-rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
+static inline unsigned int __attribute__((always_inline))
+rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n,
+ unsigned int *available)
{
- if (r->cons.sc_dequeue)
- return rte_ring_sc_dequeue_bulk(r, obj_table, n);
- else
- return rte_ring_mc_dequeue_bulk(r, obj_table, n);
+ return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ r->cons.single, available);
}
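A usage sketch for the all-or-nothing dequeue; the optional available out-parameter reports how many entries remain after the call:

void *batch[16];
unsigned int avail;

if (rte_ring_dequeue_bulk(r, batch, 16, &avail) == 16) {
	/* got exactly 16 objects; 'avail' entries are still queued */
}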
/**
@@ -995,7 +801,7 @@ rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
static inline int __attribute__((always_inline))
rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_mc_dequeue_bulk(r, obj_p, 1);
+ return rte_ring_mc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
}
/**
@@ -1013,7 +819,7 @@ rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
static inline int __attribute__((always_inline))
rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_sc_dequeue_bulk(r, obj_p, 1);
+ return rte_ring_sc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
}
/**
@@ -1035,10 +841,7 @@ rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
static inline int __attribute__((always_inline))
rte_ring_dequeue(struct rte_ring *r, void **obj_p)
{
- if (r->cons.sc_dequeue)
- return rte_ring_sc_dequeue(r, obj_p);
- else
- return rte_ring_mc_dequeue(r, obj_p);
+ return rte_ring_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
}
/**
@@ -1055,7 +858,7 @@ rte_ring_full(const struct rte_ring *r)
{
uint32_t prod_tail = r->prod.tail;
uint32_t cons_tail = r->cons.tail;
- return ((cons_tail - prod_tail - 1) & r->prod.mask) == 0;
+ return ((cons_tail - prod_tail - 1) & r->mask) == 0;
}
/**
@@ -1088,7 +891,7 @@ rte_ring_count(const struct rte_ring *r)
{
uint32_t prod_tail = r->prod.tail;
uint32_t cons_tail = r->cons.tail;
- return (prod_tail - cons_tail) & r->prod.mask;
+ return (prod_tail - cons_tail) & r->mask;
}
/**
@@ -1104,7 +907,21 @@ rte_ring_free_count(const struct rte_ring *r)
{
uint32_t prod_tail = r->prod.tail;
uint32_t cons_tail = r->cons.tail;
- return (cons_tail - prod_tail - 1) & r->prod.mask;
+ return (cons_tail - prod_tail - 1) & r->mask;
+}
+
+/**
+ * Return the size of the ring.
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @return
+ * The number of elements which can be stored in the ring.
+ */
+static inline unsigned int
+rte_ring_get_size(const struct rte_ring *r)
+{
+ return r->size;
}
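Because one slot is kept empty to distinguish a full ring from an empty one, the counts are assumed to satisfy a simple invariant at any quiescent moment (no enqueue/dequeue in flight), sketched below:

#include <assert.h>

static void
check_ring_accounting(const struct rte_ring *r)
{
	/* holds only when no enqueue/dequeue is concurrently in flight */
	assert(rte_ring_count(r) + rte_ring_free_count(r) ==
			rte_ring_get_size(r) - 1);
}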
/**
@@ -1139,14 +956,18 @@ struct rte_ring *rte_ring_lookup(const char *name);
* A pointer to a table of void * pointers (objects).
* @param n
* The number of objects to add in the ring from the obj_table.
+ * @param free_space
+ * if non-NULL, returns the amount of space in the ring after the
+ * enqueue operation has finished.
* @return
* - n: Actual number of objects enqueued.
*/
static inline unsigned __attribute__((always_inline))
rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return __rte_ring_do_enqueue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, __IS_MP, free_space);
}
/**
@@ -1158,14 +979,18 @@ rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
* A pointer to a table of void * pointers (objects).
* @param n
* The number of objects to add in the ring from the obj_table.
+ * @param free_space
+ * if non-NULL, returns the amount of space in the ring after the
+ * enqueue operation has finished.
* @return
* - n: Actual number of objects enqueued.
*/
static inline unsigned __attribute__((always_inline))
rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return __rte_ring_do_enqueue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, __IS_SP, free_space);
}
/**
@@ -1181,17 +1006,18 @@ rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
* A pointer to a table of void * pointers (objects).
* @param n
* The number of objects to add in the ring from the obj_table.
+ * @param free_space
+ * if non-NULL, returns the amount of space in the ring after the
+ * enqueue operation has finished.
* @return
* - n: Actual number of objects enqueued.
*/
static inline unsigned __attribute__((always_inline))
rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- if (r->prod.sp_enqueue)
- return rte_ring_sp_enqueue_burst(r, obj_table, n);
- else
- return rte_ring_mp_enqueue_burst(r, obj_table, n);
+ return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE,
+ r->prod.single, free_space);
}
/**
@@ -1208,13 +1034,18 @@ rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
* A pointer to a table of void * pointers (objects) that will be filled.
* @param n
* The number of objects to dequeue from the ring to the obj_table.
+ * @param available
+ * If non-NULL, returns the number of remaining ring entries after the
+ * dequeue has finished.
* @return
* - n: Actual number of objects dequeued, 0 if ring is empty
*/
static inline unsigned __attribute__((always_inline))
-rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
- return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return __rte_ring_do_dequeue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, __IS_MC, available);
}
/**
@@ -1228,13 +1059,18 @@ rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
* A pointer to a table of void * pointers (objects) that will be filled.
* @param n
* The number of objects to dequeue from the ring to the obj_table.
+ * @param available
+ * If non-NULL, returns the number of remaining ring entries after the
+ * dequeue has finished.
* @return
* - n: Actual number of objects dequeued, 0 if ring is empty
*/
static inline unsigned __attribute__((always_inline))
-rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
- return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return __rte_ring_do_dequeue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, __IS_SC, available);
}
/**
@@ -1250,16 +1086,19 @@ rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
* A pointer to a table of void * pointers (objects) that will be filled.
* @param n
* The number of objects to dequeue from the ring to the obj_table.
+ * @param available
+ * If non-NULL, returns the number of remaining ring entries after the
+ * dequeue has finished.
* @return
* - Number of objects dequeued
*/
static inline unsigned __attribute__((always_inline))
-rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
- if (r->cons.sc_dequeue)
- return rte_ring_sc_dequeue_burst(r, obj_table, n);
- else
- return rte_ring_mc_dequeue_burst(r, obj_table, n);
+ return __rte_ring_do_dequeue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE,
+ r->cons.single, available);
}
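An end-to-end consumer sketch draining the ring in bursts; process() is a hypothetical per-object callback, not part of this API:

void *objs[64];
unsigned int i, n;

while ((n = rte_ring_dequeue_burst(r, objs, 64, NULL)) > 0)
	for (i = 0; i < n; i++)
		process(objs[i]); /* hypothetical handler */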
#ifdef __cplusplus