Diffstat (limited to 'lib/librte_ring')
-rw-r--r--  lib/librte_ring/rte_ring.c  |  29
-rw-r--r--  lib/librte_ring/rte_ring.h  | 140
2 files changed, 107 insertions, 62 deletions
diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index 5f98c33f..036467f4 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -140,8 +140,22 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
r->flags = flags;
r->prod.single = (flags & RING_F_SP_ENQ) ? __IS_SP : __IS_MP;
r->cons.single = (flags & RING_F_SC_DEQ) ? __IS_SC : __IS_MC;
- r->size = count;
- r->mask = count - 1;
+
+ if (flags & RING_F_EXACT_SZ) {
+ r->size = rte_align32pow2(count + 1);
+ r->mask = r->size - 1;
+ r->capacity = count;
+ } else {
+ if ((!POWEROF2(count)) || (count > RTE_RING_SZ_MASK)) {
+ RTE_LOG(ERR, RING,
+ "Requested size is invalid, must be power of 2, and not exceed the size limit %u\n",
+ RTE_RING_SZ_MASK);
+ return -EINVAL;
+ }
+ r->size = count;
+ r->mask = count - 1;
+ r->capacity = r->mask;
+ }
r->prod.head = r->cons.head = 0;
r->prod.tail = r->cons.tail = 0;
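
The sizing arithmetic in the hunk above is worth tracing through: the head/tail
scheme always leaves at least one slot unused, so an exact-size ring must
allocate the next power of two strictly greater than the requested count. A
minimal standalone sketch of the computation, where align32pow2() is a
hypothetical stand-in for rte_align32pow2():

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for rte_align32pow2(): round v up to the next
 * power of two (assumes 0 < v <= 2^31). */
static uint32_t
align32pow2(uint32_t v)
{
	v--;
	v |= v >> 1;
	v |= v >> 2;
	v |= v >> 4;
	v |= v >> 8;
	v |= v >> 16;
	return v + 1;
}

int
main(void)
{
	uint32_t count = 1000;                  /* requested usable entries */
	uint32_t size = align32pow2(count + 1); /* 1024: slots allocated */
	uint32_t mask = size - 1;               /* 1023: index mask */
	uint32_t capacity = count;              /* 1000: usable entries */

	printf("size=%u mask=%u capacity=%u\n", size, mask, capacity);
	return 0;
}

For count = 1000 this prints size=1024 mask=1023 capacity=1000; a conventional
ring of size 1024 would instead expose only 1023 usable entries.
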
@@ -160,10 +174,15 @@ rte_ring_create(const char *name, unsigned count, int socket_id,
ssize_t ring_size;
int mz_flags = 0;
struct rte_ring_list* ring_list = NULL;
+ const unsigned int requested_count = count;
int ret;
ring_list = RTE_TAILQ_CAST(rte_ring_tailq.head, rte_ring_list);
+ /* for an exact size ring, round up from count to a power of two */
+ if (flags & RING_F_EXACT_SZ)
+ count = rte_align32pow2(count + 1);
+
ring_size = rte_ring_get_memsize(count);
if (ring_size < 0) {
rte_errno = ring_size;
@@ -189,12 +208,13 @@ rte_ring_create(const char *name, unsigned count, int socket_id,
/* reserve a memory zone for this ring. If we can't get rte_config or
* we are a secondary process, the memzone_reserve function will set
* rte_errno for us appropriately - hence no check in this function */
- mz = rte_memzone_reserve(mz_name, ring_size, socket_id, mz_flags);
+ mz = rte_memzone_reserve_aligned(mz_name, ring_size, socket_id,
+ mz_flags, __alignof__(*r));
if (mz != NULL) {
r = mz->addr;
/* no need to check return value here, we already checked the
* arguments above */
- rte_ring_init(r, name, count, flags);
+ rte_ring_init(r, name, requested_count, flags);
te->data = (void *) r;
r->memzone = mz;
@@ -262,6 +282,7 @@ rte_ring_dump(FILE *f, const struct rte_ring *r)
fprintf(f, "ring <%s>@%p\n", r->name, r);
fprintf(f, " flags=%x\n", r->flags);
fprintf(f, " size=%"PRIu32"\n", r->size);
+ fprintf(f, " capacity=%"PRIu32"\n", r->capacity);
fprintf(f, " ct=%"PRIu32"\n", r->cons.tail);
fprintf(f, " ch=%"PRIu32"\n", r->cons.head);
fprintf(f, " pt=%"PRIu32"\n", r->prod.tail);
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 97f025a1..8f5a4937 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -101,6 +101,7 @@ extern "C" {
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memzone.h>
+#include <rte_pause.h>
#define RTE_TAILQ_RING_NAME "RTE_RING"
@@ -153,6 +154,7 @@ struct rte_ring {
/**< Memzone, if any, containing the rte_ring */
uint32_t size; /**< Size of ring. */
uint32_t mask; /**< Mask (size-1) of ring. */
+ uint32_t capacity; /**< Usable size of ring. */
/** Ring producer status. */
struct rte_ring_headtail prod __rte_aligned(PROD_ALIGN);
@@ -163,6 +165,15 @@ struct rte_ring {
#define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". */
#define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
+/**
+ * Ring is to hold exactly the requested number of entries.
+ * Without this flag set, the ring size requested must be a power of 2, and the
+ * usable space will be that size - 1. With the flag, the requested size will
+ * be rounded up to the next power of two, but the usable space will be exactly
+ * that requested. Worst case, if a power-of-2 size is requested, half the
+ * ring space will be wasted.
+ */
+#define RING_F_EXACT_SZ 0x0004
#define RTE_RING_SZ_MASK (unsigned)(0x0fffffff) /**< Ring size mask */
/* @internal defines for passing to the enqueue/dequeue worker functions */
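
A usage sketch for the new flag (the function and ring name below are
illustrative, not part of the patch): requesting exactly 1000 entries, which
would fail with -EINVAL on a conventional ring since 1000 is not a power of
two.

#include <stdio.h>
#include <rte_errno.h>
#include <rte_ring.h>

/* Illustrative only: create a single-producer/single-consumer ring with
 * exactly 1000 usable entries. 1024 slots are allocated internally, but
 * rte_ring_get_capacity() reports 1000. */
static struct rte_ring *
create_exact_ring(void)
{
	struct rte_ring *r = rte_ring_create("exact_ring", 1000,
			SOCKET_ID_ANY,
			RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);

	if (r == NULL)
		printf("ring create failed: %d\n", rte_errno);
	return r;
}
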
@@ -345,7 +356,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
} \
} while (0)
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
update_tail(struct rte_ring_headtail *ht, uint32_t old_val, uint32_t new_val,
uint32_t single)
{
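
The __attribute__((always_inline)) substitutions here and throughout the rest
of the file rely on the __rte_always_inline macro from rte_common.h, which at
the time of this change expands to:

/* from rte_common.h -- definition quoted for reference */
#define __rte_always_inline inline __attribute__((always_inline))

Besides being shorter, the macro gives other toolchains a single point at
which to adapt the attribute if needed.
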
@@ -383,13 +394,13 @@ update_tail(struct rte_ring_headtail *ht, uint32_t old_val, uint32_t new_val,
* Actual number of objects enqueued.
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline __attribute__((always_inline)) unsigned int
+static __rte_always_inline unsigned int
__rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
unsigned int n, enum rte_ring_queue_behavior behavior,
uint32_t *old_head, uint32_t *new_head,
uint32_t *free_entries)
{
- const uint32_t mask = r->mask;
+ const uint32_t capacity = r->capacity;
unsigned int max = n;
int success;
@@ -399,11 +410,13 @@ __rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
*old_head = r->prod.head;
const uint32_t cons_tail = r->cons.tail;
- /* The subtraction is done between two unsigned 32bits value
+ /*
+ * The subtraction is done between two unsigned 32-bit values
* (the result is always modulo 32 bits even if we have
* *old_head > cons_tail). So 'free_entries' is always between 0
- * and size(ring)-1. */
- *free_entries = (mask + cons_tail - *old_head);
+ * and capacity (which is < size).
+ */
+ *free_entries = (capacity + cons_tail - *old_head);
/* check that we have enough room in ring */
if (unlikely(n > *free_entries))
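
The modulo-2^32 reasoning in the comment above can be checked in isolation. In
this sketch (plain C, not DPDK code; the values are arbitrary) the producer
head has wrapped past 2^32 while the consumer tail has not, and both
subtractions still come out right:

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	const uint32_t capacity = 1000;      /* hypothetical usable size */
	uint32_t cons_tail = UINT32_MAX - 2; /* consumer not yet wrapped */
	uint32_t prod_head = 5;              /* producer wrapped past 2^32 */

	/* Both computations are modulo 2^32, so wraparound is harmless. */
	uint32_t used = prod_head - cons_tail;                    /* == 8 */
	uint32_t free_entries = capacity + cons_tail - prod_head; /* == 992 */

	assert(used == 8);
	assert(free_entries == capacity - used);
	return 0;
}
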
@@ -443,7 +456,7 @@ __rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
* Actual number of objects enqueued.
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline __attribute__((always_inline)) unsigned int
+static __rte_always_inline unsigned int
__rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
unsigned int n, enum rte_ring_queue_behavior behavior,
int is_sp, unsigned int *free_space)
@@ -489,7 +502,7 @@ end:
* - Actual number of objects dequeued.
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline __attribute__((always_inline)) unsigned int
+static __rte_always_inline unsigned int
__rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
unsigned int n, enum rte_ring_queue_behavior behavior,
uint32_t *old_head, uint32_t *new_head,
@@ -548,7 +561,7 @@ __rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
* - Actual number of objects dequeued.
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline __attribute__((always_inline)) unsigned int
+static __rte_always_inline unsigned int
__rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
unsigned int n, enum rte_ring_queue_behavior behavior,
int is_sc, unsigned int *available)
@@ -590,7 +603,7 @@ end:
* @return
* The number of objects enqueued, either 0 or n
*/
-static inline unsigned int __attribute__((always_inline))
+static __rte_always_inline unsigned int
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
@@ -613,7 +626,7 @@ rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* @return
* The number of objects enqueued, either 0 or n
*/
-static inline unsigned int __attribute__((always_inline))
+static __rte_always_inline unsigned int
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
@@ -640,7 +653,7 @@ rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* @return
* The number of objects enqueued, either 0 or n
*/
-static inline unsigned int __attribute__((always_inline))
+static __rte_always_inline unsigned int
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
@@ -662,7 +675,7 @@ rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* - 0: Success; objects enqueued.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
{
return rte_ring_mp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
@@ -679,7 +692,7 @@ rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
* - 0: Success; objects enqueued.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
{
return rte_ring_sp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
@@ -700,7 +713,7 @@ rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
* - 0: Success; objects enqueued.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
rte_ring_enqueue(struct rte_ring *r, void *obj)
{
return rte_ring_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
@@ -724,7 +737,7 @@ rte_ring_enqueue(struct rte_ring *r, void *obj)
* @return
* The number of objects dequeued, either 0 or n
*/
-static inline unsigned int __attribute__((always_inline))
+static __rte_always_inline unsigned int
rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
@@ -748,7 +761,7 @@ rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table,
* @return
* The number of objects dequeued, either 0 or n
*/
-static inline unsigned int __attribute__((always_inline))
+static __rte_always_inline unsigned int
rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
@@ -775,7 +788,7 @@ rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table,
* @return
* The number of objects dequeued, either 0 or n
*/
-static inline unsigned int __attribute__((always_inline))
+static __rte_always_inline unsigned int
rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n,
unsigned int *available)
{
@@ -798,10 +811,10 @@ rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n,
* - -ENOENT: Not enough entries in the ring to dequeue; no object is
* dequeued.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_mc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOBUFS;
+ return rte_ring_mc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
}
/**
@@ -816,10 +829,10 @@ rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
* - -ENOENT: Not enough entries in the ring to dequeue, no object is
* dequeued.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_sc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOBUFS;
+ return rte_ring_sc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
}
/**
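
The two hunks above make the single-object dequeue variants return -ENOENT on
an empty ring, as their documentation has always stated (-ENOBUFS belongs to
the enqueue side). A sketch of a caller relying on the corrected contract;
poll_one() is an illustrative name, not a DPDK API:

#include <errno.h>
#include <rte_ring.h>

/* Illustrative wrapper: treat an empty ring as "no work yet" rather than
 * as an error, relying on the corrected -ENOENT return value. */
static void *
poll_one(struct rte_ring *r)
{
	void *obj;

	if (rte_ring_dequeue(r, &obj) == -ENOENT)
		return NULL; /* ring empty, try again later */
	return obj;
}
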
@@ -838,76 +851,71 @@ rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
* - -ENOENT: Not enough entries in the ring to dequeue, no object is
* dequeued.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
rte_ring_dequeue(struct rte_ring *r, void **obj_p)
{
return rte_ring_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
}
/**
- * Test if a ring is full.
+ * Return the number of entries in a ring.
*
* @param r
* A pointer to the ring structure.
* @return
- * - 1: The ring is full.
- * - 0: The ring is not full.
+ * The number of entries in the ring.
*/
-static inline int
-rte_ring_full(const struct rte_ring *r)
+static inline unsigned
+rte_ring_count(const struct rte_ring *r)
{
uint32_t prod_tail = r->prod.tail;
uint32_t cons_tail = r->cons.tail;
- return ((cons_tail - prod_tail - 1) & r->mask) == 0;
+ uint32_t count = (prod_tail - cons_tail) & r->mask;
+ return (count > r->capacity) ? r->capacity : count;
}
/**
- * Test if a ring is empty.
+ * Return the number of free entries in a ring.
*
* @param r
* A pointer to the ring structure.
* @return
- * - 1: The ring is empty.
- * - 0: The ring is not empty.
+ * The number of free entries in the ring.
*/
-static inline int
-rte_ring_empty(const struct rte_ring *r)
+static inline unsigned
+rte_ring_free_count(const struct rte_ring *r)
{
- uint32_t prod_tail = r->prod.tail;
- uint32_t cons_tail = r->cons.tail;
- return !!(cons_tail == prod_tail);
+ return r->capacity - rte_ring_count(r);
}
/**
- * Return the number of entries in a ring.
+ * Test if a ring is full.
*
* @param r
* A pointer to the ring structure.
* @return
- * The number of entries in the ring.
+ * - 1: The ring is full.
+ * - 0: The ring is not full.
*/
-static inline unsigned
-rte_ring_count(const struct rte_ring *r)
+static inline int
+rte_ring_full(const struct rte_ring *r)
{
- uint32_t prod_tail = r->prod.tail;
- uint32_t cons_tail = r->cons.tail;
- return (prod_tail - cons_tail) & r->mask;
+ return rte_ring_free_count(r) == 0;
}
/**
- * Return the number of free entries in a ring.
+ * Test if a ring is empty.
*
* @param r
* A pointer to the ring structure.
* @return
- * The number of free entries in the ring.
+ * - 1: The ring is empty.
+ * - 0: The ring is not empty.
*/
-static inline unsigned
-rte_ring_free_count(const struct rte_ring *r)
+static inline int
+rte_ring_empty(const struct rte_ring *r)
{
- uint32_t prod_tail = r->prod.tail;
- uint32_t cons_tail = r->cons.tail;
- return (cons_tail - prod_tail - 1) & r->mask;
+ return rte_ring_count(r) == 0;
}
/**
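
With the reordering above, rte_ring_full() and rte_ring_empty() become thin
wrappers over the two counting helpers, and rte_ring_count() is clamped so
that the two counters always partition the capacity. A sketch of the
resulting invariants (check_ring_invariants() is an illustrative name, and
the checks only hold while no other thread touches r):

#include <assert.h>
#include <rte_ring.h>

/* Illustrative check of the relationships the reworked helpers maintain,
 * assuming no concurrent producers or consumers on r. */
static void
check_ring_invariants(const struct rte_ring *r)
{
	unsigned int used = rte_ring_count(r);
	unsigned int avail = rte_ring_free_count(r);

	assert(used + avail == rte_ring_get_capacity(r));
	assert(rte_ring_full(r) == (avail == 0));
	assert(rte_ring_empty(r) == (used == 0));
}
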
@@ -916,7 +924,9 @@ rte_ring_free_count(const struct rte_ring *r)
* @param r
* A pointer to the ring structure.
* @return
- * The number of elements which can be stored in the ring.
+ * The size of the data store used by the ring.
+ * NOTE: this is not the same as the usable space in the ring. To query that
+ * use ``rte_ring_get_capacity()``.
*/
static inline unsigned int
rte_ring_get_size(const struct rte_ring *r)
@@ -925,6 +935,20 @@ rte_ring_get_size(const struct rte_ring *r)
}
/**
+ * Return the number of elements which can be stored in the ring.
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @return
+ * The usable size of the ring.
+ */
+static inline unsigned int
+rte_ring_get_capacity(const struct rte_ring *r)
+{
+ return r->capacity;
+}
+
+/**
* Dump the status of all rings on the console
*
* @param f
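
Taken together, rte_ring_get_size() and rte_ring_get_capacity() now answer
two different questions. A short sketch contrasting them for the exact-size
example used earlier (print_ring_geometry() is an illustrative name):

#include <stdio.h>
#include <rte_ring.h>

/* For a ring created with count=1000 and RING_F_EXACT_SZ this prints
 * "size=1024 capacity=1000": allocated slots versus usable space. */
static void
print_ring_geometry(const struct rte_ring *r)
{
	printf("size=%u capacity=%u\n",
			rte_ring_get_size(r), rte_ring_get_capacity(r));
}
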
@@ -962,7 +986,7 @@ struct rte_ring *rte_ring_lookup(const char *name);
* @return
* - n: Actual number of objects enqueued.
*/
-static inline unsigned __attribute__((always_inline))
+static __rte_always_inline unsigned
rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
@@ -985,7 +1009,7 @@ rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
* @return
* - n: Actual number of objects enqueued.
*/
-static inline unsigned __attribute__((always_inline))
+static __rte_always_inline unsigned
rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
@@ -1012,7 +1036,7 @@ rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
* @return
* - n: Actual number of objects enqueued.
*/
-static inline unsigned __attribute__((always_inline))
+static __rte_always_inline unsigned
rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
@@ -1040,7 +1064,7 @@ rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
* @return
* - n: Actual number of objects dequeued, 0 if ring is empty
*/
-static inline unsigned __attribute__((always_inline))
+static __rte_always_inline unsigned
rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
@@ -1065,7 +1089,7 @@ rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table,
* @return
* - n: Actual number of objects dequeued, 0 if ring is empty
*/
-static inline unsigned __attribute__((always_inline))
+static __rte_always_inline unsigned
rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
@@ -1092,7 +1116,7 @@ rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table,
* @return
* - Number of objects dequeued
*/
-static inline unsigned __attribute__((always_inline))
+static __rte_always_inline unsigned
rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{