author     Luca Boccassi <luca.boccassi@gmail.com>  2018-08-14 18:52:30 +0100
committer  Luca Boccassi <luca.boccassi@gmail.com>  2018-08-14 18:53:17 +0100
commit     b63264c8342e6a1b6971c79550d2af2024b6a4de
tree       83114aac64286fe616506c0b3dfaec2ab86ef835 /lib/librte_ring
parent     ca33590b6af032bff57d9cc70455660466a654b2
New upstream version 18.08 (upstream/18.08)
Change-Id: I32fdf5e5016556d9c0a6d88ddaf1fc468961790a
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'lib/librte_ring')
-rw-r--r--  lib/librte_ring/Makefile            |  2
-rw-r--r--  lib/librte_ring/rte_ring.h          | 27
-rw-r--r--  lib/librte_ring/rte_ring_c11_mem.h  | 10
-rw-r--r--  lib/librte_ring/rte_ring_generic.h  | 10
4 files changed, 22 insertions, 27 deletions
diff --git a/lib/librte_ring/Makefile b/lib/librte_ring/Makefile
index bde8907d..21a36770 100644
--- a/lib/librte_ring/Makefile
+++ b/lib/librte_ring/Makefile
@@ -11,7 +11,7 @@ LDLIBS += -lrte_eal
 
 EXPORT_MAP := rte_ring_version.map
 
-LIBABIVER := 1
+LIBABIVER := 2
 
 # all source are stored in SRCS-y
 SRCS-$(CONFIG_RTE_LIBRTE_RING) := rte_ring.c
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 253cdc96..7a731d07 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -26,8 +26,9 @@
  * - Bulk dequeue.
  * - Bulk enqueue.
  *
- * Note: the ring implementation is not preemptable. A lcore must not
- * be interrupted by another task that uses the same ring.
+ * Note: the ring implementation is not preemptible. Refer to Programmer's
+ * guide/Environment Abstraction Layer/Multiple pthread/Known Issues/rte_ring
+ * for more information.
  *
  */
 
@@ -62,14 +63,6 @@ enum rte_ring_queue_behavior {
 
 struct rte_memzone; /* forward declaration, so as not to require memzone.h */
 
-#if RTE_CACHE_LINE_SIZE < 128
-#define PROD_ALIGN (RTE_CACHE_LINE_SIZE * 2)
-#define CONS_ALIGN (RTE_CACHE_LINE_SIZE * 2)
-#else
-#define PROD_ALIGN RTE_CACHE_LINE_SIZE
-#define CONS_ALIGN RTE_CACHE_LINE_SIZE
-#endif
-
 /* structure to hold a pair of head/tail values and other metadata */
 struct rte_ring_headtail {
 	volatile uint32_t head;  /**< Prod/consumer head. */
@@ -101,11 +94,15 @@ struct rte_ring {
 	uint32_t mask;           /**< Mask (size-1) of ring. */
 	uint32_t capacity;       /**< Usable size of ring */
 
+	char pad0 __rte_cache_aligned; /**< empty cache line */
+
 	/** Ring producer status. */
-	struct rte_ring_headtail prod __rte_aligned(PROD_ALIGN);
+	struct rte_ring_headtail prod __rte_cache_aligned;
+	char pad1 __rte_cache_aligned; /**< empty cache line */
 
 	/** Ring consumer status. */
-	struct rte_ring_headtail cons __rte_aligned(CONS_ALIGN);
+	struct rte_ring_headtail cons __rte_cache_aligned;
+	char pad2 __rte_cache_aligned; /**< empty cache line */
 };
 
 #define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". */
@@ -339,7 +336,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
 static __rte_always_inline unsigned int
 __rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
 		 unsigned int n, enum rte_ring_queue_behavior behavior,
-		 int is_sp, unsigned int *free_space)
+		 unsigned int is_sp, unsigned int *free_space)
 {
 	uint32_t prod_head, prod_next;
 	uint32_t free_entries;
@@ -381,12 +378,12 @@ end:
 static __rte_always_inline unsigned int
 __rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
 		 unsigned int n, enum rte_ring_queue_behavior behavior,
-		 int is_sc, unsigned int *available)
+		 unsigned int is_sc, unsigned int *available)
 {
 	uint32_t cons_head, cons_next;
 	uint32_t entries;
 
-	n = __rte_ring_move_cons_head(r, is_sc, n, behavior,
+	n = __rte_ring_move_cons_head(r, (int)is_sc, n, behavior,
 			&cons_head, &cons_next, &entries);
 	if (n == 0)
 		goto end;
diff --git a/lib/librte_ring/rte_ring_c11_mem.h b/lib/librte_ring/rte_ring_c11_mem.h
index 08825ea5..94df3c4a 100644
--- a/lib/librte_ring/rte_ring_c11_mem.h
+++ b/lib/librte_ring/rte_ring_c11_mem.h
@@ -51,7 +51,7 @@ update_tail(struct rte_ring_headtail *ht, uint32_t old_val, uint32_t new_val,
  *   If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
  */
 static __rte_always_inline unsigned int
-__rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
+__rte_ring_move_prod_head(struct rte_ring *r, unsigned int is_sp,
 		unsigned int n, enum rte_ring_queue_behavior behavior,
 		uint32_t *old_head, uint32_t *new_head,
 		uint32_t *free_entries)
@@ -66,14 +66,14 @@ __rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
 
 		*old_head = __atomic_load_n(&r->prod.head,
 					__ATOMIC_ACQUIRE);
-		const uint32_t cons_tail = r->cons.tail;
+
 		/*
 		 * The subtraction is done between two unsigned 32bits value
 		 * (the result is always modulo 32 bits even if we have
 		 * *old_head > cons_tail). So 'free_entries' is always between 0
 		 * and capacity (which is < size).
 		 */
-		*free_entries = (capacity + cons_tail - *old_head);
+		*free_entries = (capacity + r->cons.tail - *old_head);
 
 		/* check that we have enough room in ring */
 		if (unlikely(n > *free_entries))
@@ -133,13 +133,13 @@ __rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
 		n = max;
 		*old_head = __atomic_load_n(&r->cons.head,
 					__ATOMIC_ACQUIRE);
-		const uint32_t prod_tail = r->prod.tail;
+
 		/* The subtraction is done between two unsigned 32bits value
 		 * (the result is always modulo 32 bits even if we have
 		 * cons_head > prod_tail). So 'entries' is always between 0
 		 * and size(ring)-1.
 		 */
-		*entries = (prod_tail - *old_head);
+		*entries = (r->prod.tail - *old_head);
 
 		/* Set the actual entries for dequeue */
 		if (n > *entries)
diff --git a/lib/librte_ring/rte_ring_generic.h b/lib/librte_ring/rte_ring_generic.h
index 5b110425..ea7dbe5b 100644
--- a/lib/librte_ring/rte_ring_generic.h
+++ b/lib/librte_ring/rte_ring_generic.h
@@ -53,7 +53,7 @@ update_tail(struct rte_ring_headtail *ht, uint32_t old_val, uint32_t new_val,
  *   If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
  */
 static __rte_always_inline unsigned int
-__rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
+__rte_ring_move_prod_head(struct rte_ring *r, unsigned int is_sp,
 		unsigned int n, enum rte_ring_queue_behavior behavior,
 		uint32_t *old_head, uint32_t *new_head,
 		uint32_t *free_entries)
@@ -73,14 +73,13 @@ __rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
 		 */
 		rte_smp_rmb();
 
-		const uint32_t cons_tail = r->cons.tail;
 		/*
 		 * The subtraction is done between two unsigned 32bits value
 		 * (the result is always modulo 32 bits even if we have
 		 * *old_head > cons_tail). So 'free_entries' is always between 0
 		 * and capacity (which is < size).
 		 */
-		*free_entries = (capacity + cons_tail - *old_head);
+		*free_entries = (capacity + r->cons.tail - *old_head);
 
 		/* check that we have enough room in ring */
 		if (unlikely(n > *free_entries))
@@ -124,7 +123,7 @@ __rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
  *   If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
  */
 static __rte_always_inline unsigned int
-__rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
+__rte_ring_move_cons_head(struct rte_ring *r, unsigned int is_sc,
 		unsigned int n, enum rte_ring_queue_behavior behavior,
 		uint32_t *old_head, uint32_t *new_head,
 		uint32_t *entries)
@@ -144,13 +143,12 @@ __rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
 		 */
 		rte_smp_rmb();
 
-		const uint32_t prod_tail = r->prod.tail;
 		/* The subtraction is done between two unsigned 32bits value
 		 * (the result is always modulo 32 bits even if we have
 		 * cons_head > prod_tail). So 'entries' is always between 0
 		 * and size(ring)-1.
 		 */
-		*entries = (prod_tail - *old_head);
+		*entries = (r->prod.tail - *old_head);
 
 		/* Set the actual entries for dequeue */
 		if (n > *entries)
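
Note on the rte_ring.h hunk above: the conditional PROD_ALIGN/CONS_ALIGN alignment is replaced by plain cache-line alignment plus explicit empty pad cache lines, so the producer and consumer head/tail pairs never share a cache line. The stand-alone sketch below only illustrates that layout idea; the struct is trimmed down, and CACHE_LINE is an assumed 64-byte line standing in for RTE_CACHE_LINE_SIZE rather than anything taken from the DPDK headers.

/*
 * Minimal sketch of the padding idea: an empty cache line is placed
 * before the producer and consumer head/tail pairs so that producer
 * and consumer cores never write to the same cache line (no false
 * sharing).  This is NOT the DPDK definition of struct rte_ring.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define CACHE_LINE 64                                /* assumed line size */
#define ALIGNED __attribute__((aligned(CACHE_LINE)))

struct headtail {
	volatile uint32_t head;
	volatile uint32_t tail;
};

struct toy_ring {
	uint32_t size;
	uint32_t mask;
	uint32_t capacity;

	char pad0 ALIGNED;            /* empty cache line */
	struct headtail prod ALIGNED; /* producer head/tail */
	char pad1 ALIGNED;            /* empty cache line */
	struct headtail cons ALIGNED; /* consumer head/tail */
	char pad2 ALIGNED;            /* empty cache line */
};

int main(void)
{
	/* prod and cons start on different cache lines, separated by a
	 * full empty line, so updates to one do not invalidate the other. */
	printf("offsetof(prod) = %zu\n", offsetof(struct toy_ring, prod));
	printf("offsetof(cons) = %zu\n", offsetof(struct toy_ring, cons));
	return 0;
}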
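
Note on the head/tail arithmetic referenced in the rte_ring_c11_mem.h and rte_ring_generic.h comments: head and tail are free-running 32-bit counters, so 'capacity + cons_tail - prod_head' and 'prod_tail - cons_head' stay within [0, capacity] even after the counters wrap past UINT32_MAX. A worked example follows; the values are made up for illustration, and only the arithmetic mirrors the expressions used in __rte_ring_move_prod_head()/__rte_ring_move_cons_head().

/*
 * Modulo-2^32 subtraction on free-running ring indices: the producer
 * head has wrapped and is numerically smaller than the consumer tail,
 * yet the computed occupancy and free space are still correct.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t capacity  = 1024;

	uint32_t cons_tail = UINT32_MAX - 50; /* consumer tail           */
	uint32_t prod_head = cons_tail + 100; /* wraps to a small value  */

	uint32_t used = prod_head - cons_tail;                     /* 100 */
	uint32_t free_entries = capacity + cons_tail - prod_head;  /* 924 */

	printf("used = %u, free = %u\n", used, free_entries);
	return 0;
}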