From 7595afa4d30097c1177b69257118d8ad89a539be Mon Sep 17 00:00:00 2001
From: Christian Ehrhardt <christian.ehrhardt@canonical.com>
Date: Tue, 16 May 2017 14:51:32 +0200
Subject: Imported Upstream version 17.05

Change-Id: Id1e419c5a214e4a18739663b91f0f9a549f1fdc6
Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
---
 lib/librte_mempool/Makefile                |   4 -
 lib/librte_mempool/rte_mempool.c           |  21 +--
 lib/librte_mempool/rte_mempool.h           | 275 +++-------------------------
 lib/librte_mempool/rte_mempool_ring.c      | 161 -----------------
 lib/librte_mempool/rte_mempool_stack.c     | 147 ---------------
 lib/librte_mempool/rte_mempool_version.map |   1 -
 6 files changed, 35 insertions(+), 574 deletions(-)
 delete mode 100644 lib/librte_mempool/rte_mempool_ring.c
 delete mode 100644 lib/librte_mempool/rte_mempool_stack.c

diff --git a/lib/librte_mempool/Makefile b/lib/librte_mempool/Makefile
index 057a6ab4..7b5bdfee 100644
--- a/lib/librte_mempool/Makefile
+++ b/lib/librte_mempool/Makefile
@@ -43,11 +43,7 @@ LIBABIVER := 2
 # all source are stored in SRCS-y
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool.c
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool_ops.c
-SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool_ring.c
-SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool_stack.c
 
 # install includes
 SYMLINK-$(CONFIG_RTE_LIBRTE_MEMPOOL)-include := rte_mempool.h
 
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += lib/librte_eal lib/librte_ring
-
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index aa513b97..f65310f6 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -818,7 +818,6 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
 		goto exit_unlock;
 	}
 	mp->mz = mz;
-	mp->socket_id = socket_id;
 	mp->size = n;
 	mp->flags = flags;
 	mp->socket_id = socket_id;
@@ -869,6 +868,7 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
 	rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
 	int socket_id, unsigned flags)
 {
+	int ret;
 	struct rte_mempool *mp;
 
 	mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
@@ -881,13 +881,16 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
 	 * set the correct index into the table of ops structs.
 	 */
 	if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET))
-		rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL);
+		ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL);
 	else if (flags & MEMPOOL_F_SP_PUT)
-		rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL);
+		ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL);
 	else if (flags & MEMPOOL_F_SC_GET)
-		rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL);
+		ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL);
 	else
-		rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
+		ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
+
+	if (ret)
+		goto fail;
 
 	/* call the mempool priv initializer */
 	if (mp_init)
@@ -998,12 +1001,6 @@ rte_mempool_in_use_count(const struct rte_mempool *mp)
 	return mp->size - rte_mempool_avail_count(mp);
 }
 
-unsigned int
-rte_mempool_count(const struct rte_mempool *mp)
-{
-	return rte_mempool_avail_count(mp);
-}
-
 /* dump the cache status */
 static unsigned
 rte_mempool_dump_cache(FILE *f, const struct rte_mempool *mp)
@@ -1047,7 +1044,7 @@ void rte_mempool_check_cookies(const struct rte_mempool *mp,
 	/* Force to drop the "const" attribute. This is done only when
 	 * DEBUG is enabled */
 	tmp = (void *) obj_table_const;
-	obj_table = (void **) tmp;
+	obj_table = tmp;
 
 	while (n--) {
 		obj = obj_table[n];
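The hunks above make rte_mempool_create() check the return value of rte_mempool_set_ops_byname() instead of silently ignoring it, while keeping the historical mapping from creation flags to the four ring-based handlers. A minimal sketch of that mapping from the application side (the pool name and sizes are illustrative, not part of the patch):

#include <stdio.h>
#include <rte_errno.h>
#include <rte_mempool.h>

/* Requesting single-producer put and single-consumer get selects the
 * "ring_sp_sc" ops under the hood; with the change above, a failure to
 * set the ops now makes the whole creation fail. */
static struct rte_mempool *
make_spsc_pool(unsigned int n, unsigned int elt_size)
{
        struct rte_mempool *mp;

        mp = rte_mempool_create("demo_pool", n, elt_size,
                                0, 0,           /* no cache, no private data */
                                NULL, NULL,     /* no pool constructor */
                                NULL, NULL,     /* no per-object constructor */
                                SOCKET_ID_ANY,
                                MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET);
        if (mp == NULL)
                printf("mempool creation failed: %d\n", rte_errno);
        return mp;
}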
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 956ce04b..48bc8ea3 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -51,13 +51,15 @@
  * meta-data in the object data and retrieve them when allocating a
  * new object.
  *
- * Note: the mempool implementation is not preemptable. A lcore must
- * not be interrupted by another task that uses the same mempool
- * (because it uses a ring which is not preemptable). Also, mempool
- * functions must not be used outside the DPDK environment: for
- * example, in linuxapp environment, a thread that is not created by
- * the EAL must not use mempools. This is due to the per-lcore cache
- * that won't work as rte_lcore_id() will not return a correct value.
+ * Note: the mempool implementation is not preemptible. An lcore must not be
+ * interrupted by another task that uses the same mempool (because it uses a
+ * ring which is not preemptible). Also, usual mempool functions like
+ * rte_mempool_get() or rte_mempool_put() are designed to be called from an EAL
+ * thread due to the internal per-lcore cache. Due to the lack of caching,
+ * rte_mempool_get() or rte_mempool_put() performance will suffer when called
+ * by non-EAL threads. Instead, non-EAL threads should call
+ * rte_mempool_generic_get() or rte_mempool_generic_put() with a user cache
+ * created with rte_mempool_cache_create().
  */
 
 #include <stdio.h>
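The rewritten note changes the guidance for non-EAL threads: instead of forbidding mempool use outside EAL threads, it steers them to the user-owned cache API. A minimal sketch of that pattern, assuming `mp` comes from the application and with error handling abbreviated:

#include <rte_mempool.h>

/* A non-EAL thread cannot rely on rte_lcore_id() for the per-lcore
 * cache, so it creates and owns a cache explicitly. */
static void
non_eal_worker(struct rte_mempool *mp)
{
        struct rte_mempool_cache *cache;
        void *objs[32];

        cache = rte_mempool_cache_create(32, SOCKET_ID_ANY);
        if (cache == NULL)
                return;

        if (rte_mempool_generic_get(mp, objs, 32, cache, 0) == 0) {
                /* ... work with the objects ... */
                rte_mempool_generic_put(mp, objs, 32, cache, 0);
        }

        /* Return cached objects to the pool before freeing the cache. */
        rte_mempool_cache_flush(cache, mp);
        rte_mempool_cache_free(cache);
}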
@@ -357,7 +359,7 @@ void rte_mempool_check_cookies(const struct rte_mempool *mp,
  * Prototype for implementation specific data provisioning function.
  *
  * The function should provide the implementation specific memory for
- * for use by the other mempool ops functions in a given mempool ops struct.
+ * use by the other mempool ops functions in a given mempool ops struct.
  * E.g. the default ops provides an instance of the rte_ring for this purpose.
  * it will most likely point to a different type of data structure, and
  * will be transparent to the application programmer.
@@ -551,7 +553,7 @@ int rte_mempool_register_ops(const struct rte_mempool_ops *ops);
 /**
  * Macro to statically register the ops of a mempool handler.
  * Note that the rte_mempool_register_ops fails silently here when
- * more then RTE_MEMPOOL_MAX_OPS_IDX is registered.
+ * more than RTE_MEMPOOL_MAX_OPS_IDX is registered.
  */
 #define MEMPOOL_REGISTER_OPS(ops)					\
 	void mp_hdlr_init_##ops(void);					\
@@ -654,7 +656,7 @@ typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
  *     when using rte_mempool_get() or rte_mempool_get_bulk() is
  *     "single-consumer". Otherwise, it is "multi-consumers".
  *   - MEMPOOL_F_NO_PHYS_CONTIG: If set, allocated objects won't
- *     necessarilly be contiguous in physical memory.
+ *     necessarily be contiguous in physical memory.
  * @return
  *   The pointer to the new allocated mempool, on success. NULL on error
  *   with rte_errno set appropriately. Possible rte_errno values include:
@@ -794,7 +796,7 @@ rte_mempool_free(struct rte_mempool *mp);
  * Add physically contiguous memory for objects in the pool at init
  *
  * Add a virtually and physically contiguous memory chunk in the pool
- * where objects can be instanciated.
+ * where objects can be instantiated.
  *
  * If the given physical address is unknown (paddr = RTE_BAD_PHYS_ADDR),
  * the chunk doesn't need to be physically contiguous (only virtually),
@@ -825,7 +827,7 @@ int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
  * Add physical memory for objects in the pool at init
  *
  * Add a virtually contiguous memory chunk in the pool where objects can
- * be instanciated. The physical addresses corresponding to the virtual
+ * be instantiated. The physical addresses corresponding to the virtual
  * area are described in paddr[], pg_num, pg_shift.
  *
  * @param mp
@@ -856,7 +858,7 @@ int rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
  * Add virtually contiguous memory for objects in the pool at init
  *
  * Add a virtually contiguous memory chunk in the pool where objects can
- * be instanciated.
+ * be instantiated.
  *
  * @param mp
  *   A pointer to the mempool structure.
@@ -1038,19 +1040,15 @@ rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
  */
 static inline void __attribute__((always_inline))
 __mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
-		      unsigned n, struct rte_mempool_cache *cache, int flags)
+		      unsigned n, struct rte_mempool_cache *cache)
 {
 	void **cache_objs;
 
 	/* increment stat now, adding in mempool always success */
 	__MEMPOOL_STAT_ADD(mp, put, n);
 
-	/* No cache provided or single producer */
-	if (unlikely(cache == NULL || flags & MEMPOOL_F_SP_PUT))
-		goto ring_enqueue;
-
-	/* Go straight to ring if put would overflow mem allocated for cache */
-	if (unlikely(n > RTE_MEMPOOL_CACHE_MAX_SIZE))
+	/* No cache provided or if put would overflow mem allocated for cache */
+	if (unlikely(cache == NULL || n > RTE_MEMPOOL_CACHE_MAX_SIZE))
 		goto ring_enqueue;
 
 	cache_objs = &cache->objs[cache->len];
@@ -1104,50 +1102,11 @@ ring_enqueue:
  */
 static inline void __attribute__((always_inline))
 rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
-			unsigned n, struct rte_mempool_cache *cache, int flags)
+			unsigned n, struct rte_mempool_cache *cache,
+			__rte_unused int flags)
 {
 	__mempool_check_cookies(mp, obj_table, n, 0);
-	__mempool_generic_put(mp, obj_table, n, cache, flags);
-}
-
-/**
- * @deprecated
- * Put several objects back in the mempool (multi-producers safe).
- *
- * @param mp
- *   A pointer to the mempool structure.
- * @param obj_table
- *   A pointer to a table of void * pointers (objects).
- * @param n
- *   The number of objects to add in the mempool from the obj_table.
- */
-__rte_deprecated
-static inline void __attribute__((always_inline))
-rte_mempool_mp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
-			unsigned n)
-{
-	struct rte_mempool_cache *cache;
-	cache = rte_mempool_default_cache(mp, rte_lcore_id());
-	rte_mempool_generic_put(mp, obj_table, n, cache, 0);
-}
-
-/**
- * @deprecated
- * Put several objects back in the mempool (NOT multi-producers safe).
- *
- * @param mp
- *   A pointer to the mempool structure.
- * @param obj_table
- *   A pointer to a table of void * pointers (objects).
- * @param n
- *   The number of objects to add in the mempool from obj_table.
- */
-__rte_deprecated
-static inline void __attribute__((always_inline))
-rte_mempool_sp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
-			unsigned n)
-{
-	rte_mempool_generic_put(mp, obj_table, n, NULL, MEMPOOL_F_SP_PUT);
+	__mempool_generic_put(mp, obj_table, n, cache);
 }
 
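With the deprecated rte_mempool_mp_put_bulk()/rte_mempool_sp_put_bulk() wrappers removed just above, both cases collapse into rte_mempool_put_bulk(): the single-producer variant is now expressed by creating the pool with MEMPOOL_F_SP_PUT rather than by a dedicated entry point. A migration sketch (the helper name is illustrative):

#include <rte_mempool.h>

/* Old: rte_mempool_mp_put_bulk(mp, table, n);  or  rte_mempool_sp_put_bulk().
 * New: one call, which honors the pool's own flags and default cache. */
static void
return_objects(struct rte_mempool *mp, void * const *table, unsigned int n)
{
        rte_mempool_put_bulk(mp, table, n);
}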
 /**
  * Put one object back in the mempool.
  *
@@ -1173,40 +1132,6 @@ rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
 	rte_mempool_generic_put(mp, obj_table, n, cache, mp->flags);
 }
 
-/**
- * @deprecated
- * Put one object in the mempool (multi-producers safe).
- *
- * @param mp
- *   A pointer to the mempool structure.
- * @param obj
- *   A pointer to the object to be added.
- */
-__rte_deprecated
-static inline void __attribute__((always_inline))
-rte_mempool_mp_put(struct rte_mempool *mp, void *obj)
-{
-	struct rte_mempool_cache *cache;
-	cache = rte_mempool_default_cache(mp, rte_lcore_id());
-	rte_mempool_generic_put(mp, &obj, 1, cache, 0);
-}
-
-/**
- * @deprecated
- * Put one object back in the mempool (NOT multi-producers safe).
- *
- * @param mp
- *   A pointer to the mempool structure.
- * @param obj
- *   A pointer to the object to be added.
- */
-__rte_deprecated
-static inline void __attribute__((always_inline))
-rte_mempool_sp_put(struct rte_mempool *mp, void *obj)
-{
-	rte_mempool_generic_put(mp, &obj, 1, NULL, MEMPOOL_F_SP_PUT);
-}
-
 /**
  * Put one object back in the mempool.
  *
@@ -1244,15 +1169,14 @@ rte_mempool_put(struct rte_mempool *mp, void *obj)
  */
 static inline int __attribute__((always_inline))
 __mempool_generic_get(struct rte_mempool *mp, void **obj_table,
-		      unsigned n, struct rte_mempool_cache *cache, int flags)
+		      unsigned n, struct rte_mempool_cache *cache)
 {
 	int ret;
 	uint32_t index, len;
 	void **cache_objs;
 
-	/* No cache provided or single consumer */
-	if (unlikely(cache == NULL || flags & MEMPOOL_F_SC_GET ||
-		     n >= cache->size))
+	/* No cache provided or cannot be satisfied from cache */
+	if (unlikely(cache == NULL || n >= cache->size))
 		goto ring_dequeue;
 
 	cache_objs = cache->objs;
@@ -1326,71 +1250,15 @@ ring_dequeue:
  */
 static inline int __attribute__((always_inline))
 rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, unsigned n,
-			struct rte_mempool_cache *cache, int flags)
+			struct rte_mempool_cache *cache, __rte_unused int flags)
 {
 	int ret;
-	ret = __mempool_generic_get(mp, obj_table, n, cache, flags);
+	ret = __mempool_generic_get(mp, obj_table, n, cache);
 	if (ret == 0)
 		__mempool_check_cookies(mp, obj_table, n, 1);
 	return ret;
 }
 
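rte_mempool_get_bulk() keeps the all-or-nothing contract documented in these hunks: on -ENOENT no object has been dequeued, so the error path needs no cleanup. A small sketch (helper name illustrative):

#include <errno.h>
#include <rte_mempool.h>

static int
process_burst(struct rte_mempool *mp, void **table, unsigned int n)
{
        if (rte_mempool_get_bulk(mp, table, n) == -ENOENT)
                return -1;      /* nothing was taken, nothing to undo */

        /* ... use table[0..n-1] ... */

        rte_mempool_put_bulk(mp, (void * const *)table, n);
        return 0;
}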
-/**
- * @deprecated
- * Get several objects from the mempool (multi-consumers safe).
- *
- * If cache is enabled, objects will be retrieved first from cache,
- * subsequently from the common pool. Note that it can return -ENOENT when
- * the local cache and common pool are empty, even if cache from other
- * lcores are full.
- *
- * @param mp
- *   A pointer to the mempool structure.
- * @param obj_table
- *   A pointer to a table of void * pointers (objects) that will be filled.
- * @param n
- *   The number of objects to get from mempool to obj_table.
- * @return
- *   - 0: Success; objects taken.
- *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
- */
-__rte_deprecated
-static inline int __attribute__((always_inline))
-rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
-{
-	struct rte_mempool_cache *cache;
-	cache = rte_mempool_default_cache(mp, rte_lcore_id());
-	return rte_mempool_generic_get(mp, obj_table, n, cache, 0);
-}
-
-/**
- * @deprecated
- * Get several objects from the mempool (NOT multi-consumers safe).
- *
- * If cache is enabled, objects will be retrieved first from cache,
- * subsequently from the common pool. Note that it can return -ENOENT when
- * the local cache and common pool are empty, even if cache from other
- * lcores are full.
- *
- * @param mp
- *   A pointer to the mempool structure.
- * @param obj_table
- *   A pointer to a table of void * pointers (objects) that will be filled.
- * @param n
- *   The number of objects to get from the mempool to obj_table.
- * @return
- *   - 0: Success; objects taken.
- *   - -ENOENT: Not enough entries in the mempool; no object is
- *     retrieved.
- */
-__rte_deprecated
-static inline int __attribute__((always_inline))
-rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
-{
-	return rte_mempool_generic_get(mp, obj_table, n, NULL,
-				       MEMPOOL_F_SC_GET);
-}
-
 /**
  * Get several objects from the mempool.
  *
@@ -1421,56 +1289,6 @@ rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
 	return rte_mempool_generic_get(mp, obj_table, n, cache, mp->flags);
 }
 
-/**
- * @deprecated
- * Get one object from the mempool (multi-consumers safe).
- *
- * If cache is enabled, objects will be retrieved first from cache,
- * subsequently from the common pool. Note that it can return -ENOENT when
- * the local cache and common pool are empty, even if cache from other
- * lcores are full.
- *
- * @param mp
- *   A pointer to the mempool structure.
- * @param obj_p
- *   A pointer to a void * pointer (object) that will be filled.
- * @return
- *   - 0: Success; objects taken.
- *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
- */
-__rte_deprecated
-static inline int __attribute__((always_inline))
-rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
-{
-	struct rte_mempool_cache *cache;
-	cache = rte_mempool_default_cache(mp, rte_lcore_id());
-	return rte_mempool_generic_get(mp, obj_p, 1, cache, 0);
-}
-
-/**
- * @deprecated
- * Get one object from the mempool (NOT multi-consumers safe).
- *
- * If cache is enabled, objects will be retrieved first from cache,
- * subsequently from the common pool. Note that it can return -ENOENT when
- * the local cache and common pool are empty, even if cache from other
- * lcores are full.
- *
- * @param mp
- *   A pointer to the mempool structure.
- * @param obj_p
- *   A pointer to a void * pointer (object) that will be filled.
- * @return
- *   - 0: Success; objects taken.
- *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
- */
-__rte_deprecated
-static inline int __attribute__((always_inline))
-rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p)
-{
-	return rte_mempool_generic_get(mp, obj_p, 1, NULL, MEMPOOL_F_SC_GET);
-}
-
 /**
  * Get one object from the mempool.
  *
@@ -1511,22 +1329,6 @@ rte_mempool_get(struct rte_mempool *mp, void **obj_p)
  */
 unsigned int rte_mempool_avail_count(const struct rte_mempool *mp);
 
-/**
- * @deprecated
- * Return the number of entries in the mempool.
- *
- * When cache is enabled, this function has to browse the length of
- * all lcores, so it should not be used in a data path, but only for
- * debug purposes.
- *
- * @param mp
- *   A pointer to the mempool structure.
- * @return
- *   The number of entries in the mempool.
- */
-__rte_deprecated
-unsigned rte_mempool_count(const struct rte_mempool *mp);
-
 /**
  * Return the number of elements which have been allocated from the mempool
  *
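With rte_mempool_count() removed above, the accounting API is the complementary pair rte_mempool_avail_count()/rte_mempool_in_use_count(), which partition the pool size. A debug-only sketch of the invariant (helper name illustrative; like the functions it wraps, this walks all lcore caches and does not belong in a data path):

#include <stdio.h>
#include <rte_mempool.h>

static void
dump_pool_usage(FILE *f, const struct rte_mempool *mp)
{
        unsigned int avail = rte_mempool_avail_count(mp);
        unsigned int used = rte_mempool_in_use_count(mp);

        /* avail + used == mp->size, modulo concurrent get/put activity */
        fprintf(f, "%s: %u available, %u in use, %u total\n",
                mp->name, avail, used, mp->size);
}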
@@ -1542,31 +1344,6 @@ unsigned int
 rte_mempool_in_use_count(const struct rte_mempool *mp);
 
-/**
- * @deprecated
- * Return the number of free entries in the mempool ring.
- * i.e. how many entries can be freed back to the mempool.
- *
- * NOTE: This corresponds to the number of elements *allocated* from the
- * memory pool, not the number of elements in the pool itself. To count
- * the number elements currently available in the pool, use "rte_mempool_count"
- *
- * When cache is enabled, this function has to browse the length of
- * all lcores, so it should not be used in a data path, but only for
- * debug purposes. User-owned mempool caches are not accounted for.
- *
- * @param mp
- *   A pointer to the mempool structure.
- * @return
- *   The number of free entries in the mempool.
- */
-__rte_deprecated
-static inline unsigned
-rte_mempool_free_count(const struct rte_mempool *mp)
-{
-	return rte_mempool_in_use_count(mp);
-}
-
 /**
  * Test if the mempool is full.
  *
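The two files deleted below are not a loss of functionality: in 17.05 the ring-based and stack handlers move out of librte_mempool into separate mempool drivers, still registered under the same names. When a handler is chosen explicitly rather than through creation flags, the usual pattern looks like this sketch (pool name illustrative; the "stack" name resolves only if the stack driver is linked in):

#include <rte_mempool.h>

static struct rte_mempool *
make_stack_pool(unsigned int n, unsigned int elt_size)
{
        struct rte_mempool *mp;

        mp = rte_mempool_create_empty("stack_pool", n, elt_size,
                                      0, 0, SOCKET_ID_ANY, 0);
        if (mp == NULL)
                return NULL;

        /* Select the handler by name before populating the pool. */
        if (rte_mempool_set_ops_byname(mp, "stack", NULL) < 0 ||
            rte_mempool_populate_default(mp) < 0) {
                rte_mempool_free(mp);
                return NULL;
        }
        return mp;
}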
diff --git a/lib/librte_mempool/rte_mempool_ring.c b/lib/librte_mempool/rte_mempool_ring.c
deleted file mode 100644
index b9aa64dd..00000000
--- a/lib/librte_mempool/rte_mempool_ring.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <string.h>
-
-#include <rte_errno.h>
-#include <rte_ring.h>
-#include <rte_mempool.h>
-
-static int
-common_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
-		unsigned n)
-{
-	return rte_ring_mp_enqueue_bulk(mp->pool_data, obj_table, n);
-}
-
-static int
-common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
-		unsigned n)
-{
-	return rte_ring_sp_enqueue_bulk(mp->pool_data, obj_table, n);
-}
-
-static int
-common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
-{
-	return rte_ring_mc_dequeue_bulk(mp->pool_data, obj_table, n);
-}
-
-static int
-common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
-{
-	return rte_ring_sc_dequeue_bulk(mp->pool_data, obj_table, n);
-}
-
-static unsigned
-common_ring_get_count(const struct rte_mempool *mp)
-{
-	return rte_ring_count(mp->pool_data);
-}
-
-
-static int
-common_ring_alloc(struct rte_mempool *mp)
-{
-	int rg_flags = 0, ret;
-	char rg_name[RTE_RING_NAMESIZE];
-	struct rte_ring *r;
-
-	ret = snprintf(rg_name, sizeof(rg_name),
-		RTE_MEMPOOL_MZ_FORMAT, mp->name);
-	if (ret < 0 || ret >= (int)sizeof(rg_name)) {
-		rte_errno = ENAMETOOLONG;
-		return -rte_errno;
-	}
-
-	/* ring flags */
-	if (mp->flags & MEMPOOL_F_SP_PUT)
-		rg_flags |= RING_F_SP_ENQ;
-	if (mp->flags & MEMPOOL_F_SC_GET)
-		rg_flags |= RING_F_SC_DEQ;
-
-	/*
-	 * Allocate the ring that will be used to store objects.
-	 * Ring functions will return appropriate errors if we are
-	 * running as a secondary process etc., so no checks made
-	 * in this function for that condition.
-	 */
-	r = rte_ring_create(rg_name, rte_align32pow2(mp->size + 1),
-		mp->socket_id, rg_flags);
-	if (r == NULL)
-		return -rte_errno;
-
-	mp->pool_data = r;
-
-	return 0;
-}
-
-static void
-common_ring_free(struct rte_mempool *mp)
-{
-	rte_ring_free(mp->pool_data);
-}
-
-/*
- * The following 4 declarations of mempool ops structs address
- * the need for the backward compatible mempool handlers for
- * single/multi producers and single/multi consumers as dictated by the
- * flags provided to the rte_mempool_create function
- */
-static const struct rte_mempool_ops ops_mp_mc = {
-	.name = "ring_mp_mc",
-	.alloc = common_ring_alloc,
-	.free = common_ring_free,
-	.enqueue = common_ring_mp_enqueue,
-	.dequeue = common_ring_mc_dequeue,
-	.get_count = common_ring_get_count,
-};
-
-static const struct rte_mempool_ops ops_sp_sc = {
-	.name = "ring_sp_sc",
-	.alloc = common_ring_alloc,
-	.free = common_ring_free,
-	.enqueue = common_ring_sp_enqueue,
-	.dequeue = common_ring_sc_dequeue,
-	.get_count = common_ring_get_count,
-};
-
-static const struct rte_mempool_ops ops_mp_sc = {
-	.name = "ring_mp_sc",
-	.alloc = common_ring_alloc,
-	.free = common_ring_free,
-	.enqueue = common_ring_mp_enqueue,
-	.dequeue = common_ring_sc_dequeue,
-	.get_count = common_ring_get_count,
-};
-
-static const struct rte_mempool_ops ops_sp_mc = {
-	.name = "ring_sp_mc",
-	.alloc = common_ring_alloc,
-	.free = common_ring_free,
-	.enqueue = common_ring_sp_enqueue,
-	.dequeue = common_ring_mc_dequeue,
-	.get_count = common_ring_get_count,
-};
-
-MEMPOOL_REGISTER_OPS(ops_mp_mc);
-MEMPOOL_REGISTER_OPS(ops_sp_sc);
-MEMPOOL_REGISTER_OPS(ops_mp_sc);
-MEMPOOL_REGISTER_OPS(ops_sp_mc);
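Beyond its role as the default handler, the file deleted above is the canonical template for external handlers: fill in the five callbacks and register the struct. A compilable skeleton with stub callbacks (the names and stub behavior are placeholders, not a real handler):

#include <errno.h>
#include <rte_mempool.h>

static int
my_alloc(struct rte_mempool *mp)
{
        mp->pool_data = NULL;   /* a real handler allocates its store here */
        return 0;
}

static void
my_free(struct rte_mempool *mp)
{
        (void)mp;               /* nothing to release in this stub */
}

static int
my_enqueue(struct rte_mempool *mp, void * const *obj_table, unsigned n)
{
        (void)mp; (void)obj_table; (void)n;
        return -ENOBUFS;        /* stub: no backing store */
}

static int
my_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
        (void)mp; (void)obj_table; (void)n;
        return -ENOENT;         /* stub: no backing store */
}

static unsigned
my_get_count(const struct rte_mempool *mp)
{
        (void)mp;
        return 0;
}

static const struct rte_mempool_ops my_ops = {
        .name = "my_handler",
        .alloc = my_alloc,
        .free = my_free,
        .enqueue = my_enqueue,
        .dequeue = my_dequeue,
        .get_count = my_get_count,
};

MEMPOOL_REGISTER_OPS(my_ops);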
diff --git a/lib/librte_mempool/rte_mempool_stack.c b/lib/librte_mempool/rte_mempool_stack.c
deleted file mode 100644
index 817f77e6..00000000
--- a/lib/librte_mempool/rte_mempool_stack.c
+++ /dev/null
@@ -1,147 +0,0 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2016 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <rte_mempool.h>
-#include <rte_malloc.h>
-
-struct rte_mempool_stack {
-	rte_spinlock_t sl;
-
-	uint32_t size;
-	uint32_t len;
-	void *objs[];
-};
-
-static int
-stack_alloc(struct rte_mempool *mp)
-{
-	struct rte_mempool_stack *s;
-	unsigned n = mp->size;
-	int size = sizeof(*s) + (n+16)*sizeof(void *);
-
-	/* Allocate our local memory structure */
-	s = rte_zmalloc_socket("mempool-stack",
-			size,
-			RTE_CACHE_LINE_SIZE,
-			mp->socket_id);
-	if (s == NULL) {
-		RTE_LOG(ERR, MEMPOOL, "Cannot allocate stack!\n");
-		return -ENOMEM;
-	}
-
-	rte_spinlock_init(&s->sl);
-
-	s->size = n;
-	mp->pool_data = s;
-
-	return 0;
-}
-
-static int
-stack_enqueue(struct rte_mempool *mp, void * const *obj_table,
-		unsigned n)
-{
-	struct rte_mempool_stack *s = mp->pool_data;
-	void **cache_objs;
-	unsigned index;
-
-	rte_spinlock_lock(&s->sl);
-	cache_objs = &s->objs[s->len];
-
-	/* Is there sufficient space in the stack ? */
-	if ((s->len + n) > s->size) {
-		rte_spinlock_unlock(&s->sl);
-		return -ENOBUFS;
-	}
-
-	/* Add elements back into the cache */
-	for (index = 0; index < n; ++index, obj_table++)
-		cache_objs[index] = *obj_table;
-
-	s->len += n;
-
-	rte_spinlock_unlock(&s->sl);
-	return 0;
-}
-
-static int
-stack_dequeue(struct rte_mempool *mp, void **obj_table,
-		unsigned n)
-{
-	struct rte_mempool_stack *s = mp->pool_data;
-	void **cache_objs;
-	unsigned index, len;
-
-	rte_spinlock_lock(&s->sl);
-
-	if (unlikely(n > s->len)) {
-		rte_spinlock_unlock(&s->sl);
-		return -ENOENT;
-	}
-
-	cache_objs = s->objs;
-
-	for (index = 0, len = s->len - 1; index < n;
-			++index, len--, obj_table++)
-		*obj_table = cache_objs[len];
-
-	s->len -= n;
-	rte_spinlock_unlock(&s->sl);
-	return 0;
-}
-
-static unsigned
-stack_get_count(const struct rte_mempool *mp)
-{
-	struct rte_mempool_stack *s = mp->pool_data;
-
-	return s->len;
-}
-
-static void
-stack_free(struct rte_mempool *mp)
-{
-	rte_free((void *)(mp->pool_data));
-}
-
-static struct rte_mempool_ops ops_stack = {
-	.name = "stack",
-	.alloc = stack_alloc,
-	.free = stack_free,
-	.enqueue = stack_enqueue,
-	.dequeue = stack_dequeue,
-	.get_count = stack_get_count
-};
-
-MEMPOOL_REGISTER_OPS(ops_stack);
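Unlike the lock-free ring, the handler above serializes every enqueue and dequeue on a single spinlock, in exchange for LIFO order: a get returns the most recently put, and therefore cache-hottest, object. A behavioral sketch, assuming a pool created with the "stack" ops and cache_size 0 so that every operation reaches the handler:

#include <rte_mempool.h>

static void
lifo_demo(struct rte_mempool *mp)
{
        void *a, *b, *top;

        if (rte_mempool_get(mp, &a) != 0)
                return;
        if (rte_mempool_get(mp, &b) != 0) {
                rte_mempool_put(mp, a);
                return;
        }

        rte_mempool_put(mp, a);
        rte_mempool_put(mp, b);

        if (rte_mempool_get(mp, &top) == 0) {
                /* top == b: the stack pops the last object pushed */
                rte_mempool_put(mp, top);
        }
}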
diff --git a/lib/librte_mempool/rte_mempool_version.map b/lib/librte_mempool/rte_mempool_version.map
index dee1c990..f9c07944 100644
--- a/lib/librte_mempool/rte_mempool_version.map
+++ b/lib/librte_mempool/rte_mempool_version.map
@@ -3,7 +3,6 @@ DPDK_2.0 {
 	rte_mempool_audit;
 	rte_mempool_calc_obj_size;
-	rte_mempool_count;
 	rte_mempool_create;
 	rte_mempool_dump;
 	rte_mempool_list_dump;