From 97f17497d162afdb82c8704bf097f0fee3724b2e Mon Sep 17 00:00:00 2001 From: "C.J. Collier" Date: Tue, 14 Jun 2016 07:50:17 -0700 Subject: Imported Upstream version 16.04 Change-Id: I77eadcd8538a9122e4773cbe55b24033dc451757 Signed-off-by: C.J. Collier --- lib/librte_mempool/Makefile | 53 ++ lib/librte_mempool/rte_dom0_mempool.c | 133 +++ lib/librte_mempool/rte_mempool.c | 919 ++++++++++++++++++ lib/librte_mempool/rte_mempool.h | 1408 ++++++++++++++++++++++++++++ lib/librte_mempool/rte_mempool_version.map | 19 + 5 files changed, 2532 insertions(+) create mode 100644 lib/librte_mempool/Makefile create mode 100644 lib/librte_mempool/rte_dom0_mempool.c create mode 100644 lib/librte_mempool/rte_mempool.c create mode 100644 lib/librte_mempool/rte_mempool.h create mode 100644 lib/librte_mempool/rte_mempool_version.map (limited to 'lib/librte_mempool') diff --git a/lib/librte_mempool/Makefile b/lib/librte_mempool/Makefile new file mode 100644 index 00000000..a6898eff --- /dev/null +++ b/lib/librte_mempool/Makefile @@ -0,0 +1,53 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2014 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +include $(RTE_SDK)/mk/rte.vars.mk + +# library name +LIB = librte_mempool.a + +CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3 + +EXPORT_MAP := rte_mempool_version.map + +LIBABIVER := 1 + +# all source are stored in SRCS-y +SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool.c +ifeq ($(CONFIG_RTE_LIBRTE_XEN_DOM0),y) +SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_dom0_mempool.c +endif +# install includes +SYMLINK-$(CONFIG_RTE_LIBRTE_MEMPOOL)-include := rte_mempool.h + +DEPDIRS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += lib/librte_eal lib/librte_ring + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/lib/librte_mempool/rte_dom0_mempool.c b/lib/librte_mempool/rte_dom0_mempool.c new file mode 100644 index 00000000..0d6d7504 --- /dev/null +++ b/lib/librte_mempool/rte_dom0_mempool.c @@ -0,0 +1,133 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rte_mempool.h" + +static void +get_phys_map(void *va, phys_addr_t pa[], uint32_t pg_num, + uint32_t pg_sz, uint32_t memseg_id) +{ + uint32_t i; + uint64_t virt_addr, mfn_id; + struct rte_mem_config *mcfg; + uint32_t page_size = getpagesize(); + + /* get pointer to global configuration */ + mcfg = rte_eal_get_configuration()->mem_config; + virt_addr = (uintptr_t) mcfg->memseg[memseg_id].addr; + + for (i = 0; i != pg_num; i++) { + mfn_id = ((uintptr_t)va + i * pg_sz - virt_addr) / RTE_PGSIZE_2M; + pa[i] = mcfg->memseg[memseg_id].mfn[mfn_id] * page_size; + } +} + +/* create the mempool for supporting Dom0 */ +struct rte_mempool * +rte_dom0_mempool_create(const char *name, unsigned elt_num, unsigned elt_size, + unsigned cache_size, unsigned private_data_size, + rte_mempool_ctor_t *mp_init, void *mp_init_arg, + rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg, + int socket_id, unsigned flags) +{ + struct rte_mempool *mp = NULL; + phys_addr_t *pa; + char *va; + size_t sz; + uint32_t pg_num, pg_shift, pg_sz, total_size; + const struct rte_memzone *mz; + char mz_name[RTE_MEMZONE_NAMESIZE]; + int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY; + + pg_sz = RTE_PGSIZE_2M; + + pg_shift = rte_bsf32(pg_sz); + total_size = rte_mempool_calc_obj_size(elt_size, flags, NULL); + + /* calc max memory size and max number of pages needed. */ + sz = rte_mempool_xmem_size(elt_num, total_size, pg_shift) + + RTE_PGSIZE_2M; + pg_num = sz >> pg_shift; + + /* extract physical mappings of the allocated memory. 
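The sizing arithmetic above fits whole objects into 2 MB pages and reserves one extra page of slack so the start address can be aligned up. A standalone sketch of the same math, with the rte_* helpers inlined as plain C and every count hypothetical:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const size_t pg_sz = (size_t)1 << 21;   /* RTE_PGSIZE_2M */
        const uint32_t pg_shift = 21;           /* rte_bsf32(pg_sz) */
        const uint32_t elt_num = 8192;          /* hypothetical element count */
        const uint32_t total_size = 2176;       /* hypothetical total object size */

        /* rte_mempool_xmem_size() for the common case total_size < pg_sz:
         * objects never straddle a page boundary, so round up to whole
         * pages that each hold pg_sz / total_size objects. */
        size_t per_pg = pg_sz / total_size;
        size_t sz = (((elt_num + per_pg - 1) / per_pg) << pg_shift) + pg_sz;

        printf("pg_num=%zu, bytes=%zu\n", sz >> pg_shift, sz);
        return 0;
}

The extra RTE_PGSIZE_2M mirrors the slack the code adds before aligning the reserved memzone address up to a 2 MB boundary.
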
*/ + pa = calloc(pg_num, sizeof (*pa)); + if (pa == NULL) + return mp; + + snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_OBJ_NAME, name); + mz = rte_memzone_reserve(mz_name, sz, socket_id, mz_flags); + if (mz == NULL) { + free(pa); + return mp; + } + + va = (char *)RTE_ALIGN_CEIL((uintptr_t)mz->addr, RTE_PGSIZE_2M); + /* extract physical mappings of the allocated memory. */ + get_phys_map(va, pa, pg_num, pg_sz, mz->memseg_id); + + mp = rte_mempool_xmem_create(name, elt_num, elt_size, + cache_size, private_data_size, + mp_init, mp_init_arg, + obj_init, obj_init_arg, + socket_id, flags, va, pa, pg_num, pg_shift); + + free(pa); + + return mp; +} diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c new file mode 100644 index 00000000..f8781e17 --- /dev/null +++ b/lib/librte_mempool/rte_mempool.c @@ -0,0 +1,919 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rte_mempool.h" + +TAILQ_HEAD(rte_mempool_list, rte_tailq_entry); + +static struct rte_tailq_elem rte_mempool_tailq = { + .name = "RTE_MEMPOOL", +}; +EAL_REGISTER_TAILQ(rte_mempool_tailq) + +#define CACHE_FLUSHTHRESH_MULTIPLIER 1.5 +#define CALC_CACHE_FLUSHTHRESH(c) \ + ((typeof(c))((c) * CACHE_FLUSHTHRESH_MULTIPLIER)) + +/* + * return the greatest common divisor between a and b (fast algorithm) + * + */ +static unsigned get_gcd(unsigned a, unsigned b) +{ + unsigned c; + + if (0 == a) + return b; + if (0 == b) + return a; + + if (a < b) { + c = a; + a = b; + b = c; + } + + while (b != 0) { + c = a % b; + a = b; + b = c; + } + + return a; +} + +/* + * Depending on memory configuration, objects addresses are spread + * between channels and ranks in RAM: the pool allocator will add + * padding between objects. This function return the new size of the + * object. + */ +static unsigned optimize_object_size(unsigned obj_size) +{ + unsigned nrank, nchan; + unsigned new_obj_size; + + /* get number of channels */ + nchan = rte_memory_get_nchannel(); + if (nchan == 0) + nchan = 4; + + nrank = rte_memory_get_nrank(); + if (nrank == 0) + nrank = 1; + + /* process new object size */ + new_obj_size = (obj_size + RTE_MEMPOOL_ALIGN_MASK) / RTE_MEMPOOL_ALIGN; + while (get_gcd(new_obj_size, nrank * nchan) != 1) + new_obj_size++; + return new_obj_size * RTE_MEMPOOL_ALIGN; +} + +static void +mempool_add_elem(struct rte_mempool *mp, void *obj, uint32_t obj_idx, + rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg) +{ + struct rte_mempool_objhdr *hdr; + struct rte_mempool_objtlr *tlr __rte_unused; + + obj = (char *)obj + mp->header_size; + + /* set mempool ptr in header */ + hdr = RTE_PTR_SUB(obj, sizeof(*hdr)); + hdr->mp = mp; + +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG + hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE2; + tlr = __mempool_get_trailer(obj); + tlr->cookie = RTE_MEMPOOL_TRAILER_COOKIE; +#endif + /* call the initializer */ + if (obj_init) + obj_init(mp, obj_init_arg, obj, obj_idx); + + /* enqueue in ring */ + rte_ring_sp_enqueue(mp->ring, obj); +} + +uint32_t +rte_mempool_obj_iter(void *vaddr, uint32_t elt_num, size_t elt_sz, size_t align, + const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift, + rte_mempool_obj_iter_t obj_iter, void *obj_iter_arg) +{ + uint32_t i, j, k; + uint32_t pgn, pgf; + uintptr_t end, start, va; + uintptr_t pg_sz; + + pg_sz = (uintptr_t)1 << pg_shift; + va = (uintptr_t)vaddr; + + i = 0; + j = 0; + + while (i != elt_num && j != pg_num) { + + start = RTE_ALIGN_CEIL(va, align); + end = start + elt_sz; + + /* index of the first page for the next element. */ + pgf = (end >> pg_shift) - (start >> pg_shift); + + /* index of the last page for the current element. */ + pgn = ((end - 1) >> pg_shift) - (start >> pg_shift); + pgn += j; + + /* do we have enough space left for the element. */ + if (pgn >= pg_num) + break; + + for (k = j; + k != pgn && + paddr[k] + pg_sz == paddr[k + 1]; + k++) + ; + + /* + * if next pgn chunks of memory physically continuous, + * use it to create next element. + * otherwise, just skip that chunk unused. 
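The loop above accepts a candidate element only when every backing page in the span j..pgn is physically consecutive. A minimal standalone restatement of that test (phys_addr_t is stood in by uint64_t; the page addresses are made up):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;   /* stand-in for DPDK's phys_addr_t */

static int
pages_contiguous(const phys_addr_t paddr[], uint32_t j, uint32_t pgn,
                 uint64_t pg_sz)
{
        uint32_t k;

        /* same scan as above: stop at the first page whose successor
         * is not exactly pg_sz bytes away */
        for (k = j; k != pgn && paddr[k] + pg_sz == paddr[k + 1]; k++)
                ;
        return k == pgn;
}

int main(void)
{
        /* two contiguous 2 MB pages followed by a gap */
        phys_addr_t pa[] = { 0x200000, 0x400000, 0xa00000 };

        printf("%d\n", pages_contiguous(pa, 0, 1, 0x200000)); /* 1 */
        printf("%d\n", pages_contiguous(pa, 1, 2, 0x200000)); /* 0: gap */
        return 0;
}
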
+ */ + if (k == pgn) { + if (obj_iter != NULL) + obj_iter(obj_iter_arg, (void *)start, + (void *)end, i); + va = end; + j += pgf; + i++; + } else { + va = RTE_ALIGN_CEIL((va + 1), pg_sz); + j++; + } + } + + return i; +} + +/* + * Populate mempool with the objects. + */ + +struct mempool_populate_arg { + struct rte_mempool *mp; + rte_mempool_obj_ctor_t *obj_init; + void *obj_init_arg; +}; + +static void +mempool_obj_populate(void *arg, void *start, void *end, uint32_t idx) +{ + struct mempool_populate_arg *pa = arg; + + mempool_add_elem(pa->mp, start, idx, pa->obj_init, pa->obj_init_arg); + pa->mp->elt_va_end = (uintptr_t)end; +} + +static void +mempool_populate(struct rte_mempool *mp, size_t num, size_t align, + rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg) +{ + uint32_t elt_sz; + struct mempool_populate_arg arg; + + elt_sz = mp->elt_size + mp->header_size + mp->trailer_size; + arg.mp = mp; + arg.obj_init = obj_init; + arg.obj_init_arg = obj_init_arg; + + mp->size = rte_mempool_obj_iter((void *)mp->elt_va_start, + num, elt_sz, align, + mp->elt_pa, mp->pg_num, mp->pg_shift, + mempool_obj_populate, &arg); +} + +uint32_t +rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags, + struct rte_mempool_objsz *sz) +{ + struct rte_mempool_objsz lsz; + + sz = (sz != NULL) ? sz : &lsz; + + /* + * In header, we have at least the pointer to the pool, and + * optionaly a 64 bits cookie. + */ + sz->header_size = 0; + sz->header_size += sizeof(struct rte_mempool *); /* ptr to pool */ +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG + sz->header_size += sizeof(uint64_t); /* cookie */ +#endif + if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0) + sz->header_size = RTE_ALIGN_CEIL(sz->header_size, + RTE_MEMPOOL_ALIGN); + + /* trailer contains the cookie in debug mode */ + sz->trailer_size = 0; +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG + sz->trailer_size += sizeof(uint64_t); /* cookie */ +#endif + /* element size is 8 bytes-aligned at least */ + sz->elt_size = RTE_ALIGN_CEIL(elt_size, sizeof(uint64_t)); + + /* expand trailer to next cache line */ + if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0) { + sz->total_size = sz->header_size + sz->elt_size + + sz->trailer_size; + sz->trailer_size += ((RTE_MEMPOOL_ALIGN - + (sz->total_size & RTE_MEMPOOL_ALIGN_MASK)) & + RTE_MEMPOOL_ALIGN_MASK); + } + + /* + * increase trailer to add padding between objects in order to + * spread them across memory channels/ranks + */ + if ((flags & MEMPOOL_F_NO_SPREAD) == 0) { + unsigned new_size; + new_size = optimize_object_size(sz->header_size + sz->elt_size + + sz->trailer_size); + sz->trailer_size = new_size - sz->header_size - sz->elt_size; + } + + if (! rte_eal_has_hugepages()) { + /* + * compute trailer size so that pool elements fit exactly in + * a standard page + */ + int page_size = getpagesize(); + int new_size = page_size - sz->header_size - sz->elt_size; + if (new_size < 0 || (unsigned int)new_size < sz->trailer_size) { + printf("When hugepages are disabled, pool objects " + "can't exceed PAGE_SIZE: %d + %d + %d > %d\n", + sz->header_size, sz->elt_size, sz->trailer_size, + page_size); + return 0; + } + sz->trailer_size = new_size; + } + + /* this is the size of an object, including header and trailer */ + sz->total_size = sz->header_size + sz->elt_size + sz->trailer_size; + + return sz->total_size; +} + + +/* + * Calculate maximum amount of memory required to store given number of objects. 
+ */ +size_t +rte_mempool_xmem_size(uint32_t elt_num, size_t elt_sz, uint32_t pg_shift) +{ + size_t n, pg_num, pg_sz, sz; + + pg_sz = (size_t)1 << pg_shift; + + if ((n = pg_sz / elt_sz) > 0) { + pg_num = (elt_num + n - 1) / n; + sz = pg_num << pg_shift; + } else { + sz = RTE_ALIGN_CEIL(elt_sz, pg_sz) * elt_num; + } + + return sz; +} + +/* + * Calculate how much memory would be actually required with the + * given memory footprint to store required number of elements. + */ +static void +mempool_lelem_iter(void *arg, __rte_unused void *start, void *end, + __rte_unused uint32_t idx) +{ + *(uintptr_t *)arg = (uintptr_t)end; +} + +ssize_t +rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz, + const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift) +{ + uint32_t n; + uintptr_t va, uv; + size_t pg_sz, usz; + + pg_sz = (size_t)1 << pg_shift; + va = (uintptr_t)vaddr; + uv = va; + + if ((n = rte_mempool_obj_iter(vaddr, elt_num, elt_sz, 1, + paddr, pg_num, pg_shift, mempool_lelem_iter, + &uv)) != elt_num) { + return -(ssize_t)n; + } + + uv = RTE_ALIGN_CEIL(uv, pg_sz); + usz = uv - va; + return usz; +} + +#ifndef RTE_LIBRTE_XEN_DOM0 +/* stub if DOM0 support not configured */ +struct rte_mempool * +rte_dom0_mempool_create(const char *name __rte_unused, + unsigned n __rte_unused, + unsigned elt_size __rte_unused, + unsigned cache_size __rte_unused, + unsigned private_data_size __rte_unused, + rte_mempool_ctor_t *mp_init __rte_unused, + void *mp_init_arg __rte_unused, + rte_mempool_obj_ctor_t *obj_init __rte_unused, + void *obj_init_arg __rte_unused, + int socket_id __rte_unused, + unsigned flags __rte_unused) +{ + rte_errno = EINVAL; + return NULL; +} +#endif + +/* create the mempool */ +struct rte_mempool * +rte_mempool_create(const char *name, unsigned n, unsigned elt_size, + unsigned cache_size, unsigned private_data_size, + rte_mempool_ctor_t *mp_init, void *mp_init_arg, + rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg, + int socket_id, unsigned flags) +{ + if (rte_xen_dom0_supported()) + return rte_dom0_mempool_create(name, n, elt_size, + cache_size, private_data_size, + mp_init, mp_init_arg, + obj_init, obj_init_arg, + socket_id, flags); + else + return rte_mempool_xmem_create(name, n, elt_size, + cache_size, private_data_size, + mp_init, mp_init_arg, + obj_init, obj_init_arg, + socket_id, flags, + NULL, NULL, MEMPOOL_PG_NUM_DEFAULT, + MEMPOOL_PG_SHIFT_MAX); +} + +/* + * Create the mempool over already allocated chunk of memory. + * That external memory buffer can consists of physically disjoint pages. + * Setting vaddr to NULL, makes mempool to fallback to original behaviour + * and allocate space for mempool and it's elements as one big chunk of + * physically continuos memory. 
+ * */ +struct rte_mempool * +rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size, + unsigned cache_size, unsigned private_data_size, + rte_mempool_ctor_t *mp_init, void *mp_init_arg, + rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg, + int socket_id, unsigned flags, void *vaddr, + const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift) +{ + char mz_name[RTE_MEMZONE_NAMESIZE]; + char rg_name[RTE_RING_NAMESIZE]; + struct rte_mempool_list *mempool_list; + struct rte_mempool *mp = NULL; + struct rte_tailq_entry *te = NULL; + struct rte_ring *r = NULL; + const struct rte_memzone *mz; + size_t mempool_size; + int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY; + int rg_flags = 0; + void *obj; + struct rte_mempool_objsz objsz; + void *startaddr; + int page_size = getpagesize(); + + /* compilation-time checks */ + RTE_BUILD_BUG_ON((sizeof(struct rte_mempool) & + RTE_CACHE_LINE_MASK) != 0); +#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0 + RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_cache) & + RTE_CACHE_LINE_MASK) != 0); + RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, local_cache) & + RTE_CACHE_LINE_MASK) != 0); +#endif +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG + RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_debug_stats) & + RTE_CACHE_LINE_MASK) != 0); + RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, stats) & + RTE_CACHE_LINE_MASK) != 0); +#endif + + mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list); + + /* asked cache too big */ + if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE || + CALC_CACHE_FLUSHTHRESH(cache_size) > n) { + rte_errno = EINVAL; + return NULL; + } + + /* check that we have both VA and PA */ + if (vaddr != NULL && paddr == NULL) { + rte_errno = EINVAL; + return NULL; + } + + /* Check that pg_num and pg_shift parameters are valid. */ + if (pg_num < RTE_DIM(mp->elt_pa) || pg_shift > MEMPOOL_PG_SHIFT_MAX) { + rte_errno = EINVAL; + return NULL; + } + + /* "no cache align" imply "no spread" */ + if (flags & MEMPOOL_F_NO_CACHE_ALIGN) + flags |= MEMPOOL_F_NO_SPREAD; + + /* ring flags */ + if (flags & MEMPOOL_F_SP_PUT) + rg_flags |= RING_F_SP_ENQ; + if (flags & MEMPOOL_F_SC_GET) + rg_flags |= RING_F_SC_DEQ; + + /* calculate mempool object sizes. */ + if (!rte_mempool_calc_obj_size(elt_size, flags, &objsz)) { + rte_errno = EINVAL; + return NULL; + } + + rte_rwlock_write_lock(RTE_EAL_MEMPOOL_RWLOCK); + + /* allocate the ring that will be used to store objects */ + /* Ring functions will return appropriate errors if we are + * running as a secondary process etc., so no checks made + * in this function for that condition */ + snprintf(rg_name, sizeof(rg_name), RTE_MEMPOOL_MZ_FORMAT, name); + r = rte_ring_create(rg_name, rte_align32pow2(n+1), socket_id, rg_flags); + if (r == NULL) + goto exit_unlock; + + /* + * reserve a memory zone for this mempool: private data is + * cache-aligned + */ + private_data_size = (private_data_size + + RTE_MEMPOOL_ALIGN_MASK) & (~RTE_MEMPOOL_ALIGN_MASK); + + if (! 
rte_eal_has_hugepages()) { + /* + * expand private data size to a whole page, so that the + * first pool element will start on a new standard page + */ + int head = sizeof(struct rte_mempool); + int new_size = (private_data_size + head) % page_size; + if (new_size) { + private_data_size += page_size - new_size; + } + } + + /* try to allocate tailq entry */ + te = rte_zmalloc("MEMPOOL_TAILQ_ENTRY", sizeof(*te), 0); + if (te == NULL) { + RTE_LOG(ERR, MEMPOOL, "Cannot allocate tailq entry!\n"); + goto exit_unlock; + } + + /* + * If user provided an external memory buffer, then use it to + * store mempool objects. Otherwise reserve a memzone that is large + * enough to hold mempool header and metadata plus mempool objects. + */ + mempool_size = MEMPOOL_HEADER_SIZE(mp, pg_num) + private_data_size; + mempool_size = RTE_ALIGN_CEIL(mempool_size, RTE_MEMPOOL_ALIGN); + if (vaddr == NULL) + mempool_size += (size_t)objsz.total_size * n; + + if (! rte_eal_has_hugepages()) { + /* + * we want the memory pool to start on a page boundary, + * because pool elements crossing page boundaries would + * result in discontiguous physical addresses + */ + mempool_size += page_size; + } + + snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_MZ_FORMAT, name); + + mz = rte_memzone_reserve(mz_name, mempool_size, socket_id, mz_flags); + if (mz == NULL) + goto exit_unlock; + + if (rte_eal_has_hugepages()) { + startaddr = (void*)mz->addr; + } else { + /* align memory pool start address on a page boundary */ + unsigned long addr = (unsigned long)mz->addr; + if (addr & (page_size - 1)) { + addr += page_size; + addr &= ~(page_size - 1); + } + startaddr = (void*)addr; + } + + /* init the mempool structure */ + mp = startaddr; + memset(mp, 0, sizeof(*mp)); + snprintf(mp->name, sizeof(mp->name), "%s", name); + mp->phys_addr = mz->phys_addr; + mp->ring = r; + mp->size = n; + mp->flags = flags; + mp->elt_size = objsz.elt_size; + mp->header_size = objsz.header_size; + mp->trailer_size = objsz.trailer_size; + mp->cache_size = cache_size; + mp->cache_flushthresh = CALC_CACHE_FLUSHTHRESH(cache_size); + mp->private_data_size = private_data_size; + + /* calculate address of the first element for continuous mempool. */ + obj = (char *)mp + MEMPOOL_HEADER_SIZE(mp, pg_num) + + private_data_size; + obj = RTE_PTR_ALIGN_CEIL(obj, RTE_MEMPOOL_ALIGN); + + /* populate address translation fields. */ + mp->pg_num = pg_num; + mp->pg_shift = pg_shift; + mp->pg_mask = RTE_LEN2MASK(mp->pg_shift, typeof(mp->pg_mask)); + + /* mempool elements allocated together with mempool */ + if (vaddr == NULL) { + mp->elt_va_start = (uintptr_t)obj; + mp->elt_pa[0] = mp->phys_addr + + (mp->elt_va_start - (uintptr_t)mp); + + /* mempool elements in a separate chunk of memory. 
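Both branches here populate the same translation state: elt_va_start plus the per-page physical addresses in elt_pa[]. A standalone sketch of the virtual-to-physical lookup those fields enable (the header provides a helper along these lines; the struct below is a made-up stand-in for the relevant mempool fields):

#include <stdint.h>

typedef uint64_t phys_addr_t;   /* stand-in for DPDK's phys_addr_t */

struct xlat {                   /* hypothetical mirror of the mempool fields */
        uintptr_t elt_va_start; /* VA of the first object */
        uint32_t pg_shift;      /* log2 of the page size */
        uintptr_t pg_mask;      /* page size - 1 */
        const phys_addr_t *elt_pa; /* PA of each backing page */
};

phys_addr_t
elt_virt2phy(const struct xlat *x, const void *elt)
{
        uintptr_t off = (uintptr_t)elt - x->elt_va_start;

        /* physical base of the page holding elt, plus in-page offset */
        return x->elt_pa[off >> x->pg_shift] + (off & x->pg_mask);
}
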
*/ + } else { + mp->elt_va_start = (uintptr_t)vaddr; + memcpy(mp->elt_pa, paddr, sizeof (mp->elt_pa[0]) * pg_num); + } + + mp->elt_va_end = mp->elt_va_start; + + /* call the initializer */ + if (mp_init) + mp_init(mp, mp_init_arg); + + mempool_populate(mp, n, 1, obj_init, obj_init_arg); + + te->data = (void *) mp; + + rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK); + TAILQ_INSERT_TAIL(mempool_list, te, next); + rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK); + rte_rwlock_write_unlock(RTE_EAL_MEMPOOL_RWLOCK); + + return mp; + +exit_unlock: + rte_rwlock_write_unlock(RTE_EAL_MEMPOOL_RWLOCK); + rte_ring_free(r); + rte_free(te); + + return NULL; +} + +/* Return the number of entries in the mempool */ +unsigned +rte_mempool_count(const struct rte_mempool *mp) +{ + unsigned count; + + count = rte_ring_count(mp->ring); + +#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0 + { + unsigned lcore_id; + if (mp->cache_size == 0) + return count; + + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) + count += mp->local_cache[lcore_id].len; + } +#endif + + /* + * due to race condition (access to len is not locked), the + * total can be greater than size... so fix the result + */ + if (count > mp->size) + return mp->size; + return count; +} + +/* dump the cache status */ +static unsigned +rte_mempool_dump_cache(FILE *f, const struct rte_mempool *mp) +{ +#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0 + unsigned lcore_id; + unsigned count = 0; + unsigned cache_count; + + fprintf(f, " cache infos:\n"); + fprintf(f, " cache_size=%"PRIu32"\n", mp->cache_size); + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + cache_count = mp->local_cache[lcore_id].len; + fprintf(f, " cache_count[%u]=%u\n", lcore_id, cache_count); + count += cache_count; + } + fprintf(f, " total_cache_count=%u\n", count); + return count; +#else + RTE_SET_USED(mp); + fprintf(f, " cache disabled\n"); + return 0; +#endif +} + +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG +/* check cookies before and after objects */ +#ifndef __INTEL_COMPILER +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + +struct mempool_audit_arg { + const struct rte_mempool *mp; + uintptr_t obj_end; + uint32_t obj_num; +}; + +static void +mempool_obj_audit(void *arg, void *start, void *end, uint32_t idx) +{ + struct mempool_audit_arg *pa = arg; + void *obj; + + obj = (char *)start + pa->mp->header_size; + pa->obj_end = (uintptr_t)end; + pa->obj_num = idx + 1; + __mempool_check_cookies(pa->mp, &obj, 1, 2); +} + +static void +mempool_audit_cookies(const struct rte_mempool *mp) +{ + uint32_t elt_sz, num; + struct mempool_audit_arg arg; + + elt_sz = mp->elt_size + mp->header_size + mp->trailer_size; + + arg.mp = mp; + arg.obj_end = mp->elt_va_start; + arg.obj_num = 0; + + num = rte_mempool_obj_iter((void *)mp->elt_va_start, + mp->size, elt_sz, 1, + mp->elt_pa, mp->pg_num, mp->pg_shift, + mempool_obj_audit, &arg); + + if (num != mp->size) { + rte_panic("rte_mempool_obj_iter(mempool=%p, size=%u) " + "iterated only over %u elements\n", + mp, mp->size, num); + } else if (arg.obj_end != mp->elt_va_end || arg.obj_num != mp->size) { + rte_panic("rte_mempool_obj_iter(mempool=%p, size=%u) " + "last callback va_end: %#tx (%#tx expeceted), " + "num of objects: %u (%u expected)\n", + mp, mp->size, + arg.obj_end, mp->elt_va_end, + arg.obj_num, mp->size); + } +} + +#ifndef __INTEL_COMPILER +#pragma GCC diagnostic error "-Wcast-qual" +#endif +#else +#define mempool_audit_cookies(mp) do {} while(0) +#endif + +#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0 +/* check cookies before and after objects */ +static void 
+mempool_audit_cache(const struct rte_mempool *mp) +{ + /* check cache size consistency */ + unsigned lcore_id; + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + if (mp->local_cache[lcore_id].len > mp->cache_flushthresh) { + RTE_LOG(CRIT, MEMPOOL, "badness on cache[%u]\n", + lcore_id); + rte_panic("MEMPOOL: invalid cache len\n"); + } + } +} +#else +#define mempool_audit_cache(mp) do {} while(0) +#endif + + +/* check the consistency of mempool (size, cookies, ...) */ +void +rte_mempool_audit(const struct rte_mempool *mp) +{ + mempool_audit_cache(mp); + mempool_audit_cookies(mp); + + /* For case where mempool DEBUG is not set, and cache size is 0 */ + RTE_SET_USED(mp); +} + +/* dump the status of the mempool on the console */ +void +rte_mempool_dump(FILE *f, const struct rte_mempool *mp) +{ +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG + struct rte_mempool_debug_stats sum; + unsigned lcore_id; +#endif + unsigned common_count; + unsigned cache_count; + + RTE_VERIFY(f != NULL); + RTE_VERIFY(mp != NULL); + + fprintf(f, "mempool <%s>@%p\n", mp->name, mp); + fprintf(f, " flags=%x\n", mp->flags); + fprintf(f, " ring=<%s>@%p\n", mp->ring->name, mp->ring); + fprintf(f, " phys_addr=0x%" PRIx64 "\n", mp->phys_addr); + fprintf(f, " size=%"PRIu32"\n", mp->size); + fprintf(f, " header_size=%"PRIu32"\n", mp->header_size); + fprintf(f, " elt_size=%"PRIu32"\n", mp->elt_size); + fprintf(f, " trailer_size=%"PRIu32"\n", mp->trailer_size); + fprintf(f, " total_obj_size=%"PRIu32"\n", + mp->header_size + mp->elt_size + mp->trailer_size); + + fprintf(f, " private_data_size=%"PRIu32"\n", mp->private_data_size); + fprintf(f, " pg_num=%"PRIu32"\n", mp->pg_num); + fprintf(f, " pg_shift=%"PRIu32"\n", mp->pg_shift); + fprintf(f, " pg_mask=%#tx\n", mp->pg_mask); + fprintf(f, " elt_va_start=%#tx\n", mp->elt_va_start); + fprintf(f, " elt_va_end=%#tx\n", mp->elt_va_end); + fprintf(f, " elt_pa[0]=0x%" PRIx64 "\n", mp->elt_pa[0]); + + if (mp->size != 0) + fprintf(f, " avg bytes/object=%#Lf\n", + (long double)(mp->elt_va_end - mp->elt_va_start) / + mp->size); + + cache_count = rte_mempool_dump_cache(f, mp); + common_count = rte_ring_count(mp->ring); + if ((cache_count + common_count) > mp->size) + common_count = mp->size - cache_count; + fprintf(f, " common_pool_count=%u\n", common_count); + + /* sum and dump statistics */ +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG + memset(&sum, 0, sizeof(sum)); + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + sum.put_bulk += mp->stats[lcore_id].put_bulk; + sum.put_objs += mp->stats[lcore_id].put_objs; + sum.get_success_bulk += mp->stats[lcore_id].get_success_bulk; + sum.get_success_objs += mp->stats[lcore_id].get_success_objs; + sum.get_fail_bulk += mp->stats[lcore_id].get_fail_bulk; + sum.get_fail_objs += mp->stats[lcore_id].get_fail_objs; + } + fprintf(f, " stats:\n"); + fprintf(f, " put_bulk=%"PRIu64"\n", sum.put_bulk); + fprintf(f, " put_objs=%"PRIu64"\n", sum.put_objs); + fprintf(f, " get_success_bulk=%"PRIu64"\n", sum.get_success_bulk); + fprintf(f, " get_success_objs=%"PRIu64"\n", sum.get_success_objs); + fprintf(f, " get_fail_bulk=%"PRIu64"\n", sum.get_fail_bulk); + fprintf(f, " get_fail_objs=%"PRIu64"\n", sum.get_fail_objs); +#else + fprintf(f, " no statistics available\n"); +#endif + + rte_mempool_audit(mp); +} + +/* dump the status of all mempools on the console */ +void +rte_mempool_list_dump(FILE *f) +{ + const struct rte_mempool *mp = NULL; + struct rte_tailq_entry *te; + struct rte_mempool_list *mempool_list; + + mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, 
rte_mempool_list); + + rte_rwlock_read_lock(RTE_EAL_MEMPOOL_RWLOCK); + + TAILQ_FOREACH(te, mempool_list, next) { + mp = (struct rte_mempool *) te->data; + rte_mempool_dump(f, mp); + } + + rte_rwlock_read_unlock(RTE_EAL_MEMPOOL_RWLOCK); +} + +/* search a mempool from its name */ +struct rte_mempool * +rte_mempool_lookup(const char *name) +{ + struct rte_mempool *mp = NULL; + struct rte_tailq_entry *te; + struct rte_mempool_list *mempool_list; + + mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list); + + rte_rwlock_read_lock(RTE_EAL_MEMPOOL_RWLOCK); + + TAILQ_FOREACH(te, mempool_list, next) { + mp = (struct rte_mempool *) te->data; + if (strncmp(name, mp->name, RTE_MEMPOOL_NAMESIZE) == 0) + break; + } + + rte_rwlock_read_unlock(RTE_EAL_MEMPOOL_RWLOCK); + + if (te == NULL) { + rte_errno = ENOENT; + return NULL; + } + + return mp; +} + +void rte_mempool_walk(void (*func)(const struct rte_mempool *, void *), + void *arg) +{ + struct rte_tailq_entry *te = NULL; + struct rte_mempool_list *mempool_list; + + mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list); + + rte_rwlock_read_lock(RTE_EAL_MEMPOOL_RWLOCK); + + TAILQ_FOREACH(te, mempool_list, next) { + (*func)((struct rte_mempool *) te->data, arg); + } + + rte_rwlock_read_unlock(RTE_EAL_MEMPOOL_RWLOCK); +} diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h new file mode 100644 index 00000000..9745bf0d --- /dev/null +++ b/lib/librte_mempool/rte_mempool.h @@ -0,0 +1,1408 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _RTE_MEMPOOL_H_ +#define _RTE_MEMPOOL_H_ + +/** + * @file + * RTE Mempool. + * + * A memory pool is an allocator of fixed-size object. It is + * identified by its name, and uses a ring to store free objects. 
It + * provides some other optional services, like a per-core object + * cache, and an alignment helper to ensure that objects are padded + * to spread them equally on all RAM channels, ranks, and so on. + * + * Objects owned by a mempool should never be added in another + * mempool. When an object is freed using rte_mempool_put() or + * equivalent, the object data is not modified; the user can save some + * meta-data in the object data and retrieve them when allocating a + * new object. + * + * Note: the mempool implementation is not preemptable. A lcore must + * not be interrupted by another task that uses the same mempool + * (because it uses a ring which is not preemptable). Also, mempool + * functions must not be used outside the DPDK environment: for + * example, in linuxapp environment, a thread that is not created by + * the EAL must not use mempools. This is due to the per-lcore cache + * that won't work as rte_lcore_id() will not return a correct value. + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define RTE_MEMPOOL_HEADER_COOKIE1 0xbadbadbadadd2e55ULL /**< Header cookie. */ +#define RTE_MEMPOOL_HEADER_COOKIE2 0xf2eef2eedadd2e55ULL /**< Header cookie. */ +#define RTE_MEMPOOL_TRAILER_COOKIE 0xadd2e55badbadbadULL /**< Trailer cookie.*/ + +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG +/** + * A structure that stores the mempool statistics (per-lcore). + */ +struct rte_mempool_debug_stats { + uint64_t put_bulk; /**< Number of puts. */ + uint64_t put_objs; /**< Number of objects successfully put. */ + uint64_t get_success_bulk; /**< Successful allocation number. */ + uint64_t get_success_objs; /**< Objects successfully allocated. */ + uint64_t get_fail_bulk; /**< Failed allocation number. */ + uint64_t get_fail_objs; /**< Objects that failed to be allocated. */ +} __rte_cache_aligned; +#endif + +#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0 +/** + * A structure that stores a per-core object cache. + */ +struct rte_mempool_cache { + unsigned len; /**< Cache len */ + /* + * Cache is allocated to this size to allow it to overflow in certain + * cases to avoid needless emptying of cache. + */ + void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3]; /**< Cache objects */ +} __rte_cache_aligned; +#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */ + +/** + * A structure that stores the size of mempool elements. + */ +struct rte_mempool_objsz { + uint32_t elt_size; /**< Size of an element. */ + uint32_t header_size; /**< Size of header (before elt). */ + uint32_t trailer_size; /**< Size of trailer (after elt). */ + uint32_t total_size; + /**< Total size of an object (header + elt + trailer). */ +}; + +#define RTE_MEMPOOL_NAMESIZE 32 /**< Maximum length of a memory pool. 
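The 3x sizing of objs[] in the cache structure above is what lets a put burst overflow the flush threshold without spilling. Worked numbers, assuming CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is 512 and a hypothetical cache_size of 256:

#include <stdio.h>

#define CACHE_MAX 512   /* assumed CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE */

int main(void)
{
        unsigned cache_size = 256;
        /* CACHE_FLUSHTHRESH_MULTIPLIER in rte_mempool.c is 1.5 */
        unsigned flushthresh = (unsigned)(cache_size * 1.5);    /* 384 */

        /* worst case: len was flushthresh - 1, then a bulk put of up to
         * CACHE_MAX objects lands in the cache before the flush check */
        unsigned worst = (flushthresh - 1) + CACHE_MAX;         /* 895 */

        printf("flushthresh=%u worst=%u capacity=%u\n",
               flushthresh, worst, CACHE_MAX * 3);              /* 1536 */
        return 0;
}
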
*/ +#define RTE_MEMPOOL_MZ_PREFIX "MP_" + +/* "MP_" */ +#define RTE_MEMPOOL_MZ_FORMAT RTE_MEMPOOL_MZ_PREFIX "%s" + +#ifdef RTE_LIBRTE_XEN_DOM0 + +/* "_MP_elt" */ +#define RTE_MEMPOOL_OBJ_NAME "%s_" RTE_MEMPOOL_MZ_PREFIX "elt" + +#else + +#define RTE_MEMPOOL_OBJ_NAME RTE_MEMPOOL_MZ_FORMAT + +#endif /* RTE_LIBRTE_XEN_DOM0 */ + +#define MEMPOOL_PG_SHIFT_MAX (sizeof(uintptr_t) * CHAR_BIT - 1) + +/** Mempool over one chunk of physically continuous memory */ +#define MEMPOOL_PG_NUM_DEFAULT 1 + +#ifndef RTE_MEMPOOL_ALIGN +#define RTE_MEMPOOL_ALIGN RTE_CACHE_LINE_SIZE +#endif + +#define RTE_MEMPOOL_ALIGN_MASK (RTE_MEMPOOL_ALIGN - 1) + +/** + * Mempool object header structure + * + * Each object stored in mempools are prefixed by this header structure, + * it allows to retrieve the mempool pointer from the object. When debug + * is enabled, a cookie is also added in this structure preventing + * corruptions and double-frees. + */ +struct rte_mempool_objhdr { + struct rte_mempool *mp; /**< The mempool owning the object. */ +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG + uint64_t cookie; /**< Debug cookie. */ +#endif +}; + +/** + * Mempool object trailer structure + * + * In debug mode, each object stored in mempools are suffixed by this + * trailer structure containing a cookie preventing memory corruptions. + */ +struct rte_mempool_objtlr { +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG + uint64_t cookie; /**< Debug cookie. */ +#endif +}; + +/** + * The RTE mempool structure. + */ +struct rte_mempool { + char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool. */ + struct rte_ring *ring; /**< Ring to store objects. */ + phys_addr_t phys_addr; /**< Phys. addr. of mempool struct. */ + int flags; /**< Flags of the mempool. */ + uint32_t size; /**< Size of the mempool. */ + uint32_t cache_size; /**< Size of per-lcore local cache. */ + uint32_t cache_flushthresh; + /**< Threshold before we flush excess elements. */ + + uint32_t elt_size; /**< Size of an element. */ + uint32_t header_size; /**< Size of header (before elt). */ + uint32_t trailer_size; /**< Size of trailer (after elt). */ + + unsigned private_data_size; /**< Size of private data. */ + +#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0 + /** Per-lcore local cache. */ + struct rte_mempool_cache local_cache[RTE_MAX_LCORE]; +#endif + +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG + /** Per-lcore statistics. */ + struct rte_mempool_debug_stats stats[RTE_MAX_LCORE]; +#endif + + /* Address translation support, starts from next cache line. */ + + /** Number of elements in the elt_pa array. */ + uint32_t pg_num __rte_cache_aligned; + uint32_t pg_shift; /**< LOG2 of the physical pages. */ + uintptr_t pg_mask; /**< physical page mask value. */ + uintptr_t elt_va_start; + /**< Virtual address of the first mempool object. */ + uintptr_t elt_va_end; + /**< Virtual address of the mempool object. */ + phys_addr_t elt_pa[MEMPOOL_PG_NUM_DEFAULT]; + /**< Array of physical page addresses for the mempool objects buffer. */ + +} __rte_cache_aligned; + +#define MEMPOOL_F_NO_SPREAD 0x0001 /**< Do not spread in memory. */ +#define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines.*/ +#define MEMPOOL_F_SP_PUT 0x0004 /**< Default put is "single-producer".*/ +#define MEMPOOL_F_SC_GET 0x0008 /**< Default get is "single-consumer".*/ + +/** + * @internal When debug is enabled, store some statistics. + * + * @param mp + * Pointer to the memory pool. + * @param name + * Name of the statistics field to increment in the memory pool. + * @param n + * Number to add to the object-oriented statistics. 
+ */ +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG +#define __MEMPOOL_STAT_ADD(mp, name, n) do { \ + unsigned __lcore_id = rte_lcore_id(); \ + if (__lcore_id < RTE_MAX_LCORE) { \ + mp->stats[__lcore_id].name##_objs += n; \ + mp->stats[__lcore_id].name##_bulk += 1; \ + } \ + } while(0) +#else +#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0) +#endif + +/** + * Calculate the size of the mempool header. + * + * @param mp + * Pointer to the memory pool. + * @param pgn + * Number of pages used to store mempool objects. + */ +#define MEMPOOL_HEADER_SIZE(mp, pgn) (sizeof(*(mp)) + \ + RTE_ALIGN_CEIL(((pgn) - RTE_DIM((mp)->elt_pa)) * \ + sizeof ((mp)->elt_pa[0]), RTE_CACHE_LINE_SIZE)) + +/** + * Return true if the whole mempool is in contiguous memory. + */ +#define MEMPOOL_IS_CONTIG(mp) \ + ((mp)->pg_num == MEMPOOL_PG_NUM_DEFAULT && \ + (mp)->phys_addr == (mp)->elt_pa[0]) + +/* return the header of a mempool object (internal) */ +static inline struct rte_mempool_objhdr *__mempool_get_header(void *obj) +{ + return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj, sizeof(struct rte_mempool_objhdr)); +} + +/** + * Return a pointer to the mempool owning this object. + * + * @param obj + * An object that is owned by a pool. If this is not the case, + * the behavior is undefined. + * @return + * A pointer to the mempool structure. + */ +static inline struct rte_mempool *rte_mempool_from_obj(void *obj) +{ + struct rte_mempool_objhdr *hdr = __mempool_get_header(obj); + return hdr->mp; +} + +/* return the trailer of a mempool object (internal) */ +static inline struct rte_mempool_objtlr *__mempool_get_trailer(void *obj) +{ + struct rte_mempool *mp = rte_mempool_from_obj(obj); + return (struct rte_mempool_objtlr *)RTE_PTR_ADD(obj, mp->elt_size); +} + +/** + * @internal Check and update cookies or panic. + * + * @param mp + * Pointer to the memory pool. + * @param obj_table_const + * Pointer to a table of void * pointers (objects). + * @param n + * Index of object in object table. + * @param free + * - 0: object is supposed to be allocated, mark it as free + * - 1: object is supposed to be free, mark it as allocated + * - 2: just check that cookie is valid (free or allocated) + */ +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG +#ifndef __INTEL_COMPILER +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif +static inline void __mempool_check_cookies(const struct rte_mempool *mp, + void * const *obj_table_const, + unsigned n, int free) +{ + struct rte_mempool_objhdr *hdr; + struct rte_mempool_objtlr *tlr; + uint64_t cookie; + void *tmp; + void *obj; + void **obj_table; + + /* Force to drop the "const" attribute. 
This is done only when + * DEBUG is enabled */ + tmp = (void *) obj_table_const; + obj_table = (void **) tmp; + + while (n--) { + obj = obj_table[n]; + + if (rte_mempool_from_obj(obj) != mp) + rte_panic("MEMPOOL: object is owned by another " + "mempool\n"); + + hdr = __mempool_get_header(obj); + cookie = hdr->cookie; + + if (free == 0) { + if (cookie != RTE_MEMPOOL_HEADER_COOKIE1) { + rte_log_set_history(0); + RTE_LOG(CRIT, MEMPOOL, + "obj=%p, mempool=%p, cookie=%" PRIx64 "\n", + obj, (const void *) mp, cookie); + rte_panic("MEMPOOL: bad header cookie (put)\n"); + } + hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE2; + } + else if (free == 1) { + if (cookie != RTE_MEMPOOL_HEADER_COOKIE2) { + rte_log_set_history(0); + RTE_LOG(CRIT, MEMPOOL, + "obj=%p, mempool=%p, cookie=%" PRIx64 "\n", + obj, (const void *) mp, cookie); + rte_panic("MEMPOOL: bad header cookie (get)\n"); + } + hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE1; + } + else if (free == 2) { + if (cookie != RTE_MEMPOOL_HEADER_COOKIE1 && + cookie != RTE_MEMPOOL_HEADER_COOKIE2) { + rte_log_set_history(0); + RTE_LOG(CRIT, MEMPOOL, + "obj=%p, mempool=%p, cookie=%" PRIx64 "\n", + obj, (const void *) mp, cookie); + rte_panic("MEMPOOL: bad header cookie (audit)\n"); + } + } + tlr = __mempool_get_trailer(obj); + cookie = tlr->cookie; + if (cookie != RTE_MEMPOOL_TRAILER_COOKIE) { + rte_log_set_history(0); + RTE_LOG(CRIT, MEMPOOL, + "obj=%p, mempool=%p, cookie=%" PRIx64 "\n", + obj, (const void *) mp, cookie); + rte_panic("MEMPOOL: bad trailer cookie\n"); + } + } +} +#ifndef __INTEL_COMPILER +#pragma GCC diagnostic error "-Wcast-qual" +#endif +#else +#define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0) +#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */ + +/** + * A mempool object iterator callback function. + */ +typedef void (*rte_mempool_obj_iter_t)(void * /*obj_iter_arg*/, + void * /*obj_start*/, + void * /*obj_end*/, + uint32_t /*obj_index */); + +/** + * Call a function for each mempool object in a memory chunk + * + * Iterate across objects of the given size and alignment in the + * provided chunk of memory. The given memory buffer can consist of + * disjointed physical pages. + * + * For each object, call the provided callback (if any). This function + * is used to populate a mempool, or walk through all the elements of a + * mempool, or estimate how many elements of the given size could be + * created in the given memory buffer. + * + * @param vaddr + * Virtual address of the memory buffer. + * @param elt_num + * Maximum number of objects to iterate through. + * @param elt_sz + * Size of each object. + * @param align + * Alignment of each object. + * @param paddr + * Array of physical addresses of the pages that comprises given memory + * buffer. + * @param pg_num + * Number of elements in the paddr array. + * @param pg_shift + * LOG2 of the physical pages size. + * @param obj_iter + * Object iterator callback function (could be NULL). + * @param obj_iter_arg + * User defined parameter for the object iterator callback function. + * + * @return + * Number of objects iterated through. + */ +uint32_t rte_mempool_obj_iter(void *vaddr, + uint32_t elt_num, size_t elt_sz, size_t align, + const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift, + rte_mempool_obj_iter_t obj_iter, void *obj_iter_arg); + +/** + * An object constructor callback function for mempool. + * + * Arguments are the mempool, the opaque pointer given by the user in + * rte_mempool_create(), the pointer to the element and the index of + * the element in the pool. 
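A minimal constructor matching this signature, for illustration (the element layout and sequence-number scheme are made up):

#include <stdint.h>
#include <string.h>
#include <rte_mempool.h>

struct my_elt {                 /* hypothetical element layout */
        uint32_t seq;
        uint8_t payload[60];
};

static void
my_obj_init(struct rte_mempool *mp, void *arg, void *obj, unsigned idx)
{
        struct my_elt *e = obj;

        (void)mp;
        (void)arg;
        memset(e, 0, sizeof(*e));
        e->seq = idx;           /* idx is the object's index in the pool */
}

Passed as the obj_init argument of rte_mempool_create(), this runs once per element while the pool is being populated.
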
+ */ +typedef void (rte_mempool_obj_ctor_t)(struct rte_mempool *, void *, + void *, unsigned); + +/** + * A mempool constructor callback function. + * + * Arguments are the mempool and the opaque pointer given by the user in + * rte_mempool_create(). + */ +typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *); + +/** + * Create a new mempool named *name* in memory. + * + * This function uses ``memzone_reserve()`` to allocate memory. The + * pool contains n elements of elt_size. Its size is set to n. + * All elements of the mempool are allocated together with the mempool header, + * in one physically continuous chunk of memory. + * + * @param name + * The name of the mempool. + * @param n + * The number of elements in the mempool. The optimum size (in terms of + * memory usage) for a mempool is when n is a power of two minus one: + * n = (2^q - 1). + * @param elt_size + * The size of each element. + * @param cache_size + * If cache_size is non-zero, the rte_mempool library will try to + * limit the accesses to the common lockless pool, by maintaining a + * per-lcore object cache. This argument must be lower or equal to + * CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE and n / 1.5. It is advised to choose + * cache_size to have "n modulo cache_size == 0": if this is + * not the case, some elements will always stay in the pool and will + * never be used. The access to the per-lcore table is of course + * faster than the multi-producer/consumer pool. The cache can be + * disabled if the cache_size argument is set to 0; it can be useful to + * avoid losing objects in cache. Note that even if not used, the + * memory space for cache is always reserved in a mempool structure, + * except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0. + * @param private_data_size + * The size of the private data appended after the mempool + * structure. This is useful for storing some private data after the + * mempool structure, as is done for rte_mbuf_pool for example. + * @param mp_init + * A function pointer that is called for initialization of the pool, + * before object initialization. The user can initialize the private + * data in this function if needed. This parameter can be NULL if + * not needed. + * @param mp_init_arg + * An opaque pointer to data that can be used in the mempool + * constructor function. + * @param obj_init + * A function pointer that is called for each object at + * initialization of the pool. The user can set some meta data in + * objects if needed. This parameter can be NULL if not needed. + * The obj_init() function takes the mempool pointer, the init_arg, + * the object pointer and the object number as parameters. + * @param obj_init_arg + * An opaque pointer to data that can be used as an argument for + * each call to the object constructor function. + * @param socket_id + * The *socket_id* argument is the socket identifier in the case of + * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA + * constraint for the reserved zone. + * @param flags + * The *flags* arguments is an OR of following flags: + * - MEMPOOL_F_NO_SPREAD: By default, objects addresses are spread + * between channels in RAM: the pool allocator will add padding + * between objects depending on the hardware configuration. See + * Memory alignment constraints for details. If this flag is set, + * the allocator will just align them to a cache line. + * - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are + * cache-aligned. 
This flag removes this constraint, and no + * padding will be present between objects. This flag implies + * MEMPOOL_F_NO_SPREAD. + * - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior + * when using rte_mempool_put() or rte_mempool_put_bulk() is + * "single-producer". Otherwise, it is "multi-producers". + * - MEMPOOL_F_SC_GET: If this flag is set, the default behavior + * when using rte_mempool_get() or rte_mempool_get_bulk() is + * "single-consumer". Otherwise, it is "multi-consumers". + * @return + * The pointer to the new allocated mempool, on success. NULL on error + * with rte_errno set appropriately. Possible rte_errno values include: + * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure + * - E_RTE_SECONDARY - function was called from a secondary process instance + * - EINVAL - cache size provided is too large + * - ENOSPC - the maximum number of memzones has already been allocated + * - EEXIST - a memzone with the same name already exists + * - ENOMEM - no appropriate memory area found in which to create memzone + */ +struct rte_mempool * +rte_mempool_create(const char *name, unsigned n, unsigned elt_size, + unsigned cache_size, unsigned private_data_size, + rte_mempool_ctor_t *mp_init, void *mp_init_arg, + rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg, + int socket_id, unsigned flags); + +/** + * Create a new mempool named *name* in memory. + * + * This function uses ``memzone_reserve()`` to allocate memory. The + * pool contains n elements of elt_size. Its size is set to n. + * Depending on the input parameters, mempool elements can be either allocated + * together with the mempool header, or an externally provided memory buffer + * could be used to store mempool objects. In later case, that external + * memory buffer can consist of set of disjoint physical pages. + * + * @param name + * The name of the mempool. + * @param n + * The number of elements in the mempool. The optimum size (in terms of + * memory usage) for a mempool is when n is a power of two minus one: + * n = (2^q - 1). + * @param elt_size + * The size of each element. + * @param cache_size + * If cache_size is non-zero, the rte_mempool library will try to + * limit the accesses to the common lockless pool, by maintaining a + * per-lcore object cache. This argument must be lower or equal to + * CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE. It is advised to choose + * cache_size to have "n modulo cache_size == 0": if this is + * not the case, some elements will always stay in the pool and will + * never be used. The access to the per-lcore table is of course + * faster than the multi-producer/consumer pool. The cache can be + * disabled if the cache_size argument is set to 0; it can be useful to + * avoid losing objects in cache. Note that even if not used, the + * memory space for cache is always reserved in a mempool structure, + * except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0. + * @param private_data_size + * The size of the private data appended after the mempool + * structure. This is useful for storing some private data after the + * mempool structure, as is done for rte_mbuf_pool for example. + * @param mp_init + * A function pointer that is called for initialization of the pool, + * before object initialization. The user can initialize the private + * data in this function if needed. This parameter can be NULL if + * not needed. + * @param mp_init_arg + * An opaque pointer to data that can be used in the mempool + * constructor function. 
+ * @param obj_init + * A function pointer that is called for each object at + * initialization of the pool. The user can set some meta data in + * objects if needed. This parameter can be NULL if not needed. + * The obj_init() function takes the mempool pointer, the init_arg, + * the object pointer and the object number as parameters. + * @param obj_init_arg + * An opaque pointer to data that can be used as an argument for + * each call to the object constructor function. + * @param socket_id + * The *socket_id* argument is the socket identifier in the case of + * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA + * constraint for the reserved zone. + * @param flags + * The *flags* arguments is an OR of following flags: + * - MEMPOOL_F_NO_SPREAD: By default, objects addresses are spread + * between channels in RAM: the pool allocator will add padding + * between objects depending on the hardware configuration. See + * Memory alignment constraints for details. If this flag is set, + * the allocator will just align them to a cache line. + * - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are + * cache-aligned. This flag removes this constraint, and no + * padding will be present between objects. This flag implies + * MEMPOOL_F_NO_SPREAD. + * - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior + * when using rte_mempool_put() or rte_mempool_put_bulk() is + * "single-producer". Otherwise, it is "multi-producers". + * - MEMPOOL_F_SC_GET: If this flag is set, the default behavior + * when using rte_mempool_get() or rte_mempool_get_bulk() is + * "single-consumer". Otherwise, it is "multi-consumers". + * @param vaddr + * Virtual address of the externally allocated memory buffer. + * Will be used to store mempool objects. + * @param paddr + * Array of physical addresses of the pages that comprises given memory + * buffer. + * @param pg_num + * Number of elements in the paddr array. + * @param pg_shift + * LOG2 of the physical pages size. + * @return + * The pointer to the new allocated mempool, on success. NULL on error + * with rte_errno set appropriately. Possible rte_errno values include: + * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure + * - E_RTE_SECONDARY - function was called from a secondary process instance + * - EINVAL - cache size provided is too large + * - ENOSPC - the maximum number of memzones has already been allocated + * - EEXIST - a memzone with the same name already exists + * - ENOMEM - no appropriate memory area found in which to create memzone + */ +struct rte_mempool * +rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size, + unsigned cache_size, unsigned private_data_size, + rte_mempool_ctor_t *mp_init, void *mp_init_arg, + rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg, + int socket_id, unsigned flags, void *vaddr, + const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift); + +/** + * Create a new mempool named *name* in memory on Xen Dom0. + * + * This function uses ``rte_mempool_xmem_create()`` to allocate memory. The + * pool contains n elements of elt_size. Its size is set to n. + * All elements of the mempool are allocated together with the mempool header, + * and memory buffer can consist of set of disjoint physical pages. + * + * @param name + * The name of the mempool. + * @param n + * The number of elements in the mempool. The optimum size (in terms of + * memory usage) for a mempool is when n is a power of two minus one: + * n = (2^q - 1). 
+
+/**
+ * Create a new mempool named *name* in memory on Xen Dom0.
+ *
+ * This function uses ``rte_mempool_xmem_create()`` to allocate memory. The
+ * pool contains n elements of size elt_size; its size is set to n.
+ * All elements of the mempool are allocated together with the mempool header,
+ * and the memory buffer can consist of a set of disjoint physical pages.
+ *
+ * @param name
+ *   The name of the mempool.
+ * @param n
+ *   The number of elements in the mempool. The optimum size (in terms of
+ *   memory usage) for a mempool is when n is a power of two minus one:
+ *   n = (2^q - 1).
+ * @param elt_size
+ *   The size of each element.
+ * @param cache_size
+ *   If cache_size is non-zero, the rte_mempool library will try to
+ *   limit the accesses to the common lockless pool, by maintaining a
+ *   per-lcore object cache. This argument must be lower than or equal to
+ *   CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE. It is advised to choose
+ *   cache_size such that "n modulo cache_size == 0": if this is
+ *   not the case, some elements will always stay in the pool and will
+ *   never be used. Access to the per-lcore table is of course
+ *   faster than to the multi-producer/consumer pool. The cache can be
+ *   disabled if the cache_size argument is set to 0; this can be useful to
+ *   avoid losing objects in the cache. Note that even if not used, the
+ *   memory space for the cache is always reserved in a mempool structure,
+ *   except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
+ * @param private_data_size
+ *   The size of the private data appended after the mempool
+ *   structure. This is useful for storing some private data after the
+ *   mempool structure, as is done for rte_mbuf_pool for example.
+ * @param mp_init
+ *   A function pointer that is called for initialization of the pool,
+ *   before object initialization. The user can initialize the private
+ *   data in this function if needed. This parameter can be NULL if
+ *   not needed.
+ * @param mp_init_arg
+ *   An opaque pointer to data that can be used in the mempool
+ *   constructor function.
+ * @param obj_init
+ *   A function pointer that is called for each object at
+ *   initialization of the pool. The user can set some metadata in
+ *   objects if needed. This parameter can be NULL if not needed.
+ *   The obj_init() function takes the mempool pointer, the init_arg,
+ *   the object pointer and the object number as parameters.
+ * @param obj_init_arg
+ *   An opaque pointer to data that can be used as an argument for
+ *   each call to the object constructor function.
+ * @param socket_id
+ *   The *socket_id* argument is the socket identifier in the case of
+ *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
+ *   constraint for the reserved zone.
+ * @param flags
+ *   The *flags* argument is an OR of the following flags:
+ *   - MEMPOOL_F_NO_SPREAD: By default, object addresses are spread
+ *     between channels in RAM: the pool allocator will add padding
+ *     between objects depending on the hardware configuration. See
+ *     Memory alignment constraints for details. If this flag is set,
+ *     the allocator will just align them to a cache line.
+ *   - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are
+ *     cache-aligned. This flag removes this constraint, and no
+ *     padding will be present between objects. This flag implies
+ *     MEMPOOL_F_NO_SPREAD.
+ *   - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior
+ *     when using rte_mempool_put() or rte_mempool_put_bulk() is
+ *     "single-producer". Otherwise, it is "multi-producer".
+ *   - MEMPOOL_F_SC_GET: If this flag is set, the default behavior
+ *     when using rte_mempool_get() or rte_mempool_get_bulk() is
+ *     "single-consumer". Otherwise, it is "multi-consumer".
+ * @return
+ *   A pointer to the newly allocated mempool, on success. NULL on error
+ *   with rte_errno set appropriately. Possible rte_errno values include:
+ *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ *    - E_RTE_SECONDARY - function was called from a secondary process instance
+ *    - EINVAL - cache size provided is too large
+ *    - ENOSPC - the maximum number of memzones has already been allocated
+ *    - EEXIST - a memzone with the same name already exists
+ *    - ENOMEM - no appropriate memory area found in which to create memzone
+ */
+struct rte_mempool *
+rte_dom0_mempool_create(const char *name, unsigned n, unsigned elt_size,
+		unsigned cache_size, unsigned private_data_size,
+		rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+		rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
+		int socket_id, unsigned flags);
+
+
+/**
+ * Dump the status of the mempool to the console.
+ *
+ * @param f
+ *   A pointer to a file for output
+ * @param mp
+ *   A pointer to the mempool structure.
+ */
+void rte_mempool_dump(FILE *f, const struct rte_mempool *mp);
+
+/**
+ * @internal Put several objects back in the mempool; used internally.
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects).
+ * @param n
+ *   The number of objects to store back in the mempool, must be strictly
+ *   positive.
+ * @param is_mp
+ *   Single-producer (0) or multi-producer (1).
+ */
+static inline void __attribute__((always_inline))
+__mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
+		unsigned n, int is_mp)
+{
+#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
+	struct rte_mempool_cache *cache;
+	uint32_t index;
+	void **cache_objs;
+	unsigned lcore_id = rte_lcore_id();
+	uint32_t cache_size = mp->cache_size;
+	uint32_t flushthresh = mp->cache_flushthresh;
+#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
+
+	/* increment stat now, adding to the mempool always succeeds */
+	__MEMPOOL_STAT_ADD(mp, put, n);
+
+#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
+	/* cache is not enabled or single producer or non-EAL thread */
+	if (unlikely(cache_size == 0 || is_mp == 0 ||
+		lcore_id >= RTE_MAX_LCORE))
+		goto ring_enqueue;
+
+	/* Go straight to ring if put would overflow mem allocated for cache */
+	if (unlikely(n > RTE_MEMPOOL_CACHE_MAX_SIZE))
+		goto ring_enqueue;
+
+	cache = &mp->local_cache[lcore_id];
+	cache_objs = &cache->objs[cache->len];
+
+	/*
+	 * The cache follows this algorithm:
+	 *   1. Add the objects to the cache.
+	 *   2. If the cache length crosses the flush threshold, flush
+	 *      everything above cache_size back to the ring.
+	 */
+
+	/* Add elements back into the cache */
+	for (index = 0; index < n; ++index, obj_table++)
+		cache_objs[index] = *obj_table;
+
+	cache->len += n;
+
+	if (cache->len >= flushthresh) {
+		rte_ring_mp_enqueue_bulk(mp->ring, &cache->objs[cache_size],
+				cache->len - cache_size);
+		cache->len = cache_size;
+	}
+
+	return;
+
+ring_enqueue:
+#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
+
+	/* push remaining objects in ring */
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+	if (is_mp) {
+		if (rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n) < 0)
+			rte_panic("cannot put objects in mempool\n");
+	}
+	else {
+		if (rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n) < 0)
+			rte_panic("cannot put objects in mempool\n");
+	}
+#else
+	if (is_mp)
+		rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n);
+	else
+		rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n);
+#endif
+}
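
The flush logic above, worked through with concrete numbers (the
threshold value is illustrative; cache_flushthresh is derived from
cache_size when the pool is created):

	/* Suppose cache_size = 32, flushthresh = 48, cache->len = 30,
	 * and a put of n = 20 objects:
	 *   - the 20 objects are appended: cache->len = 50
	 *   - 50 >= 48, so the 50 - 32 = 18 objects sitting above
	 *     cache_size are flushed to the ring in one bulk enqueue
	 *   - cache->len is reset to cache_size = 32 */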
+
+
+/**
+ * Put several objects back in the mempool (multi-producers safe).
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects).
+ * @param n
+ *   The number of objects to add to the mempool from the obj_table.
+ */
+static inline void __attribute__((always_inline))
+rte_mempool_mp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
+			unsigned n)
+{
+	__mempool_check_cookies(mp, obj_table, n, 0);
+	__mempool_put_bulk(mp, obj_table, n, 1);
+}
+
+/**
+ * Put several objects back in the mempool (NOT multi-producers safe).
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects).
+ * @param n
+ *   The number of objects to add to the mempool from obj_table.
+ */
+static inline void
+rte_mempool_sp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
+			unsigned n)
+{
+	__mempool_check_cookies(mp, obj_table, n, 0);
+	__mempool_put_bulk(mp, obj_table, n, 0);
+}
+
+/**
+ * Put several objects back in the mempool.
+ *
+ * This function calls the multi-producer or the single-producer
+ * version depending on the default behavior that was specified at
+ * mempool creation time (see flags).
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects).
+ * @param n
+ *   The number of objects to add to the mempool from obj_table.
+ */
+static inline void __attribute__((always_inline))
+rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
+		unsigned n)
+{
+	__mempool_check_cookies(mp, obj_table, n, 0);
+	__mempool_put_bulk(mp, obj_table, n, !(mp->flags & MEMPOOL_F_SP_PUT));
+}
+
+/**
+ * Put one object back in the mempool (multi-producers safe).
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj
+ *   A pointer to the object to be added.
+ */
+static inline void __attribute__((always_inline))
+rte_mempool_mp_put(struct rte_mempool *mp, void *obj)
+{
+	rte_mempool_mp_put_bulk(mp, &obj, 1);
+}
+
+/**
+ * Put one object back in the mempool (NOT multi-producers safe).
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj
+ *   A pointer to the object to be added.
+ */
+static inline void __attribute__((always_inline))
+rte_mempool_sp_put(struct rte_mempool *mp, void *obj)
+{
+	rte_mempool_sp_put_bulk(mp, &obj, 1);
+}
+
+/**
+ * Put one object back in the mempool.
+ *
+ * This function calls the multi-producer or the single-producer
+ * version depending on the default behavior that was specified at
+ * mempool creation time (see flags).
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj
+ *   A pointer to the object to be added.
+ */
+static inline void __attribute__((always_inline))
+rte_mempool_put(struct rte_mempool *mp, void *obj)
+{
+	rte_mempool_put_bulk(mp, &obj, 1);
+}
+
+/**
+ * @internal Get several objects from the mempool; used internally.
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects).
+ * @param n
+ *   The number of objects to get, must be strictly positive.
+ * @param is_mc
+ *   Single-consumer (0) or multi-consumer (1).
+ * @return
+ *   - >=0: Success; number of objects supplied.
+ *   - <0: Error; code of ring dequeue function.
+ */
+static inline int __attribute__((always_inline))
+__mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
+		unsigned n, int is_mc)
+{
+	int ret;
+#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
+	struct rte_mempool_cache *cache;
+	uint32_t index, len;
+	void **cache_objs;
+	unsigned lcore_id = rte_lcore_id();
+	uint32_t cache_size = mp->cache_size;
+
+	/* cache is not enabled or single consumer */
+	if (unlikely(cache_size == 0 || is_mc == 0 ||
+		n >= cache_size || lcore_id >= RTE_MAX_LCORE))
+		goto ring_dequeue;
+
+	cache = &mp->local_cache[lcore_id];
+	cache_objs = cache->objs;
+
+	/* Can this be satisfied from the cache? */
+	if (cache->len < n) {
+		/* No. Backfill the cache first (the number needed to fill
+		 * the cache plus the request), then fill from it. */
+		uint32_t req = n + (cache_size - cache->len);
+
+		ret = rte_ring_mc_dequeue_bulk(mp->ring, &cache->objs[cache->len], req);
+		if (unlikely(ret < 0)) {
+			/*
+			 * In the off chance that we are buffer constrained,
+			 * where we are not able to allocate cache + n, go to
+			 * the ring directly. If that fails, we are truly out of
+			 * buffers.
+			 */
+			goto ring_dequeue;
+		}
+
+		cache->len += req;
+	}
+
+	/* Now fill in the response ... */
+	for (index = 0, len = cache->len - 1; index < n; ++index, len--, obj_table++)
+		*obj_table = cache_objs[len];
+
+	cache->len -= n;
+
+	__MEMPOOL_STAT_ADD(mp, get_success, n);
+
+	return 0;
+
+ring_dequeue:
+#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
+
+	/* get remaining objects from ring */
+	if (is_mc)
+		ret = rte_ring_mc_dequeue_bulk(mp->ring, obj_table, n);
+	else
+		ret = rte_ring_sc_dequeue_bulk(mp->ring, obj_table, n);
+
+	if (ret < 0)
+		__MEMPOOL_STAT_ADD(mp, get_fail, n);
+	else
+		__MEMPOOL_STAT_ADD(mp, get_success, n);
+
+	return ret;
+}
+
+/**
+ * Get several objects from the mempool (multi-consumers safe).
+ *
+ * If cache is enabled, objects will be retrieved first from cache,
+ * subsequently from the common pool. Note that it can return -ENOENT when
+ * the local cache and common pool are empty, even if the caches of other
+ * lcores are full.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ *   The number of objects to get from the mempool to obj_table.
+ * @return
+ *   - 0: Success; objects taken.
+ *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
+ */
+static inline int __attribute__((always_inline))
+rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
+{
+	int ret;
+	ret = __mempool_get_bulk(mp, obj_table, n, 1);
+	if (ret == 0)
+		__mempool_check_cookies(mp, obj_table, n, 1);
+	return ret;
+}
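
The backfill path above, worked through with concrete numbers
(illustrative):

	/* Suppose cache_size = 32, cache->len = 10, and a request of
	 * n = 20 objects:
	 *   - 10 < 20, so the cache is backfilled first:
	 *     req = 20 + (32 - 10) = 42 objects are dequeued from the
	 *     ring into the cache, making cache->len = 52
	 *   - the 20 requested objects are served from the top of the
	 *     cache, leaving cache->len = 32 (= cache_size)
	 * If the ring cannot supply all 42 objects, the code falls back
	 * to dequeuing exactly n = 20 objects directly from the ring. */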
+
+/**
+ * Get several objects from the mempool (NOT multi-consumers safe).
+ *
+ * If cache is enabled, objects will be retrieved first from cache,
+ * subsequently from the common pool. Note that it can return -ENOENT when
+ * the local cache and common pool are empty, even if the caches of other
+ * lcores are full.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ *   The number of objects to get from the mempool to obj_table.
+ * @return
+ *   - 0: Success; objects taken.
+ *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
+ */
+static inline int __attribute__((always_inline))
+rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
+{
+	int ret;
+	ret = __mempool_get_bulk(mp, obj_table, n, 0);
+	if (ret == 0)
+		__mempool_check_cookies(mp, obj_table, n, 1);
+	return ret;
+}
+
+/**
+ * Get several objects from the mempool.
+ *
+ * This function calls the multi-consumer or the single-consumer
+ * version, depending on the default behavior that was specified at
+ * mempool creation time (see flags).
+ *
+ * If cache is enabled, objects will be retrieved first from cache,
+ * subsequently from the common pool. Note that it can return -ENOENT when
+ * the local cache and common pool are empty, even if the caches of other
+ * lcores are full.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ *   The number of objects to get from the mempool to obj_table.
+ * @return
+ *   - 0: Success; objects taken.
+ *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
+ */
+static inline int __attribute__((always_inline))
+rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
+{
+	int ret;
+	ret = __mempool_get_bulk(mp, obj_table, n,
+			!(mp->flags & MEMPOOL_F_SC_GET));
+	if (ret == 0)
+		__mempool_check_cookies(mp, obj_table, n, 1);
+	return ret;
+}
+
+/**
+ * Get one object from the mempool (multi-consumers safe).
+ *
+ * If cache is enabled, objects will be retrieved first from cache,
+ * subsequently from the common pool. Note that it can return -ENOENT when
+ * the local cache and common pool are empty, even if the caches of other
+ * lcores are full.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj_p
+ *   A pointer to a void * pointer (object) that will be filled.
+ * @return
+ *   - 0: Success; objects taken.
+ *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
+ */
+static inline int __attribute__((always_inline))
+rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
+{
+	return rte_mempool_mc_get_bulk(mp, obj_p, 1);
+}
+
+/**
+ * Get one object from the mempool (NOT multi-consumers safe).
+ *
+ * If cache is enabled, objects will be retrieved first from cache,
+ * subsequently from the common pool. Note that it can return -ENOENT when
+ * the local cache and common pool are empty, even if the caches of other
+ * lcores are full.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj_p
+ *   A pointer to a void * pointer (object) that will be filled.
+ * @return
+ *   - 0: Success; objects taken.
+ *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
+ */
+static inline int __attribute__((always_inline))
+rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p)
+{
+	return rte_mempool_sc_get_bulk(mp, obj_p, 1);
+}
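
A typical burst-oriented get/use/put cycle built on the accessors above
(illustrative; process_obj() is a placeholder, not part of this patch):

	void *objs[32];
	unsigned i;

	/* On -ENOENT nothing is retrieved, so there is nothing to
	 * roll back. */
	if (rte_mempool_get_bulk(mp, objs, 32) < 0)
		return;
	for (i = 0; i < 32; i++)
		process_obj(objs[i]);
	/* Objects must eventually be returned to the same pool. */
	rte_mempool_put_bulk(mp, objs, 32);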
+
+/**
+ * Get one object from the mempool.
+ *
+ * This function calls the multi-consumer or the single-consumer
+ * version, depending on the default behavior that was specified at
+ * mempool creation (see flags).
+ *
+ * If cache is enabled, objects will be retrieved first from cache,
+ * subsequently from the common pool. Note that it can return -ENOENT when
+ * the local cache and common pool are empty, even if the caches of other
+ * lcores are full.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj_p
+ *   A pointer to a void * pointer (object) that will be filled.
+ * @return
+ *   - 0: Success; objects taken.
+ *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
+ */
+static inline int __attribute__((always_inline))
+rte_mempool_get(struct rte_mempool *mp, void **obj_p)
+{
+	return rte_mempool_get_bulk(mp, obj_p, 1);
+}
+
+/**
+ * Return the number of entries in the mempool.
+ *
+ * When cache is enabled, this function has to browse the per-lcore
+ * caches of all lcores, so it should not be used in a data path, but
+ * only for debug purposes.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @return
+ *   The number of entries in the mempool.
+ */
+unsigned rte_mempool_count(const struct rte_mempool *mp);
+
+/**
+ * Return the number of free entries in the mempool ring,
+ * i.e. how many entries can be freed back to the mempool.
+ *
+ * NOTE: This corresponds to the number of elements *allocated* from the
+ * memory pool, not the number of elements in the pool itself. To count
+ * the number of elements currently available in the pool, use
+ * "rte_mempool_count".
+ *
+ * When cache is enabled, this function has to browse the per-lcore
+ * caches of all lcores, so it should not be used in a data path, but
+ * only for debug purposes.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @return
+ *   The number of free entries in the mempool.
+ */
+static inline unsigned
+rte_mempool_free_count(const struct rte_mempool *mp)
+{
+	return mp->size - rte_mempool_count(mp);
+}
+
+/**
+ * Test if the mempool is full.
+ *
+ * When cache is enabled, this function has to browse the per-lcore
+ * caches of all lcores, so it should not be used in a data path, but
+ * only for debug purposes.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @return
+ *   - 1: The mempool is full.
+ *   - 0: The mempool is not full.
+ */
+static inline int
+rte_mempool_full(const struct rte_mempool *mp)
+{
+	return !!(rte_mempool_count(mp) == mp->size);
+}
+
+/**
+ * Test if the mempool is empty.
+ *
+ * When cache is enabled, this function has to browse the per-lcore
+ * caches of all lcores, so it should not be used in a data path, but
+ * only for debug purposes.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @return
+ *   - 1: The mempool is empty.
+ *   - 0: The mempool is not empty.
+ */
+static inline int
+rte_mempool_empty(const struct rte_mempool *mp)
+{
+	return !!(rte_mempool_count(mp) == 0);
+}
+
+/**
+ * Return the physical address of elt, which is an element of the pool mp.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param elt
+ *   A pointer (virtual address) to the element of the pool.
+ * @return
+ *   The physical address of the elt element.
+ */
+static inline phys_addr_t
+rte_mempool_virt2phy(const struct rte_mempool *mp, const void *elt)
+{
+	if (rte_eal_has_hugepages()) {
+		uintptr_t off;
+
+		off = (const char *)elt - (const char *)mp->elt_va_start;
+		return mp->elt_pa[off >> mp->pg_shift] + (off & mp->pg_mask);
+	} else {
+		/*
+		 * If huge pages are disabled, we cannot assume the
+		 * memory region to be physically contiguous.
+		 * Look up each element individually.
+		 */
+		return rte_mem_virt2phy(elt);
+	}
+}
+
+/**
+ * Check the consistency of mempool objects.
+ *
+ * Verify the coherency of fields in the mempool structure. Also check
+ * that the cookies of mempool objects (even the ones that are not
+ * present in the pool) have a correct value. If not, a panic will occur.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ */
+void rte_mempool_audit(const struct rte_mempool *mp);
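
An illustrative shutdown-time leak check built on the helpers above (the
message text is an assumption, not part of this patch):

	/* Every object should be back in the pool by now; anything still
	 * outstanding has been leaked. Debug only: rte_mempool_count()
	 * walks the caches of all lcores. */
	if (!rte_mempool_full(mp))
		printf("mempool %s: %u objects not returned\n",
			mp->name, rte_mempool_free_count(mp));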
+
+/**
+ * Return a pointer to the private data in a mempool structure.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @return
+ *   A pointer to the private data.
+ */
+static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
+{
+	return (char *)mp + MEMPOOL_HEADER_SIZE(mp, mp->pg_num);
+}
+
+/**
+ * Dump the status of all mempools on the console.
+ *
+ * @param f
+ *   A pointer to a file for output
+ */
+void rte_mempool_list_dump(FILE *f);
+
+/**
+ * Search for a mempool by its name.
+ *
+ * @param name
+ *   The name of the mempool.
+ * @return
+ *   The pointer to the mempool matching the name, or NULL if not found,
+ *   with rte_errno set appropriately. Possible rte_errno values include:
+ *    - ENOENT - required entry not available to return.
+ */
+struct rte_mempool *rte_mempool_lookup(const char *name);
+
+/**
+ * Get the header, trailer and total size of a mempool element.
+ *
+ * Given a desired size of the mempool element and mempool flags,
+ * calculates the header, trailer, body and total sizes of the mempool
+ * object.
+ *
+ * @param elt_size
+ *   The size of each element.
+ * @param flags
+ *   The flags used for the mempool creation.
+ *   Consult rte_mempool_create() for more information about possible values.
+ * @param sz
+ *   The calculated detailed size of the mempool object. May be NULL.
+ * @return
+ *   Total size of the mempool object.
+ */
+uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
+	struct rte_mempool_objsz *sz);
+
+/**
+ * Get the size of memory required to store mempool elements.
+ *
+ * Calculate the maximum amount of memory required to store the given
+ * number of objects. Assumes that the memory buffer will be aligned at a
+ * page boundary.
+ *
+ * Note that if the object size is bigger than the page size, then it is
+ * assumed that pages are grouped in subsets of physically contiguous
+ * pages big enough to store at least one object.
+ *
+ * @param elt_num
+ *   Number of elements.
+ * @param elt_sz
+ *   The size of each element.
+ * @param pg_shift
+ *   Log2 of the physical page size.
+ * @return
+ *   Required memory size aligned at page boundary.
+ */
+size_t rte_mempool_xmem_size(uint32_t elt_num, size_t elt_sz,
+	uint32_t pg_shift);
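
A sizing sketch using the two helpers above (values illustrative; 2 MB
pages as in the Dom0 path earlier in this patch):

	/* Full per-object footprint (header + element + trailer) of a
	 * 2 KB element with default flags. */
	uint32_t total_sz = rte_mempool_calc_obj_size(2048, 0, NULL);

	/* Page-aligned memory needed for 4095 such objects on 2 MB pages. */
	size_t sz = rte_mempool_xmem_size(4095, total_sz,
			rte_bsf32(RTE_PGSIZE_2M));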
+
+/**
+ * Get the size of memory required to store mempool elements.
+ *
+ * Calculate how much memory is actually required to store the given
+ * number of objects in the given memory buffer.
+ *
+ * @param vaddr
+ *   Virtual address of the externally allocated memory buffer.
+ *   Will be used to store mempool objects.
+ * @param elt_num
+ *   Number of elements.
+ * @param elt_sz
+ *   The size of each element.
+ * @param paddr
+ *   Array of physical addresses of the pages that comprise the given
+ *   memory buffer.
+ * @param pg_num
+ *   Number of elements in the paddr array.
+ * @param pg_shift
+ *   Log2 of the physical page size.
+ * @return
+ *   On success, the number of bytes needed to store the given number of
+ *   objects, aligned to the given page size. If the provided memory
+ *   buffer is too small, return a negative value whose absolute value
+ *   is the actual number of elements that can be stored in that buffer.
+ */
+ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,
+	const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift);
+
+/**
+ * Walk the list of all memory pools.
+ *
+ * @param func
+ *   Iterator function
+ * @param arg
+ *   Argument passed to iterator
+ */
+void rte_mempool_walk(void (*func)(const struct rte_mempool *, void *arg),
+		      void *arg);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MEMPOOL_H_ */
diff --git a/lib/librte_mempool/rte_mempool_version.map b/lib/librte_mempool/rte_mempool_version.map
new file mode 100644
index 00000000..17151e08
--- /dev/null
+++ b/lib/librte_mempool/rte_mempool_version.map
@@ -0,0 +1,19 @@
+DPDK_2.0 {
+	global:
+
+	rte_dom0_mempool_create;
+	rte_mempool_audit;
+	rte_mempool_calc_obj_size;
+	rte_mempool_count;
+	rte_mempool_create;
+	rte_mempool_dump;
+	rte_mempool_list_dump;
+	rte_mempool_lookup;
+	rte_mempool_obj_iter;
+	rte_mempool_walk;
+	rte_mempool_xmem_create;
+	rte_mempool_xmem_size;
+	rte_mempool_xmem_usage;
+
+	local: *;
+};
-- cgit 1.2.3-korg