From b63264c8342e6a1b6971c79550d2af2024b6a4de Mon Sep 17 00:00:00 2001
From: Luca Boccassi
Date: Tue, 14 Aug 2018 18:52:30 +0100
Subject: New upstream version 18.08

Change-Id: I32fdf5e5016556d9c0a6d88ddaf1fc468961790a
Signed-off-by: Luca Boccassi
---
 lib/librte_mempool/Makefile                  |   7 +-
 lib/librte_mempool/meson.build               |  18 +-
 lib/librte_mempool/rte_mempool.c             | 546 ++++++++++++------------
 lib/librte_mempool/rte_mempool.h             | 588 ++++++++++++++++-----------
 lib/librte_mempool/rte_mempool_ops.c         |  51 ++-
 lib/librte_mempool/rte_mempool_ops_default.c |  70 ++++
 lib/librte_mempool/rte_mempool_version.map   |  23 +-
 7 files changed, 743 insertions(+), 560 deletions(-)
 create mode 100644 lib/librte_mempool/rte_mempool_ops_default.c

diff --git a/lib/librte_mempool/Makefile b/lib/librte_mempool/Makefile
index 24e735a3..20bf63fb 100644
--- a/lib/librte_mempool/Makefile
+++ b/lib/librte_mempool/Makefile
@@ -7,15 +7,20 @@ include $(RTE_SDK)/mk/rte.vars.mk
 LIB = librte_mempool.a

 CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+CFLAGS += -DALLOW_EXPERIMENTAL_API
 LDLIBS += -lrte_eal -lrte_ring

 EXPORT_MAP := rte_mempool_version.map

-LIBABIVER := 3
+LIBABIVER := 5
+
+# memseg walk is not yet part of stable API
+CFLAGS += -DALLOW_EXPERIMENTAL_API

 # all source are stored in SRCS-y
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool.c
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool_ops_default.c

 # install includes
 SYMLINK-$(CONFIG_RTE_LIBRTE_MEMPOOL)-include := rte_mempool.h

diff --git a/lib/librte_mempool/meson.build b/lib/librte_mempool/meson.build
index 7a4f3dae..38d7ae89 100644
--- a/lib/librte_mempool/meson.build
+++ b/lib/librte_mempool/meson.build
@@ -1,7 +1,21 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2017 Intel Corporation

-version = 2
-sources = files('rte_mempool.c', 'rte_mempool_ops.c')
+allow_experimental_apis = true
+
+extra_flags = []
+
+foreach flag: extra_flags
+	if cc.has_argument(flag)
+		cflags += flag
+	endif
+endforeach
+
+version = 5
+sources = files('rte_mempool.c', 'rte_mempool_ops.c',
+		'rte_mempool_ops_default.c')
 headers = files('rte_mempool.h')
 deps += ['ring']
+
+# memseg walk is not yet part of stable API
+allow_experimental_apis = true

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 54f7f4ba..03e6b5f7 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -3,6 +3,7 @@
  * Copyright(c) 2016 6WIND S.A.
  */

+#include <stdbool.h>
 #include
 #include
 #include
@@ -98,8 +99,31 @@ static unsigned optimize_object_size(unsigned obj_size)
 	return new_obj_size * RTE_MEMPOOL_ALIGN;
 }

+static int
+find_min_pagesz(const struct rte_memseg_list *msl, void *arg)
+{
+	size_t *min = arg;
+
+	if (msl->page_sz < *min)
+		*min = msl->page_sz;
+
+	return 0;
+}
+
+static size_t
+get_min_page_size(void)
+{
+	size_t min_pagesz = SIZE_MAX;
+
+	rte_memseg_list_walk(find_min_pagesz, &min_pagesz);
+
+	return min_pagesz == SIZE_MAX ?
(size_t) getpagesize() : min_pagesz; +} + + static void -mempool_add_elem(struct rte_mempool *mp, void *obj, rte_iova_t iova) +mempool_add_elem(struct rte_mempool *mp, __rte_unused void *opaque, + void *obj, rte_iova_t iova) { struct rte_mempool_objhdr *hdr; struct rte_mempool_objtlr *tlr __rte_unused; @@ -116,9 +140,6 @@ mempool_add_elem(struct rte_mempool *mp, void *obj, rte_iova_t iova) tlr = __mempool_get_trailer(obj); tlr->cookie = RTE_MEMPOOL_TRAILER_COOKIE; #endif - - /* enqueue in ring */ - rte_mempool_ops_enqueue_bulk(mp, &obj, 1); } /* call obj_cb() for each mempool element */ @@ -204,92 +225,6 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags, return sz->total_size; } - -/* - * Calculate maximum amount of memory required to store given number of objects. - */ -size_t -rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift, - unsigned int flags) -{ - size_t obj_per_page, pg_num, pg_sz; - unsigned int mask; - - mask = MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS | MEMPOOL_F_CAPA_PHYS_CONTIG; - if ((flags & mask) == mask) - /* alignment need one additional object */ - elt_num += 1; - - if (total_elt_sz == 0) - return 0; - - if (pg_shift == 0) - return total_elt_sz * elt_num; - - pg_sz = (size_t)1 << pg_shift; - obj_per_page = pg_sz / total_elt_sz; - if (obj_per_page == 0) - return RTE_ALIGN_CEIL(total_elt_sz, pg_sz) * elt_num; - - pg_num = (elt_num + obj_per_page - 1) / obj_per_page; - return pg_num << pg_shift; -} - -/* - * Calculate how much memory would be actually required with the - * given memory footprint to store required number of elements. - */ -ssize_t -rte_mempool_xmem_usage(__rte_unused void *vaddr, uint32_t elt_num, - size_t total_elt_sz, const rte_iova_t iova[], uint32_t pg_num, - uint32_t pg_shift, unsigned int flags) -{ - uint32_t elt_cnt = 0; - rte_iova_t start, end; - uint32_t iova_idx; - size_t pg_sz = (size_t)1 << pg_shift; - unsigned int mask; - - mask = MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS | MEMPOOL_F_CAPA_PHYS_CONTIG; - if ((flags & mask) == mask) - /* alignment need one additional object */ - elt_num += 1; - - /* if iova is NULL, assume contiguous memory */ - if (iova == NULL) { - start = 0; - end = pg_sz * pg_num; - iova_idx = pg_num; - } else { - start = iova[0]; - end = iova[0] + pg_sz; - iova_idx = 1; - } - while (elt_cnt < elt_num) { - - if (end - start >= total_elt_sz) { - /* enough contiguous memory, add an object */ - start += total_elt_sz; - elt_cnt++; - } else if (iova_idx < pg_num) { - /* no room to store one obj, add a page */ - if (end == iova[iova_idx]) { - end += pg_sz; - } else { - start = iova[iova_idx]; - end = iova[iova_idx] + pg_sz; - } - iova_idx++; - - } else { - /* no more page, return how many elements fit */ - return -(size_t)elt_cnt; - } - } - - return (size_t)iova_idx << pg_shift; -} - /* free a memchunk allocated with rte_memzone_reserve() */ static void rte_mempool_memchunk_mz_free(__rte_unused struct rte_mempool_memhdr *memhdr, @@ -323,6 +258,21 @@ rte_mempool_free_memchunks(struct rte_mempool *mp) } } +static int +mempool_ops_alloc_once(struct rte_mempool *mp) +{ + int ret; + + /* create the internal ring if not already done */ + if ((mp->flags & MEMPOOL_F_POOL_CREATED) == 0) { + ret = rte_mempool_ops_alloc(mp); + if (ret != 0) + return ret; + mp->flags |= MEMPOOL_F_POOL_CREATED; + } + return 0; +} + /* Add objects in the pool, using a physically contiguous memory * zone. Return the number of objects added, or a negative value * on error. 
@@ -332,51 +282,19 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr, rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb, void *opaque) { - unsigned total_elt_sz; - unsigned int mp_capa_flags; unsigned i = 0; size_t off; struct rte_mempool_memhdr *memhdr; int ret; - /* create the internal ring if not already done */ - if ((mp->flags & MEMPOOL_F_POOL_CREATED) == 0) { - ret = rte_mempool_ops_alloc(mp); - if (ret != 0) - return ret; - mp->flags |= MEMPOOL_F_POOL_CREATED; - } - - /* Notify memory area to mempool */ - ret = rte_mempool_ops_register_memory_area(mp, vaddr, iova, len); - if (ret != -ENOTSUP && ret < 0) + ret = mempool_ops_alloc_once(mp); + if (ret != 0) return ret; /* mempool is already populated */ if (mp->populated_size >= mp->size) return -ENOSPC; - total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size; - - /* Get mempool capabilities */ - mp_capa_flags = 0; - ret = rte_mempool_ops_get_capabilities(mp, &mp_capa_flags); - if ((ret < 0) && (ret != -ENOTSUP)) - return ret; - - /* update mempool capabilities */ - mp->flags |= mp_capa_flags; - - /* Detect pool area has sufficient space for elements */ - if (mp_capa_flags & MEMPOOL_F_CAPA_PHYS_CONTIG) { - if (len < total_elt_sz * mp->size) { - RTE_LOG(ERR, MEMPOOL, - "pool area %" PRIx64 " not enough\n", - (uint64_t)len); - return -ENOSPC; - } - } - memhdr = rte_zmalloc("MEMPOOL_MEMHDR", sizeof(*memhdr), 0); if (memhdr == NULL) return -ENOMEM; @@ -388,89 +306,34 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr, memhdr->free_cb = free_cb; memhdr->opaque = opaque; - if (mp_capa_flags & MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS) - /* align object start address to a multiple of total_elt_sz */ - off = total_elt_sz - ((uintptr_t)vaddr % total_elt_sz); - else if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN) + if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN) off = RTE_PTR_ALIGN_CEIL(vaddr, 8) - vaddr; else off = RTE_PTR_ALIGN_CEIL(vaddr, RTE_CACHE_LINE_SIZE) - vaddr; - while (off + total_elt_sz <= len && mp->populated_size < mp->size) { - off += mp->header_size; - if (iova == RTE_BAD_IOVA) - mempool_add_elem(mp, (char *)vaddr + off, - RTE_BAD_IOVA); - else - mempool_add_elem(mp, (char *)vaddr + off, iova + off); - off += mp->elt_size + mp->trailer_size; - i++; + if (off > len) { + ret = -EINVAL; + goto fail; } + i = rte_mempool_ops_populate(mp, mp->size - mp->populated_size, + (char *)vaddr + off, + (iova == RTE_BAD_IOVA) ? RTE_BAD_IOVA : (iova + off), + len - off, mempool_add_elem, NULL); + /* not enough room to store one object */ - if (i == 0) - return -EINVAL; + if (i == 0) { + ret = -EINVAL; + goto fail; + } STAILQ_INSERT_TAIL(&mp->mem_list, memhdr, next); mp->nb_mem_chunks++; return i; -} - -int -rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr, - phys_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb, - void *opaque) -{ - return rte_mempool_populate_iova(mp, vaddr, paddr, len, free_cb, opaque); -} - -/* Add objects in the pool, using a table of physical pages. Return the - * number of objects added, or a negative value on error. 
- */ -int -rte_mempool_populate_iova_tab(struct rte_mempool *mp, char *vaddr, - const rte_iova_t iova[], uint32_t pg_num, uint32_t pg_shift, - rte_mempool_memchunk_free_cb_t *free_cb, void *opaque) -{ - uint32_t i, n; - int ret, cnt = 0; - size_t pg_sz = (size_t)1 << pg_shift; - - /* mempool must not be populated */ - if (mp->nb_mem_chunks != 0) - return -EEXIST; - - if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG) - return rte_mempool_populate_iova(mp, vaddr, RTE_BAD_IOVA, - pg_num * pg_sz, free_cb, opaque); - - for (i = 0; i < pg_num && mp->populated_size < mp->size; i += n) { - - /* populate with the largest group of contiguous pages */ - for (n = 1; (i + n) < pg_num && - iova[i + n - 1] + pg_sz == iova[i + n]; n++) - ; - - ret = rte_mempool_populate_iova(mp, vaddr + i * pg_sz, - iova[i], n * pg_sz, free_cb, opaque); - if (ret < 0) { - rte_mempool_free_memchunks(mp); - return ret; - } - /* no need to call the free callback for next chunks */ - free_cb = NULL; - cnt += ret; - } - return cnt; -} -int -rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr, - const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift, - rte_mempool_memchunk_free_cb_t *free_cb, void *opaque) -{ - return rte_mempool_populate_iova_tab(mp, vaddr, paddr, pg_num, pg_shift, - free_cb, opaque); +fail: + rte_free(memhdr); + return ret; } /* Populate the mempool with a virtual area. Return the number of @@ -485,16 +348,13 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr, size_t off, phys_len; int ret, cnt = 0; - /* mempool must not be populated */ - if (mp->nb_mem_chunks != 0) - return -EEXIST; /* address and len must be page-aligned */ if (RTE_PTR_ALIGN_CEIL(addr, pg_sz) != addr) return -EINVAL; if (RTE_ALIGN_CEIL(len, pg_sz) != len) return -EINVAL; - if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG) + if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG) return rte_mempool_populate_iova(mp, addr, RTE_BAD_IOVA, len, free_cb, opaque); @@ -544,39 +404,94 @@ rte_mempool_populate_default(struct rte_mempool *mp) unsigned int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY; char mz_name[RTE_MEMZONE_NAMESIZE]; const struct rte_memzone *mz; - size_t size, total_elt_sz, align, pg_sz, pg_shift; + ssize_t mem_size; + size_t align, pg_sz, pg_shift; rte_iova_t iova; unsigned mz_id, n; - unsigned int mp_flags; int ret; + bool no_contig, try_contig, no_pageshift; + + ret = mempool_ops_alloc_once(mp); + if (ret != 0) + return ret; /* mempool must not be populated */ if (mp->nb_mem_chunks != 0) return -EEXIST; - /* Get mempool capabilities */ - mp_flags = 0; - ret = rte_mempool_ops_get_capabilities(mp, &mp_flags); - if ((ret < 0) && (ret != -ENOTSUP)) - return ret; + no_contig = mp->flags & MEMPOOL_F_NO_IOVA_CONTIG; - /* update mempool capabilities */ - mp->flags |= mp_flags; + /* + * the following section calculates page shift and page size values. + * + * these values impact the result of calc_mem_size operation, which + * returns the amount of memory that should be allocated to store the + * desired number of objects. when not zero, it allocates more memory + * for the padding between objects, to ensure that an object does not + * cross a page boundary. in other words, page size/shift are to be set + * to zero if mempool elements won't care about page boundaries. + * there are several considerations for page size and page shift here. 
+ * + * if we don't need our mempools to have physically contiguous objects, + * then just set page shift and page size to 0, because the user has + * indicated that there's no need to care about anything. + * + * if we do need contiguous objects, there is also an option to reserve + * the entire mempool memory as one contiguous block of memory, in + * which case the page shift and alignment wouldn't matter as well. + * + * if we require contiguous objects, but not necessarily the entire + * mempool reserved space to be contiguous, then there are two options. + * + * if our IO addresses are virtual, not actual physical (IOVA as VA + * case), then no page shift needed - our memory allocation will give us + * contiguous IO memory as far as the hardware is concerned, so + * act as if we're getting contiguous memory. + * + * if our IO addresses are physical, we may get memory from bigger + * pages, or we might get memory from smaller pages, and how much of it + * we require depends on whether we want bigger or smaller pages. + * However, requesting each and every memory size is too much work, so + * what we'll do instead is walk through the page sizes available, pick + * the smallest one and set up page shift to match that one. We will be + * wasting some space this way, but it's much nicer than looping around + * trying to reserve each and every page size. + * + * However, since size calculation will produce page-aligned sizes, it + * makes sense to first try and see if we can reserve the entire memzone + * in one contiguous chunk as well (otherwise we might end up wasting a + * 1G page on a 10MB memzone). If we fail to get enough contiguous + * memory, then we'll go and reserve space page-by-page. + */ + no_pageshift = no_contig || rte_eal_iova_mode() == RTE_IOVA_VA; + try_contig = !no_contig && !no_pageshift && rte_eal_has_hugepages(); - if (rte_eal_has_hugepages()) { - pg_shift = 0; /* not needed, zone is physically contiguous */ + if (no_pageshift) { pg_sz = 0; - align = RTE_CACHE_LINE_SIZE; + pg_shift = 0; + } else if (try_contig) { + pg_sz = get_min_page_size(); + pg_shift = rte_bsf32(pg_sz); } else { pg_sz = getpagesize(); pg_shift = rte_bsf32(pg_sz); - align = pg_sz; } - total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size; for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) { - size = rte_mempool_xmem_size(n, total_elt_sz, pg_shift, - mp->flags); + size_t min_chunk_size; + unsigned int flags; + + if (try_contig || no_pageshift) + mem_size = rte_mempool_ops_calc_mem_size(mp, n, + 0, &min_chunk_size, &align); + else + mem_size = rte_mempool_ops_calc_mem_size(mp, n, + pg_shift, &min_chunk_size, &align); + + if (mem_size < 0) { + ret = mem_size; + goto fail; + } ret = snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_MZ_FORMAT "_%d", mp->name, mz_id); @@ -585,30 +500,70 @@ rte_mempool_populate_default(struct rte_mempool *mp) goto fail; } - mz = rte_memzone_reserve_aligned(mz_name, size, - mp->socket_id, mz_flags, align); - /* not enough memory, retry with the biggest zone we have */ - if (mz == NULL) + flags = mz_flags; + + /* if we're trying to reserve contiguous memory, add appropriate + * memzone flag. + */ + if (try_contig) + flags |= RTE_MEMZONE_IOVA_CONTIG; + + mz = rte_memzone_reserve_aligned(mz_name, mem_size, + mp->socket_id, flags, align); + + /* if we were trying to allocate contiguous memory, failed and + * minimum required contiguous chunk fits minimum page, adjust + * memzone size to the page size, and try again. 
+ */ + if (mz == NULL && try_contig && min_chunk_size <= pg_sz) { + try_contig = false; + flags &= ~RTE_MEMZONE_IOVA_CONTIG; + + mem_size = rte_mempool_ops_calc_mem_size(mp, n, + pg_shift, &min_chunk_size, &align); + if (mem_size < 0) { + ret = mem_size; + goto fail; + } + + mz = rte_memzone_reserve_aligned(mz_name, mem_size, + mp->socket_id, flags, align); + } + /* don't try reserving with 0 size if we were asked to reserve + * IOVA-contiguous memory. + */ + if (min_chunk_size < (size_t)mem_size && mz == NULL) { + /* not enough memory, retry with the biggest zone we + * have + */ mz = rte_memzone_reserve_aligned(mz_name, 0, - mp->socket_id, mz_flags, align); + mp->socket_id, flags, + RTE_MAX(pg_sz, align)); + } if (mz == NULL) { ret = -rte_errno; goto fail; } - if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG) + if (mz->len < min_chunk_size) { + rte_memzone_free(mz); + ret = -ENOMEM; + goto fail; + } + + if (no_contig) iova = RTE_BAD_IOVA; else iova = mz->iova; - if (rte_eal_has_hugepages()) + if (no_pageshift || try_contig) ret = rte_mempool_populate_iova(mp, mz->addr, iova, mz->len, rte_mempool_memchunk_mz_free, (void *)(uintptr_t)mz); else ret = rte_mempool_populate_virt(mp, mz->addr, - mz->len, pg_sz, + RTE_ALIGN_FLOOR(mz->len, pg_sz), pg_sz, rte_mempool_memchunk_mz_free, (void *)(uintptr_t)mz); if (ret < 0) { @@ -625,16 +580,18 @@ rte_mempool_populate_default(struct rte_mempool *mp) } /* return the memory size required for mempool objects in anonymous mem */ -static size_t +static ssize_t get_anon_size(const struct rte_mempool *mp) { - size_t size, total_elt_sz, pg_sz, pg_shift; + ssize_t size; + size_t pg_sz, pg_shift; + size_t min_chunk_size; + size_t align; pg_sz = getpagesize(); pg_shift = rte_bsf32(pg_sz); - total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size; - size = rte_mempool_xmem_size(mp->size, total_elt_sz, pg_shift, - mp->flags); + size = rte_mempool_ops_calc_mem_size(mp, mp->size, pg_shift, + &min_chunk_size, &align); return size; } @@ -644,25 +601,45 @@ static void rte_mempool_memchunk_anon_free(struct rte_mempool_memhdr *memhdr, void *opaque) { - munmap(opaque, get_anon_size(memhdr->mp)); + ssize_t size; + + /* + * Calculate size since memhdr->len has contiguous chunk length + * which may be smaller if anon map is split into many contiguous + * chunks. Result must be the same as we calculated on populate. 
+ */ + size = get_anon_size(memhdr->mp); + if (size < 0) + return; + + munmap(opaque, size); } /* populate the mempool with an anonymous mapping */ int rte_mempool_populate_anon(struct rte_mempool *mp) { - size_t size; + ssize_t size; int ret; char *addr; /* mempool is already populated, error */ - if (!STAILQ_EMPTY(&mp->mem_list)) { + if ((!STAILQ_EMPTY(&mp->mem_list)) || mp->nb_mem_chunks != 0) { rte_errno = EINVAL; return 0; } - /* get chunk of virtually continuous memory */ + ret = mempool_ops_alloc_once(mp); + if (ret != 0) + return ret; + size = get_anon_size(mp); + if (size < 0) { + rte_errno = -size; + return 0; + } + + /* get chunk of virtually continuous memory */ addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0); if (addr == MAP_FAILED) { @@ -795,6 +772,12 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size, mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list); + /* asked for zero items */ + if (n == 0) { + rte_errno = EINVAL; + return NULL; + } + /* asked cache too big */ if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE || CALC_CACHE_FLUSHTHRESH(cache_size) > n) { @@ -944,66 +927,6 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size, return NULL; } -/* - * Create the mempool over already allocated chunk of memory. - * That external memory buffer can consists of physically disjoint pages. - * Setting vaddr to NULL, makes mempool to fallback to rte_mempool_create() - * behavior. - */ -struct rte_mempool * -rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size, - unsigned cache_size, unsigned private_data_size, - rte_mempool_ctor_t *mp_init, void *mp_init_arg, - rte_mempool_obj_cb_t *obj_init, void *obj_init_arg, - int socket_id, unsigned flags, void *vaddr, - const rte_iova_t iova[], uint32_t pg_num, uint32_t pg_shift) -{ - struct rte_mempool *mp = NULL; - int ret; - - /* no virtual address supplied, use rte_mempool_create() */ - if (vaddr == NULL) - return rte_mempool_create(name, n, elt_size, cache_size, - private_data_size, mp_init, mp_init_arg, - obj_init, obj_init_arg, socket_id, flags); - - /* check that we have both VA and PA */ - if (iova == NULL) { - rte_errno = EINVAL; - return NULL; - } - - /* Check that pg_shift parameter is valid. 
*/ - if (pg_shift > MEMPOOL_PG_SHIFT_MAX) { - rte_errno = EINVAL; - return NULL; - } - - mp = rte_mempool_create_empty(name, n, elt_size, cache_size, - private_data_size, socket_id, flags); - if (mp == NULL) - return NULL; - - /* call the mempool priv initializer */ - if (mp_init) - mp_init(mp, mp_init_arg); - - ret = rte_mempool_populate_iova_tab(mp, vaddr, iova, pg_num, pg_shift, - NULL, NULL); - if (ret < 0 || ret != (int)mp->size) - goto fail; - - /* call the object initializers */ - if (obj_init) - rte_mempool_obj_iter(mp, obj_init, obj_init_arg); - - return mp; - - fail: - rte_mempool_free(mp); - return NULL; -} - /* Return the number of entries in the mempool */ unsigned int rte_mempool_avail_count(const struct rte_mempool *mp) @@ -1132,6 +1055,36 @@ void rte_mempool_check_cookies(const struct rte_mempool *mp, #endif } +void +rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp, + void * const *first_obj_table_const, unsigned int n, int free) +{ +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG + struct rte_mempool_info info; + const size_t total_elt_sz = + mp->header_size + mp->elt_size + mp->trailer_size; + unsigned int i, j; + + rte_mempool_ops_get_info(mp, &info); + + for (i = 0; i < n; ++i) { + void *first_obj = first_obj_table_const[i]; + + for (j = 0; j < info.contig_block_size; ++j) { + void *obj; + + obj = (void *)((uintptr_t)first_obj + j * total_elt_sz); + rte_mempool_check_cookies(mp, &obj, 1, free); + } + } +#else + RTE_SET_USED(mp); + RTE_SET_USED(first_obj_table_const); + RTE_SET_USED(n); + RTE_SET_USED(free); +#endif +} + #ifdef RTE_LIBRTE_MEMPOOL_DEBUG static void mempool_obj_audit(struct rte_mempool *mp, __rte_unused void *opaque, @@ -1197,6 +1150,7 @@ void rte_mempool_dump(FILE *f, struct rte_mempool *mp) { #ifdef RTE_LIBRTE_MEMPOOL_DEBUG + struct rte_mempool_info info; struct rte_mempool_debug_stats sum; unsigned lcore_id; #endif @@ -1238,6 +1192,7 @@ rte_mempool_dump(FILE *f, struct rte_mempool *mp) /* sum and dump statistics */ #ifdef RTE_LIBRTE_MEMPOOL_DEBUG + rte_mempool_ops_get_info(mp, &info); memset(&sum, 0, sizeof(sum)); for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { sum.put_bulk += mp->stats[lcore_id].put_bulk; @@ -1246,6 +1201,8 @@ rte_mempool_dump(FILE *f, struct rte_mempool *mp) sum.get_success_objs += mp->stats[lcore_id].get_success_objs; sum.get_fail_bulk += mp->stats[lcore_id].get_fail_bulk; sum.get_fail_objs += mp->stats[lcore_id].get_fail_objs; + sum.get_success_blks += mp->stats[lcore_id].get_success_blks; + sum.get_fail_blks += mp->stats[lcore_id].get_fail_blks; } fprintf(f, " stats:\n"); fprintf(f, " put_bulk=%"PRIu64"\n", sum.put_bulk); @@ -1254,6 +1211,11 @@ rte_mempool_dump(FILE *f, struct rte_mempool *mp) fprintf(f, " get_success_objs=%"PRIu64"\n", sum.get_success_objs); fprintf(f, " get_fail_bulk=%"PRIu64"\n", sum.get_fail_bulk); fprintf(f, " get_fail_objs=%"PRIu64"\n", sum.get_fail_objs); + if (info.contig_block_size > 0) { + fprintf(f, " get_success_blks=%"PRIu64"\n", + sum.get_success_blks); + fprintf(f, " get_fail_blks=%"PRIu64"\n", sum.get_fail_blks); + } #else fprintf(f, " no statistics available\n"); #endif diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h index 8b1b7f7e..7c9cd9a2 100644 --- a/lib/librte_mempool/rte_mempool.h +++ b/lib/librte_mempool/rte_mempool.h @@ -70,6 +70,10 @@ struct rte_mempool_debug_stats { uint64_t get_success_objs; /**< Objects successfully allocated. */ uint64_t get_fail_bulk; /**< Failed allocation number. 
*/ uint64_t get_fail_objs; /**< Objects that failed to be allocated. */ + /** Successful allocation number of contiguous blocks. */ + uint64_t get_success_blks; + /** Failed allocation number of contiguous blocks. */ + uint64_t get_fail_blks; } __rte_cache_aligned; #endif @@ -189,6 +193,20 @@ struct rte_mempool_memhdr { void *opaque; /**< Argument passed to the free callback */ }; +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * Additional information about the mempool + * + * The structure is cache-line aligned to avoid ABI breakages in + * a number of cases when something small is added. + */ +struct rte_mempool_info { + /** Number of objects in the contiguous block */ + unsigned int contig_block_size; +} __rte_cache_aligned; + /** * The RTE mempool structure. */ @@ -244,25 +262,8 @@ struct rte_mempool { #define MEMPOOL_F_SP_PUT 0x0004 /**< Default put is "single-producer".*/ #define MEMPOOL_F_SC_GET 0x0008 /**< Default get is "single-consumer".*/ #define MEMPOOL_F_POOL_CREATED 0x0010 /**< Internal: pool is created. */ -#define MEMPOOL_F_NO_PHYS_CONTIG 0x0020 /**< Don't need physically contiguous objs. */ -/** - * This capability flag is advertised by a mempool handler, if the whole - * memory area containing the objects must be physically contiguous. - * Note: This flag should not be passed by application. - */ -#define MEMPOOL_F_CAPA_PHYS_CONTIG 0x0040 -/** - * This capability flag is advertised by a mempool handler. Used for a case - * where mempool driver wants object start address(vaddr) aligned to block - * size(/ total element size). - * - * Note: - * - This flag should not be passed by application. - * Flag used for mempool driver only. - * - Mempool driver must also set MEMPOOL_F_CAPA_PHYS_CONTIG flag along with - * MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS. - */ -#define MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS 0x0080 +#define MEMPOOL_F_NO_IOVA_CONTIG 0x0020 /**< Don't need IOVA contiguous objs. */ +#define MEMPOOL_F_NO_PHYS_CONTIG MEMPOOL_F_NO_IOVA_CONTIG /* deprecated */ /** * @internal When debug is enabled, store some statistics. @@ -282,8 +283,16 @@ struct rte_mempool { mp->stats[__lcore_id].name##_bulk += 1; \ } \ } while(0) +#define __MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, name, n) do { \ + unsigned int __lcore_id = rte_lcore_id(); \ + if (__lcore_id < RTE_MAX_LCORE) { \ + mp->stats[__lcore_id].name##_blks += n; \ + mp->stats[__lcore_id].name##_bulk += 1; \ + } \ + } while (0) #else #define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0) +#define __MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, name, n) do {} while (0) #endif /** @@ -351,6 +360,38 @@ void rte_mempool_check_cookies(const struct rte_mempool *mp, #define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0) #endif /* RTE_LIBRTE_MEMPOOL_DEBUG */ +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * @internal Check contiguous object blocks and update cookies or panic. + * + * @param mp + * Pointer to the memory pool. + * @param first_obj_table_const + * Pointer to a table of void * pointers (first object of the contiguous + * object blocks). + * @param n + * Number of contiguous object blocks. 
+ * @param free
+ *   - 0: object is supposed to be allocated, mark it as free
+ *   - 1: object is supposed to be free, mark it as allocated
+ *   - 2: just check that cookie is valid (free or allocated)
+ */
+void rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp,
+	void * const *first_obj_table_const, unsigned int n, int free);
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+#define __mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
+					      free) \
+	rte_mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
+						free)
+#else
+#define __mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
+					      free) \
+	do {} while (0)
+#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
+
 #define RTE_MEMPOOL_OPS_NAMESIZE 32 /**< Max length of ops struct name. */

 /**
@@ -382,22 +423,136 @@ typedef int (*rte_mempool_enqueue_t)(struct rte_mempool *mp,
 typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
 		void **obj_table, unsigned int n);

+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dequeue a number of contiguous object blocks from the external pool.
+ */
+typedef int (*rte_mempool_dequeue_contig_blocks_t)(struct rte_mempool *mp,
+		void **first_obj_table, unsigned int n);
+
 /**
  * Return the number of available objects in the external pool.
  */
 typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);

 /**
- * Get the mempool capabilities.
+ * Calculate memory size required to store given number of objects.
+ *
+ * If mempool objects are not required to be IOVA-contiguous
+ * (the flag MEMPOOL_F_NO_IOVA_CONTIG is set), min_chunk_size defines
+ * virtually contiguous chunk size. Otherwise, if mempool objects must
+ * be IOVA-contiguous (the flag MEMPOOL_F_NO_IOVA_CONTIG is clear),
+ * min_chunk_size defines IOVA-contiguous chunk size.
+ *
+ * @param[in] mp
+ *   Pointer to the memory pool.
+ * @param[in] obj_num
+ *   Number of objects.
+ * @param[in] pg_shift
+ *   LOG2 of the physical pages size. If set to 0, ignore page boundaries.
+ * @param[out] min_chunk_size
+ *   Location for minimum size of the memory chunk which may be used to
+ *   store memory pool objects.
+ * @param[out] align
+ *   Location for required memory chunk alignment.
+ * @return
+ *   Required memory size aligned at page boundary.
+ */
+typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,
+		uint32_t obj_num, uint32_t pg_shift,
+		size_t *min_chunk_size, size_t *align);
+
+/**
+ * Default way to calculate memory size required to store given number of
+ * objects.
+ *
+ * If page boundaries may be ignored, it is just a product of total
+ * object size including header and trailer and number of objects.
+ * Otherwise, it is a number of pages required to store given number of
+ * objects without crossing page boundary.
+ *
+ * Note that if object size is bigger than page size, then it assumes
+ * that pages are grouped in subsets of physically continuous pages big
+ * enough to store at least one object.
+ *
+ * Minimum size of memory chunk is a maximum of the page size and total
+ * element size.
+ *
+ * Required memory chunk alignment is a maximum of page size and cache
+ * line size.
  */
-typedef int (*rte_mempool_get_capabilities_t)(const struct rte_mempool *mp,
-	unsigned int *flags);
+ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
+		uint32_t obj_num, uint32_t pg_shift,
+		size_t *min_chunk_size, size_t *align);

 /**
- * Notify new memory area to mempool.
+ * Function to be called for each populated object.
+ * + * @param[in] mp + * A pointer to the mempool structure. + * @param[in] opaque + * An opaque pointer passed to iterator. + * @param[in] vaddr + * Object virtual address. + * @param[in] iova + * Input/output virtual address of the object or RTE_BAD_IOVA. */ -typedef int (*rte_mempool_ops_register_memory_area_t) -(const struct rte_mempool *mp, char *vaddr, rte_iova_t iova, size_t len); +typedef void (rte_mempool_populate_obj_cb_t)(struct rte_mempool *mp, + void *opaque, void *vaddr, rte_iova_t iova); + +/** + * Populate memory pool objects using provided memory chunk. + * + * Populated objects should be enqueued to the pool, e.g. using + * rte_mempool_ops_enqueue_bulk(). + * + * If the given IO address is unknown (iova = RTE_BAD_IOVA), + * the chunk doesn't need to be physically contiguous (only virtually), + * and allocated objects may span two pages. + * + * @param[in] mp + * A pointer to the mempool structure. + * @param[in] max_objs + * Maximum number of objects to be populated. + * @param[in] vaddr + * The virtual address of memory that should be used to store objects. + * @param[in] iova + * The IO address + * @param[in] len + * The length of memory in bytes. + * @param[in] obj_cb + * Callback function to be executed for each populated object. + * @param[in] obj_cb_arg + * An opaque pointer passed to the callback function. + * @return + * The number of objects added on success. + * On error, no objects are populated and a negative errno is returned. + */ +typedef int (*rte_mempool_populate_t)(struct rte_mempool *mp, + unsigned int max_objs, + void *vaddr, rte_iova_t iova, size_t len, + rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg); + +/** + * Default way to populate memory pool object using provided memory + * chunk: just slice objects one by one. + */ +int rte_mempool_op_populate_default(struct rte_mempool *mp, + unsigned int max_objs, + void *vaddr, rte_iova_t iova, size_t len, + rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * Get some additional information about a mempool. + */ +typedef int (*rte_mempool_get_info_t)(const struct rte_mempool *mp, + struct rte_mempool_info *info); + /** Structure defining mempool operations structure */ struct rte_mempool_ops { @@ -408,13 +563,23 @@ struct rte_mempool_ops { rte_mempool_dequeue_t dequeue; /**< Dequeue an object. */ rte_mempool_get_count get_count; /**< Get qty of available objs. */ /** - * Get the mempool capabilities + * Optional callback to calculate memory size required to + * store specified number of objects. + */ + rte_mempool_calc_mem_size_t calc_mem_size; + /** + * Optional callback to populate mempool objects using + * provided memory chunk. + */ + rte_mempool_populate_t populate; + /** + * Get mempool info */ - rte_mempool_get_capabilities_t get_capabilities; + rte_mempool_get_info_t get_info; /** - * Notify new memory area to mempool + * Dequeue a number of contiguous object blocks. */ - rte_mempool_ops_register_memory_area_t register_memory_area; + rte_mempool_dequeue_contig_blocks_t dequeue_contig_blocks; } __rte_cache_aligned; #define RTE_MEMPOOL_MAX_OPS_IDX 16 /**< Max registered ops structs */ @@ -492,6 +657,30 @@ rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp, return ops->dequeue(mp, obj_table, n); } +/** + * @internal Wrapper for mempool_ops dequeue_contig_blocks callback. + * + * @param[in] mp + * Pointer to the memory pool. 
+ * @param[out] first_obj_table + * Pointer to a table of void * pointers (first objects). + * @param[in] n + * Number of blocks to get. + * @return + * - 0: Success; got n objects. + * - <0: Error; code of dequeue function. + */ +static inline int +rte_mempool_ops_dequeue_contig_blocks(struct rte_mempool *mp, + void **first_obj_table, unsigned int n) +{ + struct rte_mempool_ops *ops; + + ops = rte_mempool_get_ops(mp->ops_index); + RTE_ASSERT(ops->dequeue_contig_blocks != NULL); + return ops->dequeue_contig_blocks(mp, first_obj_table, n); +} + /** * @internal wrapper for mempool_ops enqueue callback. * @@ -527,41 +716,74 @@ unsigned rte_mempool_ops_get_count(const struct rte_mempool *mp); /** - * @internal wrapper for mempool_ops get_capabilities callback. + * @internal wrapper for mempool_ops calc_mem_size callback. + * API to calculate size of memory required to store specified number of + * object. * - * @param mp [in] + * @param[in] mp * Pointer to the memory pool. - * @param flags [out] - * Pointer to the mempool flags. + * @param[in] obj_num + * Number of objects. + * @param[in] pg_shift + * LOG2 of the physical pages size. If set to 0, ignore page boundaries. + * @param[out] min_chunk_size + * Location for minimum size of the memory chunk which may be used to + * store memory pool objects. + * @param[out] align + * Location for required memory chunk alignment. * @return - * - 0: Success; The mempool driver has advertised his pool capabilities in - * flags param. - * - -ENOTSUP - doesn't support get_capabilities ops (valid case). - * - Otherwise, pool create fails. + * Required memory size aligned at page boundary. */ -int -rte_mempool_ops_get_capabilities(const struct rte_mempool *mp, - unsigned int *flags); +ssize_t rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp, + uint32_t obj_num, uint32_t pg_shift, + size_t *min_chunk_size, size_t *align); + /** - * @internal wrapper for mempool_ops register_memory_area callback. - * API to notify the mempool handler when a new memory area is added to pool. + * @internal wrapper for mempool_ops populate callback. * - * @param mp + * Populate memory pool objects using provided memory chunk. + * + * @param[in] mp + * A pointer to the mempool structure. + * @param[in] max_objs + * Maximum number of objects to be populated. + * @param[in] vaddr + * The virtual address of memory that should be used to store objects. + * @param[in] iova + * The IO address + * @param[in] len + * The length of memory in bytes. + * @param[in] obj_cb + * Callback function to be executed for each populated object. + * @param[in] obj_cb_arg + * An opaque pointer passed to the callback function. + * @return + * The number of objects added on success. + * On error, no objects are populated and a negative errno is returned. + */ +int rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs, + void *vaddr, rte_iova_t iova, size_t len, + rte_mempool_populate_obj_cb_t *obj_cb, + void *obj_cb_arg); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * Wrapper for mempool_ops get_info callback. + * + * @param[in] mp * Pointer to the memory pool. - * @param vaddr - * Pointer to the buffer virtual address. - * @param iova - * Pointer to the buffer IO address. - * @param len - * Pool size. + * @param[out] info + * Pointer to the rte_mempool_info structure * @return - * - 0: Success; - * - -ENOTSUP - doesn't support register_memory_area ops (valid error case). 
- * - Otherwise, rte_mempool_populate_phys fails thus pool create fails. + * - 0: Success; The mempool driver supports retrieving supplementary + * mempool information + * - -ENOTSUP - doesn't support get_info ops (valid case). */ -int -rte_mempool_ops_register_memory_area(const struct rte_mempool *mp, - char *vaddr, rte_iova_t iova, size_t len); +__rte_experimental +int rte_mempool_ops_get_info(const struct rte_mempool *mp, + struct rte_mempool_info *info); /** * @internal wrapper for mempool_ops free callback. @@ -710,8 +932,8 @@ typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *); * - MEMPOOL_F_SC_GET: If this flag is set, the default behavior * when using rte_mempool_get() or rte_mempool_get_bulk() is * "single-consumer". Otherwise, it is "multi-consumers". - * - MEMPOOL_F_NO_PHYS_CONTIG: If set, allocated objects won't - * necessarily be contiguous in physical memory. + * - MEMPOOL_F_NO_IOVA_CONTIG: If set, allocated objects won't + * necessarily be contiguous in IO memory. * @return * The pointer to the new allocated mempool, on success. NULL on error * with rte_errno set appropriately. Possible rte_errno values include: @@ -729,72 +951,6 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size, rte_mempool_obj_cb_t *obj_init, void *obj_init_arg, int socket_id, unsigned flags); -/** - * Create a new mempool named *name* in memory. - * - * The pool contains n elements of elt_size. Its size is set to n. - * This function uses ``memzone_reserve()`` to allocate the mempool header - * (and the objects if vaddr is NULL). - * Depending on the input parameters, mempool elements can be either allocated - * together with the mempool header, or an externally provided memory buffer - * could be used to store mempool objects. In later case, that external - * memory buffer can consist of set of disjoint physical pages. - * - * @param name - * The name of the mempool. - * @param n - * The number of elements in the mempool. The optimum size (in terms of - * memory usage) for a mempool is when n is a power of two minus one: - * n = (2^q - 1). - * @param elt_size - * The size of each element. - * @param cache_size - * Size of the cache. See rte_mempool_create() for details. - * @param private_data_size - * The size of the private data appended after the mempool - * structure. This is useful for storing some private data after the - * mempool structure, as is done for rte_mbuf_pool for example. - * @param mp_init - * A function pointer that is called for initialization of the pool, - * before object initialization. The user can initialize the private - * data in this function if needed. This parameter can be NULL if - * not needed. - * @param mp_init_arg - * An opaque pointer to data that can be used in the mempool - * constructor function. - * @param obj_init - * A function called for each object at initialization of the pool. - * See rte_mempool_create() for details. - * @param obj_init_arg - * An opaque pointer passed to the object constructor function. - * @param socket_id - * The *socket_id* argument is the socket identifier in the case of - * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA - * constraint for the reserved zone. - * @param flags - * Flags controlling the behavior of the mempool. See - * rte_mempool_create() for details. - * @param vaddr - * Virtual address of the externally allocated memory buffer. - * Will be used to store mempool objects. - * @param iova - * Array of IO addresses of the pages that comprises given memory buffer. 
- * @param pg_num - * Number of elements in the iova array. - * @param pg_shift - * LOG2 of the physical pages size. - * @return - * The pointer to the new allocated mempool, on success. NULL on error - * with rte_errno set appropriately. See rte_mempool_create() for details. - */ -struct rte_mempool * -rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size, - unsigned cache_size, unsigned private_data_size, - rte_mempool_ctor_t *mp_init, void *mp_init_arg, - rte_mempool_obj_cb_t *obj_init, void *obj_init_arg, - int socket_id, unsigned flags, void *vaddr, - const rte_iova_t iova[], uint32_t pg_num, uint32_t pg_shift); - /** * Create an empty mempool * @@ -877,46 +1033,6 @@ int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr, rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb, void *opaque); -__rte_deprecated -int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr, - phys_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb, - void *opaque); - -/** - * Add physical memory for objects in the pool at init - * - * Add a virtually contiguous memory chunk in the pool where objects can - * be instantiated. The IO addresses corresponding to the virtual - * area are described in iova[], pg_num, pg_shift. - * - * @param mp - * A pointer to the mempool structure. - * @param vaddr - * The virtual address of memory that should be used to store objects. - * @param iova - * An array of IO addresses of each page composing the virtual area. - * @param pg_num - * Number of elements in the iova array. - * @param pg_shift - * LOG2 of the physical pages size. - * @param free_cb - * The callback used to free this chunk when destroying the mempool. - * @param opaque - * An opaque argument passed to free_cb. - * @return - * The number of objects added on success. - * On error, the chunks are not added in the memory list of the - * mempool and a negative errno is returned. - */ -int rte_mempool_populate_iova_tab(struct rte_mempool *mp, char *vaddr, - const rte_iova_t iova[], uint32_t pg_num, uint32_t pg_shift, - rte_mempool_memchunk_free_cb_t *free_cb, void *opaque); - -__rte_deprecated -int rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr, - const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift, - rte_mempool_memchunk_free_cb_t *free_cb, void *opaque); - /** * Add virtually contiguous memory for objects in the pool at init * @@ -1048,22 +1164,6 @@ rte_mempool_cache_create(uint32_t size, int socket_id); void rte_mempool_cache_free(struct rte_mempool_cache *cache); -/** - * Flush a user-owned mempool cache to the specified mempool. - * - * @param cache - * A pointer to the mempool cache. - * @param mp - * A pointer to the mempool. - */ -static __rte_always_inline void -rte_mempool_cache_flush(struct rte_mempool_cache *cache, - struct rte_mempool *mp) -{ - rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len); - cache->len = 0; -} - /** * Get a pointer to the per-lcore default mempool cache. * @@ -1086,6 +1186,26 @@ rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id) return &mp->local_cache[lcore_id]; } +/** + * Flush a user-owned mempool cache to the specified mempool. + * + * @param cache + * A pointer to the mempool cache. + * @param mp + * A pointer to the mempool. 
+ */
+static __rte_always_inline void
+rte_mempool_cache_flush(struct rte_mempool_cache *cache,
+			struct rte_mempool *mp)
+{
+	if (cache == NULL)
+		cache = rte_mempool_default_cache(mp, rte_lcore_id());
+	if (cache == NULL || cache->len == 0)
+		return;
+	rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len);
+	cache->len = 0;
+}
+
 /**
  * @internal Put several objects back in the mempool; used internally.
  * @param mp
@@ -1365,6 +1485,49 @@ rte_mempool_get(struct rte_mempool *mp, void **obj_p)
 	return rte_mempool_get_bulk(mp, obj_p, 1);
 }

+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get contiguous blocks of objects from the mempool.
+ *
+ * If cache is enabled, consider flushing it first, to reuse objects
+ * as soon as possible.
+ *
+ * The application should check that the driver supports the operation
+ * by calling rte_mempool_ops_get_info() and checking that `contig_block_size`
+ * is not zero.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param first_obj_table
+ *   A pointer to a pointer to the first object in each block.
+ * @param n
+ *   The number of blocks to get from mempool.
+ * @return
+ *   - 0: Success; blocks taken.
+ *   - -ENOBUFS: Not enough entries in the mempool; no object is retrieved.
+ *   - -EOPNOTSUPP: The mempool driver does not support block dequeue
+ */
+static __rte_always_inline int
+__rte_experimental
+rte_mempool_get_contig_blocks(struct rte_mempool *mp,
+	void **first_obj_table, unsigned int n)
+{
+	int ret;
+
+	ret = rte_mempool_ops_dequeue_contig_blocks(mp, first_obj_table, n);
+	if (ret == 0) {
+		__MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, get_success, n);
+		__mempool_contig_blocks_check_cookies(mp, first_obj_table, n,
+						      1);
+	} else {
+		__MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, get_fail, n);
+	}
+
+	return ret;
+}
+
 /**
  * Return the number of entries in the mempool.
  *
@@ -1439,7 +1602,7 @@ rte_mempool_empty(const struct rte_mempool *mp)
  *   A pointer (virtual address) to the element of the pool.
  * @return
  *   The IO address of the elt element.
- *   If the mempool was created with MEMPOOL_F_NO_PHYS_CONTIG, the
+ *   If the mempool was created with MEMPOOL_F_NO_IOVA_CONTIG, the
  *   returned value is RTE_BAD_IOVA.
  */
 static inline rte_iova_t
@@ -1451,13 +1614,6 @@ rte_mempool_virt2iova(const void *elt)
 	return hdr->iova;
 }

-__rte_deprecated
-static inline phys_addr_t
-rte_mempool_virt2phy(__rte_unused const struct rte_mempool *mp, const void *elt)
-{
-	return rte_mempool_virt2iova(elt);
-}
-
 /**
  * Check the consistency of mempool objects.
  *
@@ -1526,64 +1682,6 @@ struct rte_mempool *rte_mempool_lookup(const char *name);
 uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
 	struct rte_mempool_objsz *sz);

-/**
- * Get the size of memory required to store mempool elements.
- *
- * Calculate the maximum amount of memory required to store given number
- * of objects. Assume that the memory buffer will be aligned at page
- * boundary.
- *
- * Note that if object size is bigger then page size, then it assumes
- * that pages are grouped in subsets of physically continuous pages big
- * enough to store at least one object.
- *
- * @param elt_num
- *   Number of elements.
- * @param total_elt_sz
- *   The size of each element, including header and trailer, as returned
- *   by rte_mempool_calc_obj_size().
- * @param pg_shift
- *   LOG2 of the physical pages size. If set to 0, ignore page boundaries.
- * @param flags
- *   The mempool flags.
- * @return
- *   Required memory size aligned at page boundary.
- */ -size_t rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, - uint32_t pg_shift, unsigned int flags); - -/** - * Get the size of memory required to store mempool elements. - * - * Calculate how much memory would be actually required with the given - * memory footprint to store required number of objects. - * - * @param vaddr - * Virtual address of the externally allocated memory buffer. - * Will be used to store mempool objects. - * @param elt_num - * Number of elements. - * @param total_elt_sz - * The size of each element, including header and trailer, as returned - * by rte_mempool_calc_obj_size(). - * @param iova - * Array of IO addresses of the pages that comprises given memory buffer. - * @param pg_num - * Number of elements in the iova array. - * @param pg_shift - * LOG2 of the physical pages size. - * @param flags - * The mempool flags. - * @return - * On success, the number of bytes needed to store given number of - * objects, aligned to the given page size. If the provided memory - * buffer is too small, return a negative value whose absolute value - * is the actual number of elements that can be stored in that buffer. - */ -ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, - size_t total_elt_sz, const rte_iova_t iova[], uint32_t pg_num, - uint32_t pg_shift, unsigned int flags); - /** * Walk list of all memory pools * diff --git a/lib/librte_mempool/rte_mempool_ops.c b/lib/librte_mempool/rte_mempool_ops.c index 0732255c..a27e1fa5 100644 --- a/lib/librte_mempool/rte_mempool_ops.c +++ b/lib/librte_mempool/rte_mempool_ops.c @@ -57,8 +57,10 @@ rte_mempool_register_ops(const struct rte_mempool_ops *h) ops->enqueue = h->enqueue; ops->dequeue = h->dequeue; ops->get_count = h->get_count; - ops->get_capabilities = h->get_capabilities; - ops->register_memory_area = h->register_memory_area; + ops->calc_mem_size = h->calc_mem_size; + ops->populate = h->populate; + ops->get_info = h->get_info; + ops->dequeue_contig_blocks = h->dequeue_contig_blocks; rte_spinlock_unlock(&rte_mempool_ops_table.sl); @@ -97,32 +99,57 @@ rte_mempool_ops_get_count(const struct rte_mempool *mp) return ops->get_count(mp); } -/* wrapper to get external mempool capabilities. 
*/
+/* wrapper to calculate the memory size required to store given number
+ * of objects
+ */
+ssize_t
+rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
+				uint32_t obj_num, uint32_t pg_shift,
+				size_t *min_chunk_size, size_t *align)
+{
+	struct rte_mempool_ops *ops;
+
+	ops = rte_mempool_get_ops(mp->ops_index);
+
+	if (ops->calc_mem_size == NULL)
+		return rte_mempool_op_calc_mem_size_default(mp, obj_num,
+				pg_shift, min_chunk_size, align);
+
+	return ops->calc_mem_size(mp, obj_num, pg_shift, min_chunk_size, align);
+}
+
+/* wrapper to populate memory pool objects using provided memory chunk */
 int
-rte_mempool_ops_get_capabilities(const struct rte_mempool *mp,
-	unsigned int *flags)
+rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
+				void *vaddr, rte_iova_t iova, size_t len,
+				rte_mempool_populate_obj_cb_t *obj_cb,
+				void *obj_cb_arg)
 {
 	struct rte_mempool_ops *ops;

 	ops = rte_mempool_get_ops(mp->ops_index);
-	RTE_FUNC_PTR_OR_ERR_RET(ops->get_capabilities, -ENOTSUP);
-	return ops->get_capabilities(mp, flags);
+	if (ops->populate == NULL)
+		return rte_mempool_op_populate_default(mp, max_objs, vaddr,
+						       iova, len, obj_cb,
+						       obj_cb_arg);
+
+	return ops->populate(mp, max_objs, vaddr, iova, len, obj_cb,
+			     obj_cb_arg);
 }

-/* wrapper to notify new memory area to external mempool */
+/* wrapper to get additional mempool info */
 int
-rte_mempool_ops_register_memory_area(const struct rte_mempool *mp, char *vaddr,
-	rte_iova_t iova, size_t len)
+rte_mempool_ops_get_info(const struct rte_mempool *mp,
+			 struct rte_mempool_info *info)
 {
 	struct rte_mempool_ops *ops;

 	ops = rte_mempool_get_ops(mp->ops_index);
-	RTE_FUNC_PTR_OR_ERR_RET(ops->register_memory_area, -ENOTSUP);
-	return ops->register_memory_area(mp, vaddr, iova, len);
+	RTE_FUNC_PTR_OR_ERR_RET(ops->get_info, -ENOTSUP);
+	return ops->get_info(mp, info);
 }
+
 /* sets mempool ops previously registered by rte_mempool_register_ops. */
 int
 rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,

diff --git a/lib/librte_mempool/rte_mempool_ops_default.c b/lib/librte_mempool/rte_mempool_ops_default.c
new file mode 100644
index 00000000..4e2bfc82
--- /dev/null
+++ b/lib/librte_mempool/rte_mempool_ops_default.c
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 6WIND S.A.
+ * Copyright(c) 2018 Solarflare Communications Inc.
+ */
+
+#include <rte_mempool.h>
+
+ssize_t
+rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
+				     uint32_t obj_num, uint32_t pg_shift,
+				     size_t *min_chunk_size, size_t *align)
+{
+	size_t total_elt_sz;
+	size_t obj_per_page, pg_num, pg_sz;
+	size_t mem_size;
+
+	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+	if (total_elt_sz == 0) {
+		mem_size = 0;
+	} else if (pg_shift == 0) {
+		mem_size = total_elt_sz * obj_num;
+	} else {
+		pg_sz = (size_t)1 << pg_shift;
+		obj_per_page = pg_sz / total_elt_sz;
+		if (obj_per_page == 0) {
+			/*
+			 * Note that if object size is bigger than page size,
+			 * then it is assumed that pages are grouped in subsets
+			 * of physically continuous pages big enough to store
+			 * at least one object.
+			 */
+			mem_size =
+				RTE_ALIGN_CEIL(total_elt_sz, pg_sz) * obj_num;
+		} else {
+			pg_num = (obj_num + obj_per_page - 1) / obj_per_page;
+			mem_size = pg_num << pg_shift;
+		}
+	}
+
+	*min_chunk_size = RTE_MAX((size_t)1 << pg_shift, total_elt_sz);
+
+	*align = RTE_MAX((size_t)RTE_CACHE_LINE_SIZE, (size_t)1 << pg_shift);
+
+	return mem_size;
+}
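The default sizing op above is also exported as a public symbol (see the version map change below), so a driver or application can call it directly to preview memory requirements. A minimal usage sketch, not part of this patch, assuming an already created pool and 2 MB pages (pg_shift = 21); the helper name is illustrative:

#include <stdio.h>
#include <rte_mempool.h>

/* Ask the default op how much memory "n" objects of pool "mp" would
 * need when laid out on 2 MB pages; header/element/trailer sizes are
 * read from the pool itself.
 */
static void
print_default_mem_size(const struct rte_mempool *mp, uint32_t n)
{
	size_t min_chunk_size = 0, align = 0;
	ssize_t mem_size;

	mem_size = rte_mempool_op_calc_mem_size_default(mp, n, 21,
			&min_chunk_size, &align);
	if (mem_size < 0)
		return;	/* sizing failed, negative errno */
	printf("need %zd bytes, min chunk %zu, align %zu\n",
	       mem_size, min_chunk_size, align);
}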
+ */ + mem_size = + RTE_ALIGN_CEIL(total_elt_sz, pg_sz) * obj_num; + } else { + pg_num = (obj_num + obj_per_page - 1) / obj_per_page; + mem_size = pg_num << pg_shift; + } + } + + *min_chunk_size = RTE_MAX((size_t)1 << pg_shift, total_elt_sz); + + *align = RTE_MAX((size_t)RTE_CACHE_LINE_SIZE, (size_t)1 << pg_shift); + + return mem_size; +} + +int +rte_mempool_op_populate_default(struct rte_mempool *mp, unsigned int max_objs, + void *vaddr, rte_iova_t iova, size_t len, + rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg) +{ + size_t total_elt_sz; + size_t off; + unsigned int i; + void *obj; + + total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size; + + for (off = 0, i = 0; off + total_elt_sz <= len && i < max_objs; i++) { + off += mp->header_size; + obj = (char *)vaddr + off; + obj_cb(mp, obj_cb_arg, obj, + (iova == RTE_BAD_IOVA) ? RTE_BAD_IOVA : (iova + off)); + rte_mempool_ops_enqueue_bulk(mp, &obj, 1); + off += mp->elt_size + mp->trailer_size; + } + + return i; +} diff --git a/lib/librte_mempool/rte_mempool_version.map b/lib/librte_mempool/rte_mempool_version.map index 62b76f91..17cbca46 100644 --- a/lib/librte_mempool/rte_mempool_version.map +++ b/lib/librte_mempool/rte_mempool_version.map @@ -8,9 +8,6 @@ DPDK_2.0 { rte_mempool_list_dump; rte_mempool_lookup; rte_mempool_walk; - rte_mempool_xmem_create; - rte_mempool_xmem_size; - rte_mempool_xmem_usage; local: *; }; @@ -34,8 +31,6 @@ DPDK_16.07 { rte_mempool_ops_table; rte_mempool_populate_anon; rte_mempool_populate_default; - rte_mempool_populate_phys; - rte_mempool_populate_phys_tab; rte_mempool_populate_virt; rte_mempool_register_ops; rte_mempool_set_ops_byname; @@ -45,9 +40,21 @@ DPDK_16.07 { DPDK_17.11 { global: - rte_mempool_ops_get_capabilities; - rte_mempool_ops_register_memory_area; rte_mempool_populate_iova; - rte_mempool_populate_iova_tab; } DPDK_16.07; + +DPDK_18.05 { + global: + + rte_mempool_contig_blocks_check_cookies; + rte_mempool_op_calc_mem_size_default; + rte_mempool_op_populate_default; + +} DPDK_17.11; + +EXPERIMENTAL { + global: + + rte_mempool_ops_get_info; +}; -- cgit 1.2.3-korg