Diffstat (limited to 'drivers/mempool')
-rw-r--r--  drivers/mempool/Makefile                                   |   5
-rw-r--r--  drivers/mempool/bucket/Makefile                            |  27
-rw-r--r--  drivers/mempool/bucket/meson.build                         |   9
-rw-r--r--  drivers/mempool/bucket/rte_mempool_bucket.c                | 628
-rw-r--r--  drivers/mempool/bucket/rte_mempool_bucket_version.map      |   4
-rw-r--r--  drivers/mempool/dpaa/Makefile                              |   3
-rw-r--r--  drivers/mempool/dpaa/dpaa_mempool.c                        |  54
-rw-r--r--  drivers/mempool/dpaa/dpaa_mempool.h                        |   2
-rw-r--r--  drivers/mempool/dpaa/meson.build                           |  12
-rw-r--r--  drivers/mempool/dpaa/rte_mempool_dpaa_version.map          |   1
-rw-r--r--  drivers/mempool/dpaa2/Makefile                             |  11
-rw-r--r--  drivers/mempool/dpaa2/dpaa2_hw_mempool.c                   | 139
-rw-r--r--  drivers/mempool/dpaa2/dpaa2_hw_mempool_logs.h              |  38
-rw-r--r--  drivers/mempool/dpaa2/meson.build                          |  12
-rw-r--r--  drivers/mempool/dpaa2/rte_dpaa2_mempool.h                  |  53
-rw-r--r--  drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map        |   9
-rw-r--r--  drivers/mempool/meson.build                                |   2
-rw-r--r--  drivers/mempool/octeontx/Makefile                          |   5
-rw-r--r--  drivers/mempool/octeontx/meson.build                       |   6
-rw-r--r--  drivers/mempool/octeontx/octeontx_fpavf.c                  |  63
-rw-r--r--  drivers/mempool/octeontx/octeontx_fpavf.h                  |   9
-rw-r--r--  drivers/mempool/octeontx/octeontx_mbox.c                   | 214
-rw-r--r--  drivers/mempool/octeontx/octeontx_mbox.h                   |  36
-rw-r--r--  drivers/mempool/octeontx/octeontx_pool_logs.h              |   9
-rw-r--r--  drivers/mempool/octeontx/octeontx_ssovf.c                  | 271
-rw-r--r--  drivers/mempool/octeontx/rte_mempool_octeontx.c            |  64
-rw-r--r--  drivers/mempool/octeontx/rte_mempool_octeontx_version.map  |   6
27 files changed, 1061 insertions, 631 deletions
diff --git a/drivers/mempool/Makefile b/drivers/mempool/Makefile
index aae2cb10..28c2e836 100644
--- a/drivers/mempool/Makefile
+++ b/drivers/mempool/Makefile
@@ -3,8 +3,13 @@
include $(RTE_SDK)/mk/rte.vars.mk
+DIRS-$(CONFIG_RTE_DRIVER_MEMPOOL_BUCKET) += bucket
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y)
DIRS-$(CONFIG_RTE_LIBRTE_DPAA_MEMPOOL) += dpaa
+endif
+ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
DIRS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += dpaa2
+endif
DIRS-$(CONFIG_RTE_DRIVER_MEMPOOL_RING) += ring
DIRS-$(CONFIG_RTE_DRIVER_MEMPOOL_STACK) += stack
DIRS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += octeontx
diff --git a/drivers/mempool/bucket/Makefile b/drivers/mempool/bucket/Makefile
new file mode 100644
index 00000000..7364916b
--- /dev/null
+++ b/drivers/mempool/bucket/Makefile
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Copyright (c) 2017-2018 Solarflare Communications Inc.
+# All rights reserved.
+#
+# This software was jointly developed between OKTET Labs (under contract
+# for Solarflare) and Solarflare Communications, Inc.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_mempool_bucket.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+LDLIBS += -lrte_eal -lrte_mempool -lrte_ring
+
+EXPORT_MAP := rte_mempool_bucket_version.map
+
+LIBABIVER := 1
+
+SRCS-$(CONFIG_RTE_DRIVER_MEMPOOL_BUCKET) += rte_mempool_bucket.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/mempool/bucket/meson.build b/drivers/mempool/bucket/meson.build
new file mode 100644
index 00000000..618d7912
--- /dev/null
+++ b/drivers/mempool/bucket/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Copyright (c) 2017-2018 Solarflare Communications Inc.
+# All rights reserved.
+#
+# This software was jointly developed between OKTET Labs (under contract
+# for Solarflare) and Solarflare Communications, Inc.
+
+sources = files('rte_mempool_bucket.c')
diff --git a/drivers/mempool/bucket/rte_mempool_bucket.c b/drivers/mempool/bucket/rte_mempool_bucket.c
new file mode 100644
index 00000000..78d2b9d0
--- /dev/null
+++ b/drivers/mempool/bucket/rte_mempool_bucket.c
@@ -0,0 +1,628 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2017-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <rte_errno.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+
+/*
+ * The general idea of the bucket mempool driver is as follows.
+ * We keep track of physically contiguous groups (buckets) of objects
+ * of a certain size. Every such group has a counter that is
+ * incremented every time an object from that group is enqueued.
+ * Until the bucket is full, no objects from it are eligible for allocation.
+ * If a request is made to dequeue a multiple of the bucket size, it is
+ * satisfied by returning whole buckets instead of separate objects.
+ */
+
+
+struct bucket_header {
+ unsigned int lcore_id;
+ uint8_t fill_cnt;
+};
+
+struct bucket_stack {
+ unsigned int top;
+ unsigned int limit;
+ void *objects[];
+};
+
+struct bucket_data {
+ unsigned int header_size;
+ unsigned int total_elt_size;
+ unsigned int obj_per_bucket;
+ unsigned int bucket_stack_thresh;
+ uintptr_t bucket_page_mask;
+ struct rte_ring *shared_bucket_ring;
+ struct bucket_stack *buckets[RTE_MAX_LCORE];
+ /*
+ * Multi-producer single-consumer ring to hold objects that are
+ * returned to the mempool at a different lcore than initially
+ * dequeued
+ */
+ struct rte_ring *adoption_buffer_rings[RTE_MAX_LCORE];
+ struct rte_ring *shared_orphan_ring;
+ struct rte_mempool *pool;
+ unsigned int bucket_mem_size;
+};
+
+static struct bucket_stack *
+bucket_stack_create(const struct rte_mempool *mp, unsigned int n_elts)
+{
+ struct bucket_stack *stack;
+
+ stack = rte_zmalloc_socket("bucket_stack",
+ sizeof(struct bucket_stack) +
+ n_elts * sizeof(void *),
+ RTE_CACHE_LINE_SIZE,
+ mp->socket_id);
+ if (stack == NULL)
+ return NULL;
+ stack->limit = n_elts;
+ stack->top = 0;
+
+ return stack;
+}
+
+static void
+bucket_stack_push(struct bucket_stack *stack, void *obj)
+{
+ RTE_ASSERT(stack->top < stack->limit);
+ stack->objects[stack->top++] = obj;
+}
+
+static void *
+bucket_stack_pop_unsafe(struct bucket_stack *stack)
+{
+ RTE_ASSERT(stack->top > 0);
+ return stack->objects[--stack->top];
+}
+
+static void *
+bucket_stack_pop(struct bucket_stack *stack)
+{
+ if (stack->top == 0)
+ return NULL;
+ return bucket_stack_pop_unsafe(stack);
+}
+
+static int
+bucket_enqueue_single(struct bucket_data *bd, void *obj)
+{
+ int rc = 0;
+ uintptr_t addr = (uintptr_t)obj;
+ struct bucket_header *hdr;
+ unsigned int lcore_id = rte_lcore_id();
+
+ addr &= bd->bucket_page_mask;
+ hdr = (struct bucket_header *)addr;
+
+ if (likely(hdr->lcore_id == lcore_id)) {
+ if (hdr->fill_cnt < bd->obj_per_bucket - 1) {
+ hdr->fill_cnt++;
+ } else {
+ hdr->fill_cnt = 0;
+ /* Stack is big enough to put all buckets */
+ bucket_stack_push(bd->buckets[lcore_id], hdr);
+ }
+ } else if (hdr->lcore_id != LCORE_ID_ANY) {
+ struct rte_ring *adopt_ring =
+ bd->adoption_buffer_rings[hdr->lcore_id];
+
+ rc = rte_ring_enqueue(adopt_ring, obj);
+ /* Ring is big enough to put all objects */
+ RTE_ASSERT(rc == 0);
+ } else if (hdr->fill_cnt < bd->obj_per_bucket - 1) {
+ hdr->fill_cnt++;
+ } else {
+ hdr->fill_cnt = 0;
+ rc = rte_ring_enqueue(bd->shared_bucket_ring, hdr);
+ /* Ring is big enough to put all buckets */
+ RTE_ASSERT(rc == 0);
+ }
+
+ return rc;
+}
+
+static int
+bucket_enqueue(struct rte_mempool *mp, void * const *obj_table,
+ unsigned int n)
+{
+ struct bucket_data *bd = mp->pool_data;
+ struct bucket_stack *local_stack = bd->buckets[rte_lcore_id()];
+ unsigned int i;
+ int rc = 0;
+
+ for (i = 0; i < n; i++) {
+ rc = bucket_enqueue_single(bd, obj_table[i]);
+ RTE_ASSERT(rc == 0);
+ }
+ if (local_stack->top > bd->bucket_stack_thresh) {
+ rte_ring_enqueue_bulk(bd->shared_bucket_ring,
+ &local_stack->objects
+ [bd->bucket_stack_thresh],
+ local_stack->top -
+ bd->bucket_stack_thresh,
+ NULL);
+ local_stack->top = bd->bucket_stack_thresh;
+ }
+ return rc;
+}
+
+static void **
+bucket_fill_obj_table(const struct bucket_data *bd, void **pstart,
+ void **obj_table, unsigned int n)
+{
+ unsigned int i;
+ uint8_t *objptr = *pstart;
+
+ for (objptr += bd->header_size, i = 0; i < n;
+ i++, objptr += bd->total_elt_size)
+ *obj_table++ = objptr;
+ *pstart = objptr;
+ return obj_table;
+}
+
+static int
+bucket_dequeue_orphans(struct bucket_data *bd, void **obj_table,
+ unsigned int n_orphans)
+{
+ unsigned int i;
+ int rc;
+ uint8_t *objptr;
+
+ rc = rte_ring_dequeue_bulk(bd->shared_orphan_ring, obj_table,
+ n_orphans, NULL);
+ if (unlikely(rc != (int)n_orphans)) {
+ struct bucket_header *hdr;
+
+ objptr = bucket_stack_pop(bd->buckets[rte_lcore_id()]);
+ hdr = (struct bucket_header *)objptr;
+
+ if (objptr == NULL) {
+ rc = rte_ring_dequeue(bd->shared_bucket_ring,
+ (void **)&objptr);
+ if (rc != 0) {
+ rte_errno = ENOBUFS;
+ return -rte_errno;
+ }
+ hdr = (struct bucket_header *)objptr;
+ hdr->lcore_id = rte_lcore_id();
+ }
+ hdr->fill_cnt = 0;
+ bucket_fill_obj_table(bd, (void **)&objptr, obj_table,
+ n_orphans);
+ for (i = n_orphans; i < bd->obj_per_bucket; i++,
+ objptr += bd->total_elt_size) {
+ rc = rte_ring_enqueue(bd->shared_orphan_ring,
+ objptr);
+ if (rc != 0) {
+ RTE_ASSERT(0);
+ rte_errno = -rc;
+ return rc;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+bucket_dequeue_buckets(struct bucket_data *bd, void **obj_table,
+ unsigned int n_buckets)
+{
+ struct bucket_stack *cur_stack = bd->buckets[rte_lcore_id()];
+ unsigned int n_buckets_from_stack = RTE_MIN(n_buckets, cur_stack->top);
+ void **obj_table_base = obj_table;
+
+ n_buckets -= n_buckets_from_stack;
+ while (n_buckets_from_stack-- > 0) {
+ void *obj = bucket_stack_pop_unsafe(cur_stack);
+
+ obj_table = bucket_fill_obj_table(bd, &obj, obj_table,
+ bd->obj_per_bucket);
+ }
+ while (n_buckets-- > 0) {
+ struct bucket_header *hdr;
+
+ if (unlikely(rte_ring_dequeue(bd->shared_bucket_ring,
+ (void **)&hdr) != 0)) {
+ /*
+ * Return the already-dequeued buffers
+ * back to the mempool
+ */
+ bucket_enqueue(bd->pool, obj_table_base,
+ obj_table - obj_table_base);
+ rte_errno = ENOBUFS;
+ return -rte_errno;
+ }
+ hdr->lcore_id = rte_lcore_id();
+ obj_table = bucket_fill_obj_table(bd, (void **)&hdr,
+ obj_table,
+ bd->obj_per_bucket);
+ }
+
+ return 0;
+}
+
+static int
+bucket_adopt_orphans(struct bucket_data *bd)
+{
+ int rc = 0;
+ struct rte_ring *adopt_ring =
+ bd->adoption_buffer_rings[rte_lcore_id()];
+
+ if (unlikely(!rte_ring_empty(adopt_ring))) {
+ void *orphan;
+
+ while (rte_ring_sc_dequeue(adopt_ring, &orphan) == 0) {
+ rc = bucket_enqueue_single(bd, orphan);
+ RTE_ASSERT(rc == 0);
+ }
+ }
+ return rc;
+}
+
+static int
+bucket_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
+{
+ struct bucket_data *bd = mp->pool_data;
+ unsigned int n_buckets = n / bd->obj_per_bucket;
+ unsigned int n_orphans = n - n_buckets * bd->obj_per_bucket;
+ int rc = 0;
+
+ bucket_adopt_orphans(bd);
+
+ if (unlikely(n_orphans > 0)) {
+ rc = bucket_dequeue_orphans(bd, obj_table +
+ (n_buckets * bd->obj_per_bucket),
+ n_orphans);
+ if (rc != 0)
+ return rc;
+ }
+
+ if (likely(n_buckets > 0)) {
+ rc = bucket_dequeue_buckets(bd, obj_table, n_buckets);
+ if (unlikely(rc != 0) && n_orphans > 0) {
+ rte_ring_enqueue_bulk(bd->shared_orphan_ring,
+ obj_table + (n_buckets *
+ bd->obj_per_bucket),
+ n_orphans, NULL);
+ }
+ }
+
+ return rc;
+}
+
+static int
+bucket_dequeue_contig_blocks(struct rte_mempool *mp, void **first_obj_table,
+ unsigned int n)
+{
+ struct bucket_data *bd = mp->pool_data;
+ const uint32_t header_size = bd->header_size;
+ struct bucket_stack *cur_stack = bd->buckets[rte_lcore_id()];
+ unsigned int n_buckets_from_stack = RTE_MIN(n, cur_stack->top);
+ struct bucket_header *hdr;
+ void **first_objp = first_obj_table;
+
+ bucket_adopt_orphans(bd);
+
+ n -= n_buckets_from_stack;
+ while (n_buckets_from_stack-- > 0) {
+ hdr = bucket_stack_pop_unsafe(cur_stack);
+ *first_objp++ = (uint8_t *)hdr + header_size;
+ }
+ if (n > 0) {
+ if (unlikely(rte_ring_dequeue_bulk(bd->shared_bucket_ring,
+ first_objp, n, NULL) != n)) {
+ /* Return the already dequeued buckets */
+ while (first_objp-- != first_obj_table) {
+ bucket_stack_push(cur_stack,
+ (uint8_t *)*first_objp -
+ header_size);
+ }
+ rte_errno = ENOBUFS;
+ return -rte_errno;
+ }
+ while (n-- > 0) {
+ hdr = (struct bucket_header *)*first_objp;
+ hdr->lcore_id = rte_lcore_id();
+ *first_objp++ = (uint8_t *)hdr + header_size;
+ }
+ }
+
+ return 0;
+}
+
+static void
+count_underfilled_buckets(struct rte_mempool *mp,
+ void *opaque,
+ struct rte_mempool_memhdr *memhdr,
+ __rte_unused unsigned int mem_idx)
+{
+ unsigned int *pcount = opaque;
+ const struct bucket_data *bd = mp->pool_data;
+ unsigned int bucket_page_sz =
+ (unsigned int)(~bd->bucket_page_mask + 1);
+ uintptr_t align;
+ uint8_t *iter;
+
+ align = (uintptr_t)RTE_PTR_ALIGN_CEIL(memhdr->addr, bucket_page_sz) -
+ (uintptr_t)memhdr->addr;
+
+ for (iter = (uint8_t *)memhdr->addr + align;
+ iter < (uint8_t *)memhdr->addr + memhdr->len;
+ iter += bucket_page_sz) {
+ struct bucket_header *hdr = (struct bucket_header *)iter;
+
+ *pcount += hdr->fill_cnt;
+ }
+}
+
+static unsigned int
+bucket_get_count(const struct rte_mempool *mp)
+{
+ const struct bucket_data *bd = mp->pool_data;
+ unsigned int count =
+ bd->obj_per_bucket * rte_ring_count(bd->shared_bucket_ring) +
+ rte_ring_count(bd->shared_orphan_ring);
+ unsigned int i;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ if (!rte_lcore_is_enabled(i))
+ continue;
+ count += bd->obj_per_bucket * bd->buckets[i]->top +
+ rte_ring_count(bd->adoption_buffer_rings[i]);
+ }
+
+ rte_mempool_mem_iter((struct rte_mempool *)(uintptr_t)mp,
+ count_underfilled_buckets, &count);
+
+ return count;
+}
+
+static int
+bucket_alloc(struct rte_mempool *mp)
+{
+ int rg_flags = 0;
+ int rc = 0;
+ char rg_name[RTE_RING_NAMESIZE];
+ struct bucket_data *bd;
+ unsigned int i;
+ unsigned int bucket_header_size;
+
+ bd = rte_zmalloc_socket("bucket_pool", sizeof(*bd),
+ RTE_CACHE_LINE_SIZE, mp->socket_id);
+ if (bd == NULL) {
+ rc = -ENOMEM;
+ goto no_mem_for_data;
+ }
+ bd->pool = mp;
+ if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
+ bucket_header_size = sizeof(struct bucket_header);
+ else
+ bucket_header_size = RTE_CACHE_LINE_SIZE;
+ RTE_BUILD_BUG_ON(sizeof(struct bucket_header) > RTE_CACHE_LINE_SIZE);
+ bd->header_size = mp->header_size + bucket_header_size;
+ bd->total_elt_size = mp->header_size + mp->elt_size + mp->trailer_size;
+ bd->bucket_mem_size = RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB * 1024;
+ bd->obj_per_bucket = (bd->bucket_mem_size - bucket_header_size) /
+ bd->total_elt_size;
+ bd->bucket_page_mask = ~(rte_align64pow2(bd->bucket_mem_size) - 1);
+ /* eventually this should be a tunable parameter */
+ bd->bucket_stack_thresh = (mp->size / bd->obj_per_bucket) * 4 / 3;
+
+ if (mp->flags & MEMPOOL_F_SP_PUT)
+ rg_flags |= RING_F_SP_ENQ;
+ if (mp->flags & MEMPOOL_F_SC_GET)
+ rg_flags |= RING_F_SC_DEQ;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ if (!rte_lcore_is_enabled(i))
+ continue;
+ bd->buckets[i] =
+ bucket_stack_create(mp, mp->size / bd->obj_per_bucket);
+ if (bd->buckets[i] == NULL) {
+ rc = -ENOMEM;
+ goto no_mem_for_stacks;
+ }
+ rc = snprintf(rg_name, sizeof(rg_name),
+ RTE_MEMPOOL_MZ_FORMAT ".a%u", mp->name, i);
+ if (rc < 0 || rc >= (int)sizeof(rg_name)) {
+ rc = -ENAMETOOLONG;
+ goto no_mem_for_stacks;
+ }
+ bd->adoption_buffer_rings[i] =
+ rte_ring_create(rg_name, rte_align32pow2(mp->size + 1),
+ mp->socket_id,
+ rg_flags | RING_F_SC_DEQ);
+ if (bd->adoption_buffer_rings[i] == NULL) {
+ rc = -rte_errno;
+ goto no_mem_for_stacks;
+ }
+ }
+
+ rc = snprintf(rg_name, sizeof(rg_name),
+ RTE_MEMPOOL_MZ_FORMAT ".0", mp->name);
+ if (rc < 0 || rc >= (int)sizeof(rg_name)) {
+ rc = -ENAMETOOLONG;
+ goto invalid_shared_orphan_ring;
+ }
+ bd->shared_orphan_ring =
+ rte_ring_create(rg_name, rte_align32pow2(mp->size + 1),
+ mp->socket_id, rg_flags);
+ if (bd->shared_orphan_ring == NULL) {
+ rc = -rte_errno;
+ goto cannot_create_shared_orphan_ring;
+ }
+
+ rc = snprintf(rg_name, sizeof(rg_name),
+ RTE_MEMPOOL_MZ_FORMAT ".1", mp->name);
+ if (rc < 0 || rc >= (int)sizeof(rg_name)) {
+ rc = -ENAMETOOLONG;
+ goto invalid_shared_bucket_ring;
+ }
+ bd->shared_bucket_ring =
+ rte_ring_create(rg_name,
+ rte_align32pow2((mp->size + 1) /
+ bd->obj_per_bucket),
+ mp->socket_id, rg_flags);
+ if (bd->shared_bucket_ring == NULL) {
+ rc = -rte_errno;
+ goto cannot_create_shared_bucket_ring;
+ }
+
+ mp->pool_data = bd;
+
+ return 0;
+
+cannot_create_shared_bucket_ring:
+invalid_shared_bucket_ring:
+ rte_ring_free(bd->shared_orphan_ring);
+cannot_create_shared_orphan_ring:
+invalid_shared_orphan_ring:
+no_mem_for_stacks:
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ rte_free(bd->buckets[i]);
+ rte_ring_free(bd->adoption_buffer_rings[i]);
+ }
+ rte_free(bd);
+no_mem_for_data:
+ rte_errno = -rc;
+ return rc;
+}
+
+static void
+bucket_free(struct rte_mempool *mp)
+{
+ unsigned int i;
+ struct bucket_data *bd = mp->pool_data;
+
+ if (bd == NULL)
+ return;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ rte_free(bd->buckets[i]);
+ rte_ring_free(bd->adoption_buffer_rings[i]);
+ }
+
+ rte_ring_free(bd->shared_orphan_ring);
+ rte_ring_free(bd->shared_bucket_ring);
+
+ rte_free(bd);
+}
+
+static ssize_t
+bucket_calc_mem_size(const struct rte_mempool *mp, uint32_t obj_num,
+ __rte_unused uint32_t pg_shift, size_t *min_total_elt_size,
+ size_t *align)
+{
+ struct bucket_data *bd = mp->pool_data;
+ unsigned int bucket_page_sz;
+
+ if (bd == NULL)
+ return -EINVAL;
+
+ bucket_page_sz = rte_align32pow2(bd->bucket_mem_size);
+ *align = bucket_page_sz;
+ *min_total_elt_size = bucket_page_sz;
+ /*
+ * Each bucket occupies its own block aligned to
+ * bucket_page_sz, so the required amount of memory is
+ * a multiple of bucket_page_sz.
+ * We also need extra space for a bucket header
+ */
+ return ((obj_num + bd->obj_per_bucket - 1) /
+ bd->obj_per_bucket) * bucket_page_sz;
+}
+
+static int
+bucket_populate(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
+{
+ struct bucket_data *bd = mp->pool_data;
+ unsigned int bucket_page_sz;
+ unsigned int bucket_header_sz;
+ unsigned int n_objs;
+ uintptr_t align;
+ uint8_t *iter;
+ int rc;
+
+ if (bd == NULL)
+ return -EINVAL;
+
+ bucket_page_sz = rte_align32pow2(bd->bucket_mem_size);
+ align = RTE_PTR_ALIGN_CEIL((uintptr_t)vaddr, bucket_page_sz) -
+ (uintptr_t)vaddr;
+
+ bucket_header_sz = bd->header_size - mp->header_size;
+ if (iova != RTE_BAD_IOVA)
+ iova += align + bucket_header_sz;
+
+ for (iter = (uint8_t *)vaddr + align, n_objs = 0;
+ iter < (uint8_t *)vaddr + len && n_objs < max_objs;
+ iter += bucket_page_sz) {
+ struct bucket_header *hdr = (struct bucket_header *)iter;
+ unsigned int chunk_len = bd->bucket_mem_size;
+
+ if ((size_t)(iter - (uint8_t *)vaddr) + chunk_len > len)
+ chunk_len = len - (iter - (uint8_t *)vaddr);
+ if (chunk_len <= bucket_header_sz)
+ break;
+ chunk_len -= bucket_header_sz;
+
+ hdr->fill_cnt = 0;
+ hdr->lcore_id = LCORE_ID_ANY;
+ rc = rte_mempool_op_populate_default(mp,
+ RTE_MIN(bd->obj_per_bucket,
+ max_objs - n_objs),
+ iter + bucket_header_sz,
+ iova, chunk_len,
+ obj_cb, obj_cb_arg);
+ if (rc < 0)
+ return rc;
+ n_objs += rc;
+ if (iova != RTE_BAD_IOVA)
+ iova += bucket_page_sz;
+ }
+
+ return n_objs;
+}
+
+static int
+bucket_get_info(const struct rte_mempool *mp, struct rte_mempool_info *info)
+{
+ struct bucket_data *bd = mp->pool_data;
+
+ info->contig_block_size = bd->obj_per_bucket;
+ return 0;
+}
+
+
+static const struct rte_mempool_ops ops_bucket = {
+ .name = "bucket",
+ .alloc = bucket_alloc,
+ .free = bucket_free,
+ .enqueue = bucket_enqueue,
+ .dequeue = bucket_dequeue,
+ .get_count = bucket_get_count,
+ .calc_mem_size = bucket_calc_mem_size,
+ .populate = bucket_populate,
+ .get_info = bucket_get_info,
+ .dequeue_contig_blocks = bucket_dequeue_contig_blocks,
+};
+
+
+MEMPOOL_REGISTER_OPS(ops_bucket);
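
The comment at the top of rte_mempool_bucket.c above describes the driver model: objects are grouped into physically contiguous buckets, a bucket becomes eligible for allocation only after all of its objects have been returned, and requests for a multiple of the bucket size are served with whole buckets. The following is a minimal sketch of how an application could attach these ops to a pool; the pool name, sizes and error handling are illustrative only, and the contiguous-block dequeue path is normally exercised through a PMD rather than called directly.

    #include <rte_mempool.h>
    #include <rte_errno.h>

    /* Sketch: create an empty pool, attach the "bucket" ops, then populate.
     * The build-time option RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB controls the
     * bucket size used by the driver; all values below are placeholders.
     */
    static struct rte_mempool *
    make_bucket_pool(unsigned int n_objs, unsigned int obj_size, int socket_id)
    {
        struct rte_mempool *mp;

        mp = rte_mempool_create_empty("example_pool", n_objs, obj_size,
                                      0 /* cache_size */, 0 /* priv_size */,
                                      socket_id, 0 /* flags */);
        if (mp == NULL)
            return NULL;

        if (rte_mempool_set_ops_byname(mp, "bucket", NULL) != 0 ||
            rte_mempool_populate_default(mp) < 0) {
            rte_mempool_free(mp);
            return NULL;
        }
        return mp;
    }
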
diff --git a/drivers/mempool/bucket/rte_mempool_bucket_version.map b/drivers/mempool/bucket/rte_mempool_bucket_version.map
new file mode 100644
index 00000000..9b9ab1a4
--- /dev/null
+++ b/drivers/mempool/bucket/rte_mempool_bucket_version.map
@@ -0,0 +1,4 @@
+DPDK_18.05 {
+
+ local: *;
+};
diff --git a/drivers/mempool/dpaa/Makefile b/drivers/mempool/dpaa/Makefile
index 4c0d7aaa..da8da1e9 100644
--- a/drivers/mempool/dpaa/Makefile
+++ b/drivers/mempool/dpaa/Makefile
@@ -22,6 +22,9 @@ EXPORT_MAP := rte_mempool_dpaa_version.map
# Lbrary version
LIBABIVER := 1
+# depends on dpaa bus which uses experimental API
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
# all source are stored in SRCS-y
#
SRCS-$(CONFIG_RTE_LIBRTE_DPAA_MEMPOOL) += dpaa_mempool.c
diff --git a/drivers/mempool/dpaa/dpaa_mempool.c b/drivers/mempool/dpaa/dpaa_mempool.c
index fb3b6ba0..10c536bf 100644
--- a/drivers/mempool/dpaa/dpaa_mempool.c
+++ b/drivers/mempool/dpaa/dpaa_mempool.c
@@ -27,6 +27,13 @@
#include <dpaa_mempool.h>
+/* List of all the memseg information locally maintained in dpaa driver. This
+ * is to optimize the PA_to_VA searches until a better mechanism (algo) is
+ * available.
+ */
+struct dpaa_memseg_list rte_dpaa_memsegs
+ = TAILQ_HEAD_INITIALIZER(rte_dpaa_memsegs);
+
struct dpaa_bp_info rte_dpaa_bpid_info[DPAA_MAX_BPOOLS];
static int
@@ -115,7 +122,8 @@ dpaa_buf_free(struct dpaa_bp_info *bp_info, uint64_t addr)
struct bm_buffer buf;
int ret;
- DPAA_MEMPOOL_DEBUG("Free 0x%lx to bpid: %d", addr, bp_info->bpid);
+ DPAA_MEMPOOL_DEBUG("Free 0x%" PRIx64 " to bpid: %d",
+ addr, bp_info->bpid);
bm_buffer_set64(&buf, addr);
retry:
@@ -154,8 +162,7 @@ dpaa_mbuf_free_bulk(struct rte_mempool *pool,
if (unlikely(!bp_info->ptov_off)) {
/* buffers are from single mem segment */
if (bp_info->flags & DPAA_MPOOL_SINGLE_SEGMENT) {
- bp_info->ptov_off
- = (uint64_t)obj_table[i] - phy;
+ bp_info->ptov_off = (size_t)obj_table[i] - phy;
rte_dpaa_bpid_info[bp_info->bpid].ptov_off
= bp_info->ptov_off;
}
@@ -264,10 +271,9 @@ dpaa_mbuf_get_count(const struct rte_mempool *mp)
}
static int
-dpaa_register_memory_area(const struct rte_mempool *mp,
- char *vaddr __rte_unused,
- rte_iova_t paddr __rte_unused,
- size_t len)
+dpaa_populate(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t paddr, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
struct dpaa_bp_info *bp_info;
unsigned int total_elt_sz;
@@ -282,14 +288,40 @@ dpaa_register_memory_area(const struct rte_mempool *mp,
bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
- DPAA_MEMPOOL_DEBUG("Req size %lu vs Available %u\n",
- len, total_elt_sz * mp->size);
+ DPAA_MEMPOOL_DEBUG("Req size %" PRIx64 " vs Available %u\n",
+ (uint64_t)len, total_elt_sz * mp->size);
/* Detect pool area has sufficient space for elements in this memzone */
if (len >= total_elt_sz * mp->size)
bp_info->flags |= DPAA_MPOOL_SINGLE_SEGMENT;
+ struct dpaa_memseg *ms;
+
+ /* For each memory chunk pinned to the Mempool, a linked list of the
+ * contained memsegs is created for searching when PA to VA
+ * conversion is required.
+ */
+ ms = rte_zmalloc(NULL, sizeof(struct dpaa_memseg), 0);
+ if (!ms) {
+ DPAA_MEMPOOL_ERR("Unable to allocate internal memory.");
+ DPAA_MEMPOOL_WARN("Fast Physical to Virtual Addr translation would not be available.");
+ /* If the element is not added, it only leads to a failed lookup and
+ * the logic falls back to the traditional DPDK memseg traversal code.
+ * So this is not a blocking error - but an error message is printed.
+ */
+ return 0;
+ }
- return 0;
+ ms->vaddr = vaddr;
+ ms->iova = paddr;
+ ms->len = len;
+ /* Head insertions are generally faster than tail insertions as the
+ * buffers pinned are picked from rear end.
+ */
+ TAILQ_INSERT_HEAD(&rte_dpaa_memsegs, ms, next);
+
+ return rte_mempool_op_populate_default(mp, max_objs, vaddr, paddr, len,
+ obj_cb, obj_cb_arg);
}
struct rte_mempool_ops dpaa_mpool_ops = {
@@ -299,7 +331,7 @@ struct rte_mempool_ops dpaa_mpool_ops = {
.enqueue = dpaa_mbuf_free_bulk,
.dequeue = dpaa_mbuf_alloc_bulk,
.get_count = dpaa_mbuf_get_count,
- .register_memory_area = dpaa_register_memory_area,
+ .populate = dpaa_populate,
};
MEMPOOL_REGISTER_OPS(dpaa_mpool_ops);
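
dpaa_populate() above records every memory chunk pinned to the pool in the new rte_dpaa_memsegs list so that physical-to-virtual address translation can be answered from this short list instead of walking all DPDK memsegs. Below is a rough sketch of the kind of lookup this list enables; the struct layout mirrors the fields assigned in the patch (vaddr, iova, len), but the type and function names are invented here for illustration - the real lookup lives in the dpaa bus driver.

    #include <sys/queue.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Illustrative only: a PA-to-VA lookup over a TAILQ of pinned chunks. */
    struct example_memseg {
        TAILQ_ENTRY(example_memseg) next;
        void *vaddr;
        uint64_t iova;
        size_t len;
    };
    TAILQ_HEAD(example_memseg_list, example_memseg);

    static void *
    example_pa_to_va(struct example_memseg_list *list, uint64_t paddr)
    {
        struct example_memseg *ms;

        /* Head insertion in the populate callback means recently pinned
         * chunks are checked first.
         */
        TAILQ_FOREACH(ms, list, next) {
            if (paddr >= ms->iova && paddr < ms->iova + ms->len)
                return (uint8_t *)ms->vaddr + (paddr - ms->iova);
        }
        return NULL; /* caller falls back to the generic memseg walk */
    }
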
diff --git a/drivers/mempool/dpaa/dpaa_mempool.h b/drivers/mempool/dpaa/dpaa_mempool.h
index 9435dd2f..092f326c 100644
--- a/drivers/mempool/dpaa/dpaa_mempool.h
+++ b/drivers/mempool/dpaa/dpaa_mempool.h
@@ -46,7 +46,7 @@ static inline void *
DPAA_MEMPOOL_PTOV(struct dpaa_bp_info *bp_info, uint64_t addr)
{
if (bp_info->ptov_off)
- return ((void *)(addr + bp_info->ptov_off));
+ return ((void *) (size_t)(addr + bp_info->ptov_off));
return rte_dpaa_mem_ptov(addr);
}
diff --git a/drivers/mempool/dpaa/meson.build b/drivers/mempool/dpaa/meson.build
new file mode 100644
index 00000000..9163b3db
--- /dev/null
+++ b/drivers/mempool/dpaa/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['bus_dpaa']
+sources = files('dpaa_mempool.c')
+
+# depends on dpaa bus which uses experimental API
+allow_experimental_apis = true
diff --git a/drivers/mempool/dpaa/rte_mempool_dpaa_version.map b/drivers/mempool/dpaa/rte_mempool_dpaa_version.map
index d05f274d..60bf50b2 100644
--- a/drivers/mempool/dpaa/rte_mempool_dpaa_version.map
+++ b/drivers/mempool/dpaa/rte_mempool_dpaa_version.map
@@ -2,6 +2,7 @@ DPDK_17.11 {
global:
rte_dpaa_bpid_info;
+ rte_dpaa_memsegs;
local: *;
};
diff --git a/drivers/mempool/dpaa2/Makefile b/drivers/mempool/dpaa2/Makefile
index efaac96e..9e4c87d7 100644
--- a/drivers/mempool/dpaa2/Makefile
+++ b/drivers/mempool/dpaa2/Makefile
@@ -9,14 +9,8 @@ include $(RTE_SDK)/mk/rte.vars.mk
#
LIB = librte_mempool_dpaa2.a
-ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT),y)
-CFLAGS += -O0 -g
-CFLAGS += "-Wno-error"
-else
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
-endif
-
CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
@@ -27,6 +21,9 @@ EXPORT_MAP := rte_mempool_dpaa2_version.map
# Lbrary version
LIBABIVER := 1
+# depends on fslmc bus which uses experimental API
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
# all source are stored in SRCS-y
#
SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += dpaa2_hw_mempool.c
@@ -34,4 +31,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += dpaa2_hw_mempool.c
LDLIBS += -lrte_bus_fslmc
LDLIBS += -lrte_eal -lrte_mempool -lrte_ring
+SYMLINK-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL)-include := rte_dpaa2_mempool.h
+
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
index 2bd62e88..7d0435f5 100644
--- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
@@ -21,16 +21,28 @@
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
+#include "rte_dpaa2_mempool.h"
#include <fslmc_logs.h>
#include <mc/fsl_dpbp.h>
#include <portal/dpaa2_hw_pvt.h>
#include <portal/dpaa2_hw_dpio.h>
#include "dpaa2_hw_mempool.h"
+#include "dpaa2_hw_mempool_logs.h"
struct dpaa2_bp_info rte_dpaa2_bpid_info[MAX_BPID];
static struct dpaa2_bp_list *h_bp_list;
+/* List of all the memseg information locally maintained in dpaa2 driver. This
+ * is to optimize the PA_to_VA searches until a better mechanism (algo) is
+ * available.
+ */
+struct dpaa2_memseg_list rte_dpaa2_memsegs
+ = TAILQ_HEAD_INITIALIZER(rte_dpaa2_memsegs);
+
+/* Dynamic logging identifier for mempool */
+int dpaa2_logtype_mempool;
+
static int
rte_hw_mbuf_create_pool(struct rte_mempool *mp)
{
@@ -44,30 +56,30 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
avail_dpbp = dpaa2_alloc_dpbp_dev();
if (!avail_dpbp) {
- PMD_DRV_LOG(ERR, "DPAA2 resources not available");
+ DPAA2_MEMPOOL_ERR("DPAA2 pool not available!");
return -ENOENT;
}
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret) {
- RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+ DPAA2_MEMPOOL_ERR("Failure in affining portal");
goto err1;
}
}
ret = dpbp_enable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
if (ret != 0) {
- PMD_INIT_LOG(ERR, "Resource enable failure with"
- " err code: %d\n", ret);
+ DPAA2_MEMPOOL_ERR("Resource enable failure with err code: %d",
+ ret);
goto err1;
}
ret = dpbp_get_attributes(&avail_dpbp->dpbp, CMD_PRI_LOW,
avail_dpbp->token, &dpbp_attr);
if (ret != 0) {
- PMD_INIT_LOG(ERR, "Resource read failure with"
- " err code: %d\n", ret);
+ DPAA2_MEMPOOL_ERR("Resource read failure with err code: %d",
+ ret);
goto err2;
}
@@ -75,7 +87,7 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
sizeof(struct dpaa2_bp_info),
RTE_CACHE_LINE_SIZE);
if (!bp_info) {
- PMD_INIT_LOG(ERR, "No heap memory available for bp_info");
+ DPAA2_MEMPOOL_ERR("Unable to allocate buffer pool memory");
ret = -ENOMEM;
goto err2;
}
@@ -84,7 +96,7 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
bp_list = rte_malloc(NULL, sizeof(struct dpaa2_bp_list),
RTE_CACHE_LINE_SIZE);
if (!bp_list) {
- PMD_INIT_LOG(ERR, "No heap memory available");
+ DPAA2_MEMPOOL_ERR("Unable to allocate buffer pool memory");
ret = -ENOMEM;
goto err3;
}
@@ -112,7 +124,7 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
sizeof(struct dpaa2_bp_info));
mp->pool_data = (void *)bp_info;
- PMD_INIT_LOG(DEBUG, "BP List created for bpid =%d", dpbp_attr.bpid);
+ DPAA2_MEMPOOL_DEBUG("BP List created for bpid =%d", dpbp_attr.bpid);
h_bp_list = bp_list;
return 0;
@@ -134,7 +146,7 @@ rte_hw_mbuf_free_pool(struct rte_mempool *mp)
struct dpaa2_dpbp_dev *dpbp_node;
if (!mp->pool_data) {
- PMD_DRV_LOG(ERR, "Not a valid dpaa22 pool");
+ DPAA2_MEMPOOL_ERR("Not a valid dpaa2 buffer pool");
return;
}
@@ -180,7 +192,7 @@ rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret != 0) {
- RTE_LOG(ERR, PMD, "Failed to allocate IO portal\n");
+ DPAA2_MEMPOOL_ERR("Failed to allocate IO portal");
return;
}
}
@@ -233,6 +245,35 @@ aligned:
}
}
+uint16_t
+rte_dpaa2_mbuf_pool_bpid(struct rte_mempool *mp)
+{
+ struct dpaa2_bp_info *bp_info;
+
+ bp_info = mempool_to_bpinfo(mp);
+ if (!(bp_info->bp_list)) {
+ RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
+ return -ENOMEM;
+ }
+
+ return bp_info->bpid;
+}
+
+struct rte_mbuf *
+rte_dpaa2_mbuf_from_buf_addr(struct rte_mempool *mp, void *buf_addr)
+{
+ struct dpaa2_bp_info *bp_info;
+
+ bp_info = mempool_to_bpinfo(mp);
+ if (!(bp_info->bp_list)) {
+ RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
+ return NULL;
+ }
+
+ return (struct rte_mbuf *)((uint8_t *)buf_addr -
+ bp_info->meta_data_size);
+}
+
int
rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
void **obj_table, unsigned int count)
@@ -242,7 +283,7 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
#endif
struct qbman_swp *swp;
uint16_t bpid;
- uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
+ size_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
int i, ret;
unsigned int n = 0;
struct dpaa2_bp_info *bp_info;
@@ -250,7 +291,7 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
bp_info = mempool_to_bpinfo(pool);
if (!(bp_info->bp_list)) {
- RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
+ DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
return -ENOENT;
}
@@ -259,7 +300,7 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret != 0) {
- RTE_LOG(ERR, PMD, "Failed to allocate IO portal\n");
+ DPAA2_MEMPOOL_ERR("Failed to allocate IO portal");
return ret;
}
}
@@ -270,18 +311,18 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
* then the remainder.
*/
if ((count - n) > DPAA2_MBUF_MAX_ACQ_REL) {
- ret = qbman_swp_acquire(swp, bpid, bufs,
+ ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
DPAA2_MBUF_MAX_ACQ_REL);
} else {
- ret = qbman_swp_acquire(swp, bpid, bufs,
+ ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
count - n);
}
/* In case of less than requested number of buffers available
* in pool, qbman_swp_acquire returns 0
*/
if (ret <= 0) {
- PMD_TX_LOG(ERR, "Buffer acquire failed with"
- " err code: %d", ret);
+ DPAA2_MEMPOOL_ERR("Buffer acquire failed with"
+ " err code: %d", ret);
/* The API expect the exact number of requested bufs */
/* Releasing all buffers allocated */
rte_dpaa2_mbuf_release(pool, obj_table, bpid,
@@ -290,10 +331,11 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
}
/* assigning mbuf from the acquired objects */
for (i = 0; (i < ret) && bufs[i]; i++) {
- DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], uint64_t);
+ DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], size_t);
obj_table[n] = (struct rte_mbuf *)
(bufs[i] - bp_info->meta_data_size);
- PMD_TX_LOG(DEBUG, "Acquired %p address %p from BMAN",
+ DPAA2_MEMPOOL_DP_DEBUG(
+ "Acquired %p address %p from BMAN\n",
(void *)bufs[i], (void *)obj_table[n]);
n++;
}
@@ -301,8 +343,8 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
alloc += n;
- PMD_TX_LOG(DEBUG, "Total = %d , req = %d done = %d",
- alloc, count, n);
+ DPAA2_MEMPOOL_DP_DEBUG("Total = %d , req = %d done = %d\n",
+ alloc, count, n);
#endif
return 0;
}
@@ -315,7 +357,7 @@ rte_hw_mbuf_free_bulk(struct rte_mempool *pool,
bp_info = mempool_to_bpinfo(pool);
if (!(bp_info->bp_list)) {
- RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
+ DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
return -ENOENT;
}
rte_dpaa2_mbuf_release(pool, obj_table, bp_info->bpid,
@@ -333,7 +375,7 @@ rte_hw_mbuf_get_count(const struct rte_mempool *mp)
struct dpaa2_dpbp_dev *dpbp_node;
if (!mp || !mp->pool_data) {
- RTE_LOG(ERR, PMD, "Invalid mempool provided\n");
+ DPAA2_MEMPOOL_ERR("Invalid mempool provided");
return 0;
}
@@ -343,16 +385,51 @@ rte_hw_mbuf_get_count(const struct rte_mempool *mp)
ret = dpbp_get_num_free_bufs(&dpbp_node->dpbp, CMD_PRI_LOW,
dpbp_node->token, &num_of_bufs);
if (ret) {
- RTE_LOG(ERR, PMD, "Unable to obtain free buf count (err=%d)\n",
- ret);
+ DPAA2_MEMPOOL_ERR("Unable to obtain free buf count (err=%d)",
+ ret);
return 0;
}
- RTE_LOG(DEBUG, PMD, "Free bufs = %u\n", num_of_bufs);
+ DPAA2_MEMPOOL_DP_DEBUG("Free bufs = %u\n", num_of_bufs);
return num_of_bufs;
}
+static int
+dpaa2_populate(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t paddr, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
+{
+ struct dpaa2_memseg *ms;
+
+ /* For each memory chunk pinned to the Mempool, a linked list of the
+ * contained memsegs is created for searching when PA to VA
+ * conversion is required.
+ */
+ ms = rte_zmalloc(NULL, sizeof(struct dpaa2_memseg), 0);
+ if (!ms) {
+ DPAA2_MEMPOOL_ERR("Unable to allocate internal memory.");
+ DPAA2_MEMPOOL_WARN("Fast Physical to Virtual Addr translation would not be available.");
+ /* If the element is not added, it only leads to a failed lookup and
+ * the logic falls back to the traditional DPDK memseg traversal code.
+ * So this is not a blocking error - but an error message is printed.
+ */
+ return 0;
+ }
+
+ ms->vaddr = vaddr;
+ ms->iova = paddr;
+ ms->len = len;
+ /* Head insertions are generally faster than tail insertions as the
+ * buffers pinned are picked from rear end.
+ */
+ TAILQ_INSERT_HEAD(&rte_dpaa2_memsegs, ms, next);
+
+ return rte_mempool_op_populate_default(mp, max_objs, vaddr, paddr, len,
+ obj_cb, obj_cb_arg);
+}
+
struct rte_mempool_ops dpaa2_mpool_ops = {
.name = DPAA2_MEMPOOL_OPS_NAME,
.alloc = rte_hw_mbuf_create_pool,
@@ -360,6 +437,14 @@ struct rte_mempool_ops dpaa2_mpool_ops = {
.enqueue = rte_hw_mbuf_free_bulk,
.dequeue = rte_dpaa2_mbuf_alloc_bulk,
.get_count = rte_hw_mbuf_get_count,
+ .populate = dpaa2_populate,
};
MEMPOOL_REGISTER_OPS(dpaa2_mpool_ops);
+
+RTE_INIT(dpaa2_mempool_init_log)
+{
+ dpaa2_logtype_mempool = rte_log_register("mempool.dpaa2");
+ if (dpaa2_logtype_mempool >= 0)
+ rte_log_set_level(dpaa2_logtype_mempool, RTE_LOG_NOTICE);
+}
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool_logs.h b/drivers/mempool/dpaa2/dpaa2_hw_mempool_logs.h
new file mode 100644
index 00000000..c79b3d1c
--- /dev/null
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool_logs.h
@@ -0,0 +1,38 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 NXP
+ */
+
+#ifndef _DPAA2_HW_MEMPOOL_LOGS_H_
+#define _DPAA2_HW_MEMPOOL_LOGS_H_
+
+extern int dpaa2_logtype_mempool;
+
+#define DPAA2_MEMPOOL_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa2_logtype_mempool, \
+ "mempool/dpaa2: " fmt "\n", ##args)
+
+/* Debug logs are with Function names */
+#define DPAA2_MEMPOOL_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa2_logtype_mempool, \
+ "mempool/dpaa2: %s(): " fmt "\n", __func__, ##args)
+
+#define DPAA2_MEMPOOL_INFO(fmt, args...) \
+ DPAA2_MEMPOOL_LOG(INFO, fmt, ## args)
+#define DPAA2_MEMPOOL_ERR(fmt, args...) \
+ DPAA2_MEMPOOL_LOG(ERR, fmt, ## args)
+#define DPAA2_MEMPOOL_WARN(fmt, args...) \
+ DPAA2_MEMPOOL_LOG(WARNING, fmt, ## args)
+
+/* DP Logs, toggled out at compile time if level lower than current level */
+#define DPAA2_MEMPOOL_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#define DPAA2_MEMPOOL_DP_DEBUG(fmt, args...) \
+ DPAA2_MEMPOOL_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA2_MEMPOOL_DP_INFO(fmt, args...) \
+ DPAA2_MEMPOOL_DP_LOG(INFO, fmt, ## args)
+#define DPAA2_MEMPOOL_DP_WARN(fmt, args...) \
+ DPAA2_MEMPOOL_DP_LOG(WARNING, fmt, ## args)
+
+#endif /* _DPAA2_HW_MEMPOOL_LOGS_H_ */
diff --git a/drivers/mempool/dpaa2/meson.build b/drivers/mempool/dpaa2/meson.build
new file mode 100644
index 00000000..90bab606
--- /dev/null
+++ b/drivers/mempool/dpaa2/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['bus_fslmc']
+sources = files('dpaa2_hw_mempool.c')
+
+# depends on fslmc bus which uses experimental API
+allow_experimental_apis = true
diff --git a/drivers/mempool/dpaa2/rte_dpaa2_mempool.h b/drivers/mempool/dpaa2/rte_dpaa2_mempool.h
new file mode 100644
index 00000000..4a22b7c4
--- /dev/null
+++ b/drivers/mempool/dpaa2/rte_dpaa2_mempool.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __RTE_DPAA2_MEMPOOL_H__
+#define __RTE_DPAA2_MEMPOOL_H__
+
+/**
+ * @file
+ *
+ * NXP specific mempool related functions.
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_mempool.h>
+
+/**
+ * Get BPID corresponding to the packet pool
+ *
+ * @param mp
+ * memory pool
+ *
+ * @return
+ * BPID of the buffer pool
+ */
+uint16_t
+rte_dpaa2_mbuf_pool_bpid(struct rte_mempool *mp);
+
+/**
+ * Get MBUF from the corresponding 'buf_addr'
+ *
+ * @param mp
+ * memory pool
+ * @param buf_addr
+ * The 'buf_addr' of the mbuf. This is the start buffer address
+ * of the packet buffer (mbuf).
+ *
+ * @return
+ * - MBUF pointer for success
+ * - NULL in case of error
+ */
+struct rte_mbuf *
+rte_dpaa2_mbuf_from_buf_addr(struct rte_mempool *mp, void *buf_addr);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_DPAA2_MEMPOOL_H__ */
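
The two functions declared above let an application that hands raw buffer addresses to DPAA2 hardware recover the hardware buffer-pool id and the owning mbuf. A short usage sketch follows, with placeholder variables and no real error handling, assuming the pool was created with the DPAA2 mempool ops:

    #include <rte_mbuf.h>
    #include <rte_mempool.h>
    #include <rte_dpaa2_mempool.h>

    static void
    example_dpaa2_pool_query(struct rte_mempool *mp, struct rte_mbuf *m)
    {
        /* BPID programmed into hardware descriptors for this pool */
        uint16_t bpid = rte_dpaa2_mbuf_pool_bpid(mp);

        /* Map the raw data buffer address back to its mbuf; returns NULL
         * if mp is not a configured DPAA2 pool.
         */
        struct rte_mbuf *owner =
            rte_dpaa2_mbuf_from_buf_addr(mp, m->buf_addr);

        (void)bpid;
        (void)owner;
    }
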
diff --git a/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map b/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map
index a8aa685c..b9d996a6 100644
--- a/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map
+++ b/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map
@@ -3,6 +3,15 @@ DPDK_17.05 {
rte_dpaa2_bpid_info;
rte_dpaa2_mbuf_alloc_bulk;
+ rte_dpaa2_memsegs;
local: *;
};
+
+DPDK_18.05 {
+ global:
+
+ rte_dpaa2_mbuf_from_buf_addr;
+ rte_dpaa2_mbuf_pool_bpid;
+
+} DPDK_17.05;
diff --git a/drivers/mempool/meson.build b/drivers/mempool/meson.build
index 59918560..4527d980 100644
--- a/drivers/mempool/meson.build
+++ b/drivers/mempool/meson.build
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-drivers = ['ring', 'stack', 'octeontx']
+drivers = ['bucket', 'dpaa', 'dpaa2', 'octeontx', 'ring', 'stack']
std_deps = ['mempool']
config_flag_fmt = 'RTE_LIBRTE_@0@_MEMPOOL'
driver_name_fmt = 'rte_mempool_@0@'
diff --git a/drivers/mempool/octeontx/Makefile b/drivers/mempool/octeontx/Makefile
index dfc373e6..a3e1dce8 100644
--- a/drivers/mempool/octeontx/Makefile
+++ b/drivers/mempool/octeontx/Makefile
@@ -10,6 +10,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_mempool_octeontx.a
CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx/
EXPORT_MAP := rte_mempool_octeontx_version.map
LIBABIVER := 1
@@ -17,8 +18,6 @@ LIBABIVER := 1
#
# all source are stored in SRCS-y
#
-SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += octeontx_ssovf.c
-SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += octeontx_mbox.c
SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += octeontx_fpavf.c
SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += rte_mempool_octeontx.c
@@ -36,6 +35,6 @@ CFLAGS_rte_mempool_octeontx.o += -Ofast
endif
LDLIBS += -lrte_eal -lrte_mempool -lrte_ring -lrte_mbuf
-LDLIBS += -lrte_bus_pci
+LDLIBS += -lrte_bus_pci -lrte_common_octeontx
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/mempool/octeontx/meson.build b/drivers/mempool/octeontx/meson.build
index 1e894a56..3baaf7db 100644
--- a/drivers/mempool/octeontx/meson.build
+++ b/drivers/mempool/octeontx/meson.build
@@ -1,10 +1,8 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Cavium, Inc
-sources = files('octeontx_ssovf.c',
- 'octeontx_mbox.c',
- 'octeontx_fpavf.c',
+sources = files('octeontx_fpavf.c',
'rte_mempool_octeontx.c'
)
-deps += ['mbuf', 'bus_pci']
+deps += ['mbuf', 'bus_pci', 'common_octeontx']
diff --git a/drivers/mempool/octeontx/octeontx_fpavf.c b/drivers/mempool/octeontx/octeontx_fpavf.c
index 61c72c7c..4cf387e8 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.c
+++ b/drivers/mempool/octeontx/octeontx_fpavf.c
@@ -108,17 +108,11 @@ static struct octeontx_fpadev fpadev;
int octeontx_logtype_fpavf;
int octeontx_logtype_fpavf_mbox;
-RTE_INIT(otx_pool_init_log);
-static void
-otx_pool_init_log(void)
+RTE_INIT(otx_pool_init_log)
{
octeontx_logtype_fpavf = rte_log_register("pmd.mempool.octeontx");
if (octeontx_logtype_fpavf >= 0)
rte_log_set_level(octeontx_logtype_fpavf, RTE_LOG_NOTICE);
-
- octeontx_logtype_fpavf_mbox = rte_log_register("pmd.mempool.octeontx.mbox");
- if (octeontx_logtype_fpavf_mbox >= 0)
- rte_log_set_level(octeontx_logtype_fpavf_mbox, RTE_LOG_NOTICE);
}
/* lock is taken by caller */
@@ -247,13 +241,13 @@ octeontx_fpapf_pool_setup(unsigned int gpool, unsigned int buf_size,
POOL_LTYPE(0x2) | POOL_STYPE(0) | POOL_SET_NAT_ALIGN |
POOL_ENA;
- cfg.aid = 0;
+ cfg.aid = FPA_AURA_IDX(gpool);
cfg.pool_cfg = reg;
cfg.pool_stack_base = phys_addr;
cfg.pool_stack_end = phys_addr + memsz;
cfg.aura_cfg = (1 << 9);
- ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
+ ret = octeontx_mbox_send(&hdr, &cfg,
sizeof(struct octeontx_mbox_fpa_cfg),
&resp, sizeof(resp));
if (ret < 0) {
@@ -298,7 +292,7 @@ octeontx_fpapf_pool_destroy(unsigned int gpool_index)
cfg.pool_stack_end = 0;
cfg.aura_cfg = 0;
- ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
+ ret = octeontx_mbox_send(&hdr, &cfg,
sizeof(struct octeontx_mbox_fpa_cfg),
&resp, sizeof(resp));
if (ret < 0) {
@@ -331,15 +325,16 @@ octeontx_fpapf_aura_attach(unsigned int gpool_index)
hdr.vfid = gpool_index;
hdr.res_code = 0;
memset(&cfg, 0x0, sizeof(struct octeontx_mbox_fpa_cfg));
- cfg.aid = gpool_index; /* gpool is guara */
+ cfg.aid = FPA_AURA_IDX(gpool_index);
- ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
+ ret = octeontx_mbox_send(&hdr, &cfg,
sizeof(struct octeontx_mbox_fpa_cfg),
&resp, sizeof(resp));
if (ret < 0) {
fpavf_log_err("Could not attach fpa ");
fpavf_log_err("aura %d to pool %d. Err=%d. FuncErr=%d\n",
- gpool_index, gpool_index, ret, hdr.res_code);
+ FPA_AURA_IDX(gpool_index), gpool_index, ret,
+ hdr.res_code);
ret = -EACCES;
goto err;
}
@@ -359,14 +354,15 @@ octeontx_fpapf_aura_detach(unsigned int gpool_index)
goto err;
}
- cfg.aid = gpool_index; /* gpool is gaura */
+ cfg.aid = FPA_AURA_IDX(gpool_index);
hdr.coproc = FPA_COPROC;
hdr.msg = FPA_DETACHAURA;
hdr.vfid = gpool_index;
- ret = octeontx_ssovf_mbox_send(&hdr, &cfg, sizeof(cfg), NULL, 0);
+ ret = octeontx_mbox_send(&hdr, &cfg, sizeof(cfg), NULL, 0);
if (ret < 0) {
fpavf_log_err("Couldn't detach FPA aura %d Err=%d FuncErr=%d\n",
- gpool_index, ret, hdr.res_code);
+ FPA_AURA_IDX(gpool_index), ret,
+ hdr.res_code);
ret = -EINVAL;
}
@@ -410,7 +406,7 @@ octeontx_fpapf_start_count(uint16_t gpool_index)
hdr.coproc = FPA_COPROC;
hdr.msg = FPA_START_COUNT;
hdr.vfid = gpool_index;
- ret = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0);
+ ret = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
if (ret < 0) {
fpavf_log_err("Could not start buffer counting for ");
fpavf_log_err("FPA pool %d. Err=%d. FuncErr=%d\n",
@@ -473,6 +469,7 @@ octeontx_fpa_bufpool_free_count(uintptr_t handle)
{
uint64_t cnt, limit, avail;
uint8_t gpool;
+ uint16_t gaura;
uintptr_t pool_bar;
if (unlikely(!octeontx_fpa_handle_valid(handle)))
@@ -480,14 +477,16 @@ octeontx_fpa_bufpool_free_count(uintptr_t handle)
/* get the gpool */
gpool = octeontx_fpa_bufpool_gpool(handle);
+ /* get the aura */
+ gaura = octeontx_fpa_bufpool_gaura(handle);
/* Get pool bar address from handle */
pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;
cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
- FPA_VF_VHAURA_CNT(gpool)));
+ FPA_VF_VHAURA_CNT(gaura)));
limit = fpavf_read64((void *)((uintptr_t)pool_bar +
- FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+ FPA_VF_VHAURA_CNT_LIMIT(gaura)));
avail = fpavf_read64((void *)((uintptr_t)pool_bar +
FPA_VF_VHPOOL_AVAILABLE(gpool)));
@@ -500,6 +499,7 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
unsigned int buf_offset, int node_id)
{
unsigned int gpool;
+ unsigned int gaura;
uintptr_t gpool_handle;
uintptr_t pool_bar;
int res;
@@ -549,16 +549,18 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
goto error_pool_destroy;
}
+ gaura = FPA_AURA_IDX(gpool);
+
/* Release lock */
rte_spinlock_unlock(&fpadev.lock);
/* populate AURA registers */
fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
- FPA_VF_VHAURA_CNT(gpool)));
+ FPA_VF_VHAURA_CNT(gaura)));
fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
- FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+ FPA_VF_VHAURA_CNT_LIMIT(gaura)));
fpavf_write64(object_count + 1, (void *)((uintptr_t)pool_bar +
- FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
+ FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));
octeontx_fpapf_start_count(gpool);
@@ -585,6 +587,7 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
uint64_t sz;
uint64_t cnt, avail;
uint8_t gpool;
+ uint16_t gaura;
uintptr_t pool_bar;
int ret;
@@ -598,13 +601,15 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
/* get the pool */
gpool = octeontx_fpa_bufpool_gpool(handle);
+ /* get the aura */
+ gaura = octeontx_fpa_bufpool_gaura(handle);
/* Get pool bar address from handle */
pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;
/* Check for no outstanding buffers */
cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
- FPA_VF_VHAURA_CNT(gpool)));
+ FPA_VF_VHAURA_CNT(gaura)));
if (cnt) {
fpavf_log_dbg("buffer exist in pool cnt %" PRId64 "\n", cnt);
return -EBUSY;
@@ -617,9 +622,9 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
/* Prepare to empty the entire POOL */
fpavf_write64(avail, (void *)((uintptr_t)pool_bar +
- FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+ FPA_VF_VHAURA_CNT_LIMIT(gaura)));
fpavf_write64(avail + 1, (void *)((uintptr_t)pool_bar +
- FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
+ FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));
/* Empty the pool */
/* Invalidate the POOL */
@@ -631,11 +636,11 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
/* Yank a buffer from the pool */
node = (void *)(uintptr_t)
fpavf_read64((void *)
- (pool_bar + FPA_VF_VHAURA_OP_ALLOC(gpool)));
+ (pool_bar + FPA_VF_VHAURA_OP_ALLOC(gaura)));
if (node == NULL) {
fpavf_log_err("GAURA[%u] missing %" PRIx64 " buf\n",
- gpool, avail);
+ gaura, avail);
break;
}
@@ -669,9 +674,9 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
/* Deactivate the AURA */
fpavf_write64(0, (void *)((uintptr_t)pool_bar +
- FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+ FPA_VF_VHAURA_CNT_LIMIT(gaura)));
fpavf_write64(0, (void *)((uintptr_t)pool_bar +
- FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
+ FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));
ret = octeontx_fpapf_aura_detach(gpool);
if (ret) {
diff --git a/drivers/mempool/octeontx/octeontx_fpavf.h b/drivers/mempool/octeontx/octeontx_fpavf.h
index b76f40e7..b00be137 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.h
+++ b/drivers/mempool/octeontx/octeontx_fpavf.h
@@ -14,6 +14,7 @@
#define FPA_VF_MAX 32
#define FPA_GPOOL_MASK (FPA_VF_MAX-1)
+#define FPA_GAURA_SHIFT 4
/* FPA VF register offsets */
#define FPA_VF_INT(x) (0x200ULL | ((x) << 22))
@@ -36,6 +37,7 @@
#define FPA_VF_FREE_ADDRS_S(x, y, z) \
((x) | (((y) & 0x1ff) << 3) | ((((z) & 1)) << 14))
+#define FPA_AURA_IDX(gpool) (gpool << FPA_GAURA_SHIFT)
/* FPA VF register offsets from VF_BAR4, size 2 MByte */
#define FPA_VF_MSIX_VEC_ADDR 0x00000
#define FPA_VF_MSIX_VEC_CTL 0x00008
@@ -102,4 +104,11 @@ octeontx_fpa_bufpool_gpool(uintptr_t handle)
{
return (uint8_t)handle & FPA_GPOOL_MASK;
}
+
+static __rte_always_inline uint16_t
+octeontx_fpa_bufpool_gaura(uintptr_t handle)
+{
+ return octeontx_fpa_bufpool_gpool(handle) << FPA_GAURA_SHIFT;
+}
+
#endif /* __OCTEONTX_FPAVF_H__ */
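
The new FPA_AURA_IDX() macro and octeontx_fpa_bufpool_gaura() helper map a group pool (gpool) index to the aura index used for the VHAURA register accesses: with FPA_GAURA_SHIFT equal to 4, gpool N corresponds to aura N * 16, so gpool 3 uses aura 48. A tiny stand-alone illustration of the same arithmetic (names prefixed with ex_/EX_ are invented, not part of the patch):

    #include <stdint.h>

    #define EX_FPA_GAURA_SHIFT 4
    #define EX_FPA_AURA_IDX(gpool) ((gpool) << EX_FPA_GAURA_SHIFT)

    /* gpool 0 -> aura 0, gpool 1 -> aura 16, gpool 3 -> aura 48, ... */
    static inline uint16_t
    ex_gpool_to_gaura(uint8_t gpool)
    {
        return (uint16_t)EX_FPA_AURA_IDX(gpool);
    }
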
diff --git a/drivers/mempool/octeontx/octeontx_mbox.c b/drivers/mempool/octeontx/octeontx_mbox.c
deleted file mode 100644
index f8cb6a45..00000000
--- a/drivers/mempool/octeontx/octeontx_mbox.c
+++ /dev/null
@@ -1,214 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017 Cavium, Inc
- */
-
-#include <string.h>
-
-#include <rte_atomic.h>
-#include <rte_common.h>
-#include <rte_cycles.h>
-#include <rte_io.h>
-#include <rte_spinlock.h>
-
-#include "octeontx_mbox.h"
-#include "octeontx_pool_logs.h"
-
-/* Mbox operation timeout in seconds */
-#define MBOX_WAIT_TIME_SEC 3
-#define MAX_RAM_MBOX_LEN ((SSOW_BAR4_LEN >> 1) - 8 /* Mbox header */)
-
-/* Mbox channel state */
-enum {
- MBOX_CHAN_STATE_REQ = 1,
- MBOX_CHAN_STATE_RES = 0,
-};
-
-/* Response messages */
-enum {
- MBOX_RET_SUCCESS,
- MBOX_RET_INVALID,
- MBOX_RET_INTERNAL_ERR,
-};
-
-struct mbox {
- int init_once;
- uint8_t *ram_mbox_base; /* Base address of mbox message stored in ram */
- uint8_t *reg; /* Store to this register triggers PF mbox interrupt */
- uint16_t tag_own; /* Last tag which was written to own channel */
- rte_spinlock_t lock;
-};
-
-static struct mbox octeontx_mbox;
-
-/*
- * Structure used for mbox synchronization
- * This structure sits at the begin of Mbox RAM and used as main
- * synchronization point for channel communication
- */
-struct mbox_ram_hdr {
- union {
- uint64_t u64;
- struct {
- uint8_t chan_state : 1;
- uint8_t coproc : 7;
- uint8_t msg;
- uint8_t vfid;
- uint8_t res_code;
- uint16_t tag;
- uint16_t len;
- };
- };
-};
-
-static inline void
-mbox_msgcpy(volatile uint8_t *d, volatile const uint8_t *s, uint16_t size)
-{
- uint16_t i;
-
- for (i = 0; i < size; i++)
- d[i] = s[i];
-}
-
-static inline void
-mbox_send_request(struct mbox *m, struct octeontx_mbox_hdr *hdr,
- const void *txmsg, uint16_t txsize)
-{
- struct mbox_ram_hdr old_hdr;
- struct mbox_ram_hdr new_hdr = { {0} };
- uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base;
- uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr);
-
- /*
- * Initialize the channel with the tag left by last send.
- * On success full mbox send complete, PF increments the tag by one.
- * The sender can validate integrity of PF message with this scheme
- */
- old_hdr.u64 = rte_read64(ram_mbox_hdr);
- m->tag_own = (old_hdr.tag + 2) & (~0x1ul); /* next even number */
-
- /* Copy msg body */
- if (txmsg)
- mbox_msgcpy(ram_mbox_msg, txmsg, txsize);
-
- /* Prepare new hdr */
- new_hdr.chan_state = MBOX_CHAN_STATE_REQ;
- new_hdr.coproc = hdr->coproc;
- new_hdr.msg = hdr->msg;
- new_hdr.vfid = hdr->vfid;
- new_hdr.tag = m->tag_own;
- new_hdr.len = txsize;
-
- /* Write the msg header */
- rte_write64(new_hdr.u64, ram_mbox_hdr);
- rte_smp_wmb();
- /* Notify PF about the new msg - write to MBOX reg generates PF IRQ */
- rte_write64(0, m->reg);
-}
-
-static inline int
-mbox_wait_response(struct mbox *m, struct octeontx_mbox_hdr *hdr,
- void *rxmsg, uint16_t rxsize)
-{
- int res = 0, wait;
- uint16_t len;
- struct mbox_ram_hdr rx_hdr;
- uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base;
- uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr);
-
- /* Wait for response */
- wait = MBOX_WAIT_TIME_SEC * 1000 * 10;
- while (wait > 0) {
- rte_delay_us(100);
- rx_hdr.u64 = rte_read64(ram_mbox_hdr);
- if (rx_hdr.chan_state == MBOX_CHAN_STATE_RES)
- break;
- --wait;
- }
-
- hdr->res_code = rx_hdr.res_code;
- m->tag_own++;
-
- /* Timeout */
- if (wait <= 0) {
- res = -ETIMEDOUT;
- goto error;
- }
-
- /* Tag mismatch */
- if (m->tag_own != rx_hdr.tag) {
- res = -EINVAL;
- goto error;
- }
-
- /* PF nacked the msg */
- if (rx_hdr.res_code != MBOX_RET_SUCCESS) {
- res = -EBADMSG;
- goto error;
- }
-
- len = RTE_MIN(rx_hdr.len, rxsize);
- if (rxmsg)
- mbox_msgcpy(rxmsg, ram_mbox_msg, len);
-
- return len;
-
-error:
- mbox_log_err("Failed to send mbox(%d/%d) coproc=%d msg=%d ret=(%d,%d)",
- m->tag_own, rx_hdr.tag, hdr->coproc, hdr->msg, res,
- hdr->res_code);
- return res;
-}
-
-static inline int
-mbox_send(struct mbox *m, struct octeontx_mbox_hdr *hdr, const void *txmsg,
- uint16_t txsize, void *rxmsg, uint16_t rxsize)
-{
- int res = -EINVAL;
-
- if (m->init_once == 0 || hdr == NULL ||
- txsize > MAX_RAM_MBOX_LEN || rxsize > MAX_RAM_MBOX_LEN) {
- mbox_log_err("Invalid init_once=%d hdr=%p txsz=%d rxsz=%d",
- m->init_once, hdr, txsize, rxsize);
- return res;
- }
-
- rte_spinlock_lock(&m->lock);
-
- mbox_send_request(m, hdr, txmsg, txsize);
- res = mbox_wait_response(m, hdr, rxmsg, rxsize);
-
- rte_spinlock_unlock(&m->lock);
- return res;
-}
-
-static inline int
-mbox_setup(struct mbox *m)
-{
- if (unlikely(m->init_once == 0)) {
- rte_spinlock_init(&m->lock);
- m->ram_mbox_base = octeontx_ssovf_bar(OCTEONTX_SSO_HWS, 0, 4);
- m->reg = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, 0, 0);
- m->reg += SSO_VHGRP_PF_MBOX(1);
-
- if (m->ram_mbox_base == NULL || m->reg == NULL) {
- mbox_log_err("Invalid ram_mbox_base=%p or reg=%p",
- m->ram_mbox_base, m->reg);
- return -EINVAL;
- }
- m->init_once = 1;
- }
- return 0;
-}
-
-int
-octeontx_ssovf_mbox_send(struct octeontx_mbox_hdr *hdr, void *txdata,
- uint16_t txlen, void *rxdata, uint16_t rxlen)
-{
- struct mbox *m = &octeontx_mbox;
-
- RTE_BUILD_BUG_ON(sizeof(struct mbox_ram_hdr) != 8);
- if (rte_eal_process_type() != RTE_PROC_PRIMARY || mbox_setup(m))
- return -EINVAL;
-
- return mbox_send(m, hdr, txdata, txlen, rxdata, rxlen);
-}
diff --git a/drivers/mempool/octeontx/octeontx_mbox.h b/drivers/mempool/octeontx/octeontx_mbox.h
deleted file mode 100644
index 1b056071..00000000
--- a/drivers/mempool/octeontx/octeontx_mbox.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017 Cavium, Inc
- */
-
-#ifndef __OCTEONTX_MBOX_H__
-#define __OCTEONTX_MBOX_H__
-
-#include <rte_common.h>
-
-#define SSOW_BAR4_LEN (64 * 1024)
-#define SSO_VHGRP_PF_MBOX(x) (0x200ULL | ((x) << 3))
-
-struct octeontx_ssovf_info {
- uint16_t domain; /* Domain id */
- uint8_t total_ssovfs; /* Total sso groups available in domain */
- uint8_t total_ssowvfs;/* Total sso hws available in domain */
-};
-
-enum octeontx_ssovf_type {
- OCTEONTX_SSO_GROUP, /* SSO group vf */
- OCTEONTX_SSO_HWS, /* SSO hardware workslot vf */
-};
-
-struct octeontx_mbox_hdr {
- uint16_t vfid; /* VF index or pf resource index local to the domain */
- uint8_t coproc; /* Coprocessor id */
- uint8_t msg; /* Message id */
- uint8_t res_code; /* Functional layer response code */
-};
-
-int octeontx_ssovf_info(struct octeontx_ssovf_info *info);
-void *octeontx_ssovf_bar(enum octeontx_ssovf_type, uint8_t id, uint8_t bar);
-int octeontx_ssovf_mbox_send(struct octeontx_mbox_hdr *hdr,
- void *txdata, uint16_t txlen, void *rxdata, uint16_t rxlen);
-
-#endif /* __OCTEONTX_MBOX_H__ */
diff --git a/drivers/mempool/octeontx/octeontx_pool_logs.h b/drivers/mempool/octeontx/octeontx_pool_logs.h
index 95865192..7b4e1b38 100644
--- a/drivers/mempool/octeontx/octeontx_pool_logs.h
+++ b/drivers/mempool/octeontx/octeontx_pool_logs.h
@@ -11,21 +11,12 @@
rte_log(RTE_LOG_ ## level, octeontx_logtype_fpavf,\
"%s() line %u: " fmt "\n", __func__, __LINE__, ## args)
-#define MBOX_LOG(level, fmt, args...) \
- rte_log(RTE_LOG_ ## level, octeontx_logtype_fpavf_mbox,\
- "%s() line %u: " fmt "\n", __func__, __LINE__, ## args)
-
#define fpavf_log_info(fmt, ...) FPAVF_LOG(INFO, fmt, ##__VA_ARGS__)
#define fpavf_log_dbg(fmt, ...) FPAVF_LOG(DEBUG, fmt, ##__VA_ARGS__)
#define fpavf_log_err(fmt, ...) FPAVF_LOG(ERR, fmt, ##__VA_ARGS__)
#define fpavf_func_trace fpavf_log_dbg
-#define mbox_log_info(fmt, ...) MBOX_LOG(INFO, fmt, ##__VA_ARGS__)
-#define mbox_log_dbg(fmt, ...) MBOX_LOG(DEBUG, fmt, ##__VA_ARGS__)
-#define mbox_log_err(fmt, ...) MBOX_LOG(ERR, fmt, ##__VA_ARGS__)
-#define mbox_func_trace mbox_log_dbg
extern int octeontx_logtype_fpavf;
-extern int octeontx_logtype_fpavf_mbox;
#endif /* __OCTEONTX_POOL_LOGS_H__*/
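
[Editor's note, not part of the patch] After this change only the FPAVF logging macros remain in the header. A minimal usage sketch, assuming octeontx_logtype_fpavf has been registered elsewhere in the driver via rte_log_register(); the function name and message text below are purely illustrative:

    /* Hypothetical caller, for illustration only. */
    static int
    fpavf_check_bar(const void *bar)
    {
        if (bar == NULL) {
            fpavf_log_err("pool BAR not mapped");
            return -EINVAL;
        }
        fpavf_log_dbg("pool BAR mapped at %p", bar);
        return 0;
    }
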
diff --git a/drivers/mempool/octeontx/octeontx_ssovf.c b/drivers/mempool/octeontx/octeontx_ssovf.c
deleted file mode 100644
index 97b24066..00000000
--- a/drivers/mempool/octeontx/octeontx_ssovf.c
+++ /dev/null
@@ -1,271 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017 Cavium, Inc
- */
-
-#include <rte_atomic.h>
-#include <rte_common.h>
-#include <rte_eal.h>
-#include <rte_io.h>
-#include <rte_pci.h>
-#include <rte_bus_pci.h>
-
-#include "octeontx_mbox.h"
-#include "octeontx_pool_logs.h"
-
-#define PCI_VENDOR_ID_CAVIUM 0x177D
-#define PCI_DEVICE_ID_OCTEONTX_SSOGRP_VF 0xA04B
-#define PCI_DEVICE_ID_OCTEONTX_SSOWS_VF 0xA04D
-
-#define SSO_MAX_VHGRP (64)
-#define SSO_MAX_VHWS (32)
-
-#define SSO_VHGRP_AQ_THR (0x1E0ULL)
-
-struct ssovf_res {
- uint16_t domain;
- uint16_t vfid;
- void *bar0;
- void *bar2;
-};
-
-struct ssowvf_res {
- uint16_t domain;
- uint16_t vfid;
- void *bar0;
- void *bar2;
- void *bar4;
-};
-
-struct ssowvf_identify {
- uint16_t domain;
- uint16_t vfid;
-};
-
-struct ssodev {
- uint8_t total_ssovfs;
- uint8_t total_ssowvfs;
- struct ssovf_res grp[SSO_MAX_VHGRP];
- struct ssowvf_res hws[SSO_MAX_VHWS];
-};
-
-static struct ssodev sdev;
-
-/* Interface functions */
-int
-octeontx_ssovf_info(struct octeontx_ssovf_info *info)
-{
- uint8_t i;
- uint16_t domain;
-
- if (rte_eal_process_type() != RTE_PROC_PRIMARY || info == NULL)
- return -EINVAL;
-
- if (sdev.total_ssovfs == 0 || sdev.total_ssowvfs == 0)
- return -ENODEV;
-
- domain = sdev.grp[0].domain;
- for (i = 0; i < sdev.total_ssovfs; i++) {
- /* Check vfid's are contiguous and belong to same domain */
- if (sdev.grp[i].vfid != i ||
- sdev.grp[i].bar0 == NULL ||
- sdev.grp[i].domain != domain) {
- mbox_log_err("GRP error, vfid=%d/%d domain=%d/%d %p",
- i, sdev.grp[i].vfid,
- domain, sdev.grp[i].domain,
- sdev.grp[i].bar0);
- return -EINVAL;
- }
- }
-
- for (i = 0; i < sdev.total_ssowvfs; i++) {
- /* Check vfid's are contiguous and belong to same domain */
- if (sdev.hws[i].vfid != i ||
- sdev.hws[i].bar0 == NULL ||
- sdev.hws[i].domain != domain) {
- mbox_log_err("HWS error, vfid=%d/%d domain=%d/%d %p",
- i, sdev.hws[i].vfid,
- domain, sdev.hws[i].domain,
- sdev.hws[i].bar0);
- return -EINVAL;
- }
- }
-
- info->domain = domain;
- info->total_ssovfs = sdev.total_ssovfs;
- info->total_ssowvfs = sdev.total_ssowvfs;
- return 0;
-}
-
-void*
-octeontx_ssovf_bar(enum octeontx_ssovf_type type, uint8_t id, uint8_t bar)
-{
- if (rte_eal_process_type() != RTE_PROC_PRIMARY ||
- type > OCTEONTX_SSO_HWS)
- return NULL;
-
- if (type == OCTEONTX_SSO_GROUP) {
- if (id >= sdev.total_ssovfs)
- return NULL;
- } else {
- if (id >= sdev.total_ssowvfs)
- return NULL;
- }
-
- if (type == OCTEONTX_SSO_GROUP) {
- switch (bar) {
- case 0:
- return sdev.grp[id].bar0;
- case 2:
- return sdev.grp[id].bar2;
- default:
- return NULL;
- }
- } else {
- switch (bar) {
- case 0:
- return sdev.hws[id].bar0;
- case 2:
- return sdev.hws[id].bar2;
- case 4:
- return sdev.hws[id].bar4;
- default:
- return NULL;
- }
- }
-}
-
-/* SSOWVF pcie device aka event port probe */
-
-static int
-ssowvf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
-{
- uint16_t vfid;
- struct ssowvf_res *res;
- struct ssowvf_identify *id;
-
- RTE_SET_USED(pci_drv);
-
- /* For secondary processes, the primary has done all the work */
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return 0;
-
- if (pci_dev->mem_resource[0].addr == NULL ||
- pci_dev->mem_resource[2].addr == NULL ||
- pci_dev->mem_resource[4].addr == NULL) {
- mbox_log_err("Empty bars %p %p %p",
- pci_dev->mem_resource[0].addr,
- pci_dev->mem_resource[2].addr,
- pci_dev->mem_resource[4].addr);
- return -ENODEV;
- }
-
- if (pci_dev->mem_resource[4].len != SSOW_BAR4_LEN) {
- mbox_log_err("Bar4 len mismatch %d != %d",
- SSOW_BAR4_LEN, (int)pci_dev->mem_resource[4].len);
- return -EINVAL;
- }
-
- id = pci_dev->mem_resource[4].addr;
- vfid = id->vfid;
- if (vfid >= SSO_MAX_VHWS) {
- mbox_log_err("Invalid vfid(%d/%d)", vfid, SSO_MAX_VHWS);
- return -EINVAL;
- }
-
- res = &sdev.hws[vfid];
- res->vfid = vfid;
- res->bar0 = pci_dev->mem_resource[0].addr;
- res->bar2 = pci_dev->mem_resource[2].addr;
- res->bar4 = pci_dev->mem_resource[4].addr;
- res->domain = id->domain;
-
- sdev.total_ssowvfs++;
- rte_wmb();
- mbox_log_dbg("Domain=%d hws=%d total_ssowvfs=%d", res->domain,
- res->vfid, sdev.total_ssowvfs);
- return 0;
-}
-
-static const struct rte_pci_id pci_ssowvf_map[] = {
- {
- RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
- PCI_DEVICE_ID_OCTEONTX_SSOWS_VF)
- },
- {
- .vendor_id = 0,
- },
-};
-
-static struct rte_pci_driver pci_ssowvf = {
- .id_table = pci_ssowvf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
- .probe = ssowvf_probe,
-};
-
-RTE_PMD_REGISTER_PCI(octeontx_ssowvf, pci_ssowvf);
-
-/* SSOVF pcie device aka event queue probe */
-
-static int
-ssovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
-{
- uint64_t val;
- uint16_t vfid;
- uint8_t *idreg;
- struct ssovf_res *res;
-
- RTE_SET_USED(pci_drv);
-
- /* For secondary processes, the primary has done all the work */
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return 0;
-
- if (pci_dev->mem_resource[0].addr == NULL ||
- pci_dev->mem_resource[2].addr == NULL) {
- mbox_log_err("Empty bars %p %p",
- pci_dev->mem_resource[0].addr,
- pci_dev->mem_resource[2].addr);
- return -ENODEV;
- }
- idreg = pci_dev->mem_resource[0].addr;
- idreg += SSO_VHGRP_AQ_THR;
- val = rte_read64(idreg);
-
- /* Write back the default value of aq_thr */
- rte_write64((1ULL << 33) - 1, idreg);
- vfid = (val >> 16) & 0xffff;
- if (vfid >= SSO_MAX_VHGRP) {
- mbox_log_err("Invalid vfid (%d/%d)", vfid, SSO_MAX_VHGRP);
- return -EINVAL;
- }
-
- res = &sdev.grp[vfid];
- res->vfid = vfid;
- res->bar0 = pci_dev->mem_resource[0].addr;
- res->bar2 = pci_dev->mem_resource[2].addr;
- res->domain = val & 0xffff;
-
- sdev.total_ssovfs++;
- rte_wmb();
- mbox_log_dbg("Domain=%d group=%d total_ssovfs=%d", res->domain,
- res->vfid, sdev.total_ssovfs);
- return 0;
-}
-
-static const struct rte_pci_id pci_ssovf_map[] = {
- {
- RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
- PCI_DEVICE_ID_OCTEONTX_SSOGRP_VF)
- },
- {
- .vendor_id = 0,
- },
-};
-
-static struct rte_pci_driver pci_ssovf = {
- .id_table = pci_ssovf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
- .probe = ssovf_probe,
-};
-
-RTE_PMD_REGISTER_PCI(octeontx_ssovf, pci_ssovf);
diff --git a/drivers/mempool/octeontx/rte_mempool_octeontx.c b/drivers/mempool/octeontx/rte_mempool_octeontx.c
index d143d05c..ab94dfe9 100644
--- a/drivers/mempool/octeontx/rte_mempool_octeontx.c
+++ b/drivers/mempool/octeontx/rte_mempool_octeontx.c
@@ -126,28 +126,66 @@ octeontx_fpavf_get_count(const struct rte_mempool *mp)
return octeontx_fpa_bufpool_free_count(pool);
}
-static int
-octeontx_fpavf_get_capabilities(const struct rte_mempool *mp,
- unsigned int *flags)
+static ssize_t
+octeontx_fpavf_calc_mem_size(const struct rte_mempool *mp,
+ uint32_t obj_num, uint32_t pg_shift,
+ size_t *min_chunk_size, size_t *align)
{
- RTE_SET_USED(mp);
- *flags |= (MEMPOOL_F_CAPA_PHYS_CONTIG |
- MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS);
- return 0;
+ ssize_t mem_size;
+
+ /*
+ * Simply need space for one more object to be able to
+ * fulfil alignment requirements.
+ */
+ mem_size = rte_mempool_op_calc_mem_size_default(mp, obj_num + 1,
+ pg_shift,
+ min_chunk_size, align);
+ if (mem_size >= 0) {
+ /*
+ * Memory area which contains objects must be physically
+ * contiguous.
+ */
+ *min_chunk_size = mem_size;
+ }
+
+ return mem_size;
}
static int
-octeontx_fpavf_register_memory_area(const struct rte_mempool *mp,
- char *vaddr, rte_iova_t paddr, size_t len)
+octeontx_fpavf_populate(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
- RTE_SET_USED(paddr);
+ size_t total_elt_sz;
+ size_t off;
uint8_t gpool;
uintptr_t pool_bar;
+ int ret;
+
+ if (iova == RTE_BAD_IOVA)
+ return -EINVAL;
+
+ total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+
+ /* align object start address to a multiple of total_elt_sz */
+ off = total_elt_sz - ((uintptr_t)vaddr % total_elt_sz);
+
+ if (len < off)
+ return -EINVAL;
+
+ vaddr = (char *)vaddr + off;
+ iova += off;
+ len -= off;
gpool = octeontx_fpa_bufpool_gpool(mp->pool_id);
pool_bar = mp->pool_id & ~(uint64_t)FPA_GPOOL_MASK;
- return octeontx_fpavf_pool_set_range(pool_bar, len, vaddr, gpool);
+ ret = octeontx_fpavf_pool_set_range(pool_bar, len, vaddr, gpool);
+ if (ret < 0)
+ return ret;
+
+ return rte_mempool_op_populate_default(mp, max_objs, vaddr, iova, len,
+ obj_cb, obj_cb_arg);
}
static struct rte_mempool_ops octeontx_fpavf_ops = {
@@ -157,8 +195,8 @@ static struct rte_mempool_ops octeontx_fpavf_ops = {
.enqueue = octeontx_fpavf_enqueue,
.dequeue = octeontx_fpavf_dequeue,
.get_count = octeontx_fpavf_get_count,
- .get_capabilities = octeontx_fpavf_get_capabilities,
- .register_memory_area = octeontx_fpavf_register_memory_area,
+ .calc_mem_size = octeontx_fpavf_calc_mem_size,
+ .populate = octeontx_fpavf_populate,
};
MEMPOOL_REGISTER_OPS(octeontx_fpavf_ops);
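
[Editor's note, not part of the patch] The driver now sizes and registers its memory through the generic calc_mem_size/populate callbacks: calc_mem_size asks the default helper for obj_num + 1 objects, because populate() may skip up to one object's worth of space (off = total_elt_sz - (vaddr % total_elt_sz)) to block-align the first object, and it sets min_chunk_size to the full mem_size so the whole area must be one physically contiguous chunk. populate() then applies that offset, programs the buffer range into the FPA hardware pool, and falls back to the default object population. A minimal sketch of how an application would exercise these callbacks, assuming the ops name "octeontx_fpavf" registered by this driver; the pool name and sizing parameters are illustrative:

    #include <rte_lcore.h>
    #include <rte_mempool.h>

    /* Illustrative only: create an empty pool, attach the driver ops,
     * then let rte_mempool_populate_default() invoke the driver's
     * calc_mem_size() and populate() callbacks.
     */
    static struct rte_mempool *
    example_fpavf_pool(void)
    {
        struct rte_mempool *mp;

        mp = rte_mempool_create_empty("example_pool", 8192, 2048,
                                      256, 0, rte_socket_id(), 0);
        if (mp == NULL)
            return NULL;

        if (rte_mempool_set_ops_byname(mp, "octeontx_fpavf", NULL) != 0 ||
            rte_mempool_populate_default(mp) < 0) {
            rte_mempool_free(mp);
            return NULL;
        }
        return mp;
    }

Because min_chunk_size equals the full memory size, population fails unless the memory subsystem can provide a single IOVA-contiguous chunk large enough for all objects.
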
diff --git a/drivers/mempool/octeontx/rte_mempool_octeontx_version.map b/drivers/mempool/octeontx/rte_mempool_octeontx_version.map
index fe8cdeca..a7530317 100644
--- a/drivers/mempool/octeontx/rte_mempool_octeontx_version.map
+++ b/drivers/mempool/octeontx/rte_mempool_octeontx_version.map
@@ -1,9 +1,3 @@
DPDK_17.11 {
- global:
-
- octeontx_ssovf_info;
- octeontx_ssovf_bar;
- octeontx_ssovf_mbox_send;
-
local: *;
};