Diffstat (limited to 'drivers/mempool/dpaa/dpaa_mempool.c')
 drivers/mempool/dpaa/dpaa_mempool.c | 54
 1 file changed, 43 insertions(+), 11 deletions(-)
diff --git a/drivers/mempool/dpaa/dpaa_mempool.c b/drivers/mempool/dpaa/dpaa_mempool.c
index fb3b6ba0..10c536bf 100644
--- a/drivers/mempool/dpaa/dpaa_mempool.c
+++ b/drivers/mempool/dpaa/dpaa_mempool.c
@@ -27,6 +27,13 @@
#include <dpaa_mempool.h>
+/* List of all the memseg information maintained locally in the dpaa driver.
+ * This is used to optimize PA to VA searches until a better mechanism
+ * (algorithm) is available.
+ */
+struct dpaa_memseg_list rte_dpaa_memsegs
+ = TAILQ_HEAD_INITIALIZER(rte_dpaa_memsegs);
+
struct dpaa_bp_info rte_dpaa_bpid_info[DPAA_MAX_BPOOLS];
static int
@@ -115,7 +122,8 @@ dpaa_buf_free(struct dpaa_bp_info *bp_info, uint64_t addr)
struct bm_buffer buf;
int ret;
- DPAA_MEMPOOL_DEBUG("Free 0x%lx to bpid: %d", addr, bp_info->bpid);
+ DPAA_MEMPOOL_DEBUG("Free 0x%" PRIx64 " to bpid: %d",
+ addr, bp_info->bpid);
bm_buffer_set64(&buf, addr);
retry:
@@ -154,8 +162,7 @@ dpaa_mbuf_free_bulk(struct rte_mempool *pool,
if (unlikely(!bp_info->ptov_off)) {
/* buffers are from single mem segment */
if (bp_info->flags & DPAA_MPOOL_SINGLE_SEGMENT) {
- bp_info->ptov_off
- = (uint64_t)obj_table[i] - phy;
+ bp_info->ptov_off = (size_t)obj_table[i] - phy;
rte_dpaa_bpid_info[bp_info->bpid].ptov_off
= bp_info->ptov_off;
}
@@ -264,10 +271,9 @@ dpaa_mbuf_get_count(const struct rte_mempool *mp)
}
static int
-dpaa_register_memory_area(const struct rte_mempool *mp,
- char *vaddr __rte_unused,
- rte_iova_t paddr __rte_unused,
- size_t len)
+dpaa_populate(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t paddr, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
struct dpaa_bp_info *bp_info;
unsigned int total_elt_sz;
@@ -282,14 +288,40 @@ dpaa_register_memory_area(const struct rte_mempool *mp,
bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
- DPAA_MEMPOOL_DEBUG("Req size %lu vs Available %u\n",
- len, total_elt_sz * mp->size);
+ DPAA_MEMPOOL_DEBUG("Req size %" PRIx64 " vs Available %u\n",
+ (uint64_t)len, total_elt_sz * mp->size);
/* Detect pool area has sufficient space for elements in this memzone */
if (len >= total_elt_sz * mp->size)
bp_info->flags |= DPAA_MPOOL_SINGLE_SEGMENT;
+ struct dpaa_memseg *ms;
+
+ /* For each memory chunk pinned to the mempool, a node is added to a
+ * linked list of memsegs, which is searched whenever a PA to VA
+ * conversion is required.
+ */
+ ms = rte_zmalloc(NULL, sizeof(struct dpaa_memseg), 0);
+ if (!ms) {
+ DPAA_MEMPOOL_ERR("Unable to allocate internal memory.");
+ DPAA_MEMPOOL_WARN("Fast Physical to Virtual Addr translation would not be available.");
+ /* If the element is not added, lookups for it will simply fail and
+ * the logic will fall back to the traditional DPDK memseg traversal
+ * code. This is therefore not a blocking error, but an error message
+ * is still printed.
+ */
+ return 0;
+ }
- return 0;
+ ms->vaddr = vaddr;
+ ms->iova = paddr;
+ ms->len = len;
+ /* Head insertions are generally faster than tail insertions, as the
+ * pinned buffers are picked from the rear end.
+ */
+ TAILQ_INSERT_HEAD(&rte_dpaa_memsegs, ms, next);
+
+ return rte_mempool_op_populate_default(mp, max_objs, vaddr, paddr, len,
+ obj_cb, obj_cb_arg);
}
struct rte_mempool_ops dpaa_mpool_ops = {
@@ -299,7 +331,7 @@ struct rte_mempool_ops dpaa_mpool_ops = {
.enqueue = dpaa_mbuf_free_bulk,
.dequeue = dpaa_mbuf_alloc_bulk,
.get_count = dpaa_mbuf_get_count,
- .register_memory_area = dpaa_register_memory_area,
+ .populate = dpaa_populate,
};
MEMPOOL_REGISTER_OPS(dpaa_mpool_ops);
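
The consumer of rte_dpaa_memsegs is not part of this diff. As a rough illustration of how the list populated by dpaa_populate() could be searched during PA to VA conversion, here is a minimal sketch. The struct definitions only approximate what the dpaa headers declare (they use the fields assigned above: vaddr, iova, len, next), dpaa_paddr_to_vaddr() is a hypothetical helper name, and rte_mem_iova2virt() stands in for the "traditional DPDK memseg traversal" fallback mentioned in the error-path comment.

#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_memory.h>		/* rte_iova_t, rte_mem_iova2virt() */

/* Approximation of the node type; the real definitions live in the dpaa
 * headers and are not shown in this diff.
 */
struct dpaa_memseg {
	TAILQ_ENTRY(dpaa_memseg) next;
	void *vaddr;
	rte_iova_t iova;
	size_t len;
};

TAILQ_HEAD(dpaa_memseg_list, dpaa_memseg);
extern struct dpaa_memseg_list rte_dpaa_memsegs;

/* Hypothetical lookup helper: walk the driver-local list first; if the
 * address is not covered by any recorded chunk, fall back to the generic
 * EAL translation.
 */
static inline void *
dpaa_paddr_to_vaddr(rte_iova_t paddr)
{
	struct dpaa_memseg *ms;

	TAILQ_FOREACH(ms, &rte_dpaa_memsegs, next) {
		if (paddr >= ms->iova && paddr < ms->iova + ms->len)
			return (char *)ms->vaddr + (paddr - ms->iova);
	}

	/* Slow path: traditional DPDK memseg traversal. */
	return rte_mem_iova2virt(paddr);
}

A walk like this also explains the head insertion above: the most recently pinned chunks are checked first, matching the access pattern described in that comment.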