Diffstat (limited to 'drivers/mempool/dpaa2/dpaa2_hw_mempool.c')
-rw-r--r--  drivers/mempool/dpaa2/dpaa2_hw_mempool.c | 139 ++++++++++++++++++++-------
1 file changed, 112 insertions(+), 27 deletions(-)
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
index 2bd62e88..7d0435f5 100644
--- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
@@ -21,16 +21,28 @@
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
+#include "rte_dpaa2_mempool.h"
#include <fslmc_logs.h>
#include <mc/fsl_dpbp.h>
#include <portal/dpaa2_hw_pvt.h>
#include <portal/dpaa2_hw_dpio.h>
#include "dpaa2_hw_mempool.h"
+#include "dpaa2_hw_mempool_logs.h"
struct dpaa2_bp_info rte_dpaa2_bpid_info[MAX_BPID];
static struct dpaa2_bp_list *h_bp_list;
+/* List of all the memseg information locally maintained in dpaa2 driver. This
+ * is to optimize the PA_to_VA searches until a better mechanism (algo) is
+ * available.
+ */
+struct dpaa2_memseg_list rte_dpaa2_memsegs
+ = TAILQ_HEAD_INITIALIZER(rte_dpaa2_memsegs);
+
+/* Dynamic logging identifier for mempool */
+int dpaa2_logtype_mempool;
+
static int
rte_hw_mbuf_create_pool(struct rte_mempool *mp)
{
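
The rte_dpaa2_memsegs list introduced above is consulted on the fast path whenever a physical (IOVA) address coming back from hardware has to be translated to a virtual address. The actual lookup lives in the driver's portal headers; the fragment below is only a minimal sketch of such a walk, assuming the dpaa2_memseg field names (vaddr, iova, len, next) used by dpaa2_populate() later in this patch. The helper name dpaa2_memseg_iova_to_vaddr() is hypothetical and not a driver symbol.

/* Sketch only: linear walk over the driver-local memseg list to translate
 * an IOVA/physical address to its virtual counterpart. Entries are pushed
 * at the head by dpaa2_populate(), so recently pinned chunks are hit first.
 * Relies on <rte_memory.h> for rte_mem_iova2virt() and on the driver's
 * struct dpaa2_memseg definition; the function name is illustrative only.
 */
static inline void *
dpaa2_memseg_iova_to_vaddr(rte_iova_t paddr)
{
	struct dpaa2_memseg *ms;

	TAILQ_FOREACH(ms, &rte_dpaa2_memsegs, next) {
		if (paddr >= ms->iova && paddr < ms->iova + ms->len)
			return (void *)((uintptr_t)ms->vaddr +
					(uintptr_t)(paddr - ms->iova));
	}

	/* Not found: fall back to the generic DPDK memseg walk. */
	return rte_mem_iova2virt(paddr);
}
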
@@ -44,30 +56,30 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
avail_dpbp = dpaa2_alloc_dpbp_dev();
if (!avail_dpbp) {
- PMD_DRV_LOG(ERR, "DPAA2 resources not available");
+ DPAA2_MEMPOOL_ERR("DPAA2 pool not available!");
return -ENOENT;
}
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret) {
- RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+ DPAA2_MEMPOOL_ERR("Failure in affining portal");
goto err1;
}
}
ret = dpbp_enable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
if (ret != 0) {
- PMD_INIT_LOG(ERR, "Resource enable failure with"
- " err code: %d\n", ret);
+ DPAA2_MEMPOOL_ERR("Resource enable failure with err code: %d",
+ ret);
goto err1;
}
ret = dpbp_get_attributes(&avail_dpbp->dpbp, CMD_PRI_LOW,
avail_dpbp->token, &dpbp_attr);
if (ret != 0) {
- PMD_INIT_LOG(ERR, "Resource read failure with"
- " err code: %d\n", ret);
+ DPAA2_MEMPOOL_ERR("Resource read failure with err code: %d",
+ ret);
goto err2;
}
@@ -75,7 +87,7 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
sizeof(struct dpaa2_bp_info),
RTE_CACHE_LINE_SIZE);
if (!bp_info) {
- PMD_INIT_LOG(ERR, "No heap memory available for bp_info");
+ DPAA2_MEMPOOL_ERR("Unable to allocate buffer pool memory");
ret = -ENOMEM;
goto err2;
}
@@ -84,7 +96,7 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
bp_list = rte_malloc(NULL, sizeof(struct dpaa2_bp_list),
RTE_CACHE_LINE_SIZE);
if (!bp_list) {
- PMD_INIT_LOG(ERR, "No heap memory available");
+ DPAA2_MEMPOOL_ERR("Unable to allocate buffer pool memory");
ret = -ENOMEM;
goto err3;
}
@@ -112,7 +124,7 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
sizeof(struct dpaa2_bp_info));
mp->pool_data = (void *)bp_info;
- PMD_INIT_LOG(DEBUG, "BP List created for bpid =%d", dpbp_attr.bpid);
+ DPAA2_MEMPOOL_DEBUG("BP List created for bpid =%d", dpbp_attr.bpid);
h_bp_list = bp_list;
return 0;
@@ -134,7 +146,7 @@ rte_hw_mbuf_free_pool(struct rte_mempool *mp)
struct dpaa2_dpbp_dev *dpbp_node;
if (!mp->pool_data) {
- PMD_DRV_LOG(ERR, "Not a valid dpaa22 pool");
+ DPAA2_MEMPOOL_ERR("Not a valid dpaa2 buffer pool");
return;
}
@@ -180,7 +192,7 @@ rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret != 0) {
- RTE_LOG(ERR, PMD, "Failed to allocate IO portal\n");
+ DPAA2_MEMPOOL_ERR("Failed to allocate IO portal");
return;
}
}
@@ -233,6 +245,35 @@ aligned:
}
}
+uint16_t
+rte_dpaa2_mbuf_pool_bpid(struct rte_mempool *mp)
+{
+ struct dpaa2_bp_info *bp_info;
+
+ bp_info = mempool_to_bpinfo(mp);
+ if (!(bp_info->bp_list)) {
+ RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
+ return -ENOMEM;
+ }
+
+ return bp_info->bpid;
+}
+
+struct rte_mbuf *
+rte_dpaa2_mbuf_from_buf_addr(struct rte_mempool *mp, void *buf_addr)
+{
+ struct dpaa2_bp_info *bp_info;
+
+ bp_info = mempool_to_bpinfo(mp);
+ if (!(bp_info->bp_list)) {
+ RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
+ return NULL;
+ }
+
+ return (struct rte_mbuf *)((uint8_t *)buf_addr -
+ bp_info->meta_data_size);
+}
+
int
rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
void **obj_table, unsigned int count)
@@ -242,7 +283,7 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
#endif
struct qbman_swp *swp;
uint16_t bpid;
- uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
+ size_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
int i, ret;
unsigned int n = 0;
struct dpaa2_bp_info *bp_info;
@@ -250,7 +291,7 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
bp_info = mempool_to_bpinfo(pool);
if (!(bp_info->bp_list)) {
- RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
+ DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
return -ENOENT;
}
@@ -259,7 +300,7 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret != 0) {
- RTE_LOG(ERR, PMD, "Failed to allocate IO portal\n");
+ DPAA2_MEMPOOL_ERR("Failed to allocate IO portal");
return ret;
}
}
@@ -270,18 +311,18 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
* then the remainder.
*/
if ((count - n) > DPAA2_MBUF_MAX_ACQ_REL) {
- ret = qbman_swp_acquire(swp, bpid, bufs,
+ ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
DPAA2_MBUF_MAX_ACQ_REL);
} else {
- ret = qbman_swp_acquire(swp, bpid, bufs,
+ ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
count - n);
}
/* In case of less than requested number of buffers available
* in pool, qbman_swp_acquire returns 0
*/
if (ret <= 0) {
- PMD_TX_LOG(ERR, "Buffer acquire failed with"
- " err code: %d", ret);
+ DPAA2_MEMPOOL_ERR("Buffer acquire failed with"
+ " err code: %d", ret);
/* The API expect the exact number of requested bufs */
/* Releasing all buffers allocated */
rte_dpaa2_mbuf_release(pool, obj_table, bpid,
@@ -290,10 +331,11 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
}
/* assigning mbuf from the acquired objects */
for (i = 0; (i < ret) && bufs[i]; i++) {
- DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], uint64_t);
+ DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], size_t);
obj_table[n] = (struct rte_mbuf *)
(bufs[i] - bp_info->meta_data_size);
- PMD_TX_LOG(DEBUG, "Acquired %p address %p from BMAN",
+ DPAA2_MEMPOOL_DP_DEBUG(
+ "Acquired %p address %p from BMAN\n",
(void *)bufs[i], (void *)obj_table[n]);
n++;
}
@@ -301,8 +343,8 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
alloc += n;
- PMD_TX_LOG(DEBUG, "Total = %d , req = %d done = %d",
- alloc, count, n);
+ DPAA2_MEMPOOL_DP_DEBUG("Total = %d , req = %d done = %d\n",
+ alloc, count, n);
#endif
return 0;
}
@@ -315,7 +357,7 @@ rte_hw_mbuf_free_bulk(struct rte_mempool *pool,
bp_info = mempool_to_bpinfo(pool);
if (!(bp_info->bp_list)) {
- RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
+ DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
return -ENOENT;
}
rte_dpaa2_mbuf_release(pool, obj_table, bp_info->bpid,
@@ -333,7 +375,7 @@ rte_hw_mbuf_get_count(const struct rte_mempool *mp)
struct dpaa2_dpbp_dev *dpbp_node;
if (!mp || !mp->pool_data) {
- RTE_LOG(ERR, PMD, "Invalid mempool provided\n");
+ DPAA2_MEMPOOL_ERR("Invalid mempool provided");
return 0;
}
@@ -343,16 +385,51 @@ rte_hw_mbuf_get_count(const struct rte_mempool *mp)
ret = dpbp_get_num_free_bufs(&dpbp_node->dpbp, CMD_PRI_LOW,
dpbp_node->token, &num_of_bufs);
if (ret) {
- RTE_LOG(ERR, PMD, "Unable to obtain free buf count (err=%d)\n",
- ret);
+ DPAA2_MEMPOOL_ERR("Unable to obtain free buf count (err=%d)",
+ ret);
return 0;
}
- RTE_LOG(DEBUG, PMD, "Free bufs = %u\n", num_of_bufs);
+ DPAA2_MEMPOOL_DP_DEBUG("Free bufs = %u\n", num_of_bufs);
return num_of_bufs;
}
+static int
+dpaa2_populate(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t paddr, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
+{
+ struct dpaa2_memseg *ms;
+
+ /* For each memory chunk pinned to the Mempool, a linked list of the
+ * contained memsegs is created for searching when PA to VA
+ * conversion is required.
+ */
+ ms = rte_zmalloc(NULL, sizeof(struct dpaa2_memseg), 0);
+ if (!ms) {
+ DPAA2_MEMPOOL_ERR("Unable to allocate internal memory.");
+ DPAA2_MEMPOOL_WARN("Fast Physical to Virtual Addr translation would not be available.");
+ /* If the element is not added, it would only lead to failure
+ * in searching for the element and the logic would Fallback
+ * to traditional DPDK memseg traversal code. So, this is not
+ * a blocking error - but, error would be printed on screen.
+ */
+ return 0;
+ }
+
+ ms->vaddr = vaddr;
+ ms->iova = paddr;
+ ms->len = len;
+ /* Head insertions are generally faster than tail insertions as the
+ * buffers pinned are picked from rear end.
+ */
+ TAILQ_INSERT_HEAD(&rte_dpaa2_memsegs, ms, next);
+
+ return rte_mempool_op_populate_default(mp, max_objs, vaddr, paddr, len,
+ obj_cb, obj_cb_arg);
+}
+
struct rte_mempool_ops dpaa2_mpool_ops = {
.name = DPAA2_MEMPOOL_OPS_NAME,
.alloc = rte_hw_mbuf_create_pool,
@@ -360,6 +437,14 @@ struct rte_mempool_ops dpaa2_mpool_ops = {
.enqueue = rte_hw_mbuf_free_bulk,
.dequeue = rte_dpaa2_mbuf_alloc_bulk,
.get_count = rte_hw_mbuf_get_count,
+ .populate = dpaa2_populate,
};
MEMPOOL_REGISTER_OPS(dpaa2_mpool_ops);
+
+RTE_INIT(dpaa2_mempool_init_log)
+{
+ dpaa2_logtype_mempool = rte_log_register("mempool.dpaa2");
+ if (dpaa2_logtype_mempool >= 0)
+ rte_log_set_level(dpaa2_logtype_mempool, RTE_LOG_NOTICE);
+}
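
The two helpers exported here, rte_dpaa2_mbuf_pool_bpid() and rte_dpaa2_mbuf_from_buf_addr(), let code that only holds a raw buffer address (for instance one handed back by an accelerator) recover the owning mbuf and the hardware buffer-pool id. A minimal usage sketch, assuming 'mp' was created with the dpaa2 mempool ops and 'buf_addr' belongs to it; the function name is illustrative only:

#include <rte_log.h>
#include <rte_mbuf.h>
#include <rte_dpaa2_mempool.h>

static struct rte_mbuf *
recover_mbuf(struct rte_mempool *mp, void *buf_addr)
{
	uint16_t bpid;
	struct rte_mbuf *mbuf;

	/* bpid of the DPBP backing this pool; on a pool that was not
	 * created with the dpaa2 ops the helper logs an error instead.
	 */
	bpid = rte_dpaa2_mbuf_pool_bpid(mp);

	/* Walk back from the data buffer to the mbuf header; internally the
	 * helper subtracts the pool's meta_data_size.
	 */
	mbuf = rte_dpaa2_mbuf_from_buf_addr(mp, buf_addr);
	if (mbuf == NULL)
		return NULL;

	RTE_LOG(DEBUG, USER1, "buf %p -> mbuf %p (bpid %u)\n",
		buf_addr, (void *)mbuf, bpid);
	return mbuf;
}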
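
The RTE_INIT constructor registers the dynamic log type under the name "mempool.dpaa2" with a default level of NOTICE, so debug output stays off unless explicitly enabled. A sketch of raising it at run time follows; the exact EAL --log-level spelling varies slightly between DPDK releases.

#include <rte_log.h>

/* Sketch: raise the "mempool.dpaa2" log type to DEBUG. rte_log_register()
 * returns the id of an already-registered name, so this does not clash with
 * the driver's own registration.
 */
static void
enable_dpaa2_mempool_debug(void)
{
	int type = rte_log_register("mempool.dpaa2");

	if (type >= 0)
		rte_log_set_level(type, RTE_LOG_DEBUG);
}

The same effect is usually available from the command line, e.g. --log-level=mempool.dpaa2,8 (8 being the numeric value of RTE_LOG_DEBUG).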