Diffstat (limited to 'drivers/net/mlx5/mlx5_mr.c')
-rw-r--r--  drivers/net/mlx5/mlx5_mr.c | 197
1 file changed, 101 insertions, 96 deletions
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 2776dc70..a50c5208 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -55,15 +55,12 @@ struct mlx5_check_mempool_data {
 
 /* Called by mlx5_check_mempool() when iterating the memory chunks. */
 static void
-mlx5_check_mempool_cb(struct rte_mempool *mp,
+mlx5_check_mempool_cb(struct rte_mempool *mp __rte_unused,
                       void *opaque, struct rte_mempool_memhdr *memhdr,
-                      unsigned int mem_idx)
+                      unsigned int mem_idx __rte_unused)
 {
         struct mlx5_check_mempool_data *data = opaque;
 
-        (void)mp;
-        (void)mem_idx;
-
         /* It already failed, skip the next chunks. */
         if (data->ret != 0)
                 return;
@@ -98,8 +95,9 @@ mlx5_check_mempool_cb(struct rte_mempool *mp,
  * @return
  *   0 on success (mempool is virtually contiguous), -1 on error.
  */
-static int mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,
-                              uintptr_t *end)
+static int
+mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,
+                   uintptr_t *end)
 {
         struct mlx5_check_mempool_data data;
 
@@ -107,7 +105,6 @@ static int mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,
         rte_mempool_mem_iter(mp, mlx5_check_mempool_cb, &data);
         *start = (uintptr_t)data.start;
         *end = (uintptr_t)data.end;
-
         return data.ret;
 }
 
@@ -115,10 +112,6 @@ static int mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,
  * Register a Memory Region (MR) <-> Memory Pool (MP) association in
  * txq->mp2mr[]. If mp2mr[] is full, remove an entry first.
  *
- * This function should only be called by txq_mp2mr().
- *
- * @param priv
- *   Pointer to private structure.
  * @param txq
  *   Pointer to TX queue structure.
  * @param[in] mp
@@ -127,71 +120,63 @@ static int mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,
  *   Index of the next available entry.
  *
  * @return
- *   mr on success, NULL on failure.
+ *   mr on success, NULL on failure and rte_errno is set.
  */
-struct mlx5_mr*
-priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *txq,
-                   struct rte_mempool *mp, unsigned int idx)
+struct mlx5_mr *
+mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
+                   unsigned int idx)
 {
         struct mlx5_txq_ctrl *txq_ctrl =
                 container_of(txq, struct mlx5_txq_ctrl, txq);
+        struct rte_eth_dev *dev;
         struct mlx5_mr *mr;
 
+        rte_spinlock_lock(&txq_ctrl->priv->mr_lock);
         /* Add a new entry, register MR first. */
-        DEBUG("%p: discovered new memory pool \"%s\" (%p)",
-              (void *)txq_ctrl, mp->name, (void *)mp);
-        mr = priv_mr_get(priv, mp);
-        if (mr == NULL)
-                mr = priv_mr_new(priv, mp);
+        DRV_LOG(DEBUG, "port %u discovered new memory pool \"%s\" (%p)",
+                PORT_ID(txq_ctrl->priv), mp->name, (void *)mp);
+        dev = ETH_DEV(txq_ctrl->priv);
+        mr = mlx5_mr_get(dev, mp);
+        if (mr == NULL) {
+                if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+                        DRV_LOG(DEBUG,
+                                "port %u using unregistered mempool 0x%p(%s)"
+                                " in secondary process, please create mempool"
+                                " before rte_eth_dev_start()",
+                                PORT_ID(txq_ctrl->priv), (void *)mp, mp->name);
+                        rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
+                        rte_errno = ENOTSUP;
+                        return NULL;
+                }
+                mr = mlx5_mr_new(dev, mp);
+        }
         if (unlikely(mr == NULL)) {
-                DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
-                      (void *)txq_ctrl);
+                DRV_LOG(DEBUG,
+                        "port %u unable to configure memory region,"
+                        " ibv_reg_mr() failed.",
+                        PORT_ID(txq_ctrl->priv));
+                rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
                 return NULL;
         }
         if (unlikely(idx == RTE_DIM(txq->mp2mr))) {
                 /* Table is full, remove oldest entry. */
-                DEBUG("%p: MR <-> MP table full, dropping oldest entry.",
-                      (void *)txq_ctrl);
+                DRV_LOG(DEBUG,
+                        "port %u memory region <-> memory pool table full, "
+                        " dropping oldest entry",
+                        PORT_ID(txq_ctrl->priv));
                 --idx;
-                priv_mr_release(priv, txq->mp2mr[0]);
+                mlx5_mr_release(txq->mp2mr[0]);
                 memmove(&txq->mp2mr[0], &txq->mp2mr[1],
                         (sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0])));
         }
         /* Store the new entry. */
         txq_ctrl->txq.mp2mr[idx] = mr;
-        DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32,
-              (void *)txq_ctrl, mp->name, (void *)mp,
-              txq_ctrl->txq.mp2mr[idx]->lkey);
-        return mr;
-}
-
-/**
- * Register a Memory Region (MR) <-> Memory Pool (MP) association in
- * txq->mp2mr[]. If mp2mr[] is full, remove an entry first.
- *
- * This function should only be called by txq_mp2mr().
- *
- * @param txq
- *   Pointer to TX queue structure.
- * @param[in] mp
- *   Memory Pool for which a Memory Region lkey must be returned.
- * @param idx
- *   Index of the next available entry.
- *
- * @return
- *   mr on success, NULL on failure.
- */
-struct mlx5_mr*
-mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
-                   unsigned int idx)
-{
-        struct mlx5_txq_ctrl *txq_ctrl =
-                container_of(txq, struct mlx5_txq_ctrl, txq);
-        struct mlx5_mr *mr;
-
-        priv_lock(txq_ctrl->priv);
-        mr = priv_txq_mp2mr_reg(txq_ctrl->priv, txq, mp, idx);
-        priv_unlock(txq_ctrl->priv);
+        DRV_LOG(DEBUG,
+                "port %u new memory region lkey for MP \"%s\" (%p): 0x%08"
+                PRIu32,
+                PORT_ID(txq_ctrl->priv), mp->name, (void *)mp,
+                txq_ctrl->txq.mp2mr[idx]->lkey);
+        rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
         return mr;
 }
 
@@ -250,28 +235,33 @@ mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg)
         if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 ||
             data.ret == -1)
                 return;
-        mr = priv_mr_get(priv, mp);
+        mr = mlx5_mr_get(ETH_DEV(priv), mp);
         if (mr) {
-                priv_mr_release(priv, mr);
+                mlx5_mr_release(mr);
                 return;
         }
-        priv_mr_new(priv, mp);
+        mr = mlx5_mr_new(ETH_DEV(priv), mp);
+        if (!mr)
+                DRV_LOG(ERR, "port %u cannot create memory region: %s",
+                        PORT_ID(priv), strerror(rte_errno));
 }
 
 /**
  * Register a new memory region from the mempool and store it in the memory
  * region list.
  *
- * @param priv
- *   Pointer to private structure.
+ * @param dev
+ *   Pointer to Ethernet device.
  * @param mp
  *   Pointer to the memory pool to register.
+ *
  * @return
- *   The memory region on success.
+ *   The memory region on success, NULL on failure and rte_errno is set.
  */
-struct mlx5_mr*
-priv_mr_new(struct priv *priv, struct rte_mempool *mp)
+struct mlx5_mr *
+mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp)
 {
+        struct priv *priv = dev->data->dev_private;
         const struct rte_memseg *ms = rte_eal_get_physmem_layout();
         uintptr_t start;
         uintptr_t end;
@@ -280,17 +270,22 @@ priv_mr_new(struct priv *priv, struct rte_mempool *mp)
 
         mr = rte_zmalloc_socket(__func__, sizeof(*mr), 0, mp->socket_id);
         if (!mr) {
-                DEBUG("unable to configure MR, ibv_reg_mr() failed.");
+                DRV_LOG(DEBUG,
+                        "port %u unable to configure memory region,"
+                        " ibv_reg_mr() failed.",
+                        dev->data->port_id);
+                rte_errno = ENOMEM;
                 return NULL;
         }
         if (mlx5_check_mempool(mp, &start, &end) != 0) {
-                ERROR("mempool %p: not virtually contiguous",
-                      (void *)mp);
+                DRV_LOG(ERR, "port %u mempool %p: not virtually contiguous",
+                        dev->data->port_id, (void *)mp);
+                rte_errno = ENOMEM;
                 return NULL;
         }
-        DEBUG("mempool %p area start=%p end=%p size=%zu",
-              (void *)mp, (void *)start, (void *)end,
-              (size_t)(end - start));
+        DRV_LOG(DEBUG, "port %u mempool %p area start=%p end=%p size=%zu",
+                dev->data->port_id, (void *)mp, (void *)start, (void *)end,
+                (size_t)(end - start));
         /* Save original addresses for exact MR lookup. */
         mr->start = start;
         mr->end = end;
@@ -305,16 +300,22 @@ priv_mr_new(struct priv *priv, struct rte_mempool *mp)
                 if ((end > addr) && (end < addr + len))
                         end = RTE_ALIGN_CEIL(end, align);
         }
-        DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
-              (void *)mp, (void *)start, (void *)end,
-              (size_t)(end - start));
+        DRV_LOG(DEBUG,
+                "port %u mempool %p using start=%p end=%p size=%zu for memory"
+                " region",
+                dev->data->port_id, (void *)mp, (void *)start, (void *)end,
+                (size_t)(end - start));
         mr->mr = ibv_reg_mr(priv->pd, (void *)start, end - start,
                             IBV_ACCESS_LOCAL_WRITE);
+        if (!mr->mr) {
+                rte_errno = ENOMEM;
+                return NULL;
+        }
         mr->mp = mp;
         mr->lkey = rte_cpu_to_be_32(mr->mr->lkey);
         rte_atomic32_inc(&mr->refcnt);
-        DEBUG("%p: new Memory Region %p refcnt: %d", (void *)priv,
-              (void *)mr, rte_atomic32_read(&mr->refcnt));
+        DRV_LOG(DEBUG, "port %u new memory Region %p refcnt: %d",
+                dev->data->port_id, (void *)mr, rte_atomic32_read(&mr->refcnt));
         LIST_INSERT_HEAD(&priv->mr, mr, next);
         return mr;
 }
@@ -322,16 +323,18 @@ priv_mr_new(struct priv *priv, struct rte_mempool *mp)
 /**
  * Search the memory region object in the memory region list.
  *
- * @param priv
- *   Pointer to private structure.
+ * @param dev
+ *   Pointer to Ethernet device.
  * @param mp
  *   Pointer to the memory pool to register.
+ *
  * @return
  *   The memory region on success.
  */
-struct mlx5_mr*
-priv_mr_get(struct priv *priv, struct rte_mempool *mp)
+struct mlx5_mr *
+mlx5_mr_get(struct rte_eth_dev *dev, struct rte_mempool *mp)
 {
+        struct priv *priv = dev->data->dev_private;
         struct mlx5_mr *mr;
 
         assert(mp);
@@ -340,8 +343,9 @@ priv_mr_get(struct priv *priv, struct rte_mempool *mp)
         LIST_FOREACH(mr, &priv->mr, next) {
                 if (mr->mp == mp) {
                         rte_atomic32_inc(&mr->refcnt);
-                        DEBUG("Memory Region %p refcnt: %d",
-                              (void *)mr, rte_atomic32_read(&mr->refcnt));
+                        DRV_LOG(DEBUG, "port %u memory region %p refcnt: %d",
+                                dev->data->port_id, (void *)mr,
+                                rte_atomic32_read(&mr->refcnt));
                         return mr;
                 }
         }
@@ -355,41 +359,42 @@ priv_mr_get(struct priv *priv, struct rte_mempool *mp)
  *   Pointer to memory region to release.
  *
  * @return
- *   0 on success, errno on failure.
+ *   1 while a reference on it exists, 0 when freed.
  */
 int
-priv_mr_release(struct priv *priv, struct mlx5_mr *mr)
+mlx5_mr_release(struct mlx5_mr *mr)
 {
-        (void)priv;
         assert(mr);
-        DEBUG("Memory Region %p refcnt: %d",
-              (void *)mr, rte_atomic32_read(&mr->refcnt));
+        DRV_LOG(DEBUG, "memory region %p refcnt: %d", (void *)mr,
+                rte_atomic32_read(&mr->refcnt));
         if (rte_atomic32_dec_and_test(&mr->refcnt)) {
                 claim_zero(ibv_dereg_mr(mr->mr));
                 LIST_REMOVE(mr, next);
                 rte_free(mr);
                 return 0;
         }
-        return EBUSY;
+        return 1;
 }
 
 /**
  * Verify the flow list is empty
  *
- * @param priv
- *   Pointer to private structure.
+ * @param dev
+ *   Pointer to Ethernet device.
  *
- * @return the number of object not released.
+ * @return
+ *   The number of object not released.
  */
 int
-priv_mr_verify(struct priv *priv)
+mlx5_mr_verify(struct rte_eth_dev *dev)
 {
+        struct priv *priv = dev->data->dev_private;
         int ret = 0;
         struct mlx5_mr *mr;
 
         LIST_FOREACH(mr, &priv->mr, next) {
-                DEBUG("%p: mr %p still referenced", (void *)priv,
-                      (void *)mr);
+                DRV_LOG(DEBUG, "port %u memory region %p still referenced",
+                        dev->data->port_id, (void *)mr);
                 ++ret;
         }
         return ret;
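
Note: the hunks above rename the driver's memory-region helpers (priv_mr_* to mlx5_mr_*, priv_txq_mp2mr_reg folded into mlx5_txq_mp2mr_reg) and switch them to rte_errno plus a per-device spinlock, but the caching scheme itself is unchanged: each Tx queue keeps a small mp2mr[] table mapping mempools to MR lkeys, a miss registers a new reference-counted MR, and a full table evicts its oldest entry. The sketch below is a minimal, self-contained model of that scheme in plain C; the names (struct pool, mr_new, mr_release, mp2mr_lookup_or_register) are hypothetical stand-ins, not the driver's API, and the release helper mirrors the new convention of returning 1 while references remain and 0 once the region is freed.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MP2MR_SIZE 4 /* stands in for the fixed-size txq->mp2mr[] table */

struct pool { int id; };         /* opaque stand-in for struct rte_mempool */

struct mr {
        const struct pool *mp;   /* pool this region covers */
        uint32_t lkey;           /* key handed to the hardware on transmit */
        int refcnt;              /* simplified reference count */
};

/* Drop one reference; returns 1 while references remain, 0 once freed
 * (the convention the patch gives mlx5_mr_release()). */
static int
mr_release(struct mr *mr)
{
        if (--mr->refcnt > 0)
                return 1;
        free(mr);
        return 0;
}

/* Register a new region covering @mp; NULL on allocation failure. */
static struct mr *
mr_new(const struct pool *mp, uint32_t lkey)
{
        struct mr *mr = calloc(1, sizeof(*mr));

        if (!mr)
                return NULL;
        mr->mp = mp;
        mr->lkey = lkey;
        mr->refcnt = 1;
        return mr;
}

/* Per-queue cache: look the pool up, register it on a miss and, when the
 * table is full, drop the oldest entry and shift the rest, like the patched
 * mlx5_txq_mp2mr_reg() does. */
static struct mr *
mp2mr_lookup_or_register(struct mr *table[MP2MR_SIZE], const struct pool *mp,
                         uint32_t lkey)
{
        unsigned int i;

        for (i = 0; i < MP2MR_SIZE && table[i] != NULL; ++i)
                if (table[i]->mp == mp)
                        return table[i];
        if (i == MP2MR_SIZE) {
                /* Table full: evict the oldest entry. */
                mr_release(table[0]);
                memmove(&table[0], &table[1],
                        (MP2MR_SIZE - 1) * sizeof(table[0]));
                i = MP2MR_SIZE - 1;
        }
        table[i] = mr_new(mp, lkey);
        return table[i];
}

int
main(void)
{
        struct mr *table[MP2MR_SIZE] = { NULL };
        struct pool pools[6];    /* six pools against a four-entry table */
        unsigned int i;

        for (i = 0; i < 6; ++i) {
                struct mr *mr = mp2mr_lookup_or_register(table, &pools[i],
                                                         0x1000 + i);

                if (mr)
                        printf("pool %u -> lkey 0x%08" PRIx32 "\n", i,
                               mr->lkey);
        }
        return 0;
}

Compiled as-is, the sketch prints one lkey per pool and exercises two evictions, which is enough to see why the real code shifts mp2mr[] with memmove() after releasing entry zero.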