Diffstat (limited to 'drivers/net/mlx5/mlx5_txq.c')
-rw-r--r--  drivers/net/mlx5/mlx5_txq.c  23
1 file changed, 6 insertions, 17 deletions
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 760ac92d..2ead2177 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -339,7 +339,6 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
 		return NULL;
 	}
 	memset(&tmpl, 0, sizeof(struct mlx5_txq_ibv));
-	/* MRs will be registered in mp2mr[] later. */
 	attr.cq = (struct ibv_cq_init_attr_ex){
 		.comp_mask = 0,
 	};
@@ -622,10 +621,12 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		((MLX5_MAX_TSO_HEADER + (RTE_CACHE_LINE_SIZE - 1)) /
 		 RTE_CACHE_LINE_SIZE);
 	struct mlx5_txq_ctrl *tmpl;
+	const unsigned int mr_n = MR_TABLE_SZ(priv->mr_n);
 
 	tmpl = rte_calloc_socket("TXQ", 1,
 				 sizeof(*tmpl) +
-				 desc * sizeof(struct rte_mbuf *),
+				 desc * sizeof(struct rte_mbuf *) +
+				 mr_n * sizeof(struct mlx5_mr_cache),
 				 0, socket);
 	if (!tmpl) {
 		rte_errno = ENOMEM;
@@ -639,7 +640,6 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	tmpl->idx = idx;
 	if (priv->mps == MLX5_MPW_ENHANCED)
 		tmpl->txq.mpw_hdr_dseg = priv->mpw_hdr_dseg;
-	/* MRs will be registered in mp2mr[] later. */
 	DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d",
 		dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
 	DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d",
@@ -700,6 +700,9 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		tmpl->txq.tunnel_en = 1;
 	tmpl->txq.elts =
 		(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
+	tmpl->txq.mr_ctrl.cache_bh =
+		(struct mlx5_mr_cache (*)[mr_n])
+		&(*tmpl->txq.elts)[1 << tmpl->txq.elts_n];
 	tmpl->txq.stats.idx = idx;
 	rte_atomic32_inc(&tmpl->refcnt);
 	DRV_LOG(DEBUG, "port %u Tx queue %u: refcnt %d", dev->data->port_id,
@@ -728,15 +731,8 @@ mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
 	if ((*priv->txqs)[idx]) {
 		ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl,
 				    txq);
-		unsigned int i;
 		mlx5_txq_ibv_get(dev, idx);
-		for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
-			if (ctrl->txq.mp2mr[i])
-				claim_nonzero
-					(mlx5_mr_get(dev,
-						     ctrl->txq.mp2mr[i]->mp));
-		}
 		rte_atomic32_inc(&ctrl->refcnt);
 		DRV_LOG(DEBUG, "port %u Tx queue %u refcnt %d",
 			dev->data->port_id,
@@ -760,7 +756,6 @@ int
 mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
 {
 	struct priv *priv = dev->data->dev_private;
-	unsigned int i;
 	struct mlx5_txq_ctrl *txq;
 	size_t page_size = sysconf(_SC_PAGESIZE);
 
@@ -771,12 +766,6 @@ mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
 		txq->idx, rte_atomic32_read(&txq->refcnt));
 	if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv))
 		txq->ibv = NULL;
-	for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
-		if (txq->txq.mp2mr[i]) {
-			mlx5_mr_release(txq->txq.mp2mr[i]);
-			txq->txq.mp2mr[i] = NULL;
-		}
-	}
 	if (priv->uar_base)
 		munmap((void *)RTE_ALIGN_FLOOR((uintptr_t)txq->txq.bf_reg,
 		       page_size), page_size);
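
Note: the two hunks in mlx5_txq_new() above fold the per-queue MR cache into the same rte_calloc_socket() region as the Tx queue control structure and the elts[] ring, instead of registering MRs per mempool in mp2mr[]. The following is a minimal standalone sketch of that single-allocation layout; it uses stand-in types (txq_ctrl_stub, mr_cache_stub) and assumed sizes rather than the driver's mlx5_txq_ctrl / mlx5_mr_cache / MR_TABLE_SZ() definitions.

/* Sketch only: stand-in types and sizes, not the mlx5 driver code. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct mr_cache_stub { uintptr_t start, end; uint32_t lkey; };	/* stands in for struct mlx5_mr_cache */
struct txq_ctrl_stub { uint16_t elts_n; };			/* stands in for struct mlx5_txq_ctrl */

int main(void)
{
	const unsigned int desc = 256;	/* 1 << elts_n, value assumed */
	const unsigned int mr_n = 8;	/* stands in for MR_TABLE_SZ(priv->mr_n) */
	struct txq_ctrl_stub *tmpl;
	void **elts;			/* struct rte_mbuf * slots in the driver */
	struct mr_cache_stub *cache_bh;

	/* One allocation: ctrl struct, then elts[], then the MR cache table. */
	tmpl = calloc(1, sizeof(*tmpl) +
			 desc * sizeof(void *) +
			 mr_n * sizeof(struct mr_cache_stub));
	if (tmpl == NULL)
		return 1;
	elts = (void **)(tmpl + 1);			/* elts starts right after the ctrl struct */
	cache_bh = (struct mr_cache_stub *)&elts[desc];	/* MR cache starts right after elts */
	printf("tmpl=%p elts=%p cache_bh=%p\n",
	       (void *)tmpl, (void *)elts, (void *)cache_bh);
	free(tmpl);
	return 0;
}

The patch itself casts these regions to array pointers, (struct rte_mbuf *(*)[1 << elts_n]) and (struct mlx5_mr_cache (*)[mr_n]), which keeps the array bound visible in the type; the sketch uses plain pointers for brevity.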