author | Luca Boccassi <luca.boccassi@gmail.com> | 2017-07-03 15:11:03 +0100
committer | Luca Boccassi <luca.boccassi@gmail.com> | 2017-07-03 15:13:07 +0100
commit | bf7567fd2a5b0b28ab724046143c24561d38d015 (patch)
tree | d3fecf7bb6da55e6ee81f8d42110bd51c6e93631 /drivers/net/mlx5
parent | 7595afa4d30097c1177b69257118d8ad89a539be (diff)
New upstream version 17.05.1
Change-Id: I8a23679edd6c9c593ceebecf7d2bf1b489e14ccb
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/net/mlx5')
-rw-r--r-- | drivers/net/mlx5/mlx5.c | 1
-rw-r--r-- | drivers/net/mlx5/mlx5.h | 2
-rw-r--r-- | drivers/net/mlx5/mlx5_fdir.c | 7
-rw-r--r-- | drivers/net/mlx5/mlx5_flow.c | 27
-rw-r--r-- | drivers/net/mlx5/mlx5_rxq.c | 14
-rw-r--r-- | drivers/net/mlx5/mlx5_txq.c | 16
6 files changed, 37 insertions, 30 deletions
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index fc99c0d5..bcb2c1b2 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -789,6 +789,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 		eth_dev->device->driver = &mlx5_driver.driver;
 		priv->dev = eth_dev;
 		eth_dev->dev_ops = &mlx5_dev_ops;
+		TAILQ_INIT(&priv->flows);

 		/* Bring Ethernet device up. */
 		DEBUG("forcing Ethernet interface up");
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 67fd7428..1148dee3 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -155,7 +155,7 @@ struct priv {
 	struct fdir_filter_list *fdir_filter_list; /* Flow director rules. */
 	struct fdir_queue *fdir_drop_queue; /* Flow director drop queue. */
 	struct rte_flow_drop *flow_drop_queue; /* Flow drop queue. */
-	LIST_HEAD(mlx5_flows, rte_flow) flows; /* RTE Flow rules. */
+	TAILQ_HEAD(mlx5_flows, rte_flow) flows; /* RTE Flow rules. */
 	uint32_t link_speed_capa; /* Link speed capabilities. */
 	struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
 	rte_spinlock_t lock; /* Lock for control functions. */
diff --git a/drivers/net/mlx5/mlx5_fdir.c b/drivers/net/mlx5/mlx5_fdir.c
index f80c58b4..c8d47489 100644
--- a/drivers/net/mlx5/mlx5_fdir.c
+++ b/drivers/net/mlx5/mlx5_fdir.c
@@ -144,6 +144,7 @@ fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
 	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
 		desc->src_port = fdir_filter->input.flow.udp4_flow.src_port;
 		desc->dst_port = fdir_filter->input.flow.udp4_flow.dst_port;
+		/* fallthrough */
 	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
 		desc->src_ip[0] = fdir_filter->input.flow.ip4_flow.src_ip;
 		desc->dst_ip[0] = fdir_filter->input.flow.ip4_flow.dst_ip;
@@ -733,9 +734,11 @@ priv_fdir_disable(struct priv *priv)
 	/* Destroy flow director context in each RX queue. */
 	for (i = 0; (i != priv->rxqs_n); i++) {
-		struct rxq_ctrl *rxq_ctrl =
-			container_of((*priv->rxqs)[i], struct rxq_ctrl, rxq);
+		struct rxq_ctrl *rxq_ctrl;

+		if (!(*priv->rxqs)[i])
+			continue;
+		rxq_ctrl = container_of((*priv->rxqs)[i], struct rxq_ctrl, rxq);
 		if (!rxq_ctrl->fdir_queue)
 			continue;
 		priv_fdir_queue_destroy(priv, rxq_ctrl->fdir_queue);
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index adcbe3f5..8b3957ba 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -91,7 +91,7 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
 		       void *data);

 struct rte_flow {
-	LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
+	TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
 	struct ibv_exp_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
 	struct ibv_exp_rwq_ind_table *ind_table; /**< Indirection table. */
 	struct ibv_qp *qp; /**< Verbs queue pair. */
@@ -1230,7 +1230,7 @@ mlx5_flow_create(struct rte_eth_dev *dev,
 	priv_lock(priv);
 	flow = priv_flow_create(priv, attr, items, actions, error);
 	if (flow) {
-		LIST_INSERT_HEAD(&priv->flows, flow, next);
+		TAILQ_INSERT_TAIL(&priv->flows, flow, next);
 		DEBUG("Flow created %p", (void *)flow);
 	}
 	priv_unlock(priv);
@@ -1249,8 +1249,7 @@ static void
 priv_flow_destroy(struct priv *priv,
 		  struct rte_flow *flow)
 {
-	(void)priv;
-	LIST_REMOVE(flow, next);
+	TAILQ_REMOVE(&priv->flows, flow, next);
 	if (flow->ibv_flow)
 		claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
 	if (flow->drop)
@@ -1275,9 +1274,9 @@ priv_flow_destroy(struct priv *priv,
 	 */
 	for (queue_n = 0; queue_n < flow->rxqs_n; ++queue_n) {
 		rxq = flow->rxqs[queue_n];
-		for (tmp = LIST_FIRST(&priv->flows);
+		for (tmp = TAILQ_FIRST(&priv->flows);
 		     tmp;
-		     tmp = LIST_NEXT(tmp, next)) {
+		     tmp = TAILQ_NEXT(tmp, next)) {
 			uint32_t tqueue_n;

 			if (tmp->drop)
@@ -1330,10 +1329,10 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
 static void
 priv_flow_flush(struct priv *priv)
 {
-	while (!LIST_EMPTY(&priv->flows)) {
+	while (!TAILQ_EMPTY(&priv->flows)) {
 		struct rte_flow *flow;

-		flow = LIST_FIRST(&priv->flows);
+		flow = TAILQ_FIRST(&priv->flows);
 		priv_flow_destroy(priv, flow);
 	}
 }
@@ -1494,9 +1493,7 @@ priv_flow_stop(struct priv *priv)
 {
 	struct rte_flow *flow;

-	for (flow = LIST_FIRST(&priv->flows);
-	     flow;
-	     flow = LIST_NEXT(flow, next)) {
+	TAILQ_FOREACH_REVERSE(flow, &priv->flows, mlx5_flows, next) {
 		claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
 		flow->ibv_flow = NULL;
 		if (flow->mark) {
@@ -1528,9 +1525,7 @@ priv_flow_start(struct priv *priv)
 	ret = priv_flow_create_drop_queue(priv);
 	if (ret)
 		return -1;
-	for (flow = LIST_FIRST(&priv->flows);
-	     flow;
-	     flow = LIST_NEXT(flow, next)) {
+	TAILQ_FOREACH(flow, &priv->flows, next) {
 		struct ibv_qp *qp;

 		if (flow->drop)
@@ -1570,9 +1565,9 @@ priv_flow_rxq_in_use(struct priv *priv, struct rxq *rxq)
 {
 	struct rte_flow *flow;

-	for (flow = LIST_FIRST(&priv->flows);
+	for (flow = TAILQ_FIRST(&priv->flows);
 	     flow;
-	     flow = LIST_NEXT(flow, next)) {
+	     flow = TAILQ_NEXT(flow, next)) {
 		unsigned int n;

 		if (flow->drop)
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 8b782336..2a268398 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -838,12 +838,16 @@ static inline int
 rxq_setup(struct rxq_ctrl *tmpl)
 {
 	struct ibv_cq *ibcq = tmpl->cq;
-	struct mlx5_cq *cq = to_mxxx(cq, cq);
+	struct ibv_mlx5_cq_info cq_info;
 	struct mlx5_rwq *rwq = container_of(tmpl->wq, struct mlx5_rwq, wq);
 	struct rte_mbuf *(*elts)[1 << tmpl->rxq.elts_n] =
 		rte_calloc_socket("RXQ", 1, sizeof(*elts), 0, tmpl->socket);

-	if (cq->cqe_sz != RTE_CACHE_LINE_SIZE) {
+	if (ibv_mlx5_exp_get_cq_info(ibcq, &cq_info)) {
+		ERROR("Unable to query CQ info. check your OFED.");
+		return ENOTSUP;
+	}
+	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
 		ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
 		      "it should be set to %u", RTE_CACHE_LINE_SIZE);
 		return EINVAL;
@@ -851,16 +855,16 @@ rxq_setup(struct rxq_ctrl *tmpl)
 	if (elts == NULL)
 		return ENOMEM;
 	tmpl->rxq.rq_db = rwq->rq.db;
-	tmpl->rxq.cqe_n = log2above(ibcq->cqe);
+	tmpl->rxq.cqe_n = log2above(cq_info.cqe_cnt);
 	tmpl->rxq.cq_ci = 0;
 	tmpl->rxq.rq_ci = 0;
-	tmpl->rxq.cq_db = cq->dbrec;
+	tmpl->rxq.cq_db = cq_info.dbrec;
 	tmpl->rxq.wqes =
 		(volatile struct mlx5_wqe_data_seg (*)[])
 		(uintptr_t)rwq->rq.buff;
 	tmpl->rxq.cqes =
 		(volatile struct mlx5_cqe (*)[])
-		(uintptr_t)cq->active_buf->buf;
+		(uintptr_t)cq_info.buf;
 	tmpl->rxq.elts = elts;
 	return 0;
 }
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index de7e28be..bf72468d 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -117,7 +117,7 @@ txq_free_elts(struct txq_ctrl *txq_ctrl)
 		struct rte_mbuf *elt = (*elts)[elts_tail];

 		assert(elt != NULL);
-		rte_pktmbuf_free(elt);
+		rte_pktmbuf_free_seg(elt);
 #ifndef NDEBUG
 		/* Poisoning. */
 		memset(&(*elts)[elts_tail],
@@ -173,23 +173,27 @@ txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl)
 {
 	struct mlx5_qp *qp = to_mqp(tmpl->qp);
 	struct ibv_cq *ibcq = tmpl->cq;
-	struct mlx5_cq *cq = to_mxxx(cq, cq);
+	struct ibv_mlx5_cq_info cq_info;

-	if (cq->cqe_sz != RTE_CACHE_LINE_SIZE) {
+	if (ibv_mlx5_exp_get_cq_info(ibcq, &cq_info)) {
+		ERROR("Unable to query CQ info. check your OFED.");
+		return ENOTSUP;
+	}
+	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
 		ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
 		      "it should be set to %u", RTE_CACHE_LINE_SIZE);
 		return EINVAL;
 	}
-	tmpl->txq.cqe_n = log2above(ibcq->cqe);
+	tmpl->txq.cqe_n = log2above(cq_info.cqe_cnt);
 	tmpl->txq.qp_num_8s = qp->ctrl_seg.qp_num << 8;
 	tmpl->txq.wqes = qp->gen_data.sqstart;
 	tmpl->txq.wqe_n = log2above(qp->sq.wqe_cnt);
 	tmpl->txq.qp_db = &qp->gen_data.db[MLX5_SND_DBR];
 	tmpl->txq.bf_reg = qp->gen_data.bf->reg;
-	tmpl->txq.cq_db = cq->dbrec;
+	tmpl->txq.cq_db = cq_info.dbrec;
 	tmpl->txq.cqes =
 		(volatile struct mlx5_cqe (*)[])
-		(uintptr_t)cq->active_buf->buf;
+		(uintptr_t)cq_info.buf;
 	tmpl->txq.elts =
 		(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])
 		((uintptr_t)txq_ctrl + sizeof(*txq_ctrl));
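
Note on the flow-list conversion: the mlx5.h and mlx5_flow.c hunks swap the BSD `<sys/queue.h>` LIST_* macros for their TAILQ_* counterparts. A tail queue can be appended at the end (TAILQ_INSERT_TAIL), replayed in creation order (TAILQ_FOREACH), and torn down in reverse (TAILQ_FOREACH_REVERSE), and TAILQ_REMOVE needs the list head, which is why the `(void)priv` in priv_flow_destroy() disappears. Below is a minimal standalone sketch of the same pattern; the `struct rule` / `rule_list` names are hypothetical and not part of the driver.

```c
/*
 * Minimal standalone sketch of the LIST -> TAILQ pattern used above,
 * with hypothetical names (struct rule, rule_list); not the driver code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct rule {
	TAILQ_ENTRY(rule) next; /* Replaces LIST_ENTRY(): links both ways. */
	int id;
};

TAILQ_HEAD(rule_list, rule);

int
main(void)
{
	struct rule_list rules;
	struct rule *r;
	struct rule *tmp;
	int i;

	TAILQ_INIT(&rules);
	/* TAILQ_INSERT_TAIL keeps creation order, unlike LIST_INSERT_HEAD. */
	for (i = 0; i < 3; ++i) {
		r = calloc(1, sizeof(*r));
		if (r == NULL)
			return EXIT_FAILURE;
		r->id = i;
		TAILQ_INSERT_TAIL(&rules, r, next);
	}
	/* Replay in creation order, as priv_flow_start() now can. */
	TAILQ_FOREACH(r, &rules, next)
		printf("start rule %d\n", r->id);
	/* Tear down in reverse order, as priv_flow_stop() now does. */
	TAILQ_FOREACH_REVERSE(r, &rules, rule_list, next)
		printf("stop rule %d\n", r->id);
	/* TAILQ_REMOVE needs the head, hence &priv->flows in the driver. */
	while ((tmp = TAILQ_FIRST(&rules)) != NULL) {
		TAILQ_REMOVE(&rules, tmp, next);
		free(tmp);
	}
	return EXIT_SUCCESS;
}
```

TAILQ_FOREACH_REVERSE takes the tag of the head structure (`rule_list` here, `mlx5_flows` in the driver), which is why the TAILQ_HEAD in mlx5.h keeps its `mlx5_flows` name.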
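Note on the Tx element cleanup: txq_free_elts() now frees each ring entry with rte_pktmbuf_free_seg() rather than rte_pktmbuf_free(). The latter releases the entire segment chain reachable from the given mbuf, while rte_pktmbuf_free_seg() releases only that segment, which matches a ring whose slots each track a single segment of a possibly multi-segment packet. A sketch of the distinction, assuming a working DPDK 17.05-era environment (the pool name and sizes are arbitrary):

```c
/*
 * Hypothetical illustration (not driver code) of rte_pktmbuf_free() vs
 * rte_pktmbuf_free_seg(): when a ring holds one segment per slot,
 * per-slot cleanup must free exactly one segment.
 */
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

int
main(int argc, char **argv)
{
	struct rte_mempool *mp;
	struct rte_mbuf *slots[2];

	if (rte_eal_init(argc, argv) < 0)
		return -1;
	/* Arbitrary pool parameters, enough for the example. */
	mp = rte_pktmbuf_pool_create("example_pool", 255, 0, 0,
				     RTE_MBUF_DEFAULT_BUF_SIZE,
				     rte_socket_id());
	if (mp == NULL)
		return -1;
	/* Build a two-segment packet and track each segment in its own
	 * slot, the way a Tx element ring does once descriptors are
	 * posted. */
	slots[0] = rte_pktmbuf_alloc(mp);
	slots[1] = rte_pktmbuf_alloc(mp);
	if (slots[0] == NULL || slots[1] == NULL)
		return -1;
	if (rte_pktmbuf_chain(slots[0], slots[1]) != 0)
		return -1;
	/*
	 * Per-slot completion: free exactly one segment per slot.
	 * rte_pktmbuf_free(slots[0]) would also release slots[1]'s
	 * segment, turning the second call into a double free.
	 */
	rte_pktmbuf_free_seg(slots[0]);
	rte_pktmbuf_free_seg(slots[1]);
	rte_mempool_free(mp);
	return 0;
}
```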