author    | Christian Ehrhardt <christian.ehrhardt@canonical.com> | 2016-12-08 14:07:29 +0100
committer | Christian Ehrhardt <christian.ehrhardt@canonical.com> | 2016-12-08 14:10:05 +0100
commit    | 6b3e017e5d25f15da73f7700f7f2ac553ef1a2e9 (patch)
tree      | 1b1fb3f903b2282e261ade69e3c17952b3fd3464 /drivers/net/mlx5
parent    | 32e04ea00cd159613e04acef75e52bfca6eeff2f (diff)
Imported Upstream version 16.11
Change-Id: I1944c65ddc88a9ad70f8c0eb6731552b84fbcb77
Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
Diffstat (limited to 'drivers/net/mlx5')
-rw-r--r-- | drivers/net/mlx5/mlx5.c        |  29
-rw-r--r-- | drivers/net/mlx5/mlx5_defs.h   |   2
-rw-r--r-- | drivers/net/mlx5/mlx5_ethdev.c |   8
-rw-r--r-- | drivers/net/mlx5/mlx5_prm.h    |  86
-rw-r--r-- | drivers/net/mlx5/mlx5_rxq.c    |  13
-rw-r--r-- | drivers/net/mlx5/mlx5_rxtx.c   | 569
-rw-r--r-- | drivers/net/mlx5/mlx5_rxtx.h   |  24
-rw-r--r-- | drivers/net/mlx5/mlx5_txq.c    |  18
8 files changed, 377 insertions, 372 deletions
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 9448374e..90cc35e4 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -355,7 +355,7 @@ static struct eth_driver mlx5_driver;
  *   0 on success, negative errno value on failure.
  */
 static int
-mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 {
     struct ibv_device **list;
     struct ibv_device *ibv_dev;
@@ -511,7 +511,7 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
         priv->mtu = ETHER_MTU;
         priv->mps = mps; /* Enable MPW by default if supported. */
         priv->cqe_comp = 1; /* Enable compression by default. */
-        err = mlx5_args(priv, pci_dev->devargs);
+        err = mlx5_args(priv, pci_dev->device.devargs);
         if (err) {
             ERROR("failed to process device arguments: %s",
                   strerror(err));
@@ -617,7 +617,7 @@
             snprintf(name, sizeof(name), "%s port %u",
                  ibv_get_device_name(ibv_dev), port);
-            eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_PCI);
+            eth_dev = rte_eth_dev_allocate(name);
         }
         if (eth_dev == NULL) {
             ERROR("can not allocate rte ethdev");
@@ -729,9 +729,11 @@ static const struct rte_pci_id mlx5_pci_id_map[] = {
 static struct eth_driver mlx5_driver = {
     .pci_drv = {
-        .name = MLX5_DRIVER_NAME,
+        .driver = {
+            .name = MLX5_DRIVER_NAME
+        },
         .id_table = mlx5_pci_id_map,
-        .devinit = mlx5_pci_devinit,
+        .probe = mlx5_pci_probe,
         .drv_flags = RTE_PCI_DRV_INTR_LSC,
     },
     .dev_private_size = sizeof(struct priv)
@@ -740,11 +742,10 @@ static struct eth_driver mlx5_driver = {
 /**
  * Driver initialization routine.
  */
-static int
-rte_mlx5_pmd_init(const char *name, const char *args)
+RTE_INIT(rte_mlx5_pmd_init);
+static void
+rte_mlx5_pmd_init(void)
 {
-    (void)name;
-    (void)args;
     /*
      * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
      * huge pages. Calling ibv_fork_init() during init allows
@@ -754,13 +755,7 @@ rte_mlx5_pmd_init(const char *name, const char *args)
     setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
     ibv_fork_init();
     rte_eal_pci_register(&mlx5_driver.pci_drv);
-    return 0;
 }
 
-static struct rte_driver rte_mlx5_driver = {
-    .type = PMD_PDEV,
-    .init = rte_mlx5_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(rte_mlx5_driver, mlx5);
-DRIVER_REGISTER_PCI_TABLE(mlx5, mlx5_pci_id_map);
+RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
+RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
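The hunks above track the 16.11 driver-model rework: the `devinit` callback registered via `PMD_REGISTER_DRIVER` becomes a `probe` callback, and initialization moves into a constructor declared with `RTE_INIT()`. A minimal sketch of the constructor idea under the hood, using a plain GCC/Clang attribute and an invented `toy_driver` (not the mlx5 PMD itself):

```c
#include <stdio.h>

/* Hedged sketch: DPDK's RTE_INIT() expands to a function marked as a
 * constructor, i.e. it runs before main(). Everything named "toy_*"
 * here is a placeholder for illustration only. */
struct toy_driver {
	const char *name;
	int (*probe)(void);
};

static int toy_probe(void) { return 0; }

static struct toy_driver toy = { .name = "net_toy", .probe = toy_probe };

__attribute__((constructor))
static void toy_driver_init(void)
{
	/* The real PMD calls rte_eal_pci_register() here; this stub
	 * only demonstrates that the constructor has run. */
	printf("registered %s\n", toy.name);
}

int main(void) { return 0; }
```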
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index cc2a6f3e..b32816e6 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -37,7 +37,7 @@
 #include "mlx5_autoconf.h"
 
 /* Reported driver name. */
-#define MLX5_DRIVER_NAME "librte_pmd_mlx5"
+#define MLX5_DRIVER_NAME "net_mlx5"
 
 /* Maximum number of simultaneous MAC addresses. */
 #define MLX5_MAX_MAC_ADDRESSES 128
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index ba1ec2a6..c0f73e93 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -933,7 +933,7 @@ recover:
         if (rehash)
             ret = rxq_rehash(dev, rxq_ctrl);
         else
-            ret = rxq_ctrl_setup(dev, rxq_ctrl, rxq->elts_n,
+            ret = rxq_ctrl_setup(dev, rxq_ctrl, 1 << rxq->elts_n,
                          rxq_ctrl->socket, NULL, rxq->mp);
         if (!ret)
             continue;
@@ -1194,7 +1194,7 @@ mlx5_dev_link_status_handler(void *arg)
     ret = priv_dev_link_status_handler(priv, dev);
     priv_unlock(priv);
     if (ret)
-        _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+        _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 }
 
 /**
@@ -1217,7 +1217,7 @@ mlx5_dev_interrupt_handler(struct rte_intr_handle *intr_handle, void *cb_arg)
     ret = priv_dev_link_status_handler(priv, dev);
     priv_unlock(priv);
     if (ret)
-        _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+        _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 }
 
 /**
@@ -1441,7 +1441,7 @@ mlx5_secondary_data_setup(struct priv *priv)
         if (txq_ctrl != NULL) {
             if (txq_ctrl_setup(priv->dev,
                        txq_ctrl,
-                       primary_txq->elts_n,
+                       1 << primary_txq->elts_n,
                        primary_txq_ctrl->socket,
                        NULL) == 0) {
                 txq_ctrl->txq.stats.idx =
diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index 1c369cac..7f31a2f7 100644
--- a/drivers/net/mlx5/mlx5_prm.h
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -67,8 +67,15 @@
 /* Maximum number of packets a multi-packet WQE can handle. */
 #define MLX5_MPW_DSEG_MAX 5
 
-/* Room for inline data in regular work queue element. */
-#define MLX5_WQE64_INL_DATA 12
+/* WQE DWORD size */
+#define MLX5_WQE_DWORD_SIZE 16
+
+/* WQE size */
+#define MLX5_WQE_SIZE (4 * MLX5_WQE_DWORD_SIZE)
+
+/* Compute the number of DS. */
+#define MLX5_WQE_DS(n) \
+    (((n) + MLX5_WQE_DWORD_SIZE - 1) / MLX5_WQE_DWORD_SIZE)
 
 /* Room for inline data in multi-packet WQE. */
 #define MLX5_MWQE64_INL_DATA 28
@@ -106,59 +113,26 @@ struct mlx5_wqe_eth_seg_small {
     uint16_t mss;
     uint32_t rsvd2;
     uint16_t inline_hdr_sz;
+    uint8_t inline_hdr[2];
 };
 
-/* Regular WQE. */
-struct mlx5_wqe_regular {
-    union {
-        struct mlx5_wqe_ctrl_seg ctrl;
-        uint32_t data[4];
-    } ctrl;
-    struct mlx5_wqe_eth_seg eseg;
-    struct mlx5_wqe_data_seg dseg;
-} __rte_aligned(64);
-
-/* Inline WQE. */
-struct mlx5_wqe_inl {
-    union {
-        struct mlx5_wqe_ctrl_seg ctrl;
-        uint32_t data[4];
-    } ctrl;
-    struct mlx5_wqe_eth_seg eseg;
+struct mlx5_wqe_inl_small {
     uint32_t byte_cnt;
-    uint8_t data[MLX5_WQE64_INL_DATA];
-} __rte_aligned(64);
+    uint8_t raw;
+};
 
-/* Multi-packet WQE. */
-struct mlx5_wqe_mpw {
-    union {
-        struct mlx5_wqe_ctrl_seg ctrl;
-        uint32_t data[4];
-    } ctrl;
+/* Small common part of the WQE. */
+struct mlx5_wqe {
+    uint32_t ctrl[4];
     struct mlx5_wqe_eth_seg_small eseg;
-    struct mlx5_wqe_data_seg dseg[2];
-} __rte_aligned(64);
+};
 
-/* Multi-packet WQE with inline. */
-struct mlx5_wqe_mpw_inl {
-    union {
-        struct mlx5_wqe_ctrl_seg ctrl;
-        uint32_t data[4];
-    } ctrl;
-    struct mlx5_wqe_eth_seg_small eseg;
-    uint32_t byte_cnt;
-    uint8_t data[MLX5_MWQE64_INL_DATA];
+/* WQE. */
+struct mlx5_wqe64 {
+    struct mlx5_wqe hdr;
+    uint8_t raw[32];
 } __rte_aligned(64);
 
-/* Union of all WQE types. */
-union mlx5_wqe {
-    struct mlx5_wqe_regular wqe;
-    struct mlx5_wqe_inl inl;
-    struct mlx5_wqe_mpw mpw;
-    struct mlx5_wqe_mpw_inl mpw_inl;
-    uint8_t data[64];
-};
-
 /* MPW session status. */
 enum mlx5_mpw_state {
     MLX5_MPW_STATE_OPENED,
@@ -172,7 +146,7 @@ struct mlx5_mpw {
     unsigned int pkts_n;
     unsigned int len;
     unsigned int total_len;
-    volatile union mlx5_wqe *wqe;
+    volatile struct mlx5_wqe *wqe;
     union {
         volatile struct mlx5_wqe_data_seg *dseg[MLX5_MPW_DSEG_MAX];
         volatile uint8_t *raw;
@@ -184,7 +158,21 @@ struct mlx5_cqe {
 #if (RTE_CACHE_LINE_SIZE == 128)
     uint8_t padding[64];
 #endif
-    struct mlx5_cqe64 cqe64;
+    uint8_t pkt_info;
+    uint8_t rsvd0[11];
+    uint32_t rx_hash_res;
+    uint8_t rx_hash_type;
+    uint8_t rsvd1[11];
+    uint8_t hds_ip_ext;
+    uint8_t l4_hdr_type_etc;
+    uint16_t vlan_info;
+    uint8_t rsvd2[12];
+    uint32_t byte_cnt;
+    uint64_t timestamp;
+    uint8_t rsvd3[4];
+    uint16_t wqe_counter;
+    uint8_t rsvd4;
+    uint8_t op_own;
 };
 
 #endif /* RTE_PMD_MLX5_PRM_H_ */
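The new `MLX5_WQE_DS(n)` macro above is a ceiling division: it converts a byte count into the number of 16-byte data segments ("DS") a WQE consumes. A tiny self-contained check of that arithmetic, assuming nothing beyond the macro text shown in the hunk:

```c
#include <assert.h>

#define MLX5_WQE_DWORD_SIZE 16
/* Same ceiling division as the macro added in the hunk above. */
#define MLX5_WQE_DS(n) \
	(((n) + MLX5_WQE_DWORD_SIZE - 1) / MLX5_WQE_DWORD_SIZE)

int main(void)
{
	assert(MLX5_WQE_DS(1) == 1);   /* anything up to 16 B -> 1 DS */
	assert(MLX5_WQE_DS(16) == 1);
	assert(MLX5_WQE_DS(17) == 2);  /* 17..32 B -> 2 DS */
	assert(MLX5_WQE_DS(64) == 4);  /* one full 64-B WQE */
	return 0;
}
```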
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 7dbe8dd2..28e93d3e 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -723,7 +723,7 @@ rxq_free_elts(struct rxq_ctrl *rxq_ctrl)
     if (rxq_ctrl->rxq.elts == NULL)
         return;
-    for (i = 0; (i != rxq_ctrl->rxq.elts_n); ++i) {
+    for (i = 0; (i != (1u << rxq_ctrl->rxq.elts_n)); ++i) {
         if ((*rxq_ctrl->rxq.elts)[i] != NULL)
             rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
         (*rxq_ctrl->rxq.elts)[i] = NULL;
@@ -807,7 +807,7 @@ rxq_cleanup(struct rxq_ctrl *rxq_ctrl)
 int
 rxq_rehash(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl)
 {
-    unsigned int elts_n = rxq_ctrl->rxq.elts_n;
+    unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
     unsigned int i;
     struct ibv_exp_wq_attr mod;
     int err;
@@ -870,7 +870,7 @@ rxq_setup(struct rxq_ctrl *tmpl)
     struct ibv_cq *ibcq = tmpl->cq;
     struct mlx5_cq *cq = to_mxxx(cq, cq);
     struct mlx5_rwq *rwq = container_of(tmpl->wq, struct mlx5_rwq, wq);
-    struct rte_mbuf *(*elts)[tmpl->rxq.elts_n] =
+    struct rte_mbuf *(*elts)[1 << tmpl->rxq.elts_n] =
         rte_calloc_socket("RXQ", 1, sizeof(*elts), 0, tmpl->socket);
 
     if (cq->cqe_sz != RTE_CACHE_LINE_SIZE) {
@@ -881,7 +881,7 @@ rxq_setup(struct rxq_ctrl *tmpl)
     if (elts == NULL)
         return ENOMEM;
     tmpl->rxq.rq_db = rwq->rq.db;
-    tmpl->rxq.cqe_n = ibcq->cqe + 1;
+    tmpl->rxq.cqe_n = log2above(ibcq->cqe);
     tmpl->rxq.cq_ci = 0;
     tmpl->rxq.rq_ci = 0;
     tmpl->rxq.cq_db = cq->dbrec;
@@ -924,8 +924,9 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
         .priv = priv,
         .socket = socket,
         .rxq = {
-            .elts_n = desc,
+            .elts_n = log2above(desc),
             .mp = mp,
+            .rss_hash = priv->rxqs_n > 1,
         },
     };
     struct ibv_exp_wq_attr mod;
@@ -1153,7 +1154,7 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
     }
     /* Reuse buffers from original queue if possible. */
     if (rxq_ctrl->rxq.elts_n) {
-        assert(rxq_ctrl->rxq.elts_n == desc);
+        assert(1 << rxq_ctrl->rxq.elts_n == desc);
         assert(rxq_ctrl->rxq.elts != tmpl.rxq.elts);
         ret = rxq_alloc_elts(&tmpl, desc, rxq_ctrl->rxq.elts);
     } else
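A recurring theme in these hunks: ring sizes (`elts_n`, `cqe_n`, later `wqe_n`) are now stored as log2 values, expanded with `1 << n` at the point of use, and derived with `log2above()`. A hedged reimplementation of that helper for illustration (the real one lives in the PMD's utility header; this version is only a sketch of the same ceil-log2 behavior):

```c
#include <assert.h>

/* Hedged sketch of a ceil(log2(v)) helper, equivalent in spirit to
 * the PMD's log2above(); not the actual mlx5 source. */
static unsigned int
log2above(unsigned int v)
{
	unsigned int l; /* floor(log2(v)) */
	unsigned int r; /* 1 if any lower bit was set (round up) */

	for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
		r |= (v & 1);
	return l + r;
}

int main(void)
{
	unsigned int desc = 512;               /* requested ring size */
	unsigned int elts_n = log2above(desc);

	assert(elts_n == 9);                   /* fits in a 4-bit field */
	assert((1u << elts_n) == desc);        /* expanded where needed */
	assert(log2above(513) == 10);          /* non-powers round up */
	return 0;
}
```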
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 79f7fa99..9b598014 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -81,10 +81,10 @@
  *   0 the first time.
  */
 static inline int
-check_cqe64_seen(volatile struct mlx5_cqe64 *cqe)
+check_cqe_seen(volatile struct mlx5_cqe *cqe)
 {
     static const uint8_t magic[] = "seen";
-    volatile uint8_t (*buf)[sizeof(cqe->rsvd40)] = &cqe->rsvd40;
+    volatile uint8_t (*buf)[sizeof(cqe->rsvd3)] = &cqe->rsvd3;
     int ret = 1;
     unsigned int i;
 
@@ -99,9 +99,9 @@ check_cqe64_seen(volatile struct mlx5_cqe64 *cqe)
 #endif /* NDEBUG */
 
 static inline int
-check_cqe64(volatile struct mlx5_cqe64 *cqe,
-            unsigned int cqes_n, const uint16_t ci)
-            __attribute__((always_inline));
+check_cqe(volatile struct mlx5_cqe *cqe,
+          unsigned int cqes_n, const uint16_t ci)
+          __attribute__((always_inline));
 
 /**
  * Check whether CQE is valid.
@@ -117,8 +117,8 @@ check_cqe64(volatile struct mlx5_cqe64 *cqe,
  *   0 on success, 1 on failure.
  */
 static inline int
-check_cqe64(volatile struct mlx5_cqe64 *cqe,
-            unsigned int cqes_n, const uint16_t ci)
+check_cqe(volatile struct mlx5_cqe *cqe,
+          unsigned int cqes_n, const uint16_t ci)
 {
     uint16_t idx = ci & cqes_n;
     uint8_t op_own = cqe->op_own;
@@ -136,14 +136,14 @@ check_cqe64(volatile struct mlx5_cqe64 *cqe,
         if ((syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR) ||
             (syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR))
             return 0;
-        if (!check_cqe64_seen(cqe))
+        if (!check_cqe_seen(cqe))
             ERROR("unexpected CQE error %u (0x%02x)"
                   " syndrome 0x%02x",
                   op_code, op_code, syndrome);
         return 1;
     } else if ((op_code != MLX5_CQE_RESP_SEND) &&
                (op_code != MLX5_CQE_REQ)) {
-        if (!check_cqe64_seen(cqe))
+        if (!check_cqe_seen(cqe))
             ERROR("unexpected CQE opcode %u (0x%02x)",
                   op_code, op_code);
         return 1;
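check_cqe() relies on the CQE ownership bit, which hardware flips on every pass over the power-of-two completion ring so software can distinguish a fresh entry from a stale one. A hedged standalone restatement of that invariant (names and mask are illustrative, not the PMD's exact macros):

```c
#include <assert.h>
#include <stdint.h>

/* Hedged sketch of CQE ownership checking. The owner bit expected by
 * software alternates each time the consumer index wraps the ring. */
#define CQE_OWNER_MASK 0x1

static int
cqe_is_fresh(uint8_t op_own, uint16_t ci, uint16_t ring_sz /* power of 2 */)
{
	/* Expected owner bit flips on every wraparound of ci. */
	uint8_t expected = !!(ci & ring_sz);

	return (op_own & CQE_OWNER_MASK) == expected;
}

int main(void)
{
	/* First pass over a 256-entry ring: owner bit 0 is fresh. */
	assert(cqe_is_fresh(0x0, 10, 256));
	/* Second pass (ci in [256, 512)): owner bit 1 is fresh. */
	assert(cqe_is_fresh(0x1, 300, 256));
	/* A leftover entry from the other pass is stale. */
	assert(!cqe_is_fresh(0x1, 10, 256));
	return 0;
}
```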
@@ -152,6 +152,9 @@ check_cqe64(volatile struct mlx5_cqe64 *cqe,
     return 0;
 }
 
+static inline void
+txq_complete(struct txq *txq) __attribute__((always_inline));
+
 /**
  * Manage TX completions.
  *
@@ -160,34 +163,34 @@ check_cqe64(volatile struct mlx5_cqe64 *cqe,
  * @param txq
  *   Pointer to TX queue structure.
  */
-static void
+static inline void
 txq_complete(struct txq *txq)
 {
-    const unsigned int elts_n = txq->elts_n;
-    const unsigned int cqe_n = txq->cqe_n;
+    const unsigned int elts_n = 1 << txq->elts_n;
+    const unsigned int cqe_n = 1 << txq->cqe_n;
     const unsigned int cqe_cnt = cqe_n - 1;
     uint16_t elts_free = txq->elts_tail;
     uint16_t elts_tail;
     uint16_t cq_ci = txq->cq_ci;
-    volatile struct mlx5_cqe64 *cqe = NULL;
-    volatile union mlx5_wqe *wqe;
+    volatile struct mlx5_cqe *cqe = NULL;
+    volatile struct mlx5_wqe *wqe;
 
     do {
-        volatile struct mlx5_cqe64 *tmp;
+        volatile struct mlx5_cqe *tmp;
 
-        tmp = &(*txq->cqes)[cq_ci & cqe_cnt].cqe64;
-        if (check_cqe64(tmp, cqe_n, cq_ci))
+        tmp = &(*txq->cqes)[cq_ci & cqe_cnt];
+        if (check_cqe(tmp, cqe_n, cq_ci))
             break;
         cqe = tmp;
 #ifndef NDEBUG
         if (MLX5_CQE_FORMAT(cqe->op_own) == MLX5_COMPRESSED) {
-            if (!check_cqe64_seen(cqe))
+            if (!check_cqe_seen(cqe))
                 ERROR("unexpected compressed CQE, TX stopped");
             return;
         }
         if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) ||
             (MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) {
-            if (!check_cqe64_seen(cqe))
+            if (!check_cqe_seen(cqe))
                 ERROR("unexpected error CQE, TX stopped");
             return;
         }
@@ -196,9 +199,10 @@ txq_complete(struct txq *txq)
     } while (1);
     if (unlikely(cqe == NULL))
         return;
-    wqe = &(*txq->wqes)[htons(cqe->wqe_counter) & (txq->wqe_n - 1)];
-    elts_tail = wqe->wqe.ctrl.data[3];
-    assert(elts_tail < txq->wqe_n);
+    wqe = &(*txq->wqes)[htons(cqe->wqe_counter) &
+                        ((1 << txq->wqe_n) - 1)].hdr;
+    elts_tail = wqe->ctrl[3];
+    assert(elts_tail < (1 << txq->wqe_n));
     /* Free buffers. */
     while (elts_free != elts_tail) {
         struct rte_mbuf *elt = (*txq->elts)[elts_free];
@@ -284,112 +288,6 @@ txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
 }
 
 /**
- * Write a regular WQE.
- *
- * @param txq
- *   Pointer to TX queue structure.
- * @param wqe
- *   Pointer to the WQE to fill.
- * @param buf
- *   Buffer.
- * @param length
- *   Packet length.
- *
- * @return ds
- *   Number of DS elements consumed.
- */
-static inline unsigned int
-mlx5_wqe_write(struct txq *txq, volatile union mlx5_wqe *wqe,
-               struct rte_mbuf *buf, uint32_t length)
-{
-    uintptr_t raw = (uintptr_t)&wqe->wqe.eseg.inline_hdr_start;
-    uint16_t ds;
-    uint16_t pkt_inline_sz = 16;
-    uintptr_t addr = rte_pktmbuf_mtod(buf, uintptr_t);
-    struct mlx5_wqe_data_seg *dseg = NULL;
-
-    assert(length >= 16);
-    /* Start the know and common part of the WQE structure. */
-    wqe->wqe.ctrl.data[0] = htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND);
-    wqe->wqe.ctrl.data[2] = 0;
-    wqe->wqe.ctrl.data[3] = 0;
-    wqe->wqe.eseg.rsvd0 = 0;
-    wqe->wqe.eseg.rsvd1 = 0;
-    wqe->wqe.eseg.mss = 0;
-    wqe->wqe.eseg.rsvd2 = 0;
-    /* Start by copying the Ethernet Header. */
-    rte_mov16((uint8_t *)raw, (uint8_t *)addr);
-    length -= 16;
-    addr += 16;
-    /* Replace the Ethernet type by the VLAN if necessary. */
-    if (buf->ol_flags & PKT_TX_VLAN_PKT) {
-        uint32_t vlan = htonl(0x81000000 | buf->vlan_tci);
-
-        memcpy((uint8_t *)(raw + 16 - sizeof(vlan)),
-               &vlan, sizeof(vlan));
-        addr -= sizeof(vlan);
-        length += sizeof(vlan);
-    }
-    /* Inline if enough room. */
-    if (txq->max_inline != 0) {
-        uintptr_t end = (uintptr_t)&(*txq->wqes)[txq->wqe_n];
-        uint16_t max_inline = txq->max_inline * RTE_CACHE_LINE_SIZE;
-        uint16_t room;
-
-        raw += 16;
-        room = end - (uintptr_t)raw;
-        if (room > max_inline) {
-            uintptr_t addr_end = (addr + max_inline) &
-                ~(RTE_CACHE_LINE_SIZE - 1);
-            uint16_t copy_b = ((addr_end - addr) > length) ?
-                      length :
-                      (addr_end - addr);
-
-            rte_memcpy((void *)raw, (void *)addr, copy_b);
-            addr += copy_b;
-            length -= copy_b;
-            pkt_inline_sz += copy_b;
-            /* Sanity check. */
-            assert(addr <= addr_end);
-        }
-        /* Store the inlined packet size in the WQE. */
-        wqe->wqe.eseg.inline_hdr_sz = htons(pkt_inline_sz);
-        /*
-         * 2 DWORDs consumed by the WQE header + 1 DSEG +
-         * the size of the inline part of the packet.
-         */
-        ds = 2 + ((pkt_inline_sz - 2 + 15) / 16);
-        if (length > 0) {
-            dseg = (struct mlx5_wqe_data_seg *)
-                ((uintptr_t)wqe + (ds * 16));
-            if ((uintptr_t)dseg >= end)
-                dseg = (struct mlx5_wqe_data_seg *)
-                    ((uintptr_t)&(*txq->wqes)[0]);
-            goto use_dseg;
-        }
-    } else {
-        /* Add the remaining packet as a simple ds. */
-        ds = 3;
-        /*
-         * No inline has been done in the packet, only the Ethernet
-         * Header as been stored.
-         */
-        wqe->wqe.eseg.inline_hdr_sz = htons(16);
-        dseg = (struct mlx5_wqe_data_seg *)
-            ((uintptr_t)wqe + (ds * 16));
-use_dseg:
-        *dseg = (struct mlx5_wqe_data_seg) {
-            .addr = htonll(addr),
-            .byte_count = htonl(length),
-            .lkey = txq_mp2mr(txq, txq_mb2mp(buf)),
-        };
-        ++ds;
-    }
-    wqe->wqe.ctrl.data[1] = htonl(txq->qp_num_8s | ds);
-    return ds;
-}
-
-/**
  * Ring TX queue doorbell.
  *
  * @param txq
@@ -409,8 +307,8 @@ mlx5_tx_dbrec(struct txq *txq)
     *txq->qp_db = htonl(txq->wqe_ci);
     /* Ensure ordering between DB record and BF copy. */
     rte_wmb();
-    rte_mov16(dst, (uint8_t *)data);
-    txq->bf_offset ^= txq->bf_buf_size;
+    memcpy(dst, (uint8_t *)data, 16);
+    txq->bf_offset ^= (1 << txq->bf_buf_size);
 }
 
 /**
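mlx5_tx_dbrec() above writes the doorbell record first, then issues rte_wmb() before the BlueFlame copy, so the device can never observe the BF write ahead of the updated record. A hedged, generic C11 restatement of that "publish, fence, poke" pattern using plain stand-ins (this is not DPDK code; the fence plays the role of rte_wmb()):

```c
#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

static volatile uint32_t db_record; /* stand-in for *txq->qp_db */
static uint8_t bf_reg[64];          /* stand-in for the BF MMIO window */

static void
ring_doorbell(uint32_t wqe_ci, const uint8_t *wqe, size_t len)
{
	db_record = wqe_ci;                        /* 1. publish DB record */
	atomic_thread_fence(memory_order_release); /* 2. like rte_wmb() */
	memcpy(bf_reg, wqe, len);                  /* 3. BF copy last */
}

int main(void)
{
	uint8_t wqe[16] = { 0 };

	ring_doorbell(1, wqe, sizeof(wqe));
	return 0;
}
```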
@@ -424,9 +322,9 @@ mlx5_tx_dbrec(struct txq *txq)
 static inline void
 tx_prefetch_cqe(struct txq *txq, uint16_t ci)
 {
-    volatile struct mlx5_cqe64 *cqe;
+    volatile struct mlx5_cqe *cqe;
 
-    cqe = &(*txq->cqes)[ci & (txq->cqe_n - 1)].cqe64;
+    cqe = &(*txq->cqes)[ci & ((1 << txq->cqe_n) - 1)];
     rte_prefetch0(cqe);
 }
 
@@ -441,9 +339,9 @@ tx_prefetch_cqe(struct txq *txq, uint16_t ci)
 static inline void
 tx_prefetch_wqe(struct txq *txq, uint16_t ci)
 {
-    volatile union mlx5_wqe *wqe;
+    volatile struct mlx5_wqe64 *wqe;
 
-    wqe = &(*txq->wqes)[ci & (txq->wqe_n - 1)];
+    wqe = &(*txq->wqes)[ci & ((1 << txq->wqe_n) - 1)];
     rte_prefetch0(wqe);
 }
 
@@ -465,12 +363,15 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 {
     struct txq *txq = (struct txq *)dpdk_txq;
     uint16_t elts_head = txq->elts_head;
-    const unsigned int elts_n = txq->elts_n;
+    const unsigned int elts_n = 1 << txq->elts_n;
     unsigned int i = 0;
     unsigned int j = 0;
     unsigned int max;
     unsigned int comp;
-    volatile union mlx5_wqe *wqe = NULL;
+    volatile struct mlx5_wqe *wqe = NULL;
+    unsigned int segs_n = 0;
+    struct rte_mbuf *buf = NULL;
+    uint8_t *raw;
 
     if (unlikely(!pkts_n))
         return 0;
@@ -484,13 +385,17 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
     if (max > elts_n)
         max -= elts_n;
     do {
-        struct rte_mbuf *buf = *(pkts++);
-        unsigned int elts_head_next;
+        volatile struct mlx5_wqe_data_seg *dseg = NULL;
         uint32_t length;
-        unsigned int segs_n = buf->nb_segs;
-        volatile struct mlx5_wqe_data_seg *dseg;
-        unsigned int ds = sizeof(*wqe) / 16;
+        unsigned int ds = 0;
+        uintptr_t addr;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+        uint32_t total_length = 0;
+#endif
 
+        /* first_seg */
+        buf = *(pkts++);
+        segs_n = buf->nb_segs;
         /*
          * Make sure there is enough room to store this packet and
          * that one ring entry remains unused.
@@ -499,73 +404,176 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
         if (max < segs_n + 1)
             break;
         max -= segs_n;
-        --pkts_n;
-        elts_head_next = (elts_head + 1) & (elts_n - 1);
-        wqe = &(*txq->wqes)[txq->wqe_ci & (txq->wqe_n - 1)];
-        tx_prefetch_wqe(txq, txq->wqe_ci);
+        --segs_n;
+        if (!segs_n)
+            --pkts_n;
+        wqe = &(*txq->wqes)[txq->wqe_ci &
+                            ((1 << txq->wqe_n) - 1)].hdr;
         tx_prefetch_wqe(txq, txq->wqe_ci + 1);
-        if (pkts_n)
+        if (pkts_n > 1)
             rte_prefetch0(*pkts);
+        addr = rte_pktmbuf_mtod(buf, uintptr_t);
         length = DATA_LEN(buf);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+        total_length = length;
+#endif
+        assert(length >= MLX5_WQE_DWORD_SIZE);
         /* Update element. */
         (*txq->elts)[elts_head] = buf;
+        elts_head = (elts_head + 1) & (elts_n - 1);
         /* Prefetch next buffer data. */
-        if (pkts_n)
-            rte_prefetch0(rte_pktmbuf_mtod(*pkts,
-                                           volatile void *));
+        if (pkts_n > 1) {
+            volatile void *pkt_addr;
+
+            pkt_addr = rte_pktmbuf_mtod(*pkts, volatile void *);
+            rte_prefetch0(pkt_addr);
+        }
         /* Should we enable HW CKSUM offload */
         if (buf->ol_flags &
             (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
-            wqe->wqe.eseg.cs_flags =
+            wqe->eseg.cs_flags =
                 MLX5_ETH_WQE_L3_CSUM |
                 MLX5_ETH_WQE_L4_CSUM;
         } else {
-            wqe->wqe.eseg.cs_flags = 0;
+            wqe->eseg.cs_flags = 0;
         }
-        ds = mlx5_wqe_write(txq, wqe, buf, length);
-        if (segs_n == 1)
-            goto skip_segs;
-        dseg = (volatile struct mlx5_wqe_data_seg *)
-            (((uintptr_t)wqe) + ds * 16);
-        while (--segs_n) {
+        raw = (uint8_t *)(uintptr_t)&wqe->eseg.inline_hdr[0];
+        /* Start the know and common part of the WQE structure. */
+        wqe->ctrl[0] = htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND);
+        wqe->ctrl[2] = 0;
+        wqe->ctrl[3] = 0;
+        wqe->eseg.rsvd0 = 0;
+        wqe->eseg.rsvd1 = 0;
+        wqe->eseg.mss = 0;
+        wqe->eseg.rsvd2 = 0;
+        /* Start by copying the Ethernet Header. */
+        memcpy((uint8_t *)raw, ((uint8_t *)addr), 16);
+        length -= MLX5_WQE_DWORD_SIZE;
+        addr += MLX5_WQE_DWORD_SIZE;
+        /* Replace the Ethernet type by the VLAN if necessary. */
+        if (buf->ol_flags & PKT_TX_VLAN_PKT) {
+            uint32_t vlan = htonl(0x81000000 | buf->vlan_tci);
+
+            memcpy((uint8_t *)(raw + MLX5_WQE_DWORD_SIZE -
+                               sizeof(vlan)),
+                   &vlan, sizeof(vlan));
+            addr -= sizeof(vlan);
+            length += sizeof(vlan);
+        }
+        /* Inline if enough room. */
+        if (txq->max_inline != 0) {
+            uintptr_t end =
+                (uintptr_t)&(*txq->wqes)[1 << txq->wqe_n];
+            uint16_t max_inline =
+                txq->max_inline * RTE_CACHE_LINE_SIZE;
+            uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE;
+            uint16_t room;
+
+            raw += MLX5_WQE_DWORD_SIZE;
+            room = end - (uintptr_t)raw;
+            if (room > max_inline) {
+                uintptr_t addr_end = (addr + max_inline) &
+                    ~(RTE_CACHE_LINE_SIZE - 1);
+                uint16_t copy_b = ((addr_end - addr) > length) ?
+                          length :
+                          (addr_end - addr);
+
+                rte_memcpy((void *)raw, (void *)addr, copy_b);
+                addr += copy_b;
+                length -= copy_b;
+                pkt_inline_sz += copy_b;
+                /* Sanity check. */
+                assert(addr <= addr_end);
+            }
+            /* Store the inlined packet size in the WQE. */
+            wqe->eseg.inline_hdr_sz = htons(pkt_inline_sz);
             /*
-             * Spill on next WQE when the current one does not have
-             * enough room left. Size of WQE must a be a multiple
-             * of data segment size.
+             * 2 DWORDs consumed by the WQE header + 1 DSEG +
+             * the size of the inline part of the packet.
              */
-            assert(!(sizeof(*wqe) % sizeof(*dseg)));
-            if (!(ds % (sizeof(*wqe) / 16)))
-                dseg = (volatile void *)
-                    &(*txq->wqes)[txq->wqe_ci++ &
-                                  (txq->wqe_n - 1)];
-            else
-                ++dseg;
+            ds = 2 + MLX5_WQE_DS(pkt_inline_sz - 2);
+            if (length > 0) {
+                dseg = (struct mlx5_wqe_data_seg *)
+                    ((uintptr_t)wqe +
+                     (ds * MLX5_WQE_DWORD_SIZE));
+                if ((uintptr_t)dseg >= end)
+                    dseg = (struct mlx5_wqe_data_seg *)
+                        ((uintptr_t)&(*txq->wqes)[0]);
+                goto use_dseg;
+            } else if (!segs_n) {
+                goto next_pkt;
+            } else {
+                goto next_seg;
+            }
+        } else {
+            /*
+             * No inline has been done in the packet, only the
+             * Ethernet Header as been stored.
+             */
+            wqe->eseg.inline_hdr_sz = htons(MLX5_WQE_DWORD_SIZE);
+            dseg = (struct mlx5_wqe_data_seg *)
+                ((uintptr_t)wqe + (3 * MLX5_WQE_DWORD_SIZE));
+            ds = 3;
+use_dseg:
+            /* Add the remaining packet as a simple ds. */
+            *dseg = (struct mlx5_wqe_data_seg) {
+                .addr = htonll(addr),
+                .byte_count = htonl(length),
+                .lkey = txq_mp2mr(txq, txq_mb2mp(buf)),
+            };
             ++ds;
-            buf = buf->next;
-            assert(buf);
-            /* Store segment information. */
-            dseg->byte_count = htonl(DATA_LEN(buf));
-            dseg->lkey = txq_mp2mr(txq, txq_mb2mp(buf));
-            dseg->addr = htonll(rte_pktmbuf_mtod(buf, uintptr_t));
-            (*txq->elts)[elts_head_next] = buf;
-            elts_head_next = (elts_head_next + 1) & (elts_n - 1);
+            if (!segs_n)
+                goto next_pkt;
+        }
+next_seg:
+        assert(buf);
+        assert(ds);
+        assert(wqe);
+        /*
+         * Spill on next WQE when the current one does not have
+         * enough room left. Size of WQE must a be a multiple
+         * of data segment size.
+         */
+        assert(!(MLX5_WQE_SIZE % MLX5_WQE_DWORD_SIZE));
+        if (!(ds % (MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE))) {
+            unsigned int n = (txq->wqe_ci + ((ds + 3) / 4)) &
+                ((1 << txq->wqe_n) - 1);
+
+            dseg = (struct mlx5_wqe_data_seg *)
+                ((uintptr_t)&(*txq->wqes)[n]);
+            tx_prefetch_wqe(txq, n + 1);
+        } else {
+            ++dseg;
+        }
+        ++ds;
+        buf = buf->next;
+        assert(buf);
+        length = DATA_LEN(buf);
 #ifdef MLX5_PMD_SOFT_COUNTERS
-            length += DATA_LEN(buf);
+        total_length += length;
 #endif
-            ++j;
-        }
-        /* Update DS field in WQE. */
-        wqe->wqe.ctrl.data[1] &= htonl(0xffffffc0);
-        wqe->wqe.ctrl.data[1] |= htonl(ds & 0x3f);
-skip_segs:
+        /* Store segment information. */
+        *dseg = (struct mlx5_wqe_data_seg) {
+            .addr = htonll(rte_pktmbuf_mtod(buf, uintptr_t)),
+            .byte_count = htonl(length),
+            .lkey = txq_mp2mr(txq, txq_mb2mp(buf)),
+        };
+        (*txq->elts)[elts_head] = buf;
+        elts_head = (elts_head + 1) & (elts_n - 1);
+        ++j;
+        --segs_n;
+        if (segs_n)
+            goto next_seg;
+        else
+            --pkts_n;
+next_pkt:
+        ++i;
+        wqe->ctrl[1] = htonl(txq->qp_num_8s | ds);
+        txq->wqe_ci += (ds + 3) / 4;
 #ifdef MLX5_PMD_SOFT_COUNTERS
         /* Increment sent bytes counter. */
-        txq->stats.obytes += length;
+        txq->stats.obytes += total_length;
 #endif
-        /* Increment consumer index. */
-        txq->wqe_ci += (ds + 3) / 4;
-        elts_head = elts_head_next;
-        ++i;
     } while (pkts_n);
     /* Take a shortcut if nothing must be sent. */
     if (unlikely(i == 0))
@@ -574,9 +582,9 @@ skip_segs:
     comp = txq->elts_comp + i + j;
     if (comp >= MLX5_TX_COMP_THRESH) {
         /* Request completion on last WQE. */
-        wqe->wqe.ctrl.data[2] = htonl(8);
+        wqe->ctrl[2] = htonl(8);
         /* Save elts_head in unused "immediate" field of WQE. */
-        wqe->wqe.ctrl.data[3] = elts_head;
+        wqe->ctrl[3] = elts_head;
         txq->elts_comp = 0;
     } else {
         txq->elts_comp = comp;
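The reworked burst loop above sizes each send WQE in 16-byte units (`ds`: 2 for control plus Ethernet segment, plus inline data, plus one per pointer segment) and then advances `wqe_ci` by `(ds + 3) / 4` 64-byte slots. A hedged arithmetic check of that accounting, independent of the PMD:

```c
#include <assert.h>

/* Hedged sketch mirroring the DS accounting in the hunk above; this
 * is standalone arithmetic, not PMD code. */
int main(void)
{
	unsigned int ds = 2 + 1;          /* header + one data segment */
	unsigned int slots = (ds + 3) / 4;

	assert(slots == 1);               /* 3 DS fit in one 64-B WQE */
	ds = 2 + 1 + 4;                   /* 4 extra segments spill over */
	assert((ds + 3) / 4 == 2);        /* now two WQE slots are used */
	return 0;
}
```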
@@ -604,28 +612,29 @@ skip_segs:
 static inline void
 mlx5_mpw_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
 {
-    uint16_t idx = txq->wqe_ci & (txq->wqe_n - 1);
+    uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
     volatile struct mlx5_wqe_data_seg (*dseg)[MLX5_MPW_DSEG_MAX] =
         (volatile struct mlx5_wqe_data_seg (*)[])
-        (uintptr_t)&(*txq->wqes)[(idx + 1) & (txq->wqe_n - 1)];
+        (uintptr_t)&(*txq->wqes)[(idx + 1) & ((1 << txq->wqe_n) - 1)];
 
     mpw->state = MLX5_MPW_STATE_OPENED;
     mpw->pkts_n = 0;
     mpw->len = length;
     mpw->total_len = 0;
-    mpw->wqe = &(*txq->wqes)[idx];
-    mpw->wqe->mpw.eseg.mss = htons(length);
-    mpw->wqe->mpw.eseg.inline_hdr_sz = 0;
-    mpw->wqe->mpw.eseg.rsvd0 = 0;
-    mpw->wqe->mpw.eseg.rsvd1 = 0;
-    mpw->wqe->mpw.eseg.rsvd2 = 0;
-    mpw->wqe->mpw.ctrl.data[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
-                                       (txq->wqe_ci << 8) |
-                                       MLX5_OPCODE_TSO);
-    mpw->wqe->mpw.ctrl.data[2] = 0;
-    mpw->wqe->mpw.ctrl.data[3] = 0;
-    mpw->data.dseg[0] = &mpw->wqe->mpw.dseg[0];
-    mpw->data.dseg[1] = &mpw->wqe->mpw.dseg[1];
+    mpw->wqe = (volatile struct mlx5_wqe *)&(*txq->wqes)[idx].hdr;
+    mpw->wqe->eseg.mss = htons(length);
+    mpw->wqe->eseg.inline_hdr_sz = 0;
+    mpw->wqe->eseg.rsvd0 = 0;
+    mpw->wqe->eseg.rsvd1 = 0;
+    mpw->wqe->eseg.rsvd2 = 0;
+    mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
+                              (txq->wqe_ci << 8) | MLX5_OPCODE_TSO);
+    mpw->wqe->ctrl[2] = 0;
+    mpw->wqe->ctrl[3] = 0;
+    mpw->data.dseg[0] = (volatile struct mlx5_wqe_data_seg *)
+        (((uintptr_t)mpw->wqe) + (2 * MLX5_WQE_DWORD_SIZE));
+    mpw->data.dseg[1] = (volatile struct mlx5_wqe_data_seg *)
+        (((uintptr_t)mpw->wqe) + (3 * MLX5_WQE_DWORD_SIZE));
     mpw->data.dseg[2] = &(*dseg)[0];
     mpw->data.dseg[3] = &(*dseg)[1];
     mpw->data.dseg[4] = &(*dseg)[2];
@@ -648,7 +657,7 @@ mlx5_mpw_close(struct txq *txq, struct mlx5_mpw *mpw)
      * Store size in multiple of 16 bytes. Control and Ethernet segments
      * count as 2.
      */
-    mpw->wqe->mpw.ctrl.data[1] = htonl(txq->qp_num_8s | (2 + num));
+    mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | (2 + num));
     mpw->state = MLX5_MPW_STATE_CLOSED;
     if (num < 3)
         ++txq->wqe_ci;
@@ -676,7 +685,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 {
     struct txq *txq = (struct txq *)dpdk_txq;
     uint16_t elts_head = txq->elts_head;
-    const unsigned int elts_n = txq->elts_n;
+    const unsigned int elts_n = 1 << txq->elts_n;
     unsigned int i = 0;
     unsigned int j = 0;
     unsigned int max;
@@ -726,11 +735,11 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
         if ((mpw.state == MLX5_MPW_STATE_OPENED) &&
             ((mpw.len != length) ||
              (segs_n != 1) ||
-             (mpw.wqe->mpw.eseg.cs_flags != cs_flags)))
+             (mpw.wqe->eseg.cs_flags != cs_flags)))
             mlx5_mpw_close(txq, &mpw);
         if (mpw.state == MLX5_MPW_STATE_CLOSED) {
             mlx5_mpw_new(txq, &mpw, length);
-            mpw.wqe->mpw.eseg.cs_flags = cs_flags;
+            mpw.wqe->eseg.cs_flags = cs_flags;
         }
         /* Multi-segment packets must be alone in their MPW. */
         assert((segs_n == 1) || (mpw.pkts_n == 0));
@@ -776,12 +785,12 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
     /* "j" includes both packets and segments. */
     comp = txq->elts_comp + j;
     if (comp >= MLX5_TX_COMP_THRESH) {
-        volatile union mlx5_wqe *wqe = mpw.wqe;
+        volatile struct mlx5_wqe *wqe = mpw.wqe;
 
         /* Request completion on last WQE. */
-        wqe->mpw.ctrl.data[2] = htonl(8);
+        wqe->ctrl[2] = htonl(8);
         /* Save elts_head in unused "immediate" field of WQE. */
-        wqe->mpw.ctrl.data[3] = elts_head;
+        wqe->ctrl[3] = elts_head;
         txq->elts_comp = 0;
     } else {
         txq->elts_comp = comp;
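A multi-packet WQE (MPW) session batches packets only while they keep the same length, are single-segment, and share checksum flags; any mismatch forces a close, as the condition in mlx5_tx_burst_mpw() above shows. A hedged standalone restatement of that predicate (plain scalars, hypothetical struct):

```c
#include <assert.h>
#include <stdint.h>

/* Hedged sketch restating the "must close the open MPW session"
 * predicate from the hunk above; not PMD code. */
struct mpw_session {
	unsigned int len; /* packet length the session was opened with */
	uint8_t cs_flags; /* checksum flags shared by all packets */
};

static int
mpw_must_close(const struct mpw_session *mpw, unsigned int pkt_len,
	       unsigned int segs_n, uint8_t cs_flags)
{
	return (mpw->len != pkt_len) || (segs_n != 1) ||
	       (mpw->cs_flags != cs_flags);
}

int main(void)
{
	struct mpw_session mpw = { .len = 64, .cs_flags = 0 };

	assert(!mpw_must_close(&mpw, 64, 1, 0)); /* same shape: keep open */
	assert(mpw_must_close(&mpw, 128, 1, 0)); /* length change: close */
	assert(mpw_must_close(&mpw, 64, 2, 0));  /* multi-segment: close */
	return 0;
}
```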
@@ -811,25 +820,28 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 static inline void
 mlx5_mpw_inline_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
 {
-    uint16_t idx = txq->wqe_ci & (txq->wqe_n - 1);
+    uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
+    struct mlx5_wqe_inl_small *inl;
 
     mpw->state = MLX5_MPW_INL_STATE_OPENED;
     mpw->pkts_n = 0;
     mpw->len = length;
     mpw->total_len = 0;
-    mpw->wqe = &(*txq->wqes)[idx];
-    mpw->wqe->mpw_inl.ctrl.data[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
-                                           (txq->wqe_ci << 8) |
-                                           MLX5_OPCODE_TSO);
-    mpw->wqe->mpw_inl.ctrl.data[2] = 0;
-    mpw->wqe->mpw_inl.ctrl.data[3] = 0;
-    mpw->wqe->mpw_inl.eseg.mss = htons(length);
-    mpw->wqe->mpw_inl.eseg.inline_hdr_sz = 0;
-    mpw->wqe->mpw_inl.eseg.cs_flags = 0;
-    mpw->wqe->mpw_inl.eseg.rsvd0 = 0;
-    mpw->wqe->mpw_inl.eseg.rsvd1 = 0;
-    mpw->wqe->mpw_inl.eseg.rsvd2 = 0;
-    mpw->data.raw = &mpw->wqe->mpw_inl.data[0];
+    mpw->wqe = (volatile struct mlx5_wqe *)&(*txq->wqes)[idx].hdr;
+    mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
+                              (txq->wqe_ci << 8) |
+                              MLX5_OPCODE_TSO);
+    mpw->wqe->ctrl[2] = 0;
+    mpw->wqe->ctrl[3] = 0;
+    mpw->wqe->eseg.mss = htons(length);
+    mpw->wqe->eseg.inline_hdr_sz = 0;
+    mpw->wqe->eseg.cs_flags = 0;
+    mpw->wqe->eseg.rsvd0 = 0;
+    mpw->wqe->eseg.rsvd1 = 0;
+    mpw->wqe->eseg.rsvd2 = 0;
+    inl = (struct mlx5_wqe_inl_small *)
+        (((uintptr_t)mpw->wqe) + 2 * MLX5_WQE_DWORD_SIZE);
+    mpw->data.raw = (uint8_t *)&inl->raw;
 }
 
 /**
@@ -844,17 +856,18 @@ static inline void
 mlx5_mpw_inline_close(struct txq *txq, struct mlx5_mpw *mpw)
 {
     unsigned int size;
+    struct mlx5_wqe_inl_small *inl = (struct mlx5_wqe_inl_small *)
+        (((uintptr_t)mpw->wqe) + (2 * MLX5_WQE_DWORD_SIZE));
 
-    size = sizeof(*mpw->wqe) - MLX5_MWQE64_INL_DATA + mpw->total_len;
+    size = MLX5_WQE_SIZE - MLX5_MWQE64_INL_DATA + mpw->total_len;
     /*
      * Store size in multiple of 16 bytes. Control and Ethernet segments
      * count as 2.
      */
-    mpw->wqe->mpw_inl.ctrl.data[1] =
-        htonl(txq->qp_num_8s | ((size + 15) / 16));
+    mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | MLX5_WQE_DS(size));
     mpw->state = MLX5_MPW_STATE_CLOSED;
-    mpw->wqe->mpw_inl.byte_cnt = htonl(mpw->total_len | MLX5_INLINE_SEG);
-    txq->wqe_ci += (size + (sizeof(*mpw->wqe) - 1)) / sizeof(*mpw->wqe);
+    inl->byte_cnt = htonl(mpw->total_len | MLX5_INLINE_SEG);
+    txq->wqe_ci += (size + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
 }
@@ -876,7 +889,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
 {
     struct txq *txq = (struct txq *)dpdk_txq;
     uint16_t elts_head = txq->elts_head;
-    const unsigned int elts_n = txq->elts_n;
+    const unsigned int elts_n = 1 << txq->elts_n;
     unsigned int i = 0;
     unsigned int j = 0;
     unsigned int max;
@@ -927,13 +940,13 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
         if (mpw.state == MLX5_MPW_STATE_OPENED) {
             if ((mpw.len != length) ||
                 (segs_n != 1) ||
-                (mpw.wqe->mpw.eseg.cs_flags != cs_flags))
+                (mpw.wqe->eseg.cs_flags != cs_flags))
                 mlx5_mpw_close(txq, &mpw);
         } else if (mpw.state == MLX5_MPW_INL_STATE_OPENED) {
             if ((mpw.len != length) ||
                 (segs_n != 1) ||
                 (length > inline_room) ||
-                (mpw.wqe->mpw_inl.eseg.cs_flags != cs_flags)) {
+                (mpw.wqe->eseg.cs_flags != cs_flags)) {
                 mlx5_mpw_inline_close(txq, &mpw);
                 inline_room =
                     txq->max_inline * RTE_CACHE_LINE_SIZE;
@@ -943,10 +956,10 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
             if ((segs_n != 1) ||
                 (length > inline_room)) {
                 mlx5_mpw_new(txq, &mpw, length);
-                mpw.wqe->mpw.eseg.cs_flags = cs_flags;
+                mpw.wqe->eseg.cs_flags = cs_flags;
             } else {
                 mlx5_mpw_inline_new(txq, &mpw, length);
-                mpw.wqe->mpw_inl.eseg.cs_flags = cs_flags;
+                mpw.wqe->eseg.cs_flags = cs_flags;
             }
         }
         /* Multi-segment packets must be alone in their MPW. */
@@ -992,7 +1005,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
             addr = rte_pktmbuf_mtod(buf, uintptr_t);
             (*txq->elts)[elts_head] = buf;
             /* Maximum number of bytes before wrapping. */
-            max = ((uintptr_t)&(*txq->wqes)[txq->wqe_n] -
+            max = ((uintptr_t)&(*txq->wqes)[1 << txq->wqe_n] -
                    (uintptr_t)mpw.data.raw);
             if (length > max) {
                 rte_memcpy((void *)(uintptr_t)mpw.data.raw,
@@ -1011,7 +1024,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
                 mpw.data.raw += length;
             }
             if ((uintptr_t)mpw.data.raw ==
-                (uintptr_t)&(*txq->wqes)[txq->wqe_n])
+                (uintptr_t)&(*txq->wqes)[1 << txq->wqe_n])
                 mpw.data.raw =
                     (volatile void *)&(*txq->wqes)[0];
             ++mpw.pkts_n;
@@ -1039,12 +1052,12 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
     /* "j" includes both packets and segments. */
     comp = txq->elts_comp + j;
     if (comp >= MLX5_TX_COMP_THRESH) {
-        volatile union mlx5_wqe *wqe = mpw.wqe;
+        volatile struct mlx5_wqe *wqe = mpw.wqe;
 
         /* Request completion on last WQE. */
-        wqe->mpw_inl.ctrl.data[2] = htonl(8);
+        wqe->ctrl[2] = htonl(8);
         /* Save elts_head in unused "immediate" field of WQE. */
-        wqe->mpw_inl.ctrl.data[3] = elts_head;
+        wqe->ctrl[3] = elts_head;
         txq->elts_comp = 0;
     } else {
         txq->elts_comp = comp;
@@ -1075,13 +1088,12 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
  *   Packet type for struct rte_mbuf.
  */
 static inline uint32_t
-rxq_cq_to_pkt_type(volatile struct mlx5_cqe64 *cqe)
+rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
 {
     uint32_t pkt_type;
     uint8_t flags = cqe->l4_hdr_type_etc;
-    uint8_t info = cqe->rsvd0[0];
 
-    if (info & MLX5_CQE_RX_TUNNEL_PACKET)
+    if (cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET)
         pkt_type =
             TRANSPOSE(flags,
                       MLX5_CQE_RX_OUTER_IPV4_PACKET,
@@ -1115,14 +1127,16 @@ rxq_cq_to_pkt_type(volatile struct mlx5_cqe64 *cqe)
  *   Pointer to RX queue.
  * @param cqe
  *   CQE to process.
+ * @param[out] rss_hash
+ *   Packet RSS Hash result.
  *
  * @return
  *   Packet size in bytes (0 if there is none), -1 in case of completion
  *   with error.
  */
 static inline int
-mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe,
-                 uint16_t cqe_cnt)
+mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
+                 uint16_t cqe_cnt, uint32_t *rss_hash)
 {
     struct rxq_zip *zip = &rxq->zip;
     uint16_t cqe_n = cqe_cnt + 1;
@@ -1132,9 +1146,10 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe,
     if (zip->ai) {
         volatile struct mlx5_mini_cqe8 (*mc)[8] =
             (volatile struct mlx5_mini_cqe8 (*)[8])
-            (uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].cqe64);
+            (uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt]);
 
         len = ntohl((*mc)[zip->ai & 7].byte_cnt);
+        *rss_hash = ntohl((*mc)[zip->ai & 7].rx_hash_result);
         if ((++zip->ai & 7) == 0) {
             /*
              * Increment consumer index to skip the number of
@@ -1149,7 +1164,7 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe,
             uint16_t end = zip->cq_ci;
 
             while (idx != end) {
-                (*rxq->cqes)[idx & cqe_cnt].cqe64.op_own =
+                (*rxq->cqes)[idx & cqe_cnt].op_own =
                     MLX5_CQE_INVALIDATE;
                 ++idx;
             }
@@ -1161,7 +1176,7 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe,
         int ret;
         int8_t op_own;
 
-        ret = check_cqe64(cqe, cqe_n, rxq->cq_ci);
+        ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
         if (unlikely(ret == 1))
             return 0;
         ++rxq->cq_ci;
@@ -1170,7 +1185,7 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe,
             volatile struct mlx5_mini_cqe8 (*mc)[8] =
                 (volatile struct mlx5_mini_cqe8 (*)[8])
                 (uintptr_t)(&(*rxq->cqes)[rxq->cq_ci &
-                                          cqe_cnt].cqe64);
+                                          cqe_cnt]);
 
             /* Fix endianness. */
             zip->cqe_cnt = ntohl(cqe->byte_cnt);
@@ -1189,9 +1204,11 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe,
             zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
             /* Get packet size to return. */
             len = ntohl((*mc)[0].byte_cnt);
+            *rss_hash = ntohl((*mc)[0].rx_hash_result);
             zip->ai = 1;
         } else {
             len = ntohl(cqe->byte_cnt);
+            *rss_hash = ntohl(cqe->rx_hash_res);
         }
         /* Error while receiving packet. */
         if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR))
@@ -1212,38 +1229,32 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe,
  *   Offload flags (ol_flags) for struct rte_mbuf.
  */
 static inline uint32_t
-rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe)
+rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe)
 {
     uint32_t ol_flags = 0;
     uint8_t l3_hdr = (cqe->l4_hdr_type_etc) & MLX5_CQE_L3_HDR_TYPE_MASK;
     uint8_t l4_hdr = (cqe->l4_hdr_type_etc) & MLX5_CQE_L4_HDR_TYPE_MASK;
-    uint8_t info = cqe->rsvd0[0];
 
     if ((l3_hdr == MLX5_CQE_L3_HDR_TYPE_IPV4) ||
         (l3_hdr == MLX5_CQE_L3_HDR_TYPE_IPV6))
-        ol_flags |=
-            (!(cqe->hds_ip_ext & MLX5_CQE_L3_OK) *
-             PKT_RX_IP_CKSUM_BAD);
+        ol_flags |= TRANSPOSE(cqe->hds_ip_ext,
+                              MLX5_CQE_L3_OK,
+                              PKT_RX_IP_CKSUM_GOOD);
     if ((l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP) ||
         (l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP_EMP_ACK) ||
        (l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP_ACK) ||
        (l4_hdr == MLX5_CQE_L4_HDR_TYPE_UDP))
+        ol_flags |= TRANSPOSE(cqe->hds_ip_ext,
+                              MLX5_CQE_L4_OK,
+                              PKT_RX_L4_CKSUM_GOOD);
+    if ((cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
         ol_flags |=
-            (!(cqe->hds_ip_ext & MLX5_CQE_L4_OK) *
-             PKT_RX_L4_CKSUM_BAD);
-    /*
-     * PKT_RX_IP_CKSUM_BAD and PKT_RX_L4_CKSUM_BAD are used in place
-     * of PKT_RX_EIP_CKSUM_BAD because the latter is not functional
-     * (its value is 0).
-     */
-    if ((info & MLX5_CQE_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
-        ol_flags |=
-            TRANSPOSE(~cqe->l4_hdr_type_etc,
+            TRANSPOSE(cqe->l4_hdr_type_etc,
                       MLX5_CQE_RX_OUTER_IP_CSUM_OK,
-                      PKT_RX_IP_CKSUM_BAD) |
-            TRANSPOSE(~cqe->l4_hdr_type_etc,
+                      PKT_RX_IP_CKSUM_GOOD) |
+            TRANSPOSE(cqe->l4_hdr_type_etc,
                       MLX5_CQE_RX_OUTER_TCP_UDP_CSUM_OK,
-                      PKT_RX_L4_CKSUM_BAD);
+                      PKT_RX_L4_CKSUM_GOOD);
     return ol_flags;
 }
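The checksum hunks above translate hardware status bits into mbuf flags with TRANSPOSE(), which scales a masked bit from one position to another without branching on its value. A hedged reimplementation of that idea (the real macro lives in the PMD's utility header; the constants below are invented for the demo):

```c
#include <assert.h>
#include <stdint.h>

/* Hedged sketch of TRANSPOSE(val, from, to): isolate bit(s) "from" in
 * val and rescale them to position "to" with a multiply or divide.
 * Mirrors the utility-header idea; constants are made up. */
#define TRANSPOSE(val, from, to) \
	(((from) >= (to)) ? \
	 (((val) & (from)) / ((from) / (to))) : \
	 (((val) & (from)) * ((to) / (from))))

#define HW_L3_OK        0x40u /* made-up CQE status bit */
#define MBUF_IP_CSUM_OK 0x02u /* made-up mbuf flag */

int main(void)
{
	uint8_t hds_ip_ext = HW_L3_OK; /* checksum reported valid */

	assert(TRANSPOSE(hds_ip_ext, HW_L3_OK, MBUF_IP_CSUM_OK) ==
	       MBUF_IP_CSUM_OK);
	assert(TRANSPOSE((uint8_t)0, HW_L3_OK, MBUF_IP_CSUM_OK) == 0);
	return 0;
}
```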
@@ -1264,21 +1275,22 @@ uint16_t
 mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 {
     struct rxq *rxq = dpdk_rxq;
-    const unsigned int wqe_cnt = rxq->elts_n - 1;
-    const unsigned int cqe_cnt = rxq->cqe_n - 1;
+    const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
+    const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
     const unsigned int sges_n = rxq->sges_n;
     struct rte_mbuf *pkt = NULL;
     struct rte_mbuf *seg = NULL;
-    volatile struct mlx5_cqe64 *cqe =
-        &(*rxq->cqes)[rxq->cq_ci & cqe_cnt].cqe64;
+    volatile struct mlx5_cqe *cqe =
+        &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
     unsigned int i = 0;
     unsigned int rq_ci = rxq->rq_ci << sges_n;
-    int len;
+    int len; /* keep its value across iterations. */
 
     while (pkts_n) {
         unsigned int idx = rq_ci & wqe_cnt;
         volatile struct mlx5_wqe_data_seg *wqe = &(*rxq->wqes)[idx];
         struct rte_mbuf *rep = (*rxq->elts)[idx];
+        uint32_t rss_hash_res = 0;
 
         if (pkt)
             NEXT(seg) = rep;
@@ -1306,9 +1318,10 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
             break;
         }
         if (!pkt) {
-            cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt].cqe64;
-            len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt);
-            if (len == 0) {
+            cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
+            len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt,
+                                   &rss_hash_res);
+            if (!len) {
                 rte_mbuf_refcnt_set(rep, 0);
                 __rte_mbuf_raw_free(rep);
                 break;
@@ -1325,12 +1338,16 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
             /* Update packet information. */
             pkt->packet_type = 0;
             pkt->ol_flags = 0;
+            if (rxq->rss_hash) {
+                pkt->hash.rss = rss_hash_res;
+                pkt->ol_flags = PKT_RX_RSS_HASH;
+            }
             if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip |
                 rxq->crc_present) {
                 if (rxq->csum) {
                     pkt->packet_type =
                         rxq_cq_to_pkt_type(cqe);
-                    pkt->ol_flags =
+                    pkt->ol_flags |=
                         rxq_cq_to_ol_flags(rxq, cqe);
                 }
                 if (cqe->l4_hdr_type_etc &
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 05779ef5..5708c2a7 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -54,6 +54,7 @@
 #endif
 #include <rte_mbuf.h>
 #include <rte_mempool.h>
+#include <rte_common.h>
 
 #ifdef PEDANTIC
 #pragma GCC diagnostic error "-Wpedantic"
 #endif
@@ -109,16 +110,18 @@ struct rxq {
     unsigned int vlan_strip:1; /* Enable VLAN stripping. */
     unsigned int crc_present:1; /* CRC must be subtracted. */
     unsigned int sges_n:2; /* Log 2 of SGEs (max buffers per packet). */
+    unsigned int cqe_n:4; /* Log 2 of CQ elements. */
+    unsigned int elts_n:4; /* Log 2 of Mbufs. */
+    unsigned int port_id:8;
+    unsigned int rss_hash:1; /* RSS hash result is enabled. */
+    unsigned int :9; /* Remaining bits. */
+    volatile uint32_t *rq_db;
+    volatile uint32_t *cq_db;
     uint16_t rq_ci;
     uint16_t cq_ci;
-    uint16_t elts_n;
-    uint16_t cqe_n; /* Number of CQ elements. */
-    uint16_t port_id;
     volatile struct mlx5_wqe_data_seg(*wqes)[];
     volatile struct mlx5_cqe(*cqes)[];
     struct rxq_zip zip; /* Compressed context. */
-    volatile uint32_t *rq_db;
-    volatile uint32_t *cq_db;
     struct rte_mbuf *(*elts)[];
     struct rte_mempool *mp;
     struct mlx5_rxq_stats stats;
@@ -238,21 +241,22 @@ struct hash_rxq {
 };
 
 /* TX queue descriptor. */
+RTE_STD_C11
 struct txq {
     uint16_t elts_head; /* Current index in (*elts)[]. */
     uint16_t elts_tail; /* First element awaiting completion. */
     uint16_t elts_comp; /* Counter since last completion request. */
     uint16_t cq_ci; /* Consumer index for completion queue. */
-    uint16_t elts_n; /* (*elts)[] length. */
-    uint16_t cqe_n; /* Number of CQ elements. */
     uint16_t wqe_ci; /* Consumer index for work queue. */
-    uint16_t wqe_n; /* Number of WQ elements. */
+    uint16_t elts_n:4; /* (*elts)[] length (in log2). */
+    uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
+    uint16_t wqe_n:4; /* Number of of WQ elements (in log2). */
+    uint16_t bf_buf_size:4; /* Log2 Blueflame size. */
     uint16_t bf_offset; /* Blueflame offset. */
-    uint16_t bf_buf_size; /* Blueflame size. */
     uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
     uint32_t qp_num_8s; /* QP number shifted by 8. */
     volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
-    volatile union mlx5_wqe (*wqes)[]; /* Work queue. */
+    volatile struct mlx5_wqe64 (*wqes)[]; /* Work queue. */
     volatile uint32_t *qp_db; /* Work queue doorbell. */
     volatile uint32_t *cq_db; /* Completion queue doorbell. */
     volatile void *bf_reg; /* Blueflame register. */
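The struct txq hunk above shows the payoff of the log2 encoding: four ring sizes that each used to occupy a full uint16_t now share one 16-bit word as 4-bit bitfields. A hedged standalone illustration (an invented struct, not the PMD's struct txq):

```c
#include <assert.h>
#include <stdint.h>

/* Hedged sketch: log2-encoded sizes fit in 4 bits each (up to 2^15
 * entries), so one uint16_t can carry all four. Layout assumptions
 * hold on common ABIs. */
struct ring_sizes {
	uint16_t elts_n:4;      /* log2 of TX elements */
	uint16_t cqe_n:4;       /* log2 of CQ entries */
	uint16_t wqe_n:4;       /* log2 of WQ entries */
	uint16_t bf_buf_size:4; /* log2 of BlueFlame buffer */
};

int main(void)
{
	struct ring_sizes s = {
		.elts_n = 9,      /* 512 elements */
		.cqe_n = 7,       /* 128 CQEs */
		.wqe_n = 9,       /* 512 WQEs */
		.bf_buf_size = 9, /* 512-byte BF buffer */
	};

	assert(sizeof(s) == 2);          /* all four in one word */
	assert((1u << s.elts_n) == 512); /* expanded on use */
	return 0;
}
```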
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index e4510efe..053665d5 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -81,8 +81,8 @@ txq_alloc_elts(struct txq_ctrl *txq_ctrl, unsigned int elts_n)
     for (i = 0; (i != elts_n); ++i)
         (*txq_ctrl->txq.elts)[i] = NULL;
-    for (i = 0; (i != txq_ctrl->txq.wqe_n); ++i) {
-        volatile union mlx5_wqe *wqe = &(*txq_ctrl->txq.wqes)[i];
+    for (i = 0; (i != (1u << txq_ctrl->txq.wqe_n)); ++i) {
+        volatile struct mlx5_wqe64 *wqe = &(*txq_ctrl->txq.wqes)[i];
 
         memset((void *)(uintptr_t)wqe, 0x0, sizeof(*wqe));
     }
@@ -101,7 +101,7 @@ txq_alloc_elts(struct txq_ctrl *txq_ctrl, unsigned int elts_n)
 static void
 txq_free_elts(struct txq_ctrl *txq_ctrl)
 {
-    unsigned int elts_n = txq_ctrl->txq.elts_n;
+    unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
     unsigned int elts_head = txq_ctrl->txq.elts_head;
     unsigned int elts_tail = txq_ctrl->txq.elts_tail;
     struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;
@@ -212,22 +212,22 @@ txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl)
               "it should be set to %u", RTE_CACHE_LINE_SIZE);
         return EINVAL;
     }
-    tmpl->txq.cqe_n = ibcq->cqe + 1;
+    tmpl->txq.cqe_n = log2above(ibcq->cqe);
     tmpl->txq.qp_num_8s = qp->ctrl_seg.qp_num << 8;
     tmpl->txq.wqes =
-        (volatile union mlx5_wqe (*)[])
+        (volatile struct mlx5_wqe64 (*)[])
         (uintptr_t)qp->gen_data.sqstart;
-    tmpl->txq.wqe_n = qp->sq.wqe_cnt;
+    tmpl->txq.wqe_n = log2above(qp->sq.wqe_cnt);
     tmpl->txq.qp_db = &qp->gen_data.db[MLX5_SND_DBR];
     tmpl->txq.bf_reg = qp->gen_data.bf->reg;
     tmpl->txq.bf_offset = qp->gen_data.bf->offset;
-    tmpl->txq.bf_buf_size = qp->gen_data.bf->buf_size;
+    tmpl->txq.bf_buf_size = log2above(qp->gen_data.bf->buf_size);
     tmpl->txq.cq_db = cq->dbrec;
     tmpl->txq.cqes =
         (volatile struct mlx5_cqe (*)[])
         (uintptr_t)cq->active_buf->buf;
     tmpl->txq.elts =
-        (struct rte_mbuf *(*)[tmpl->txq.elts_n])
+        (struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])
         ((uintptr_t)txq_ctrl + sizeof(*txq_ctrl));
     return 0;
 }
@@ -277,7 +277,7 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
     }
     (void)conf; /* Thresholds configuration (ignored). */
     assert(desc > MLX5_TX_COMP_THRESH);
-    tmpl.txq.elts_n = desc;
+    tmpl.txq.elts_n = log2above(desc);
     /* MRs will be registered in mp2mr[] later. */
     attr.rd = (struct ibv_exp_res_domain_init_attr){
         .comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL |