author		Luca Boccassi <luca.boccassi@gmail.com>	2018-11-01 11:59:50 +0000
committer	Luca Boccassi <luca.boccassi@gmail.com>	2018-11-01 12:00:19 +0000
commit		8d01b9cd70a67cdafd5b965a70420c3bd7fb3f82 (patch)
tree		208e3bc33c220854d89d010e3abf720a2e62e546 /drivers/net/qede/qede_rxtx.c
parent		b63264c8342e6a1b6971c79550d2af2024b6a4de (diff)
New upstream version 18.11-rc1 (upstream/18.11-rc1)
Change-Id: Iaa71986dd6332e878d8f4bf493101b2bbc6313bb
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/net/qede/qede_rxtx.c')
-rw-r--r--	drivers/net/qede/qede_rxtx.c	140
1 file changed, 133 insertions, 7 deletions
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 0f157ded..8a4772f4 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -35,6 +35,49 @@ static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
 	return 0;
 }
 
+/* Criteria for calculating Rx buffer size -
+ * 1) rx_buf_size should not exceed the size of mbuf
+ * 2) In scattered_rx mode - minimum rx_buf_size should be
+ *    (MTU + Maximum L2 Header Size + 2) / ETH_RX_MAX_BUFF_PER_PKT
+ * 3) In regular mode - minimum rx_buf_size should be
+ *    (MTU + Maximum L2 Header Size + 2)
+ *    In the above cases, +2 corresponds to 2 bytes of padding in front
+ *    of the L2 header.
+ * 4) rx_buf_size should be cacheline-size aligned. So considering
+ *    criterion 1, we need to adjust the size to floor instead of ceil,
+ *    so that we don't exceed the mbuf size when aligning rx_buf_size.
+ */
+int
+qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,
+		      uint16_t max_frame_size)
+{
+	struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	int rx_buf_size;
+
+	if (dev->data->scattered_rx) {
+		/* per HW limitation, only ETH_RX_MAX_BUFF_PER_PKT buffers
+		 * can be used for a single packet, so make sure the mbuf
+		 * size is sufficient for this.
+		 */
+		if ((mbufsz * ETH_RX_MAX_BUFF_PER_PKT) <
+		    (max_frame_size + QEDE_ETH_OVERHEAD)) {
+			DP_ERR(edev, "mbuf %d size is not enough to hold max fragments (%d) for max rx packet length (%d)\n",
+			       mbufsz, ETH_RX_MAX_BUFF_PER_PKT, max_frame_size);
+			return -EINVAL;
+		}
+
+		rx_buf_size = RTE_MAX(mbufsz,
+				      (max_frame_size + QEDE_ETH_OVERHEAD) /
+				      ETH_RX_MAX_BUFF_PER_PKT);
+	} else {
+		rx_buf_size = max_frame_size + QEDE_ETH_OVERHEAD;
+	}
+
+	/* Align to cache-line size if needed */
+	return QEDE_FLOOR_TO_CACHE_LINE_SIZE(rx_buf_size);
+}
+
 int
 qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 		    uint16_t nb_desc, unsigned int socket_id,
@@ -85,6 +128,8 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 
 	/* Fix up RX buffer size */
 	bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
+	/* cache align the mbuf size to simplify rx_buf_size calculation */
+	bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
 	if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) ||
 	    (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
 		if (!dev->data->scattered_rx) {
@@ -93,13 +138,13 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 		}
 	}
 
-	if (dev->data->scattered_rx)
-		rxq->rx_buf_size = bufsz + ETHER_HDR_LEN +
-				   ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
-	else
-		rxq->rx_buf_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
-	/* Align to cache-line size if needed */
-	rxq->rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rxq->rx_buf_size);
+	rc = qede_calc_rx_buf_size(dev, bufsz, max_rx_pkt_len);
+	if (rc < 0) {
+		rte_free(rxq);
+		return rc;
+	}
+
+	rxq->rx_buf_size = rc;
 
 	DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
 		qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);
@@ -2106,3 +2151,84 @@ qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
 {
 	return 0;
 }
+
+
+/* This function does a fake walk over the completion queue
+ * to calculate the number of BDs used by HW.
+ * At the end, it restores the state of the completion queue.
+ */
+static uint16_t
+qede_parse_fp_cqe(struct qede_rx_queue *rxq)
+{
+	uint16_t hw_comp_cons, sw_comp_cons, bd_count = 0;
+	union eth_rx_cqe *cqe, *orig_cqe = NULL;
+
+	hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
+	sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+	if (hw_comp_cons == sw_comp_cons)
+		return 0;
+
+	/* Get the CQE from the completion ring */
+	cqe = (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
+	orig_cqe = cqe;
+
+	while (sw_comp_cons != hw_comp_cons) {
+		switch (cqe->fast_path_regular.type) {
+		case ETH_RX_CQE_TYPE_REGULAR:
+			bd_count += cqe->fast_path_regular.bd_num;
+			break;
+		case ETH_RX_CQE_TYPE_TPA_END:
+			bd_count += cqe->fast_path_tpa_end.num_of_bds;
+			break;
+		default:
+			break;
+		}
+
+		cqe =
+		(union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
+		sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
+	}
+
+	/* revert comp_ring to original state */
+	ecore_chain_set_cons(&rxq->rx_comp_ring, sw_comp_cons, orig_cqe);
+
+	return bd_count;
+}
+
+int
+qede_rx_descriptor_status(void *p_rxq, uint16_t offset)
+{
+	uint16_t hw_bd_cons, sw_bd_cons, sw_bd_prod;
+	uint16_t produced, consumed;
+	struct qede_rx_queue *rxq = p_rxq;
+
+	if (offset > rxq->nb_rx_desc)
+		return -EINVAL;
+
+	sw_bd_cons = ecore_chain_get_cons_idx(&rxq->rx_bd_ring);
+	sw_bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
+
+	/* find BDs used by HW from completion queue elements */
+	hw_bd_cons = sw_bd_cons + qede_parse_fp_cqe(rxq);
+
+	if (hw_bd_cons < sw_bd_cons)
+		/* wraparound case */
+		consumed = (0xffff - sw_bd_cons) + hw_bd_cons;
+	else
+		consumed = hw_bd_cons - sw_bd_cons;
+
+	if (offset <= consumed)
+		return RTE_ETH_RX_DESC_DONE;
+
+	if (sw_bd_prod < sw_bd_cons)
+		/* wraparound case */
+		produced = (0xffff - sw_bd_cons) + sw_bd_prod;
+	else
+		produced = sw_bd_prod - sw_bd_cons;
+
+	if (offset <= produced)
+		return RTE_ETH_RX_DESC_AVAIL;
+
+	return RTE_ETH_RX_DESC_UNAVAIL;
+}
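To make the buffer-size arithmetic in qede_calc_rx_buf_size() easier to follow, here is a minimal standalone sketch of the same logic. It is not the driver code: the constants (cache-line size, L2 overhead, maximum buffers per packet) and the helper names are illustrative assumptions standing in for RTE_CACHE_LINE_SIZE, QEDE_ETH_OVERHEAD and ETH_RX_MAX_BUFF_PER_PKT.

#include <stdio.h>
#include <stdint.h>

#define CACHE_LINE_SIZE		64		/* assumed cache-line size */
#define MAX_BUFF_PER_PKT	5		/* stands in for ETH_RX_MAX_BUFF_PER_PKT */
#define ETH_OVERHEAD		(14 + 4 + 2)	/* L2 header + CRC + 2B front pad (illustrative) */

/* Round down to a cache-line multiple so the result never grows past mbufsz */
#define FLOOR_TO_CACHE_LINE(s)	((s) & ~(CACHE_LINE_SIZE - 1))

static int calc_rx_buf_size(int scattered_rx, uint16_t mbufsz,
			    uint16_t max_frame_size)
{
	int rx_buf_size;
	int per_frag;

	if (scattered_rx) {
		/* all fragments together must still hold the largest frame */
		if ((mbufsz * MAX_BUFF_PER_PKT) <
		    (max_frame_size + ETH_OVERHEAD))
			return -1;

		/* take the larger of the mbuf size and the per-fragment
		 * share of the frame (mirrors the RTE_MAX in the driver)
		 */
		per_frag = (max_frame_size + ETH_OVERHEAD) / MAX_BUFF_PER_PKT;
		rx_buf_size = mbufsz > per_frag ? mbufsz : per_frag;
	} else {
		/* a single buffer must hold the whole frame plus overhead */
		rx_buf_size = max_frame_size + ETH_OVERHEAD;
	}

	/* floor, not ceil, so the result still fits inside the mbuf */
	return FLOOR_TO_CACHE_LINE(rx_buf_size);
}

int main(void)
{
	printf("regular,   1518B frame: %d\n", calc_rx_buf_size(0, 2048, 1518));
	printf("scattered, 9000B frame: %d\n", calc_rx_buf_size(1, 2048, 9000));
	return 0;
}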
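The consumed/produced math in qede_rx_descriptor_status() has to cope with 16-bit producer/consumer indices that can wrap past 0xffff. The sketch below isolates that arithmetic using the same 0xffff-based formula as the code above; ring_distance() is a hypothetical helper name and the sample index values are made up.

#include <stdio.h>
#include <stdint.h>

/* Distance from 'from' to 'to' along a ring tracked with 16-bit indices,
 * using the driver's wraparound formula.
 */
static uint16_t ring_distance(uint16_t from, uint16_t to)
{
	if (to < from)
		/* wraparound case */
		return (0xffff - from) + to;
	return to - from;
}

int main(void)
{
	/* no wrap: HW consumer ran 8 BDs ahead of the SW consumer */
	printf("consumed = %u\n", (unsigned)ring_distance(100, 108));
	/* wrap: SW consumer near the top of the 16-bit range, HW wrapped */
	printf("consumed = %u\n", (unsigned)ring_distance(0xfffa, 3));
	return 0;
}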
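For context, an application would normally reach a handler like qede_rx_descriptor_status() through the generic ethdev descriptor-status API rather than calling it directly, assuming the driver registers it as its rx_descriptor_status callback (that wiring lives outside this file). A hedged usage sketch follows; the function name, port and queue numbers are illustrative only.

#include <stdio.h>
#include <stdint.h>
#include <rte_ethdev.h>

/* Count how many Rx descriptors on one queue already hold packets that the
 * application has not polled yet. Assumes the port is configured and started.
 */
static void report_rxq_fill(uint16_t port_id, uint16_t queue_id,
			    uint16_t nb_desc)
{
	uint16_t offset;
	int status;

	for (offset = 0; offset < nb_desc; offset++) {
		status = rte_eth_rx_descriptor_status(port_id, queue_id,
						      offset);
		if (status != RTE_ETH_RX_DESC_DONE)
			break;	/* first descriptor not yet filled by HW,
				 * out of range, or status not supported
				 */
	}

	printf("port %u rxq %u: %u descriptors with packets pending\n",
	       (unsigned)port_id, (unsigned)queue_id, (unsigned)offset);
}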