From 7f83738b46e6e0dd17c7a23392ceaaef686ac08a Mon Sep 17 00:00:00 2001
From: Chenmin Sun
Date: Sat, 28 Mar 2020 00:34:19 +0800
Subject: dpdk: DPDK 20.05 iavf flow director backporting to DPDK 20.02

0001 ~ 0014 patches are for virtual channel and PMD
0015 is the iavf fdir framework
0016 ~ 0017 are for the iavf fdir driver

Type: feature

Signed-off-by: Chenmin Sun
Change-Id: I38e69ca0065a71cc6ba0b44ef7c7db51193a0899
---
 ...exible-Rx-descriptor-support-in-normal-pa.patch | 729 +++++++++++++++++++++
 1 file changed, 729 insertions(+)
 create mode 100644 build/external/patches/dpdk_20.02/0009-net-iavf-flexible-Rx-descriptor-support-in-normal-pa.patch

diff --git a/build/external/patches/dpdk_20.02/0009-net-iavf-flexible-Rx-descriptor-support-in-normal-pa.patch b/build/external/patches/dpdk_20.02/0009-net-iavf-flexible-Rx-descriptor-support-in-normal-pa.patch
new file mode 100644
index 00000000000..130262c2408
--- /dev/null
+++ b/build/external/patches/dpdk_20.02/0009-net-iavf-flexible-Rx-descriptor-support-in-normal-pa.patch
@@ -0,0 +1,729 @@
+From 3d10b7f1332d3f1326c182d3b7fa13669a528592 Mon Sep 17 00:00:00 2001
+From: Leyi Rong
+Date: Wed, 8 Apr 2020 14:22:02 +0800
+Subject: [DPDK 09/17] net/iavf: flexible Rx descriptor support in normal path
+
+Support flexible Rx descriptor format in normal
+path of iAVF PMD.
+
+Signed-off-by: Leyi Rong
+---
+ drivers/net/iavf/iavf.h        |   2 +
+ drivers/net/iavf/iavf_ethdev.c |   8 +
+ drivers/net/iavf/iavf_rxtx.c   | 479 ++++++++++++++++++++++++++++++---
+ drivers/net/iavf/iavf_rxtx.h   |   8 +
+ drivers/net/iavf/iavf_vchnl.c  |  42 ++-
+ 5 files changed, 501 insertions(+), 38 deletions(-)
+
+diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
+index 526040c6e..67d625053 100644
+--- a/drivers/net/iavf/iavf.h
++++ b/drivers/net/iavf/iavf.h
+@@ -97,6 +97,7 @@ struct iavf_info {
+ 	struct virtchnl_version_info virtchnl_version;
+ 	struct virtchnl_vf_resource *vf_res; /* VF resource */
+ 	struct virtchnl_vsi_resource *vsi_res; /* LAN VSI */
++	uint64_t supported_rxdid;
+ 
+ 	volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
+ 	uint32_t cmd_retval; /* return value of the cmd response from PF */
+@@ -225,6 +226,7 @@ int iavf_disable_queues(struct iavf_adapter *adapter);
+ int iavf_configure_rss_lut(struct iavf_adapter *adapter);
+ int iavf_configure_rss_key(struct iavf_adapter *adapter);
+ int iavf_configure_queues(struct iavf_adapter *adapter);
++int iavf_get_supported_rxdid(struct iavf_adapter *adapter);
+ int iavf_config_irq_map(struct iavf_adapter *adapter);
+ void iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add);
+ int iavf_dev_link_update(struct rte_eth_dev *dev,
+diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
+index ee9f82249..d3a121eac 100644
+--- a/drivers/net/iavf/iavf_ethdev.c
++++ b/drivers/net/iavf/iavf_ethdev.c
+@@ -1236,6 +1236,14 @@ iavf_init_vf(struct rte_eth_dev *dev)
+ 			goto err_rss;
+ 		}
+ 	}
++
++	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
++		if (iavf_get_supported_rxdid(adapter) != 0) {
++			PMD_INIT_LOG(ERR, "failed to do get supported rxdid");
++			goto err_rss;
++		}
++	}
++
+ 	return 0;
+ err_rss:
+ 	rte_free(vf->rss_key);
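
The iavf_ethdev.c hunk above only fetches the PF's RXDID bitmap at init time; the per-queue choice between the flexible and the legacy descriptor layout is made in iavf_rxtx.c below. A minimal standalone sketch of that selection rule follows; the numeric constant values are illustrative stand-ins, not taken from the real virtchnl/iavf headers.

#include <stdint.h>
#include <stdio.h>

#define BIT(n)				(1ULL << (n))
/* Illustrative stand-in values; the real ones live in virtchnl.h
 * and the iavf headers. */
#define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC	BIT(26)
#define IAVF_RXDID_LEGACY_1			1
#define IAVF_RXDID_COMMS_OVS_1			22

/* A queue may use the flexible layout only when both sides agree:
 * the capability bit was negotiated AND the PF's RXDID bitmap lists
 * the OVS profile. Anything else falls back to the legacy format. */
static uint8_t pick_rxdid(uint32_t vf_cap_flags, uint64_t supported_rxdid)
{
	if ((vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) &&
	    (supported_rxdid & BIT(IAVF_RXDID_COMMS_OVS_1)))
		return IAVF_RXDID_COMMS_OVS_1;
	return IAVF_RXDID_LEGACY_1;
}

int main(void)
{
	printf("%u\n", pick_rxdid(VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC,
				  BIT(IAVF_RXDID_COMMS_OVS_1))); /* 22 */
	printf("%u\n", pick_rxdid(0, 0));                        /* 1 */
	return 0;
}
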
+diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
+index 9eccb7c41..67297dcb7 100644
+--- a/drivers/net/iavf/iavf_rxtx.c
++++ b/drivers/net/iavf/iavf_rxtx.c
+@@ -346,6 +346,14 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ 		return -ENOMEM;
+ 	}
+ 
++	if (vf->vf_res->vf_cap_flags &
++	    VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
++	    vf->supported_rxdid & BIT(IAVF_RXDID_COMMS_OVS_1)) {
++		rxq->rxdid = IAVF_RXDID_COMMS_OVS_1;
++	} else {
++		rxq->rxdid = IAVF_RXDID_LEGACY_1;
++	}
++
+ 	rxq->mp = mp;
+ 	rxq->nb_rx_desc = nb_desc;
+ 	rxq->rx_free_thresh = rx_free_thresh;
+@@ -720,6 +728,20 @@ iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
+ 	}
+ }
+ 
++static inline void
++iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
++			  volatile union iavf_rx_flex_desc *rxdp)
++{
++	if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
++	    (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
++		mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
++		mb->vlan_tci =
++			rte_le_to_cpu_16(rxdp->wb.l2tag1);
++	} else {
++		mb->vlan_tci = 0;
++	}
++}
++
+ /* Translate the rx descriptor status and error fields to pkt flags */
+ static inline uint64_t
+ iavf_rxd_to_pkt_flags(uint64_t qword)
+@@ -754,6 +776,87 @@ iavf_rxd_to_pkt_flags(uint64_t qword)
+ 	return flags;
+ }
+ 
++/* Translate the rx flex descriptor status to pkt flags */
++static inline void
++iavf_rxd_to_pkt_fields(struct rte_mbuf *mb,
++		       volatile union iavf_rx_flex_desc *rxdp)
++{
++	volatile struct iavf_32b_rx_flex_desc_comms_ovs *desc =
++		(volatile struct iavf_32b_rx_flex_desc_comms_ovs *)rxdp;
++	uint16_t stat_err;
++
++#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
++	stat_err = rte_le_to_cpu_16(desc->status_error0);
++	if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
++		mb->ol_flags |= PKT_RX_RSS_HASH;
++		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
++	}
++#endif
++}
++
++#define IAVF_RX_FLEX_ERR0_BITS	\
++	((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) |	\
++	 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |	\
++	 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) |	\
++	 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) |	\
++	 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) |	\
++	 (1 << IAVF_RX_FLEX_DESC_STATUS0_RXE_S))
++
++/* Rx L3/L4 checksum */
++static inline uint64_t
++iavf_flex_rxd_error_to_pkt_flags(uint16_t stat_err0)
++{
++	uint64_t flags = 0;
++
++	/* check if HW has decoded the packet and checksum */
++	if (unlikely(!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S))))
++		return 0;
++
++	if (likely(!(stat_err0 & IAVF_RX_FLEX_ERR0_BITS))) {
++		flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
++		return flags;
++	}
++
++	if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
++		flags |= PKT_RX_IP_CKSUM_BAD;
++	else
++		flags |= PKT_RX_IP_CKSUM_GOOD;
++
++	if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
++		flags |= PKT_RX_L4_CKSUM_BAD;
++	else
++		flags |= PKT_RX_L4_CKSUM_GOOD;
++
++	if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
++		flags |= PKT_RX_EIP_CKSUM_BAD;
++
++	return flags;
++}
++
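
The three helpers above turn write-back fields of the flexible descriptor into mbuf metadata: VLAN tag, RSS hash, and checksum status. A standalone worked example of the checksum decode follows; the bit positions assumed for the IAVF_RX_FLEX_DESC_STATUS0_* names and the flag values are illustrative only.

#include <stdint.h>
#include <stdio.h>

enum {
	S0_DD_S       = 0,  /* descriptor done */
	S0_EOF_S      = 1,  /* end of packet */
	S0_L3L4P_S    = 3,  /* HW parsed L3/L4 and ran checksums */
	S0_XSUM_IPE_S = 4,  /* IP checksum error */
	S0_XSUM_L4E_S = 5,  /* L4 checksum error */
};

#define F_IP_CKSUM_GOOD  (1u << 0)
#define F_IP_CKSUM_BAD   (1u << 1)
#define F_L4_CKSUM_GOOD  (1u << 2)
#define F_L4_CKSUM_BAD   (1u << 3)

static uint32_t decode(uint16_t stat_err0)
{
	uint32_t flags = 0;

	/* If HW did not parse the packet, report nothing at all */
	if (!(stat_err0 & (1 << S0_L3L4P_S)))
		return 0;

	flags |= (stat_err0 & (1 << S0_XSUM_IPE_S)) ?
		 F_IP_CKSUM_BAD : F_IP_CKSUM_GOOD;
	flags |= (stat_err0 & (1 << S0_XSUM_L4E_S)) ?
		 F_L4_CKSUM_BAD : F_L4_CKSUM_GOOD;
	return flags;
}

int main(void)
{
	/* DD+EOF+L3L4P set, no error bits: both checksums good */
	printf("0x%x\n", decode(0x000b)); /* prints 0x5 */
	/* same, plus the L4 error bit: IP good, L4 bad */
	printf("0x%x\n", decode(0x002b)); /* prints 0x9 */
	return 0;
}
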
++/* If the number of free RX descriptors is greater than the RX free
++ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
++ * register. Update the RDT with the value of the last processed RX
++ * descriptor minus 1, to guarantee that the RDT register is never
++ * equal to the RDH register, which creates a "full" ring situation
++ * from the hardware point of view.
++ */
++static inline void
++iavf_update_rx_tail(struct iavf_rx_queue *rxq, uint16_t nb_hold, uint16_t rx_id)
++{
++	nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
++
++	if (nb_hold > rxq->rx_free_thresh) {
++		PMD_RX_LOG(DEBUG,
++			   "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u",
++			   rxq->port_id, rxq->queue_id, rx_id, nb_hold);
++		rx_id = (uint16_t)((rx_id == 0) ?
++			(rxq->nb_rx_desc - 1) : (rx_id - 1));
++		IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
++		nb_hold = 0;
++	}
++	rxq->nb_rx_hold = nb_hold;
++}
++
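
iavf_update_rx_tail() centralizes the tail-register bookkeeping that was previously duplicated in each receive loop. A small worked example of the arithmetic, with plain variables standing in for the queue structure and the register write:

#include <stdint.h>
#include <stdio.h>

static uint16_t nb_rx_hold;	/* stand-in for rxq->nb_rx_hold */

static void update_rx_tail(uint16_t nb_desc, uint16_t rx_free_thresh,
			   uint16_t nb_hold, uint16_t rx_id)
{
	nb_hold = (uint16_t)(nb_hold + nb_rx_hold);
	if (nb_hold > rx_free_thresh) {
		/* last processed index minus one; wrapping keeps
		 * RDT != RDH so the ring never looks full to HW */
		rx_id = (uint16_t)((rx_id == 0) ? (nb_desc - 1) : (rx_id - 1));
		printf("write tail register = %u\n", rx_id);
		nb_hold = 0;
	}
	nb_rx_hold = nb_hold;
}

int main(void)
{
	/* 512-entry ring, threshold 32: a burst of 20 is only held */
	update_rx_tail(512, 32, 20, 20);  /* no write, hold = 20 */
	/* the next burst of 20 crosses the threshold: tail = 40 - 1 */
	update_rx_tail(512, 32, 20, 40);  /* prints 39, hold resets */
	return 0;
}
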
+ /* implement recv_pkts */
+ uint16_t
+ iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+@@ -854,23 +957,256 @@ iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+ 	}
+ 	rxq->rx_tail = rx_id;
+ 
+-	/* If the number of free RX descriptors is greater than the RX free
+-	 * threshold of the queue, advance the receive tail register of queue.
+-	 * Update that register with the value of the last processed RX
+-	 * descriptor minus 1.
+-	 */
+-	nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
+-	if (nb_hold > rxq->rx_free_thresh) {
+-		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+-			   "nb_hold=%u nb_rx=%u",
+-			   rxq->port_id, rxq->queue_id,
+-			   rx_id, nb_hold, nb_rx);
+-		rx_id = (uint16_t)((rx_id == 0) ?
+-			(rxq->nb_rx_desc - 1) : (rx_id - 1));
+-		IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+-		nb_hold = 0;
++	iavf_update_rx_tail(rxq, nb_hold, rx_id);
++
++	return nb_rx;
++}
++
++/* implement recv_pkts for flexible Rx descriptor */
++uint16_t
++iavf_recv_pkts_flex_rxd(void *rx_queue,
++			struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
++{
++	volatile union iavf_rx_desc *rx_ring;
++	volatile union iavf_rx_flex_desc *rxdp;
++	struct iavf_rx_queue *rxq;
++	union iavf_rx_flex_desc rxd;
++	struct rte_mbuf *rxe;
++	struct rte_eth_dev *dev;
++	struct rte_mbuf *rxm;
++	struct rte_mbuf *nmb;
++	uint16_t nb_rx;
++	uint16_t rx_stat_err0;
++	uint16_t rx_packet_len;
++	uint16_t rx_id, nb_hold;
++	uint64_t dma_addr;
++	uint64_t pkt_flags;
++	const uint32_t *ptype_tbl;
++
++	nb_rx = 0;
++	nb_hold = 0;
++	rxq = rx_queue;
++	rx_id = rxq->rx_tail;
++	rx_ring = rxq->rx_ring;
++	ptype_tbl = rxq->vsi->adapter->ptype_tbl;
++
++	while (nb_rx < nb_pkts) {
++		rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
++		rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
++
++		/* Check the DD bit first */
++		if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
++			break;
++		IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
++
++		nmb = rte_mbuf_raw_alloc(rxq->mp);
++		if (unlikely(!nmb)) {
++			dev = &rte_eth_devices[rxq->port_id];
++			dev->data->rx_mbuf_alloc_failed++;
++			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
++				   "queue_id=%u", rxq->port_id, rxq->queue_id);
++			break;
++		}
++
++		rxd = *rxdp;
++		nb_hold++;
++		rxe = rxq->sw_ring[rx_id];
++		rx_id++;
++		if (unlikely(rx_id == rxq->nb_rx_desc))
++			rx_id = 0;
++
++		/* Prefetch next mbuf */
++		rte_prefetch0(rxq->sw_ring[rx_id]);
++
++		/* When next RX descriptor is on a cache line boundary,
++		 * prefetch the next 4 RX descriptors and next 8 pointers
++		 * to mbufs.
++		 */
++		if ((rx_id & 0x3) == 0) {
++			rte_prefetch0(&rx_ring[rx_id]);
++			rte_prefetch0(rxq->sw_ring[rx_id]);
++		}
++		rxm = rxe;
++		rxe = nmb;
++		dma_addr =
++			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
++		rxdp->read.hdr_addr = 0;
++		rxdp->read.pkt_addr = dma_addr;
++
++		rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
++				IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
++
++		rxm->data_off = RTE_PKTMBUF_HEADROOM;
++		rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
++		rxm->nb_segs = 1;
++		rxm->next = NULL;
++		rxm->pkt_len = rx_packet_len;
++		rxm->data_len = rx_packet_len;
++		rxm->port = rxq->port_id;
++		rxm->ol_flags = 0;
++		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
++			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
++		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
++		iavf_rxd_to_pkt_fields(rxm, &rxd);
++		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
++		rxm->ol_flags |= pkt_flags;
++
++		rx_pkts[nb_rx++] = rxm;
+ 	}
+-	rxq->nb_rx_hold = nb_hold;
++	rxq->rx_tail = rx_id;
++
++	iavf_update_rx_tail(rxq, nb_hold, rx_id);
++
++	return nb_rx;
++}
++
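
The burst function above (and the scattered variant that follows) is only ever reached through the dev->rx_pkt_burst hook, so applications keep calling rte_eth_rx_burst() unchanged; the flags and RSS hash filled in from the flexible descriptor simply show up in the returned mbufs. A minimal polling loop as a sketch, with EAL and port setup elided and placeholder port/queue numbers:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define BURST_SIZE 32

static void
rx_loop(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *bufs[BURST_SIZE];
	uint16_t nb, i;

	for (;;) {
		nb = rte_eth_rx_burst(port_id, queue_id, bufs, BURST_SIZE);
		for (i = 0; i < nb; i++) {
			/* With RXDID_COMMS_OVS_1 active, ol_flags and
			 * hash.rss were filled from the flex descriptor */
			if (bufs[i]->ol_flags & PKT_RX_RSS_HASH)
				(void)bufs[i]->hash.rss;
			rte_pktmbuf_free(bufs[i]);
		}
	}
}
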
++/* implement recv_scattered_pkts for flexible Rx descriptor */
++uint16_t
++iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
++				  uint16_t nb_pkts)
++{
++	struct iavf_rx_queue *rxq = rx_queue;
++	union iavf_rx_flex_desc rxd;
++	struct rte_mbuf *rxe;
++	struct rte_mbuf *first_seg = rxq->pkt_first_seg;
++	struct rte_mbuf *last_seg = rxq->pkt_last_seg;
++	struct rte_mbuf *nmb, *rxm;
++	uint16_t rx_id = rxq->rx_tail;
++	uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
++	struct rte_eth_dev *dev;
++	uint16_t rx_stat_err0;
++	uint64_t dma_addr;
++	uint64_t pkt_flags;
++
++	volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
++	volatile union iavf_rx_flex_desc *rxdp;
++	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
++
++	while (nb_rx < nb_pkts) {
++		rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
++		rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
++
++		/* Check the DD bit */
++		if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
++			break;
++		IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
++
++		nmb = rte_mbuf_raw_alloc(rxq->mp);
++		if (unlikely(!nmb)) {
++			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
++				   "queue_id=%u", rxq->port_id, rxq->queue_id);
++			dev = &rte_eth_devices[rxq->port_id];
++			dev->data->rx_mbuf_alloc_failed++;
++			break;
++		}
++
++		rxd = *rxdp;
++		nb_hold++;
++		rxe = rxq->sw_ring[rx_id];
++		rx_id++;
++		if (rx_id == rxq->nb_rx_desc)
++			rx_id = 0;
++
++		/* Prefetch next mbuf */
++		rte_prefetch0(rxq->sw_ring[rx_id]);
++
++		/* When next RX descriptor is on a cache line boundary,
++		 * prefetch the next 4 RX descriptors and next 8 pointers
++		 * to mbufs.
++		 */
++		if ((rx_id & 0x3) == 0) {
++			rte_prefetch0(&rx_ring[rx_id]);
++			rte_prefetch0(rxq->sw_ring[rx_id]);
++		}
++
++		rxm = rxe;
++		rxe = nmb;
++		dma_addr =
++			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
++
++		/* Set data buffer address and data length of the mbuf */
++		rxdp->read.hdr_addr = 0;
++		rxdp->read.pkt_addr = dma_addr;
++		rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
++				IAVF_RX_FLX_DESC_PKT_LEN_M;
++		rxm->data_len = rx_packet_len;
++		rxm->data_off = RTE_PKTMBUF_HEADROOM;
++
++		/* If this is the first buffer of the received packet, set the
++		 * pointer to the first mbuf of the packet and initialize its
++		 * context. Otherwise, update the total length and the number
++		 * of segments of the current scattered packet, and update the
++		 * pointer to the last mbuf of the current packet.
++		 */
++		if (!first_seg) {
++			first_seg = rxm;
++			first_seg->nb_segs = 1;
++			first_seg->pkt_len = rx_packet_len;
++		} else {
++			first_seg->pkt_len =
++				(uint16_t)(first_seg->pkt_len +
++						rx_packet_len);
++			first_seg->nb_segs++;
++			last_seg->next = rxm;
++		}
++
++		/* If this is not the last buffer of the received packet,
++		 * update the pointer to the last mbuf of the current scattered
++		 * packet and continue to parse the RX ring.
++		 */
++		if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_EOF_S))) {
++			last_seg = rxm;
++			continue;
++		}
++
++		/* This is the last buffer of the received packet. If the CRC
++		 * is not stripped by the hardware:
++		 *  - Subtract the CRC length from the total packet length.
++		 *  - If the last buffer only contains the whole CRC or a part
++		 *  of it, free the mbuf associated to the last buffer. If part
++		 *  of the CRC is also contained in the previous mbuf, subtract
++		 *  the length of that CRC part from the data length of the
++		 *  previous mbuf.
++		 */
++		rxm->next = NULL;
++		if (unlikely(rxq->crc_len > 0)) {
++			first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
++			if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
++				rte_pktmbuf_free_seg(rxm);
++				first_seg->nb_segs--;
++				last_seg->data_len =
++					(uint16_t)(last_seg->data_len -
++					(RTE_ETHER_CRC_LEN - rx_packet_len));
++				last_seg->next = NULL;
++			} else {
++				rxm->data_len = (uint16_t)(rx_packet_len -
++							   RTE_ETHER_CRC_LEN);
++			}
++		}
++
++		first_seg->port = rxq->port_id;
++		first_seg->ol_flags = 0;
++		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
++			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
++		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
++		iavf_rxd_to_pkt_fields(first_seg, &rxd);
++		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
++
++		first_seg->ol_flags |= pkt_flags;
++
++		/* Prefetch data of first segment, if configured to do so. */
++		rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
++					  first_seg->data_off));
++		rx_pkts[nb_rx++] = first_seg;
++		first_seg = NULL;
++	}
++
++	/* Record index of the next RX descriptor to probe. */
++	rxq->rx_tail = rx_id;
++	rxq->pkt_first_seg = first_seg;
++	rxq->pkt_last_seg = last_seg;
++
++	iavf_update_rx_tail(rxq, nb_hold, rx_id);
+ 
+ 	return nb_rx;
+ }
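
The scattered path above chains one mbuf per descriptor and, when the hardware has not stripped the CRC, trims it from the tail of the chain. A standalone sketch of that trim rule with a simplified segment struct (stand-in types; 4-byte CRC as with RTE_ETHER_CRC_LEN):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define CRC_LEN 4	/* RTE_ETHER_CRC_LEN */

struct seg {		/* minimal stand-in for a chained rte_mbuf */
	uint16_t data_len;
	struct seg *next;
};

/* If the last segment holds only CRC bytes (or part of them), drop it
 * and shorten the previous segment by the remainder; otherwise just
 * shorten the last segment.
 */
static struct seg *trim_crc(struct seg *prev, struct seg *last)
{
	if (last->data_len <= CRC_LEN) {
		prev->data_len -= (CRC_LEN - last->data_len);
		prev->next = NULL;
		free(last);	/* rte_pktmbuf_free_seg() in the driver */
		return prev;
	}
	last->data_len -= CRC_LEN;
	return last;
}

int main(void)
{
	/* 1504 bytes on the wire split 1498 + 6: the 6-byte tail is
	 * 2 bytes of data plus the 4-byte CRC */
	struct seg *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));
	a->data_len = 1498; a->next = b;
	b->data_len = 6; b->next = NULL;

	struct seg *tail = trim_crc(a, b);
	printf("%u\n", tail->data_len);	/* prints 2 */
	free(b);
	free(a);
	return 0;
}
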
+@@ -1027,30 +1363,88 @@ iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ 	rxq->pkt_first_seg = first_seg;
+ 	rxq->pkt_last_seg = last_seg;
+ 
+-	/* If the number of free RX descriptors is greater than the RX free
+-	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+-	 * register. Update the RDT with the value of the last processed RX
+-	 * descriptor minus 1, to guarantee that the RDT register is never
+-	 * equal to the RDH register, which creates a "full" ring situtation
+-	 * from the hardware point of view.
++	iavf_update_rx_tail(rxq, nb_hold, rx_id);
++
++	return nb_rx;
++}
++
++#define IAVF_LOOK_AHEAD 8
++static inline int
++iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
++{
++	volatile union iavf_rx_flex_desc *rxdp;
++	struct rte_mbuf **rxep;
++	struct rte_mbuf *mb;
++	uint16_t stat_err0;
++	uint16_t pkt_len;
++	int32_t s[IAVF_LOOK_AHEAD], nb_dd;
++	int32_t i, j, nb_rx = 0;
++	uint64_t pkt_flags;
++	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
++
++	rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
++	rxep = &rxq->sw_ring[rxq->rx_tail];
++
++	stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
++
++	/* Make sure there is at least 1 packet to receive */
++	if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
++		return 0;
++
++	/* Scan LOOK_AHEAD descriptors at a time to determine which
++	 * descriptors reference packets that are ready to be received.
+ 	 */
+-	nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
+-	if (nb_hold > rxq->rx_free_thresh) {
+-		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+-			   "nb_hold=%u nb_rx=%u",
+-			   rxq->port_id, rxq->queue_id,
+-			   rx_id, nb_hold, nb_rx);
+-		rx_id = (uint16_t)(rx_id == 0 ?
+-			(rxq->nb_rx_desc - 1) : (rx_id - 1));
+-		IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+-		nb_hold = 0;
++	for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
++	     rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
++		/* Read desc statuses backwards to avoid race condition */
++		for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--)
++			s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
++
++		rte_smp_rmb();
++
++		/* Compute how many status bits were set */
++		for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
++			nb_dd += s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S);
++
++		nb_rx += nb_dd;
++
++		/* Translate descriptor info to mbuf parameters */
++		for (j = 0; j < nb_dd; j++) {
++			IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
++					  rxq->rx_tail +
++					  i * IAVF_LOOK_AHEAD + j);
++
++			mb = rxep[j];
++			pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
++				IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
++			mb->data_len = pkt_len;
++			mb->pkt_len = pkt_len;
++			mb->ol_flags = 0;
++
++			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
++				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
++			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
++			iavf_rxd_to_pkt_fields(mb, &rxdp[j]);
++			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
++			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
++
++			mb->ol_flags |= pkt_flags;
++		}
++
++		for (j = 0; j < IAVF_LOOK_AHEAD; j++)
++			rxq->rx_stage[i + j] = rxep[j];
++
++		if (nb_dd != IAVF_LOOK_AHEAD)
++			break;
+ 	}
+-	rxq->nb_rx_hold = nb_hold;
++
++	/* Clear software ring entries */
++	for (i = 0; i < nb_rx; i++)
++		rxq->sw_ring[rxq->rx_tail + i] = NULL;
+ 
+ 	return nb_rx;
+ }
+ 
+-#define IAVF_LOOK_AHEAD 8
+ static inline int
+ iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
+ {
+@@ -1219,7 +1613,10 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+ 	if (rxq->rx_nb_avail)
+ 		return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+ 
+-	nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq);
++	if (rxq->rxdid == IAVF_RXDID_COMMS_OVS_1)
++		nb_rx = (uint16_t)iavf_rx_scan_hw_ring_flex_rxd(rxq);
++	else
++		nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq);
+ 	rxq->rx_next_avail = 0;
+ 	rxq->rx_nb_avail = nb_rx;
+ 	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
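
iavf_rx_scan_hw_ring_flex_rxd() above fills the bulk-allocation stage by reading descriptor statuses eight at a time and stopping at the first group that is not fully done. The core counting idea in isolation, with a plain array standing in for the descriptor ring and DD assumed in bit 0:

#include <stdint.h>
#include <stdio.h>

#define LOOK_AHEAD 8
#define DD_BIT     (1 << 0)

static int scan_ready(const uint16_t *status, int ring_len)
{
	int nb_rx = 0;

	for (int i = 0; i + LOOK_AHEAD <= ring_len; i += LOOK_AHEAD) {
		int nb_dd = 0;

		/* Sum the DD bits in this group of eight statuses */
		for (int j = 0; j < LOOK_AHEAD; j++)
			nb_dd += status[i + j] & DD_BIT;
		nb_rx += nb_dd;
		if (nb_dd != LOOK_AHEAD)	/* partial group: stop */
			break;
	}
	return nb_rx;
}

int main(void)
{
	uint16_t st[16] = { 0 };

	for (int i = 0; i < 11; i++)	/* 11 descriptors done out of 16 */
		st[i] = DD_BIT;
	printf("%d\n", scan_ready(st, 16));	/* prints 11 */
	return 0;
}
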
+@@ -1663,6 +2060,7 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
+ {
+ 	struct iavf_adapter *adapter =
+ 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
++	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ #ifdef RTE_ARCH_X86
+ 	struct iavf_rx_queue *rxq;
+ 	int i;
+@@ -1702,7 +2100,10 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
+ 	if (dev->data->scattered_rx) {
+ 		PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
+ 			    dev->data->port_id);
+-		dev->rx_pkt_burst = iavf_recv_scattered_pkts;
++		if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
++			dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
++		else
++			dev->rx_pkt_burst = iavf_recv_scattered_pkts;
+ 	} else if (adapter->rx_bulk_alloc_allowed) {
+ 		PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
+ 			    dev->data->port_id);
+@@ -1710,7 +2111,10 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
+ 	} else {
+ 		PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
+ 			    dev->data->port_id);
+-		dev->rx_pkt_burst = iavf_recv_pkts;
++		if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
++			dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
++		else
++			dev->rx_pkt_burst = iavf_recv_pkts;
+ 	}
+ }
+ 
+@@ -1797,6 +2201,7 @@ iavf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
+ 
+ 	rxq = dev->data->rx_queues[queue_id];
+ 	rxdp = &rxq->rx_ring[rxq->rx_tail];
++
+ 	while ((desc < rxq->nb_rx_desc) &&
+ 	       ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
+ 		 IAVF_RXD_QW1_STATUS_MASK) >> IAVF_RXD_QW1_STATUS_SHIFT) &
+diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
+index 5e309631e..f33d1df41 100644
+--- a/drivers/net/iavf/iavf_rxtx.h
++++ b/drivers/net/iavf/iavf_rxtx.h
+@@ -62,6 +62,7 @@
+ #define iavf_rx_desc iavf_16byte_rx_desc
+ #else
+ #define iavf_rx_desc iavf_32byte_rx_desc
++#define iavf_rx_flex_desc iavf_32b_rx_flex_desc
+ #endif
+ 
+ struct iavf_rxq_ops {
+@@ -87,6 +88,7 @@ struct iavf_rx_queue {
+ 	struct rte_mbuf *pkt_first_seg; /* first segment of current packet */
+ 	struct rte_mbuf *pkt_last_seg;  /* last segment of current packet */
+ 	struct rte_mbuf fake_mbuf;      /* dummy mbuf */
++	uint8_t rxdid;
+ 
+ 	/* used for VPMD */
+ 	uint16_t rxrearm_nb;       /* number of remaining to be re-armed */
+@@ -379,9 +381,15 @@ void iavf_dev_tx_queue_release(void *txq);
+ void iavf_stop_queues(struct rte_eth_dev *dev);
+ uint16_t iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ 		       uint16_t nb_pkts);
++uint16_t iavf_recv_pkts_flex_rxd(void *rx_queue,
++				 struct rte_mbuf **rx_pkts,
++				 uint16_t nb_pkts);
+ uint16_t iavf_recv_scattered_pkts(void *rx_queue,
+ 				 struct rte_mbuf **rx_pkts,
+ 				 uint16_t nb_pkts);
++uint16_t iavf_recv_scattered_pkts_flex_rxd(void *rx_queue,
++					   struct rte_mbuf **rx_pkts,
++					   uint16_t nb_pkts);
+ uint16_t iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ 			uint16_t nb_pkts);
+ uint16_t iavf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
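
The iavf_vchnl.c hunks below wire the new VIRTCHNL_OP_GET_SUPPORTED_RXDIDS exchange into the admin-queue machinery: the VF sends the opcode with an empty payload and the PF replies with a 64-bit bitmap, one bit per supported descriptor profile. Schematically, with stubbed mailbox helpers standing in for iavf_execute_vf_cmd(); the opcode value and reply contents are illustrative assumptions:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct virtchnl_supported_rxdids_sketch {
	uint64_t supported_rxdids;
};

/* Stub mailbox helpers; the driver sends on the admin queue and
 * polls for the PF's reply via iavf_execute_vf_cmd(). */
static int send_to_pf(uint32_t op, const void *msg, uint32_t len)
{
	(void)msg; (void)len;
	printf("send op %u, no payload\n", op);
	return 0;
}

static int recv_from_pf(void *buf, uint32_t len)
{
	/* Pretend the PF supports RXDID 1 (legacy) and 22 (OVS) */
	struct virtchnl_supported_rxdids_sketch resp = {
		.supported_rxdids = (1ULL << 1) | (1ULL << 22),
	};

	memcpy(buf, &resp, len < sizeof(resp) ? len : sizeof(resp));
	return 0;
}

static int get_supported_rxdid(uint64_t *bitmap)
{
	struct virtchnl_supported_rxdids_sketch resp;

	if (send_to_pf(44 /* assumed opcode value */, NULL, 0))
		return -1;
	if (recv_from_pf(&resp, sizeof(resp)))
		return -1;
	*bitmap = resp.supported_rxdids;
	return 0;
}

int main(void)
{
	uint64_t rxdids = 0;

	if (get_supported_rxdid(&rxdids) == 0)
		printf("supported_rxdid bitmap: 0x%llx\n",
		       (unsigned long long)rxdids);
	return 0;
}
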
+diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
+index b7fb05d32..3f0d23a92 100644
+--- a/drivers/net/iavf/iavf_vchnl.c
++++ b/drivers/net/iavf/iavf_vchnl.c
+@@ -88,6 +88,7 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
+ 		break;
+ 	case VIRTCHNL_OP_VERSION:
+ 	case VIRTCHNL_OP_GET_VF_RESOURCES:
++	case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
+ 		/* for init virtchnl ops, need to poll the response */
+ 		do {
+ 			ret = iavf_read_msg_from_pf(adapter, args->out_size,
+@@ -338,7 +339,8 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
+ 	 * add advanced/optional offload capabilities
+ 	 */
+ 
+-	caps = IAVF_BASIC_OFFLOAD_CAPS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
++	caps = IAVF_BASIC_OFFLOAD_CAPS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
++		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
+ 
+ 	args.in_args = (uint8_t *)&caps;
+ 	args.in_args_size = sizeof(caps);
+@@ -375,6 +377,32 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
+ 	return 0;
+ }
+ 
++int
++iavf_get_supported_rxdid(struct iavf_adapter *adapter)
++{
++	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
++	struct iavf_cmd_info args;
++	int ret;
++
++	args.ops = VIRTCHNL_OP_GET_SUPPORTED_RXDIDS;
++	args.in_args = NULL;
++	args.in_args_size = 0;
++	args.out_buffer = vf->aq_resp;
++	args.out_size = IAVF_AQ_BUF_SZ;
++
++	ret = iavf_execute_vf_cmd(adapter, &args);
++	if (ret) {
++		PMD_DRV_LOG(ERR,
++			    "Failed to execute command of OP_GET_SUPPORTED_RXDIDS");
++		return ret;
++	}
++
++	vf->supported_rxdid =
++		((struct virtchnl_supported_rxdids *)args.out_buffer)->supported_rxdids;
++
++	return 0;
++}
++
+ int
+ iavf_enable_queues(struct iavf_adapter *adapter)
+ {
+@@ -567,6 +595,18 @@ iavf_configure_queues(struct iavf_adapter *adapter)
+ 			vc_qp->rxq.ring_len = rxq[i]->nb_rx_desc;
+ 			vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_phys_addr;
+ 			vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len;
++
++			if (vf->vf_res->vf_cap_flags &
++			    VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
++			    vf->supported_rxdid & BIT(IAVF_RXDID_COMMS_OVS_1)) {
++				vc_qp->rxq.rxdid = IAVF_RXDID_COMMS_OVS_1;
++				PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
++					    "Queue[%d]", vc_qp->rxq.rxdid, i);
++			} else {
++				vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_1;
++				PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
++					    "Queue[%d]", vc_qp->rxq.rxdid, i);
++			}
+ 		}
+ 	}
+ 
+-- 
+2.17.1
+ 