Diffstat (limited to 'drivers/net/i40e/i40e_rxtx.c')
-rw-r--r--  drivers/net/i40e/i40e_rxtx.c | 172
1 file changed, 93 insertions(+), 79 deletions(-)
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 2a28ee34..8bfa2517 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -55,6 +55,10 @@
PKT_TX_OUTER_IP_CKSUM)
#define I40E_TX_OFFLOAD_MASK ( \
+ PKT_TX_OUTER_IPV4 | \
+ PKT_TX_OUTER_IPV6 | \
+ PKT_TX_IPV4 | \
+ PKT_TX_IPV6 | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_OUTER_IP_CKSUM | \
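[Editor's note: the mask above is consumed by the Tx prepare path, where any mbuf
flag outside it is rejected as unsupported. A minimal sketch of that check,
assuming the usual complement-mask idiom; the NOTSUP name below is illustrative,
not taken from this patch:

#include <errno.h>
#include <rte_mbuf.h>

/* Illustrative complement of the supported mask within the generic
 * PKT_TX_OFFLOAD_MASK. */
#define I40E_TX_OFFLOAD_NOTSUP_MASK \
	(PKT_TX_OFFLOAD_MASK ^ I40E_TX_OFFLOAD_MASK)

static inline int
check_tx_olflags(const struct rte_mbuf *m)
{
	/* Before this hunk, PKT_TX_IPV4/IPV6 and the outer variants were
	 * missing from the supported mask, so packets carrying only those
	 * packet-type bits would fail a test like this one. */
	if (m->ol_flags & I40E_TX_OFFLOAD_NOTSUP_MASK)
		return -ENOTSUP;
	return 0;
}
]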
@@ -83,7 +87,8 @@ i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp)
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
if (rte_le_to_cpu_16(rxdp->wb.qword2.ext_status) &
(1 << I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) {
- mb->ol_flags |= PKT_RX_QINQ_STRIPPED;
+ mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
+ PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
mb->vlan_tci_outer = mb->vlan_tci;
mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2);
PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
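[Editor's note: with the extra flags, QinQ-stripped packets now also carry the
single-VLAN indications, so applications that only check PKT_RX_VLAN* keep
working. A short consumption sketch using standard mbuf fields, not part of
this patch:

#include <stdio.h>
#include <rte_mbuf.h>

/* Sketch: reading stripped VLAN tags from a received mbuf. */
static void
print_vlan_tags(const struct rte_mbuf *m)
{
	if (m->ol_flags & PKT_RX_QINQ_STRIPPED)
		printf("outer tci=%u inner tci=%u\n",
		       m->vlan_tci_outer, m->vlan_tci);
	else if (m->ol_flags & PKT_RX_VLAN_STRIPPED)
		printf("tci=%u\n", m->vlan_tci);
}
]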
@@ -1828,7 +1833,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->queue_id = queue_idx;
rxq->reg_idx = reg_idx;
rxq->port_id = dev->data->port_id;
- if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = ETHER_CRC_LEN;
else
rxq->crc_len = 0;
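[Editor's note: the flag tested above is set by the application at configure
time. A minimal sketch, with port_id and the queue counts as placeholders:

#include <string.h>
#include <rte_ethdev.h>

/* Sketch: ask the port to keep the Ethernet CRC on received frames.
 * When set, the driver code above sets rxq->crc_len to ETHER_CRC_LEN. */
static int
configure_keep_crc(uint16_t port_id)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
	return rte_eth_dev_configure(port_id, 1, 1, &conf); /* 1 Rx/1 Tx queue */
}
]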
@@ -2909,6 +2914,35 @@ i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.offloads = txq->offloads;
}
+static eth_rx_burst_t
+i40e_get_latest_rx_vec(bool scatter)
+{
+#ifdef RTE_ARCH_X86
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+ return scatter ? i40e_recv_scattered_pkts_vec_avx2 :
+ i40e_recv_pkts_vec_avx2;
+#endif
+ return scatter ? i40e_recv_scattered_pkts_vec :
+ i40e_recv_pkts_vec;
+}
+
+static eth_rx_burst_t
+i40e_get_recommend_rx_vec(bool scatter)
+{
+#ifdef RTE_ARCH_X86
+ /*
+ * since AVX frequency can be different to base frequency, limit
+ * use of AVX2 version to later platforms, not all those that could
+ * theoretically run it.
+ */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+ return scatter ? i40e_recv_scattered_pkts_vec_avx2 :
+ i40e_recv_pkts_vec_avx2;
+#endif
+ return scatter ? i40e_recv_scattered_pkts_vec :
+ i40e_recv_pkts_vec;
+}
+
void __attribute__((cold))
i40e_set_rx_function(struct rte_eth_dev *dev)
{
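[Editor's note: the two helpers split vector selection into two policies:
"latest" takes the newest ISA the CPU reports (AVX2 when available), while
"recommend" enables AVX2 only on CPUs that also report AVX512F, using that
flag as a proxy for newer parts where AVX2 does not downclock. ad->use_latest_vec
picks between them; in the companion patches of this series it is set from a
device argument. A hedged usage sketch follows; the devarg name
use-latest-supported-vec and the PCI address are assumptions, not shown in
this diff:

#include <rte_eal.h>

/* Sketch: opting in to the "latest" vector policy at EAL init. */
int
main(int argc, char **argv)
{
	char *eal_args[] = {
		argv[0],
		"-w", "0000:81:00.0,use-latest-supported-vec=1",
	};

	(void)argc;
	return rte_eal_init(3, eal_args) < 0 ? 1 : 0;
}
]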
@@ -2940,57 +2974,17 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
}
}
- if (dev->data->scattered_rx) {
- /* Set the non-LRO scattered callback: there are Vector and
- * single allocation versions.
- */
- if (ad->rx_vec_allowed) {
- PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
- "callback (port=%d).",
- dev->data->port_id);
-
- dev->rx_pkt_burst = i40e_recv_scattered_pkts_vec;
-#ifdef RTE_ARCH_X86
- /*
- * since AVX frequency can be different to base
- * frequency, limit use of AVX2 version to later
- * plaforms, not all those that could theoretically
- * run it.
- */
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
- dev->rx_pkt_burst =
- i40e_recv_scattered_pkts_vec_avx2;
-#endif
- } else {
- PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
- "allocation callback (port=%d).",
- dev->data->port_id);
- dev->rx_pkt_burst = i40e_recv_scattered_pkts;
- }
- /* If parameters allow we are going to choose between the following
- * callbacks:
- * - Vector
- * - Bulk Allocation
- * - Single buffer allocation (the simplest one)
- */
- } else if (ad->rx_vec_allowed) {
- PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
- "burst size no less than %d (port=%d).",
- RTE_I40E_DESCS_PER_LOOP,
- dev->data->port_id);
-
- dev->rx_pkt_burst = i40e_recv_pkts_vec;
-#ifdef RTE_ARCH_X86
- /*
- * since AVX frequency can be different to base
- * frequency, limit use of AVX2 version to later
- * plaforms, not all those that could theoretically
- * run it.
- */
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
- dev->rx_pkt_burst = i40e_recv_pkts_vec_avx2;
-#endif
- } else if (ad->rx_bulk_alloc_allowed) {
+ if (ad->rx_vec_allowed) {
+ /* Vec Rx path */
+ PMD_INIT_LOG(DEBUG, "Vector Rx path will be used on port=%d.",
+ dev->data->port_id);
+ if (ad->use_latest_vec)
+ dev->rx_pkt_burst =
+ i40e_get_latest_rx_vec(dev->data->scattered_rx);
+ else
+ dev->rx_pkt_burst =
+ i40e_get_recommend_rx_vec(dev->data->scattered_rx);
+ } else if (!dev->data->scattered_rx && ad->rx_bulk_alloc_allowed) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
"satisfied. Rx Burst Bulk Alloc function "
"will be used on port=%d.",
@@ -2998,12 +2992,12 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
} else {
- PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
- "satisfied, or Scattered Rx is requested "
- "(port=%d).",
+ /* Simple Rx Path. */
+ PMD_INIT_LOG(DEBUG, "Simple Rx path will be used on port=%d.",
dev->data->port_id);
-
- dev->rx_pkt_burst = i40e_recv_pkts;
+ dev->rx_pkt_burst = dev->data->scattered_rx ?
+ i40e_recv_scattered_pkts :
+ i40e_recv_pkts;
}
/* Propagate information about RX function choice through all queues. */
@@ -3049,6 +3043,31 @@ i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
txq->queue_id);
}
+static eth_tx_burst_t
+i40e_get_latest_tx_vec(void)
+{
+#ifdef RTE_ARCH_X86
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+ return i40e_xmit_pkts_vec_avx2;
+#endif
+ return i40e_xmit_pkts_vec;
+}
+
+static eth_tx_burst_t
+i40e_get_recommend_tx_vec(void)
+{
+#ifdef RTE_ARCH_X86
+ /*
+ * since AVX frequency can be different to base frequency, limit
+ * use of AVX2 version to later platforms, not all those that could
+ * theoretically run it.
+ */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+ return i40e_xmit_pkts_vec_avx2;
+#endif
+ return i40e_xmit_pkts_vec;
+}
+
void __attribute__((cold))
i40e_set_tx_function(struct rte_eth_dev *dev)
{
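[Editor's note: the Tx helpers mirror the Rx pair above. Whichever burst
function is installed, callers reach it through the generic ethdev entry
point, so applications are unaffected by the dispatch change; a standard-API
sketch:

#include <rte_ethdev.h>

/* Sketch: the selected dev->tx_pkt_burst is invoked indirectly. */
static uint16_t
send_burst(uint16_t port_id, struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	return rte_eth_tx_burst(port_id, 0 /* queue 0 */, pkts, nb_pkts);
}
]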
@@ -3073,17 +3092,12 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
if (ad->tx_simple_allowed) {
if (ad->tx_vec_allowed) {
PMD_INIT_LOG(DEBUG, "Vector tx finally be used.");
- dev->tx_pkt_burst = i40e_xmit_pkts_vec;
-#ifdef RTE_ARCH_X86
- /*
- * since AVX frequency can be different to base
- * frequency, limit use of AVX2 version to later
- * plaforms, not all those that could theoretically
- * run it.
- */
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
- dev->tx_pkt_burst = i40e_xmit_pkts_vec_avx2;
-#endif
+ if (ad->use_latest_vec)
+ dev->tx_pkt_burst =
+ i40e_get_latest_tx_vec();
+ else
+ dev->tx_pkt_burst =
+ i40e_get_recommend_tx_vec();
} else {
PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
dev->tx_pkt_burst = i40e_xmit_pkts_simple;
@@ -3166,13 +3180,13 @@ i40e_set_default_pctype_table(struct rte_eth_dev *dev)
}
/* Stubs needed for linkage when CONFIG_RTE_I40E_INC_VECTOR is set to 'n' */
-int __attribute__((weak))
+__rte_weak int
i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
{
return -1;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
i40e_recv_pkts_vec(
void __rte_unused *rx_queue,
struct rte_mbuf __rte_unused **rx_pkts,
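[Editor's note: the remaining hunks only swap the attribute spelling:
__rte_weak is DPDK's wrapper for the compiler's weak attribute, defined in
rte_common.h as __attribute__((__weak__)). A minimal sketch of the pattern
these stubs rely on:

/* A weak default is linked only when no strong definition of the same
 * symbol exists, e.g. when CONFIG_RTE_I40E_INC_VECTOR is 'n'. */
__attribute__((__weak__)) int
vec_path_available(void)
{
	return 0;	/* weak fallback: no vector path compiled in */
}

/* In the vector object file (compiled only when the option is 'y'):
 * int vec_path_available(void) { return 1; }  -- strong, wins at link time
 */
]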
@@ -3181,7 +3195,7 @@ i40e_recv_pkts_vec(
return 0;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
i40e_recv_scattered_pkts_vec(
void __rte_unused *rx_queue,
struct rte_mbuf __rte_unused **rx_pkts,
@@ -3190,7 +3204,7 @@ i40e_recv_scattered_pkts_vec(
return 0;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
i40e_recv_pkts_vec_avx2(void __rte_unused *rx_queue,
struct rte_mbuf __rte_unused **rx_pkts,
uint16_t __rte_unused nb_pkts)
@@ -3198,7 +3212,7 @@ i40e_recv_pkts_vec_avx2(void __rte_unused *rx_queue,
return 0;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
i40e_recv_scattered_pkts_vec_avx2(void __rte_unused *rx_queue,
struct rte_mbuf __rte_unused **rx_pkts,
uint16_t __rte_unused nb_pkts)
@@ -3206,25 +3220,25 @@ i40e_recv_scattered_pkts_vec_avx2(void __rte_unused *rx_queue,
return 0;
}
-int __attribute__((weak))
+__rte_weak int
i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq)
{
return -1;
}
-int __attribute__((weak))
+__rte_weak int
i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
{
return -1;
}
-void __attribute__((weak))
+__rte_weak void
i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused*rxq)
{
return;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
i40e_xmit_fixed_burst_vec(void __rte_unused * tx_queue,
struct rte_mbuf __rte_unused **tx_pkts,
uint16_t __rte_unused nb_pkts)
@@ -3232,7 +3246,7 @@ i40e_xmit_fixed_burst_vec(void __rte_unused * tx_queue,
return 0;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
i40e_xmit_pkts_vec_avx2(void __rte_unused * tx_queue,
struct rte_mbuf __rte_unused **tx_pkts,
uint16_t __rte_unused nb_pkts)