path: root/build/external/patches/dpdk_20.02/0013-net-iavf-support-flow-mark-in-AVX-path.patch
Diffstat (limited to 'build/external/patches/dpdk_20.02/0013-net-iavf-support-flow-mark-in-AVX-path.patch')
-rw-r--r--  build/external/patches/dpdk_20.02/0013-net-iavf-support-flow-mark-in-AVX-path.patch  121
1 file changed, 121 insertions, 0 deletions
diff --git a/build/external/patches/dpdk_20.02/0013-net-iavf-support-flow-mark-in-AVX-path.patch b/build/external/patches/dpdk_20.02/0013-net-iavf-support-flow-mark-in-AVX-path.patch
new file mode 100644
index 00000000000..74baf14a2b2
--- /dev/null
+++ b/build/external/patches/dpdk_20.02/0013-net-iavf-support-flow-mark-in-AVX-path.patch
@@ -0,0 +1,121 @@
+From f5de510dd842be737259ef31d1300b57890ae90e Mon Sep 17 00:00:00 2001
+From: Leyi Rong <leyi.rong@intel.com>
+Date: Wed, 8 Apr 2020 14:22:07 +0800
+Subject: [DPDK 13/17] net/iavf: support flow mark in AVX path
+
+Support Flow Director mark ID parsing from Flex
+Rx descriptor in AVX path.
+
+Signed-off-by: Leyi Rong <leyi.rong@intel.com>
+---
+ drivers/net/iavf/iavf_rxtx_vec_avx2.c | 72 +++++++++++++++++++++++++--
+ 1 file changed, 67 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+index b23188fd3..3bf5833fa 100644
+--- a/drivers/net/iavf/iavf_rxtx_vec_avx2.c
++++ b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+@@ -616,6 +616,25 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq,
+ return received;
+ }
+
++static inline __m256i
++flex_rxd_to_fdir_flags_vec_avx2(const __m256i fdir_id0_7)
++{
++#define FDID_MIS_MAGIC 0xFFFFFFFF
++ RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
++ RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
++ const __m256i pkt_fdir_bit = _mm256_set1_epi32(PKT_RX_FDIR |
++ PKT_RX_FDIR_ID);
++ /* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
++ const __m256i fdir_mis_mask = _mm256_set1_epi32(FDID_MIS_MAGIC);
++ __m256i fdir_mask = _mm256_cmpeq_epi32(fdir_id0_7,
++ fdir_mis_mask);
++ /* this XOR op results to bit-reverse the fdir_mask */
++ fdir_mask = _mm256_xor_si256(fdir_mask, fdir_mis_mask);
++ const __m256i fdir_flags = _mm256_and_si256(fdir_mask, pkt_fdir_bit);
++
++ return fdir_flags;
++}
++
+ static inline uint16_t
+ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
+ struct rte_mbuf **rx_pkts,
+@@ -678,8 +697,8 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
+ const __m256i shuf_msk =
+ _mm256_set_epi8
+ (/* first descriptor */
+- 15, 14,
+- 13, 12, /* octet 12~15, 32 bits rss */
++ 0xFF, 0xFF,
++ 0xFF, 0xFF, /* rss not supported */
+ 11, 10, /* octet 10~11, 16 bits vlan_macip */
+ 5, 4, /* octet 4~5, 16 bits data_len */
+ 0xFF, 0xFF, /* skip hi 16 bits pkt_len, zero out */
+@@ -687,8 +706,8 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 0xFF, 0xFF, /*pkt_type set as unknown */
+ /* second descriptor */
+- 15, 14,
+- 13, 12, /* octet 12~15, 32 bits rss */
++ 0xFF, 0xFF,
++ 0xFF, 0xFF, /* rss not supported */
+ 11, 10, /* octet 10~11, 16 bits vlan_macip */
+ 5, 4, /* octet 4~5, 16 bits data_len */
+ 0xFF, 0xFF, /* skip hi 16 bits pkt_len, zero out */
+@@ -930,8 +949,51 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
+ rss_vlan_flag_bits);
+
+ /* merge flags */
+- const __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
++ __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
+ rss_vlan_flags);
++
++ if (rxq->fdir_enabled) {
++ const __m256i fdir_id4_7 =
++ _mm256_unpackhi_epi32(raw_desc6_7, raw_desc4_5);
++
++ const __m256i fdir_id0_3 =
++ _mm256_unpackhi_epi32(raw_desc2_3, raw_desc0_1);
++
++ const __m256i fdir_id0_7 =
++ _mm256_unpackhi_epi64(fdir_id4_7, fdir_id0_3);
++
++ const __m256i fdir_flags =
++ flex_rxd_to_fdir_flags_vec_avx2(fdir_id0_7);
++
++ /* merge with fdir_flags */
++ mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_flags);
++
++ /* write to mbuf: have to use scalar store here */
++ rx_pkts[i + 0]->hash.fdir.hi =
++ _mm256_extract_epi32(fdir_id0_7, 3);
++
++ rx_pkts[i + 1]->hash.fdir.hi =
++ _mm256_extract_epi32(fdir_id0_7, 7);
++
++ rx_pkts[i + 2]->hash.fdir.hi =
++ _mm256_extract_epi32(fdir_id0_7, 2);
++
++ rx_pkts[i + 3]->hash.fdir.hi =
++ _mm256_extract_epi32(fdir_id0_7, 6);
++
++ rx_pkts[i + 4]->hash.fdir.hi =
++ _mm256_extract_epi32(fdir_id0_7, 1);
++
++ rx_pkts[i + 5]->hash.fdir.hi =
++ _mm256_extract_epi32(fdir_id0_7, 5);
++
++ rx_pkts[i + 6]->hash.fdir.hi =
++ _mm256_extract_epi32(fdir_id0_7, 0);
++
++ rx_pkts[i + 7]->hash.fdir.hi =
++ _mm256_extract_epi32(fdir_id0_7, 4);
++ } /* if() on fdir_enabled */
++
+ /**
+ * At this point, we have the 8 sets of flags in the low 16-bits
+ * of each 32-bit value in vlan0.
+--
+2.17.1
+
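
Note on the fdir-flag computation in the patch above: per 32-bit lane, flex_rxd_to_fdir_flags_vec_avx2() compares the descriptor's flow_id against the 0xFFFFFFFF "mismatch" magic, inverts the resulting mask with the XOR, and ANDs it with the PKT_RX_FDIR | PKT_RX_FDIR_ID constant. A minimal scalar sketch of the same logic follows; the helper name is hypothetical and the snippet is for illustration only, not part of the patch.

    #include <stdint.h>
    #include <rte_mbuf.h>	/* PKT_RX_FDIR, PKT_RX_FDIR_ID */

    /* Hypothetical scalar equivalent of flex_rxd_to_fdir_flags_vec_avx2():
     * flow_id == 0xFFFFFFFF (FDID_MIS_MAGIC) means no Flow Director match;
     * any other value means a rule matched, so both flags are raised. */
    static inline uint64_t
    fdir_flags_scalar(uint32_t flow_id)
    {
    	if (flow_id == UINT32_C(0xFFFFFFFF))
    		return 0;
    	return PKT_RX_FDIR | PKT_RX_FDIR_ID;
    }

The AVX2 version produces this result for eight descriptors at once: _mm256_cmpeq_epi32() yields an all-ones lane exactly where flow_id equals the magic value, the XOR with the same all-ones constant flips the mask so that matching descriptors keep their bits, and the final AND leaves only the two flag bits set. The non-sequential _mm256_extract_epi32() indices in the scalar stores account for how the preceding unpack operations interleave the descriptors across the register's two 128-bit lanes.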