author     Christian Ehrhardt <christian.ehrhardt@canonical.com>    2017-05-16 14:51:32 +0200
committer  Christian Ehrhardt <christian.ehrhardt@canonical.com>    2017-05-16 16:20:45 +0200
commit     7595afa4d30097c1177b69257118d8ad89a539be (patch)
tree       4bfeadc905c977e45e54a90c42330553b8942e4e /drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
parent     ce3d555e43e3795b5d9507fcfc76b7a0a92fd0d6 (diff)
Imported Upstream version 17.05
Change-Id: Id1e419c5a214e4a18739663b91f0f9a549f1fdc6
Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
Diffstat (limited to 'drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c')
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c  102
1 file changed, 53 insertions(+), 49 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
index abbf2841..a7bc199f 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
@@ -82,23 +82,10 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
 	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
 	for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
 		__m128i vaddr0, vaddr1;
-		uintptr_t p0, p1;
 
 		mb0 = rxep[0].mbuf;
 		mb1 = rxep[1].mbuf;
 
-		/*
-		 * Flush mbuf with pkt template.
-		 * Data to be rearmed is 6 bytes long.
-		 * Though, RX will overwrite ol_flags that are coming next
-		 * anyway. So overwrite whole 8 bytes with one load:
-		 * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
-		 */
-		p0 = (uintptr_t)&mb0->rearm_data;
-		*(uint64_t *)p0 = rxq->mbuf_initializer;
-		p1 = (uintptr_t)&mb1->rearm_data;
-		*(uint64_t *)p1 = rxq->mbuf_initializer;
-
 		/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
 		vaddr0 = _mm_loadu_si128((__m128i *)&(mb0->buf_addr));
 		vaddr1 = _mm_loadu_si128((__m128i *)&(mb1->buf_addr));
@@ -133,23 +120,12 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
 	IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
 }
 
-/* Handling the offload flags (olflags) field takes computation
- * time when receiving packets. Therefore we provide a flag to disable
- * the processing of the olflags field when they are not needed. This
- * gives improved performance, at the cost of losing the offload info
- * in the received packet
- */
-#ifdef RTE_IXGBE_RX_OLFLAGS_ENABLE
-
 static inline void
-desc_to_olflags_v(__m128i descs[4], uint8_t vlan_flags,
+desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
 	struct rte_mbuf **rx_pkts)
 {
 	__m128i ptype0, ptype1, vtag0, vtag1, csum;
-	union {
-		uint16_t e[4];
-		uint64_t dword;
-	} vol;
+	__m128i rearm0, rearm1, rearm2, rearm3;
 
 	/* mask everything except rss type */
 	const __m128i rsstype_msk = _mm_set_epi16(
@@ -228,18 +204,41 @@ desc_to_olflags_v(__m128i descs[4], uint8_t vlan_flags,
 	vtag1 = _mm_or_si128(vtag0, vtag1);
 	vtag1 = _mm_or_si128(ptype0, vtag1);
 
-	vol.dword = _mm_cvtsi128_si64(vtag1);
-
-	rx_pkts[0]->ol_flags = vol.e[0];
-	rx_pkts[1]->ol_flags = vol.e[1];
-	rx_pkts[2]->ol_flags = vol.e[2];
-	rx_pkts[3]->ol_flags = vol.e[3];
-}
+	/*
+	 * At this point, we have the 4 sets of flags in the low 64-bits
+	 * of vtag1 (4x16).
+	 * We want to extract these, and merge them with the mbuf init data
+	 * so we can do a single 16-byte write to the mbuf to set the flags
+	 * and all the other initialization fields. Extracting the
+	 * appropriate flags means that we have to do a shift and blend for
+	 * each mbuf before we do the write.
+	 */
+#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
+
+	rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 8), 0x10);
+	rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 6), 0x10);
+	rearm2 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 4), 0x10);
+	rearm3 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 2), 0x10);
+
 #else
-#define desc_to_olflags_v(desc, vlan_flags, rx_pkts) do { \
-		RTE_SET_USED(vlan_flags); \
-	} while (0)
-#endif
+	rearm0 = _mm_slli_si128(vtag1, 14);
+	rearm1 = _mm_slli_si128(vtag1, 12);
+	rearm2 = _mm_slli_si128(vtag1, 10);
+	rearm3 = _mm_slli_si128(vtag1, 8);
+
+	rearm0 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm0, 48));
+	rearm1 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm1, 48));
+	rearm2 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm2, 48));
+	rearm3 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm3, 48));
+
+#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
+
+	_mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
+	_mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
+	_mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
+	_mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
+}
 
 /*
  * vPMD raw receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
@@ -268,6 +267,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		0, 0            /* ignore pkt_type field */
 	);
 	__m128i dd_check, eop_check;
+	__m128i mbuf_init;
 	uint8_t vlan_flags;
 
 	/* nb_pkts shall be less equal than RTE_IXGBE_MAX_RX_BURST */
@@ -313,6 +313,8 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		0xFF, 0xFF
 	);
 
+	mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
+
 	/* Cache is empty -> need to scan the buffer rings, but first move
 	 * the next 'n' mbufs into the cache
 	 */
@@ -335,9 +337,13 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		__m128i descs[RTE_IXGBE_DESCS_PER_LOOP];
 		__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
 		__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
-		__m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. */
+		/* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
+		__m128i mbp1;
+#if defined(RTE_ARCH_X86_64)
+		__m128i mbp2;
+#endif
 
-		/* B.1 load 1 mbuf point */
+		/* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */
 		mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
 
 		/* Read desc statuses backwards to avoid race condition */
@@ -345,11 +351,13 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
 		rte_compiler_barrier();
 
-		/* B.2 copy 2 mbuf point into rx_pkts  */
+		/* B.2 copy 2 64 bit or 4 32 bit mbuf point into rx_pkts */
 		_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
 
-		/* B.1 load 1 mbuf point */
+#if defined(RTE_ARCH_X86_64)
+		/* B.1 load 2 64 bit mbuf points */
 		mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
+#endif
 
 		descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
 		rte_compiler_barrier();
@@ -358,8 +366,10 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		rte_compiler_barrier();
 		descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
 
+#if defined(RTE_ARCH_X86_64)
 		/* B.2 copy 2 mbuf point into rx_pkts  */
 		_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+#endif
 
 		if (split_packet) {
 			rte_mbuf_prefetch_part2(rx_pkts[pos]);
@@ -385,7 +395,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
 
 		/* set ol_flags with vlan packet type */
-		desc_to_olflags_v(descs, vlan_flags, &rx_pkts[pos]);
+		desc_to_olflags_v(descs, mbuf_init, vlan_flags, &rx_pkts[pos]);
 
 		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
 		pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
@@ -425,12 +435,6 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 			/* store the resulting 32-bit value */
 			*(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
 			split_packet += RTE_IXGBE_DESCS_PER_LOOP;
-
-			/* zero-out next pointers */
-			rx_pkts[pos]->next = NULL;
-			rx_pkts[pos + 1]->next = NULL;
-			rx_pkts[pos + 2]->next = NULL;
-			rx_pkts[pos + 3]->next = NULL;
 		}
 
 		/* C.3 calc available number of desc */
@@ -537,8 +541,8 @@ vtx(volatile union ixgbe_adv_tx_desc *txdp,
 }
 
 uint16_t
-ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
-		uint16_t nb_pkts)
+ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts)
 {
 	struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
 	volatile union ixgbe_adv_tx_desc *txdp;
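
The heart of the patch is the rewritten desc_to_olflags_v(): instead of four scalar 16-bit ol_flags writes, each packet's flag word is merged into a copy of rxq->mbuf_initializer and committed with a single 16-byte store that covers rearm_data and ol_flags together. A minimal standalone sketch of the two merge paths follows; the template and flag constants are made-up placeholders, not values from the driver. It checks that the blend path (gated on RTE_MACHINE_CPUFLAG_SSE4_2 in the patch; _mm_blend_epi16 itself is an SSE4.1 intrinsic) and the plain-SSE2 shift/or fallback produce identical 128-bit images:

	/* Build with: gcc -O2 -msse4.1 rearm_sketch.c */
	#include <smmintrin.h>	/* _mm_blend_epi16 (SSE4.1) */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* Placeholder 64-bit rearm template and four packed 16-bit
		 * flag words, standing in for rxq->mbuf_initializer and
		 * vtag1 from desc_to_olflags_v(). */
		const uint64_t initializer = 0x0001000100010080ULL;
		const __m128i mbuf_init = _mm_set_epi64x(0, initializer);
		const __m128i vtag1 = _mm_set_epi64x(0, 0x4444333322221111ULL);

		/* SSE4 path for packet 0: move flag word 0 up to bytes 8-9,
		 * then blend it in (mask 0x10 = take 16-bit word 4 from the
		 * second operand, everything else from mbuf_init). */
		__m128i rearm_blend = _mm_blend_epi16(mbuf_init,
				_mm_slli_si128(vtag1, 8), 0x10);

		/* SSE2 fallback: park flag word 0 in the top 16 bits of the
		 * high 64-bit lane, shift it down to that lane's low 16 bits,
		 * then OR with the template (whose high lane is zero). */
		__m128i tmp = _mm_slli_si128(vtag1, 14);
		__m128i rearm_shift = _mm_or_si128(mbuf_init,
				_mm_srli_epi64(tmp, 48));

		uint8_t a[16], b[16];
		_mm_storeu_si128((__m128i *)a, rearm_blend);
		_mm_storeu_si128((__m128i *)b, rearm_shift);

		/* Both images: bytes 0-7 = template, bytes 8-9 = flags
		 * 0x1111, bytes 10-15 = zero. */
		printf("paths match: %s\n", memcmp(a, b, 16) ? "no" : "yes");
		return 0;
	}

With bytes 8-15 holding the zero-extended flag word, one _mm_store_si128() to &rx_pkts[n]->rearm_data initializes the template fields and ol_flags at once, which is also why the separate per-mbuf template write could be dropped from ixgbe_rxq_rearm().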
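
The new RTE_ARCH_X86_64 guards around mbp2 fall out of pointer width: one XMM register holds two 8-byte mbuf pointers on a 64-bit build but four 4-byte pointers on a 32-bit build, so the second load/store pair is only needed on 64-bit. A tiny illustrative sketch of the same idea, with hypothetical ring and pkts arrays standing in for sw_ring[] and rx_pkts[]:

	#include <emmintrin.h>
	#include <stdio.h>

	int main(void)
	{
		int dummy[4];
		/* Stand-ins for sw_ring[] entries and rx_pkts[] slots. */
		void *ring[4] = { &dummy[0], &dummy[1], &dummy[2], &dummy[3] };
		void *pkts[4] = { 0, 0, 0, 0 };

		/* One 16-byte load moves 16 / sizeof(void *) pointers:
		 * two per load on 64-bit, four per load on 32-bit. */
		_mm_storeu_si128((__m128i *)&pkts[0],
				_mm_loadu_si128((__m128i *)&ring[0]));
		if (sizeof(void *) == 8)	/* 64-bit: entries 2,3 need a second load */
			_mm_storeu_si128((__m128i *)&pkts[2],
					_mm_loadu_si128((__m128i *)&ring[2]));

		printf("%u pointers per 16-byte load\n",
				(unsigned)(16 / sizeof(void *)));
		return 0;
	}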