author    | Damjan Marion <damarion@cisco.com>    | 2018-05-07 15:20:25 +0200
committer | Florin Coras <florin.coras@gmail.com> | 2018-05-07 19:44:31 +0000
commit    | c755c05bd6d38a446e68e79068475bb9bab68010 (patch)
tree      | 9849d5a5ac0bad7293fb1fe74c60e212a955f3d9 /src/plugins/dpdk/device
parent    | 68e2ffb3ef7e1db908fc874e733d0f4db18e0cb5 (diff)
dpdk: improve perf of buffer indices calc in the input node
Change-Id: I16557189aa4a763ec496cb4a45f6e12f2d46971f
Signed-off-by: Damjan Marion <damarion@cisco.com>
Diffstat (limited to 'src/plugins/dpdk/device')
-rw-r--r-- | src/plugins/dpdk/device/node.c | 87
1 file changed, 53 insertions, 34 deletions
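The optimization rests on VPP's buffer layout: each vlib_buffer_t sits immediately after its rte_mbuf inside one contiguous, cache-line-aligned buffer region, so an mbuf pointer converts to a 32-bit buffer index with a subtract and a shift, with no lookup at all. The patch additionally folds the two constants into one, since ((p + sizeof (struct rte_mbuf)) - buffer_mem_start) equals (p - (buffer_mem_start - sizeof (struct rte_mbuf))); that folded constant is the off4 value in the diff below. A minimal scalar sketch of the arithmetic (illustrative only, not VPP source; the _example names are hypothetical and 64-byte cache lines are assumed):

#include <stdint.h>
#include <stddef.h>

#define LOG2_CACHE_LINE_BYTES_EXAMPLE 6 /* assumed 64-byte cache lines */

static inline uint32_t
mbuf_to_buffer_index_example (uintptr_t mbuf_addr,
                              uintptr_t buffer_mem_start,
                              size_t mbuf_hdr_size)
{
  /* precomputable once per burst: fold the mbuf-header advance
     into the base address */
  uintptr_t off = buffer_mem_start - mbuf_hdr_size;
  /* the index is the distance, in cache lines, from the start of
     buffer memory to the vlib_buffer_t that follows the mbuf */
  return (uint32_t) ((mbuf_addr - off) >> LOG2_CACHE_LINE_BYTES_EXAMPLE);
}

Because the subtract-and-shift is pure data-parallel arithmetic, four pointers can be converted at once in a 256-bit register, which is what the vectorized path in the diff does.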
diff --git a/src/plugins/dpdk/device/node.c b/src/plugins/dpdk/device/node.c
index b8fe834755c..9b59830af33 100644
--- a/src/plugins/dpdk/device/node.c
+++ b/src/plugins/dpdk/device/node.c
@@ -214,37 +214,62 @@ poll_rate_limit (dpdk_main_t * dm)
  */
 static_always_inline void
-dpdk_mbuf_to_buffer_index_x4 (vlib_main_t * vm, struct rte_mbuf **mb,
-			      u32 * buffers)
+dpdk_mbufs_to_buffer_indices (vlib_main_t * vm, struct rte_mbuf **mb,
+			      u32 * bi, uword n_left)
 {
 #ifdef CLIB_HAVE_VEC256
-  vlib_buffer_main_t *bm = &buffer_main;
-  u64x4 v = *(u64x4 *) mb;
-  u32x8 v2, mask = { 0, 2, 4, 6, 1, 3, 5, 7 };
-
-  /* load 4 pointers into 256-bit register */
-  v = u64x4_load_unaligned (mb);
-
-  /* vlib_buffer_t is straight after rte_mbuf so advance all 4
-     pointers for size of rte_mbuf */
-  v += u64x4_splat (sizeof (struct rte_mbuf));
-
-  /* calculate 4 buffer indices in paralled */
-  v = (v - u64x4_splat (bm->buffer_mem_start)) >> CLIB_LOG2_CACHE_LINE_BYTES;
-
-  /* permute 256-bit register so lower u32s of each buffer index are
-   * placed into lower 128-bits */
-  v2 = u32x8_permute ((u32x8) v, mask);
+  u32x8 mask = { 0, 2, 4, 6, 1, 3, 5, 7 };
+  u64x4 off4 = u64x4_splat (buffer_main.buffer_mem_start -
+			    sizeof (struct rte_mbuf));
+#endif
 
-  /* extract lower 128-bits and save them to the array of buffer indices */
-  u32x4_store_unaligned (u32x8_extract_lo (v2), buffers);
+  while (n_left >= 8)
+    {
+#ifdef CLIB_HAVE_VEC256
+      /* load 4 pointers into 256-bit register */
+      u64x4 v0 = u64x4_load_unaligned (mb);
+      u64x4 v1 = u64x4_load_unaligned (mb + 4);
+      u32x8 v2, v3;
+
+      /* calculate 4 buffer indices in parallel
+         vlib_buffer_t is straight after rte_mbuf so advance all 4
+         pointers for size of rte_mbuf */
+      v0 -= off4;
+      v1 -= off4;
+
+      v0 >>= CLIB_LOG2_CACHE_LINE_BYTES;
+      v1 >>= CLIB_LOG2_CACHE_LINE_BYTES;
+
+      /* permute 256-bit register so lower u32s of each buffer index are
+       * placed into lower 128-bits */
+      v2 = u32x8_permute ((u32x8) v0, mask);
+      v3 = u32x8_permute ((u32x8) v1, mask);
+
+      /* extract lower 128-bits and save them to the array of buffer indices */
+      u32x4_store_unaligned (u32x8_extract_lo (v2), bi);
+      u32x4_store_unaligned (u32x8_extract_lo (v3), bi + 4);
 #else
-  /* equivalent non-nector implementation */
-  buffers[0] = vlib_get_buffer_index (vm, vlib_buffer_from_rte_mbuf (mb[0]));
-  buffers[1] = vlib_get_buffer_index (vm, vlib_buffer_from_rte_mbuf (mb[1]));
-  buffers[2] = vlib_get_buffer_index (vm, vlib_buffer_from_rte_mbuf (mb[2]));
-  buffers[3] = vlib_get_buffer_index (vm, vlib_buffer_from_rte_mbuf (mb[3]));
+      /* equivalent non-vector implementation */
+      bi[0] = vlib_get_buffer_index (vm, vlib_buffer_from_rte_mbuf (mb[0]));
+      bi[1] = vlib_get_buffer_index (vm, vlib_buffer_from_rte_mbuf (mb[1]));
+      bi[2] = vlib_get_buffer_index (vm, vlib_buffer_from_rte_mbuf (mb[2]));
+      bi[3] = vlib_get_buffer_index (vm, vlib_buffer_from_rte_mbuf (mb[3]));
+      bi[4] = vlib_get_buffer_index (vm, vlib_buffer_from_rte_mbuf (mb[4]));
+      bi[5] = vlib_get_buffer_index (vm, vlib_buffer_from_rte_mbuf (mb[5]));
+      bi[6] = vlib_get_buffer_index (vm, vlib_buffer_from_rte_mbuf (mb[6]));
+      bi[7] = vlib_get_buffer_index (vm, vlib_buffer_from_rte_mbuf (mb[7]));
 #endif
+      bi += 8;
+      mb += 8;
+      n_left -= 8;
+    }
+  while (n_left)
+    {
+      bi[0] = vlib_get_buffer_index (vm, vlib_buffer_from_rte_mbuf (mb[0]));
+      bi += 1;
+      mb += 1;
+      n_left -= 1;
+    }
 }
 
 static_always_inline u8
@@ -539,6 +564,7 @@ dpdk_device_input (vlib_main_t * vm, dpdk_main_t * dm, dpdk_device_t * xd,
     }
 
   /* enqueue buffers to the next node */
+  dpdk_mbufs_to_buffer_indices (vm, ptd->mbufs, ptd->buffers, n_rx_packets);
   n_left = n_rx_packets;
   next = ptd->next;
   buffers = ptd->buffers;
@@ -554,10 +580,6 @@ dpdk_device_input (vlib_main_t * vm, dpdk_main_t * dm, dpdk_device_t * xd,
 	  u16x16 next16 = u16x16_load_unaligned (next);
 	  if (u16x16_is_all_equal (next16, next_index))
 	    {
-	      dpdk_mbuf_to_buffer_index_x4 (vm, mb, buffers);
-	      dpdk_mbuf_to_buffer_index_x4 (vm, mb + 4, buffers + 4);
-	      dpdk_mbuf_to_buffer_index_x4 (vm, mb + 8, buffers + 8);
-	      dpdk_mbuf_to_buffer_index_x4 (vm, mb + 12, buffers + 12);
 	      clib_memcpy (to_next, buffers, 16 * sizeof (u32));
 	      to_next += 16;
 	      n_left_to_next -= 16;
@@ -568,7 +590,6 @@ dpdk_device_input (vlib_main_t * vm, dpdk_main_t * dm, dpdk_device_t * xd,
 	    }
 	  else
 	    {
-	      dpdk_mbuf_to_buffer_index_x4 (vm, mb, buffers);
 	      clib_memcpy (to_next, buffers, 4 * sizeof (u32));
 	      to_next += 4;
 	      n_left_to_next -= 4;
@@ -588,7 +609,6 @@ dpdk_device_input (vlib_main_t * vm, dpdk_main_t * dm, dpdk_device_t * xd,
 #endif
   while (n_left >= 4 && n_left_to_next >= 4)
     {
-      dpdk_mbuf_to_buffer_index_x4 (vm, mb, buffers);
       clib_memcpy (to_next, buffers, 4 * sizeof (u32));
       to_next += 4;
       n_left_to_next -= 4;
@@ -606,8 +626,7 @@ dpdk_device_input (vlib_main_t * vm, dpdk_main_t * dm, dpdk_device_t * xd,
     }
   while (n_left && n_left_to_next)
     {
-      to_next[0] = buffers[0] =
-	vlib_get_buffer_index (vm, vlib_buffer_from_rte_mbuf (mb[0]));
+      clib_memcpy (to_next, buffers, 1 * sizeof (u32));
       to_next += 1;
       n_left_to_next -= 1;
       vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
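With the conversion hoisted into dpdk_mbufs_to_buffer_indices and run once per burst, the dispatch paths above shrink to plain clib_memcpy calls on precomputed indices, and the per-branch dpdk_mbuf_to_buffer_index_x4 calls disappear. For readers unfamiliar with the permute step in the new function, here is a sketch of what the u64x4/u32x8 operations roughly correspond to in raw AVX2 intrinsics (again illustrative, assuming 64-byte cache lines; this is not how VPP's vector wrappers are actually defined, and mbufs_to_indices_x4_example is a hypothetical name):

#include <immintrin.h>
#include <stdint.h>

static inline void
mbufs_to_indices_x4_example (void **mb, uint32_t *bi, uint64_t off)
{
  __m256i v = _mm256_loadu_si256 ((__m256i *) mb); /* 4 mbuf pointers */
  v = _mm256_sub_epi64 (v, _mm256_set1_epi64x ((long long) off));
  v = _mm256_srli_epi64 (v, 6); /* >> CLIB_LOG2_CACHE_LINE_BYTES, 64B lines */
  /* the mask { 0, 2, 4, 6, 1, 3, 5, 7 } gathers the low u32 of each
     64-bit index into the low 128 bits, so one 16-byte store writes
     4 consecutive u32 buffer indices */
  __m256i idx = _mm256_setr_epi32 (0, 2, 4, 6, 1, 3, 5, 7);
  v = _mm256_permutevar8x32_epi32 (v, idx);
  _mm_storeu_si128 ((__m128i *) bi, _mm256_castsi256_si128 (v));
}

The new loop performs two of these 4-wide conversions per iteration, handling eight mbufs at a time, with a scalar tail loop for any remainder.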