author     Damjan Marion <damarion@cisco.com>       2020-08-31 17:18:26 +0200
committer  Matthew Smith <mgsmith@netgate.com>      2020-08-31 18:46:21 +0000
commit     90d05bc7fb834b5cf25bdd9bb6d92bb35e602494 (patch)
tree       e1de27a272922a18677918dbc95f902a3ddb5b20 /src/vlib/buffer_funcs.h
parent     f9c8fe41280a48f95216cab43eab8db00ee592e1 (diff)
vppinfra: convert A_extend_to_B to B_from_A format of vector inlines
Make the names shorter and use the same format when converting to bigger or
smaller types.
Type: refactor
Change-Id: I443d67e18ae65d779b4d9a0dce5406f7d9f0e4ac
Signed-off-by: Damjan Marion <damarion@cisco.com>
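
For illustration, the new <dst>_from_<src> spelling puts the result type first,
so the widening conversion used below reads u64x4_from_u32x4 instead of
u32x4_extend_to_u64x4. A minimal sketch of such an inline on an AVX2 target
(the typedefs, intrinsic choice, and function body are assumptions for this
example, not the actual vppinfra definitions):

    /* Sketch only: illustrates the <dst>_from_<src> naming convention.
       Assumes an AVX2 target; compile with -mavx2.  The real vppinfra
       types and implementation may differ. */
    #include <immintrin.h>

    typedef unsigned int u32x4 __attribute__ ((vector_size (16)));
    typedef unsigned long long u64x4 __attribute__ ((vector_size (32)));

    static inline u64x4
    u64x4_from_u32x4 (u32x4 v)
    {
      /* zero-extend four 32-bit lanes into four 64-bit lanes */
      return (u64x4) _mm256_cvtepu32_epi64 ((__m128i) v);
    }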
Diffstat (limited to 'src/vlib/buffer_funcs.h')
-rw-r--r--  src/vlib/buffer_funcs.h  12
1 file changed, 6 insertions, 6 deletions
diff --git a/src/vlib/buffer_funcs.h b/src/vlib/buffer_funcs.h
index a2d779b1fa6..95b622c202c 100644
--- a/src/vlib/buffer_funcs.h
+++ b/src/vlib/buffer_funcs.h
@@ -221,8 +221,8 @@ vlib_get_buffers_with_offset (vlib_main_t * vm, u32 * bi, void **b, int count,
      se we maintain two-in-parallel variant */
   while (count >= 8)
     {
-      u64x4 b0 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi));
-      u64x4 b1 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi + 4));
+      u64x4 b0 = u64x4_from_u32x4 (u32x4_load_unaligned (bi));
+      u64x4 b1 = u64x4_from_u32x4 (u32x4_load_unaligned (bi + 4));
       /* shift and add to get vlib_buffer_t pointer */
       u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
       u64x4_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 4);
@@ -234,18 +234,18 @@ vlib_get_buffers_with_offset (vlib_main_t * vm, u32 * bi, void **b, int count,
   while (count >= 4)
     {
 #ifdef CLIB_HAVE_VEC256
-      u64x4 b0 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi));
+      u64x4 b0 = u64x4_from_u32x4 (u32x4_load_unaligned (bi));
       /* shift and add to get vlib_buffer_t pointer */
       u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
 #elif defined (CLIB_HAVE_VEC128)
       u64x2 off = u64x2_splat (buffer_mem_start + offset);
       u32x4 bi4 = u32x4_load_unaligned (bi);
-      u64x2 b0 = u32x4_extend_to_u64x2 ((u32x4) bi4);
+      u64x2 b0 = u64x2_from_u32x4 ((u32x4) bi4);
 #if defined (__aarch64__)
-      u64x2 b1 = u32x4_extend_to_u64x2_high ((u32x4) bi4);
+      u64x2 b1 = u64x2_from_u32x4_high ((u32x4) bi4);
 #else
       bi4 = u32x4_shuffle (bi4, 2, 3, 0, 1);
-      u64x2 b1 = u32x4_extend_to_u64x2 ((u32x4) bi4);
+      u64x2 b1 = u64x2_from_u32x4 ((u32x4) bi4);
 #endif
       u64x2_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
       u64x2_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 2);
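
The hunks above only rename the widening inlines; the pointer arithmetic is
unchanged: each 32-bit buffer index is widened to 64 bits, shifted left by
CLIB_LOG2_CACHE_LINE_BYTES and added to buffer_mem_start + offset. A scalar
sketch of that index-to-pointer translation, assuming 64-byte cache lines
(the helper name is illustrative, not part of the vlib API):

    /* Sketch only: scalar equivalent of the vectorized translation in
       vlib_get_buffers_with_offset.  Buffer indices are scaled by the
       cache line size and added to the buffer memory base plus the
       caller-supplied offset. */
    #ifndef CLIB_LOG2_CACHE_LINE_BYTES
    #define CLIB_LOG2_CACHE_LINE_BYTES 6 /* assumed 64-byte cache lines */
    #endif

    static inline void *
    buffer_index_to_ptr (unsigned long buffer_mem_start, unsigned int bi,
                         long offset)
    {
      return (void *) (buffer_mem_start + offset +
                       ((unsigned long) bi << CLIB_LOG2_CACHE_LINE_BYTES));
    }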