-rw-r--r--  src/plugins/af_xdp/input.c      4
-rw-r--r--  src/vlib/buffer_funcs.h        12
-rw-r--r--  src/vnet/ip/ip_packet.h        16
-rw-r--r--  src/vppinfra/vector_avx2.h      4
-rw-r--r--  src/vppinfra/vector_neon.h      4
-rw-r--r--  src/vppinfra/vector_sse42.h     4
6 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/src/plugins/af_xdp/input.c b/src/plugins/af_xdp/input.c
index c5b3488d438..e065ee2ab8f 100644
--- a/src/plugins/af_xdp/input.c
+++ b/src/plugins/af_xdp/input.c
@@ -146,8 +146,8 @@ wrap_around:
while (n >= 8)
{
#ifdef CLIB_HAVE_VEC256
- u64x4 b0 = u32x4_extend_to_u64x4 (*(u32x4u *) (bi + 0));
- u64x4 b1 = u32x4_extend_to_u64x4 (*(u32x4u *) (bi + 4));
+ u64x4 b0 = u64x4_from_u32x4 (*(u32x4u *) (bi + 0));
+ u64x4 b1 = u64x4_from_u32x4 (*(u32x4u *) (bi + 4));
*(u64x4u *) (fill + 0) = bi2addr (b0);
*(u64x4u *) (fill + 4) = bi2addr (b1);
#else
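Note: the rename puts the destination type first, so u64x4_from_u32x4 (x) reads naturally as "a u64x4 built from a u32x4". The conversion itself is a zero-extension of each 32-bit lane; a minimal scalar model (hypothetical demo code, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* scalar model of u64x4_from_u32x4: zero-extend four u32 lanes to u64 */
static void
u64x4_from_u32x4_scalar (const uint32_t src[4], uint64_t dst[4])
{
  for (int i = 0; i < 4; i++)
    dst[i] = (uint64_t) src[i];
}

int
main (void)
{
  uint32_t bi[4] = { 1, 2, 3, 0xffffffffu };
  uint64_t out[4];
  u64x4_from_u32x4_scalar (bi, out);
  for (int i = 0; i < 4; i++)
    printf ("%llu\n", (unsigned long long) out[i]);
  return 0;
}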
diff --git a/src/vlib/buffer_funcs.h b/src/vlib/buffer_funcs.h
index a2d779b1fa6..95b622c202c 100644
--- a/src/vlib/buffer_funcs.h
+++ b/src/vlib/buffer_funcs.h
@@ -221,8 +221,8 @@ vlib_get_buffers_with_offset (vlib_main_t * vm, u32 * bi, void **b, int count,
so we maintain two-in-parallel variant */
while (count >= 8)
{
- u64x4 b0 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi));
- u64x4 b1 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi + 4));
+ u64x4 b0 = u64x4_from_u32x4 (u32x4_load_unaligned (bi));
+ u64x4 b1 = u64x4_from_u32x4 (u32x4_load_unaligned (bi + 4));
/* shift and add to get vlib_buffer_t pointer */
u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
u64x4_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 4);
@@ -234,18 +234,18 @@ vlib_get_buffers_with_offset (vlib_main_t * vm, u32 * bi, void **b, int count,
while (count >= 4)
{
#ifdef CLIB_HAVE_VEC256
- u64x4 b0 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi));
+ u64x4 b0 = u64x4_from_u32x4 (u32x4_load_unaligned (bi));
/* shift and add to get vlib_buffer_t pointer */
u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
#elif defined (CLIB_HAVE_VEC128)
u64x2 off = u64x2_splat (buffer_mem_start + offset);
u32x4 bi4 = u32x4_load_unaligned (bi);
- u64x2 b0 = u32x4_extend_to_u64x2 ((u32x4) bi4);
+ u64x2 b0 = u64x2_from_u32x4 ((u32x4) bi4);
#if defined (__aarch64__)
- u64x2 b1 = u32x4_extend_to_u64x2_high ((u32x4) bi4);
+ u64x2 b1 = u64x2_from_u32x4_high ((u32x4) bi4);
#else
bi4 = u32x4_shuffle (bi4, 2, 3, 0, 1);
- u64x2 b1 = u32x4_extend_to_u64x2 ((u32x4) bi4);
+ u64x2 b1 = u64x2_from_u32x4 ((u32x4) bi4);
#endif
u64x2_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
u64x2_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 2);
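Note: the widened indices feed straight into pointer arithmetic. A vlib buffer index is a cache-line-scaled offset into the buffer memory region, so the pointer is mem_start + offset + (bi << CLIB_LOG2_CACHE_LINE_BYTES). A scalar sketch of that math, assuming 64-byte cache lines (names here are illustrative, not VPP's):

#include <stdint.h>

#define LOG2_CLINE 6 /* assumed: 64-byte cache lines */

/* translate one 32-bit buffer index into a buffer pointer */
static inline void *
buffer_ptr_from_index (uintptr_t mem_start, uint32_t bi, uintptr_t offset)
{
  return (void *) (mem_start + offset + ((uintptr_t) bi << LOG2_CLINE));
}

The vector variants above perform this same computation on four or eight indices per iteration.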
diff --git a/src/vnet/ip/ip_packet.h b/src/vnet/ip/ip_packet.h
index e67cec1512f..52a65e78c24 100644
--- a/src/vnet/ip/ip_packet.h
+++ b/src/vnet/ip/ip_packet.h
@@ -176,10 +176,10 @@ ip_csum (void *data, u16 n_left)
v1 = u16x16_byte_swap (v1);
v2 = u16x16_byte_swap (v2);
#endif
- sum8 += u16x8_extend_to_u32x8 (u16x16_extract_lo (v1));
- sum8 += u16x8_extend_to_u32x8 (u16x16_extract_hi (v1));
- sum8 += u16x8_extend_to_u32x8 (u16x16_extract_lo (v2));
- sum8 += u16x8_extend_to_u32x8 (u16x16_extract_hi (v2));
+ sum8 += u32x8_from_u16x8 (u16x16_extract_lo (v1));
+ sum8 += u32x8_from_u16x8 (u16x16_extract_hi (v1));
+ sum8 += u32x8_from_u16x8 (u16x16_extract_lo (v2));
+ sum8 += u32x8_from_u16x8 (u16x16_extract_hi (v2));
n_left -= 32;
data += 64;
}
@@ -191,8 +191,8 @@ ip_csum (void *data, u16 n_left)
v1 = u16x16_byte_swap (v1);
#endif
v1 = u16x16_byte_swap (u16x16_load_unaligned (data));
- sum8 += u16x8_extend_to_u32x8 (u16x16_extract_lo (v1));
- sum8 += u16x8_extend_to_u32x8 (u16x16_extract_hi (v1));
+ sum8 += u32x8_from_u16x8 (u16x16_extract_lo (v1));
+ sum8 += u32x8_from_u16x8 (u16x16_extract_hi (v1));
n_left -= 16;
data += 32;
}
@@ -204,8 +204,8 @@ ip_csum (void *data, u16 n_left)
v1 = u16x16_byte_swap (v1);
#endif
v1 = u16x16_mask_last (v1, 16 - n_left);
- sum8 += u16x8_extend_to_u32x8 (u16x16_extract_lo (v1));
- sum8 += u16x8_extend_to_u32x8 (u16x16_extract_hi (v1));
+ sum8 += u32x8_from_u16x8 (u16x16_extract_lo (v1));
+ sum8 += u32x8_from_u16x8 (u16x16_extract_hi (v1));
}
sum8 = u32x8_hadd (sum8, zero);
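Note: each u32x8_from_u16x8 widens eight 16-bit words to 32-bit lanes before the add, so per-word carries accumulate safely in sum8 until the final horizontal fold. A scalar model of the same widen-then-fold idea (hypothetical helper, not the patched function; assumes the words are already in network byte order):

#include <stdint.h>
#include <stddef.h>

static uint16_t
csum16 (const uint16_t *words, size_t n)
{
  uint64_t sum = 0;
  for (size_t i = 0; i < n; i++)
    sum += words[i]; /* widened add; no carry lost per word */
  while (sum >> 16) /* fold carries back into 16 bits */
    sum = (sum & 0xffff) + (sum >> 16);
  return (uint16_t) ~sum;
}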
diff --git a/src/vppinfra/vector_avx2.h b/src/vppinfra/vector_avx2.h
index 0511ec7e4e2..4cd2dc1202e 100644
--- a/src/vppinfra/vector_avx2.h
+++ b/src/vppinfra/vector_avx2.h
@@ -110,11 +110,11 @@ u8x32_msb_mask (u8x32 v)
return _mm256_movemask_epi8 ((__m256i) v);
}
-/* _extend_to_ */
+/* _from_ */
/* *INDENT-OFF* */
#define _(f,t,i) \
static_always_inline t \
-f##_extend_to_##t (f x) \
+t##_from_##f (f x) \
{ return (t) _mm256_cvt##i ((__m128i) x); }
_(u16x8, u32x8, epu16_epi32)
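Note: after the rename, the instantiation _(u16x8, u32x8, epu16_epi32) generates u32x8_from_u16x8 wrapping _mm256_cvtepu16_epi32. Expanded by hand it is roughly the following, a sketch assuming VPP-style GCC vector typedefs (compile with -mavx2):

#include <immintrin.h>
#include <stdint.h>

typedef uint16_t u16x8 __attribute__ ((vector_size (16)));
typedef uint32_t u32x8 __attribute__ ((vector_size (32)));

/* zero-extend eight u16 lanes into eight u32 lanes */
static inline u32x8
u32x8_from_u16x8 (u16x8 x)
{
  return (u32x8) _mm256_cvtepu16_epi32 ((__m128i) x);
}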
diff --git a/src/vppinfra/vector_neon.h b/src/vppinfra/vector_neon.h
index 15af098730e..ffcbe702d24 100644
--- a/src/vppinfra/vector_neon.h
+++ b/src/vppinfra/vector_neon.h
@@ -122,13 +122,13 @@ u32x4_hadd (u32x4 v1, u32x4 v2)
}
static_always_inline u64x2
-u32x4_extend_to_u64x2 (u32x4 v)
+u64x2_from_u32x4 (u32x4 v)
{
return vmovl_u32 (vget_low_u32 (v));
}
static_always_inline u64x2
-u32x4_extend_to_u64x2_high (u32x4 v)
+u64x2_from_u32x4_high (u32x4 v)
{
return vmovl_high_u32 (v);
}
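Note: on NEON the two renamed helpers map directly onto widening moves; u64x2_from_u32x4 widens lanes 0-1 and the _high variant lanes 2-3. A standalone sketch with standard arm_neon types (AArch64; the bodies mirror the patch, the surrounding names are illustrative):

#include <arm_neon.h>

/* widen the low two u32 lanes to u64 */
static inline uint64x2_t
widen_lo (uint32x4_t v)
{
  return vmovl_u32 (vget_low_u32 (v));
}

/* widen the high two u32 lanes to u64 (vmovl_high_u32 is AArch64-only) */
static inline uint64x2_t
widen_hi (uint32x4_t v)
{
  return vmovl_high_u32 (v);
}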
diff --git a/src/vppinfra/vector_sse42.h b/src/vppinfra/vector_sse42.h
index 8c28dd7783c..effab3fe487 100644
--- a/src/vppinfra/vector_sse42.h
+++ b/src/vppinfra/vector_sse42.h
@@ -676,11 +676,11 @@ u32x4_shuffle (u32x4 v, const int a, const int b, const int c, const int d)
#endif
}
-/* _extend_to_ */
+/* _from_ */
/* *INDENT-OFF* */
#define _(f,t,i) \
static_always_inline t \
-f##_extend_to_##t (f x) \
+t##_from_##f (f x) \
{ return (t) _mm_cvt##i ((__m128i) x); }
_(u8x16, u16x8, epu8_epi16)
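Note: the SSE4.1 macro works the same way; _(u8x16, u16x8, epu8_epi16) now yields u16x8_from_u8x16. One caveat worth knowing: the conversion consumes only the low eight bytes of its source and drops the high eight. A sketch of the expansion (assumed typedefs; compile with -msse4.1):

#include <smmintrin.h>
#include <stdint.h>

typedef uint8_t u8x16 __attribute__ ((vector_size (16)));
typedef uint16_t u16x8 __attribute__ ((vector_size (16)));

/* zero-extend bytes 0..7 to u16 lanes; bytes 8..15 are ignored */
static inline u16x8
u16x8_from_u8x16 (u8x16 x)
{
  return (u16x8) _mm_cvtepu8_epi16 ((__m128i) x);
}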