about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorDamjan Marion <damarion@cisco.com>2018-07-27 01:47:57 +0200
committerDave Barach <openvpp@barachs.net>2018-07-27 14:15:50 +0000
commit5df580eec93c0c6fc07dd38f8713f671565b9c38 (patch)
tree318fc757e4b929e3265f5047595dfc9fb19d8ffc
parent28d4271d1a9103099e6711fc58f9a479c8722e60 (diff)
128-bit SIMD version of vlib_get_buffers
Change-Id: I1a28ddf535c80ecf4ba4bf31659ff2fead1d8a64
Signed-off-by: Damjan Marion <damarion@cisco.com>
-rw-r--r--	src/vlib/buffer_funcs.h	8
-rw-r--r--	src/vppinfra/vector_sse42.h	36
2 files changed, 44 insertions, 0 deletions
diff --git a/src/vlib/buffer_funcs.h b/src/vlib/buffer_funcs.h
index 205eaa37eaf..667063cd693 100644
--- a/src/vlib/buffer_funcs.h
+++ b/src/vlib/buffer_funcs.h
@@ -97,6 +97,14 @@ vlib_get_buffers_with_offset (vlib_main_t * vm, u32 * bi, void **b, int count,
u64x4 b0 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi));
/* shift and add to get vlib_buffer_t pointer */
u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
+#elif defined (CLIB_HAVE_VEC128) && defined (__x86_64__)
+ u64x2 off = u64x2_splat (buffer_main.buffer_mem_start + offset);
+ u32x4 bi4 = u32x4_load_unaligned (bi);
+ u64x2 b0 = u32x4_extend_to_u64x2 ((u32x4) bi4);
+ bi4 = u32x4_shuffle (bi4, 2, 3, 0, 1);
+ u64x2 b1 = u32x4_extend_to_u64x2 ((u32x4) bi4);
+ u64x2_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
+ u64x2_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 2);
#else
b[0] = ((u8 *) vlib_get_buffer (vm, bi[0])) + offset;
b[1] = ((u8 *) vlib_get_buffer (vm, bi[1])) + offset;
diff --git a/src/vppinfra/vector_sse42.h b/src/vppinfra/vector_sse42.h
index 5134855d2ae..76737940230 100644
--- a/src/vppinfra/vector_sse42.h
+++ b/src/vppinfra/vector_sse42.h
@@ -613,6 +613,42 @@ u8x16_shuffle (u8x16 v, u8x16 m)
return (u8x16) _mm_shuffle_epi8 ((__m128i) v, (__m128i) m);
}
+/* Shuffle the four u32 lanes of v: result lane i takes v's lane selected by
+   the i-th of (a, b, c, d); selectors are expected to be constants in [0, 3]. */
+static_always_inline u32x4
+u32x4_shuffle (u32x4 v, const int a, const int b, const int c, const int d)
+{
+#ifdef __clang__
+  /* clang rejects a non-literal immediate for _mm_shuffle_epi32 inside this
+     inline wrapper, so build the result with vector subscripting instead. */
+  u32x4 r = { v[a], v[b], v[c], v[d] };
+  return r;
+#else
+  /* PSHUFD: pack one 2-bit lane selector per result lane into the immediate. */
+  return (u32x4) _mm_shuffle_epi32 ((__m128i) v,
+				    a | b << 2 | c << 4 | d << 6);
+#endif
+}
+
+/* _extend_to_ */
+/* *INDENT-OFF* */
+/* Generate widening conversions: f##_extend_to_##t widens the low lanes of x
+   to the wider lane type via the _mm_cvtep* intrinsic family — epu* variants
+   zero-extend, epi* variants sign-extend (SSE4.1 pmovzx/pmovsx). */
+#define _(f,t,i) \
+static_always_inline t \
+f##_extend_to_##t (f x) \
+{ return (t) _mm_cvt##i ((__m128i) x); }
+
+/* unsigned widenings (zero-extend) */
+_(u8x16, u16x8, epu8_epi16)
+_(u8x16, u32x4, epu8_epi32)
+_(u8x16, u64x2, epu8_epi64)
+_(u16x8, u32x4, epu16_epi32)
+_(u16x8, u64x2, epu16_epi64)
+_(u32x4, u64x2, epu32_epi64)
+
+/* signed widenings (sign-extend) */
+_(i8x16, i16x8, epi8_epi16)
+_(i8x16, i32x4, epi8_epi32)
+_(i8x16, i64x2, epi8_epi64)
+_(i16x8, i32x4, epi16_epi32)
+_(i16x8, i64x2, epi16_epi64)
+_(i32x4, i64x2, epi32_epi64)
+#undef _
+/* *INDENT-ON* */
+
+
#endif /* included_vector_sse2_h */
/*