Diffstat (limited to 'src/vppinfra')
-rw-r--r--  src/vppinfra/vector.h          17
-rw-r--r--  src/vppinfra/vector_avx2.h     80
-rw-r--r--  src/vppinfra/vector_avx512.h   53
-rw-r--r--  src/vppinfra/vector_sse42.h     4
4 files changed, 149 insertions(+), 5 deletions(-)
diff --git a/src/vppinfra/vector.h b/src/vppinfra/vector.h
index 094cc85e22b..fcff5e79d95 100644
--- a/src/vppinfra/vector.h
+++ b/src/vppinfra/vector.h
@@ -58,8 +58,11 @@
#define CLIB_HAVE_VEC128
#endif
-#if defined (__AVX__)
+#if defined (__AVX2__)
#define CLIB_HAVE_VEC256
+#if defined (__clang__) && __clang_major__ < 4
+#undef CLIB_HAVE_VEC256
+#endif
#endif
#if defined (__AVX512F__)
@@ -179,7 +182,7 @@ t##s##x##c##_sub (t##s##x##c v1, t##s##x##c v2) \
foreach_vec
#undef _
-/* this macro generate _splat inline funcitons for each scalar vector type */
+/* this macro generates _splat inline functions for each scalar vector type */
#define _(t, s, c) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
@@ -192,13 +195,21 @@ t##s##x##c##_splat (t##s x) \
\
return r; \
}
- foreach_int_vec foreach_uint_vec
+ foreach_vec128i foreach_vec128u
#undef _
#if defined (__SSE4_2__) && __GNUC__ >= 4
#include <vppinfra/vector_sse42.h>
#endif
+#if defined (__AVX2__)
+#include <vppinfra/vector_avx2.h>
+#endif
+
+#if defined (__AVX512F__)
+#include <vppinfra/vector_avx512.h>
+#endif
+
#if defined (__ALTIVEC__)
#include <vppinfra/vector_altivec.h>
#endif
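
For context, consumers of vector.h normally gate 256-bit code paths on CLIB_HAVE_VEC256 rather than testing __AVX2__ directly, so tightening the guard above also disables those paths on clang releases older than 4. A minimal sketch of such a caller, assuming the patch is applied and the file is built with -mavx2 (the helper itself is hypothetical, not part of this change):

#include <vppinfra/vector.h>

/* Hypothetical helper: test whether a 32-byte block is all zero,
   taking the 256-bit path only when vector.h advertises it via
   CLIB_HAVE_VEC256. */
static_always_inline int
block32_is_zero (void *p)
{
#ifdef CLIB_HAVE_VEC256
  u8x32 v = u8x32_load_unaligned (p);
  return u8x32_is_all_zero (v);
#else
  u64 *d = (u64 *) p;
  return (d[0] | d[1] | d[2] | d[3]) == 0;
#endif
}
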
diff --git a/src/vppinfra/vector_avx2.h b/src/vppinfra/vector_avx2.h
new file mode 100644
index 00000000000..ad7e7d4dea8
--- /dev/null
+++ b/src/vppinfra/vector_avx2.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_vector_avx2_h
+#define included_vector_avx2_h
+
+#include <vppinfra/clib.h>
+#include <x86intrin.h>
+
+#define foreach_avx2_vec256i \
+ _(i,8,32,epi8) _(i,16,16,epi16) _(i,32,8,epi32) _(i,64,4,epi64x)
+#define foreach_avx2_vec256u \
+ _(u,8,32,epi8) _(u,16,16,epi16) _(u,32,8,epi32) _(u,64,4,epi64x)
+#define foreach_avx2_vec256f \
+ _(f,32,8,ps) _(f,64,4,pd)
+
+/* splat, load_unaligned, store_unaligned, is_all_zero, is_all_equal */
+#define _(t, s, c, i) \
+static_always_inline t##s##x##c \
+t##s##x##c##_splat (t##s x) \
+{ return (t##s##x##c) _mm256_set1_##i (x); } \
+\
+static_always_inline t##s##x##c \
+t##s##x##c##_load_unaligned (void *p) \
+{ return (t##s##x##c) _mm256_loadu_si256 (p); } \
+\
+static_always_inline void \
+t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
+{ _mm256_storeu_si256 ((__m256i *) p, (__m256i) v); } \
+\
+static_always_inline int \
+t##s##x##c##_is_all_zero (t##s##x##c x) \
+{ return _mm256_testz_si256 ((__m256i) x, (__m256i) x); } \
+\
+static_always_inline int \
+t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
+{ return t##s##x##c##_is_all_zero (v != t##s##x##c##_splat (x)); }; \
+\
+
+foreach_avx2_vec256i foreach_avx2_vec256u
+#undef _
+ always_inline u32x8
+u32x8_permute (u32x8 v, u32x8 idx)
+{
+ return (u32x8) _mm256_permutevar8x32_epi32 ((__m256i) v, (__m256i) idx);
+}
+
+always_inline u32x4
+u32x8_extract_lo (u32x8 v)
+{
+ return (u32x4) _mm256_extracti128_si256 ((__m256i) v, 0);
+}
+
+always_inline u32x4
+u32x8_extract_hi (u32x8 v)
+{
+ return (u32x4) _mm256_extracti128_si256 ((__m256i) v, 1);
+}
+
+#endif /* included_vector_avx2_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
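
For illustration, the macro above expands into per-type helpers such as u32x8_splat, u32x8_load_unaligned and u32x8_is_all_zero. A small usage sketch combining them with the permute/extract helpers (the lane-reversing index and the data layout are made up for the example; build with -mavx2):

#include <vppinfra/vector.h>

static_always_inline u32
last_lane_via_permute (u32 *data)
{
  /* unaligned 256-bit load of eight u32 values */
  u32x8 v = u32x8_load_unaligned (data);

  /* reverse the lanes with the variable permute defined above */
  u32x8 rev = { 7, 6, 5, 4, 3, 2, 1, 0 };
  u32x8 r = u32x8_permute (v, rev);

  /* after the reversal, the low 128-bit half holds original lanes 7..4 */
  u32x4 lo = u32x8_extract_lo (r);
  return lo[0];   /* == data[7] */
}
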
diff --git a/src/vppinfra/vector_avx512.h b/src/vppinfra/vector_avx512.h
new file mode 100644
index 00000000000..ac4c09b8375
--- /dev/null
+++ b/src/vppinfra/vector_avx512.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_vector_avx512_h
+#define included_vector_avx512_h
+
+#include <vppinfra/clib.h>
+#include <x86intrin.h>
+
+#define foreach_avx512_vec512i \
+ _(i,8,64,epi8) _(i,16,32,epi16) _(i,32,16,epi32) _(i,64,8,epi64)
+#define foreach_avx512_vec512u \
+ _(u,8,64,epi8) _(u,16,32,epi16) _(u,32,16,epi32) _(u,64,8,epi64)
+#define foreach_avx512_vec512f \
+ _(f,32,16,ps) _(f,64,8,pd)
+
+/* splat, load_unaligned, store_unaligned */
+#define _(t, s, c, i) \
+static_always_inline t##s##x##c \
+t##s##x##c##_splat (t##s x) \
+{ return (t##s##x##c) _mm512_set1_##i (x); } \
+\
+static_always_inline t##s##x##c \
+t##s##x##c##_load_unaligned (void *p) \
+{ return (t##s##x##c) _mm512_loadu_si512 (p); } \
+\
+static_always_inline void \
+t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
+{ _mm512_storeu_si512 ((__m512i *) p, (__m512i) v); } \
+\
+
+foreach_avx512_vec512i foreach_avx512_vec512u
+#undef _
+#endif /* included_vector_avx512_h */
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
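
As with the AVX2 header, the macro expands into helpers such as u64x8_splat and u64x8_store_unaligned. A minimal sketch of their use, assuming the file is built with -mavx512f (the fill helper is illustrative only, not part of this change):

#include <vppinfra/vector.h>

/* Fill a 64-byte block with a repeated u64 value using the
   generated 512-bit splat and unaligned-store helpers. */
static_always_inline void
fill_block64 (void *p, u64 val)
{
  u64x8 v = u64x8_splat (val);
  u64x8_store_unaligned (v, p);
}
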
diff --git a/src/vppinfra/vector_sse42.h b/src/vppinfra/vector_sse42.h
index dab22deff7c..cf7f158b873 100644
--- a/src/vppinfra/vector_sse42.h
+++ b/src/vppinfra/vector_sse42.h
@@ -200,9 +200,9 @@ u64x2_write_hi (u64x2 x, u64 * a)
/* Unaligned loads/stores. */
#define _(t) \
- always_inline void t##_store_unaligned (t x, t * a) \
+ always_inline void t##_store_unaligned (t x, void * a) \
{ _mm_storeu_si128 ((__m128i *) a, (__m128i) x); } \
- always_inline t t##_load_unaligned (t * a) \
+ always_inline t t##_load_unaligned (void * a) \
{ return (t) _mm_loadu_si128 ((__m128i *) a); }
_(u8x16) _(u16x8) _(u32x4) _(u64x2) _(i8x16) _(i16x8) _(i32x4) _(i64x2)
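
The switch from typed pointers to void * means callers no longer need a cast when loading from or storing to raw byte buffers. A small illustrative sketch (the packet pointer and helper name are hypothetical):

#include <vppinfra/vector.h>

static_always_inline u16x8
load_hdr_words (u8 *pkt)
{
  /* with the void * signature above, no (u16x8 *) cast is needed
     even though pkt is a byte pointer of arbitrary alignment */
  return u16x8_load_unaligned (pkt);
}
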