author     Damjan Marion <damarion@cisco.com>      2021-04-22 18:08:28 +0200
committer  Florin Coras <florin.coras@gmail.com>   2021-04-27 23:40:27 +0000
commit     ef0bac70995bb225fea8955009b34d5b823285ba (patch)
tree       dd84620d04a63000a2f613d3cc06383d56a0255e /src/vppinfra
parent     d77d379ea6ac065becdf1b2e00351c77c7e1fe47 (diff)
vlib: improve enqueue_to_next buffer indices extraction
Type: improvement
Change-Id: Ib7b2fa7d821f6d2708f6dc378a0f36f68c843f57
Signed-off-by: Damjan Marion <damarion@cisco.com>
Diffstat (limited to 'src/vppinfra')
-rw-r--r--  src/vppinfra/clib.h          |   1
-rw-r--r--  src/vppinfra/vector.h        |   4
-rw-r--r--  src/vppinfra/vector_avx2.h   |  21
-rw-r--r--  src/vppinfra/vector_avx512.h |   4
-rw-r--r--  src/vppinfra/vector_funcs.h  | 366
-rw-r--r--  src/vppinfra/vector_sse42.h  |   6
6 files changed, 72 insertions(+), 330 deletions(-)
diff --git a/src/vppinfra/clib.h b/src/vppinfra/clib.h
index 4629a7defb8..7cdf4a41aaa 100644
--- a/src/vppinfra/clib.h
+++ b/src/vppinfra/clib.h
@@ -369,6 +369,7 @@ void qsort (void *base, uword n, uword size,
uword
clib_backtrace (uword * callers, uword max_callers, uword n_frames_to_skip);
+#include <vppinfra/byte_order.h>
#endif /* included_clib_h */
/*
diff --git a/src/vppinfra/vector.h b/src/vppinfra/vector.h
index d5bc955a2e5..fed6fa3c1fd 100644
--- a/src/vppinfra/vector.h
+++ b/src/vppinfra/vector.h
@@ -189,10 +189,6 @@ foreach_vec
#include <vppinfra/vector_neon.h>
#endif
-#if (defined(CLIB_HAVE_VEC128) || defined(CLIB_HAVE_VEC64))
-#include <vppinfra/vector_funcs.h>
-#endif
-
/* this macro generates _splat inline functions for each scalar vector type */
#ifndef CLIB_VEC128_SPLAT_DEFINED
#define _(t, s, c) \
diff --git a/src/vppinfra/vector_avx2.h b/src/vppinfra/vector_avx2.h
index c24ed728c3c..584bd207b27 100644
--- a/src/vppinfra/vector_avx2.h
+++ b/src/vppinfra/vector_avx2.h
@@ -75,6 +75,10 @@ u32x8_permute (u32x8 v, u32x8 idx)
return (u32x8) _mm256_permutevar8x32_epi32 ((__m256i) v, (__m256i) idx);
}
+#define u64x4_permute(v, m0, m1, m2, m3) \
+ (u64x4) _mm256_permute4x64_epi64 ( \
+ (__m256i) v, ((m0) | (m1) << 2 | (m2) << 4 | (m3) << 6))
+
/* _extract_lo, _extract_hi */
/* *INDENT-OFF* */
#define _(t1,t2) \
@@ -101,8 +105,17 @@ _(u64x2, u64x4)
#undef _
/* *INDENT-ON* */
+always_inline u8x32
+u16x16_pack (u16x16 lo, u16x16 hi)
+{
+ return (u8x32) _mm256_packus_epi16 ((__m256i) lo, (__m256i) hi);
+}
-
+always_inline i8x32
+i16x16_pack (i16x16 lo, i16x16 hi)
+{
+ return (i8x32) _mm256_packs_epi16 ((__m256i) lo, (__m256i) hi);
+}
static_always_inline u32
u8x32_msb_mask (u8x32 v)
@@ -110,6 +123,12 @@ u8x32_msb_mask (u8x32 v)
return _mm256_movemask_epi8 ((__m256i) v);
}
+static_always_inline u32
+i8x32_msb_mask (i8x32 v)
+{
+ return _mm256_movemask_epi8 ((__m256i) v);
+}
+
/* _from_ */
/* *INDENT-OFF* */
#define _(f,t,i) \
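
A note on the pack/permute pair added above (an editor's sketch, not part of the patch): _mm256_packs_epi16 packs each 128-bit lane independently, so packing two 16-element vectors a and b leaves the result qwords in the order a0-7, b0-7, a8-15, b8-15. The new u64x4_permute with indices 0, 2, 1, 3 is what restores natural element order before an MSB mask is taken, as vector_funcs.h below does. A minimal illustration of the idiom using only the helpers introduced here (the function name is hypothetical; assumes an AVX2 build):

static_always_inline u32
match_mask_x32 (u16x16 v, u16x16u *av)
{
  /* compare, then pack pairs of 16-bit masks into bytes; the pack
     works per 128-bit lane, so qwords land as a0-7, b0-7, a8-15, b8-15 */
  i8x32 x = i16x16_pack (v == av[0], v == av[1]);
  /* select source qwords 0, 2, 1, 3 to restore a0-15, b0-15 order */
  x = (i8x32) u64x4_permute (x, 0, 2, 1, 3);
  /* one bit per byte; bit i is set iff element i matched */
  return i8x32_msb_mask (x);
}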
diff --git a/src/vppinfra/vector_avx512.h b/src/vppinfra/vector_avx512.h
index a688baec7fb..2f5763e3c92 100644
--- a/src/vppinfra/vector_avx512.h
+++ b/src/vppinfra/vector_avx512.h
@@ -323,8 +323,12 @@ _ (u8x16, u16, _mm, __m128i, epi8)
#endif
#undef _
+#ifdef CLIB_HAVE_VEC256
#define CLIB_HAVE_VEC256_COMPRESS
+#endif
+#ifdef CLIB_HAVE_VEC512
#define CLIB_HAVE_VEC512_COMPRESS
+#endif
#ifndef __AVX512VBMI2__
static_always_inline u16x16
diff --git a/src/vppinfra/vector_funcs.h b/src/vppinfra/vector_funcs.h
index db09de0f04c..c8670662910 100644
--- a/src/vppinfra/vector_funcs.h
+++ b/src/vppinfra/vector_funcs.h
@@ -1,334 +1,50 @@
-/*
- * Copyright (c) 2015 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2021 Cisco Systems, Inc.
*/
-/*
- Copyright (c) 2008 Eliot Dresselhaus
-
- Permission is hereby granted, free of charge, to any person obtaining
- a copy of this software and associated documentation files (the
- "Software"), to deal in the Software without restriction, including
- without limitation the rights to use, copy, modify, merge, publish,
- distribute, sublicense, and/or sell copies of the Software, and to
- permit persons to whom the Software is furnished to do so, subject to
- the following conditions:
-
- The above copyright notice and this permission notice shall be
- included in all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
- LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/
#ifndef included_vector_funcs_h
#define included_vector_funcs_h
+#include <vppinfra/clib.h>
-#include <vppinfra/byte_order.h>
-
-/* Addition/subtraction. */
-#if CLIB_VECTOR_WORD_BITS == 128
-#define u8x_add u8x16_add
-#define u16x_add u16x8_add
-#define u32x_add u32x4_add
-#define u64x_add u64x2_add
-#define i8x_add i8x16_add
-#define i16x_add i16x8_add
-#define i32x_add i32x4_add
-#define i64x_add i64x2_add
-#define u8x_sub u8x16_sub
-#define u16x_sub u16x8_sub
-#define u32x_sub u32x4_sub
-#define u64x_sub u64x2_sub
-#define i8x_sub i8x16_sub
-#define i16x_sub i16x8_sub
-#define i32x_sub i32x4_sub
-#define i64x_sub i64x2_sub
-#endif
-
-#if CLIB_VECTOR_WORD_BITS == 64
-#define u8x_add u8x8_add
-#define u16x_add u16x4_add
-#define u32x_add u32x2_add
-#define i8x_add i8x8_add
-#define i16x_add i16x4_add
-#define i32x_add i32x2_add
-#define u8x_sub u8x8_sub
-#define u16x_sub u16x4_sub
-#define u32x_sub u32x2_sub
-#define i8x_sub i8x8_sub
-#define i16x_sub i16x4_sub
-#define i32x_sub i32x2_sub
-#endif
-
-/* Saturating addition/subtraction. */
-#if CLIB_VECTOR_WORD_BITS == 128
-#define u8x_add_saturate u8x16_add_saturate
-#define u16x_add_saturate u16x8_add_saturate
-#define i8x_add_saturate i8x16_add_saturate
-#define i16x_add_saturate i16x8_add_saturate
-#define u8x_sub_saturate u8x16_sub_saturate
-#define u16x_sub_saturate u16x8_sub_saturate
-#define i8x_sub_saturate i8x16_sub_saturate
-#define i16x_sub_saturate i16x8_sub_saturate
-#endif
-
-#if CLIB_VECTOR_WORD_BITS == 64
-#define u8x_add_saturate u8x8_add_saturate
-#define u16x_add_saturate u16x4_add_saturate
-#define i8x_add_saturate i8x8_add_saturate
-#define i16x_add_saturate i16x4_add_saturate
-#define u8x_sub_saturate u8x8_sub_saturate
-#define u16x_sub_saturate u16x4_sub_saturate
-#define i8x_sub_saturate i8x8_sub_saturate
-#define i16x_sub_saturate i16x4_sub_saturate
-#endif
-
-#define _vector_interleave(a,b,t) \
-do { \
- t _tmp_lo = t##_interleave_lo (a, b); \
- t _tmp_hi = t##_interleave_hi (a, b); \
- if (CLIB_ARCH_IS_LITTLE_ENDIAN) \
- (a) = _tmp_lo, (b) = _tmp_hi; \
- else \
- (a) = _tmp_hi, (b) = _tmp_lo; \
-} while (0)
-
-/* 128 bit interleaves. */
-#define u8x16_interleave(a,b) _vector_interleave(a,b,u8x16)
-#define i8x16_interleave(a,b) _vector_interleave(a,b,i8x16)
-#define u16x8_interleave(a,b) _vector_interleave(a,b,u16x8)
-#define i16x8_interleave(a,b) _vector_interleave(a,b,i16x8)
-#define u32x4_interleave(a,b) _vector_interleave(a,b,u32x4)
-#define i32x4_interleave(a,b) _vector_interleave(a,b,i32x4)
-#define u64x2_interleave(a,b) _vector_interleave(a,b,u64x2)
-#define i64x2_interleave(a,b) _vector_interleave(a,b,i64x2)
+/** \brief Compare 64 16-bit elements with the provided value and return a bitmap
-/* 64 bit interleaves. */
-#define u8x8_interleave(a,b) _vector_interleave(a,b,u8x8)
-#define i8x8_interleave(a,b) _vector_interleave(a,b,i8x8)
-#define u16x4_interleave(a,b) _vector_interleave(a,b,u16x4)
-#define i16x4_interleave(a,b) _vector_interleave(a,b,i16x4)
-#define u32x2_interleave(a,b) _vector_interleave(a,b,u32x2)
-#define i32x2_interleave(a,b) _vector_interleave(a,b,i32x2)
-
-/* Word sized interleaves. */
-#if CLIB_VECTOR_WORD_BITS == 128
-#define u8x_interleave u8x16_interleave
-#define u16x_interleave u16x8_interleave
-#define u32x_interleave u32x4_interleave
-#define u64x_interleave u64x2_interleave
-#endif
-
-#if CLIB_VECTOR_WORD_BITS == 64
-#define u8x_interleave u8x8_interleave
-#define u16x_interleave u16x4_interleave
-#define u32x_interleave u32x2_interleave
-#define u64x_interleave(a,b) /* do nothing */
-#endif
-
-/* Vector word sized shifts. */
-#if CLIB_VECTOR_WORD_BITS == 128
-#define u8x_shift_left u8x16_shift_left
-#define i8x_shift_left i8x16_shift_left
-#define u16x_shift_left u16x8_shift_left
-#define i16x_shift_left i16x8_shift_left
-#define u32x_shift_left u32x4_shift_left
-#define i32x_shift_left i32x4_shift_left
-#define u64x_shift_left u64x2_shift_left
-#define i64x_shift_left i64x2_shift_left
-#define u8x_shift_right u8x16_shift_right
-#define i8x_shift_right i8x16_shift_right
-#define u16x_shift_right u16x8_shift_right
-#define i16x_shift_right i16x8_shift_right
-#define u32x_shift_right u32x4_shift_right
-#define i32x_shift_right i32x4_shift_right
-#define u64x_shift_right u64x2_shift_right
-#define i64x_shift_right i64x2_shift_right
-#define u8x_rotate_left u8x16_rotate_left
-#define i8x_rotate_left i8x16_rotate_left
-#define u16x_rotate_left u16x8_rotate_left
-#define i16x_rotate_left i16x8_rotate_left
-#define u32x_rotate_left u32x4_rotate_left
-#define i32x_rotate_left i32x4_rotate_left
-#define u64x_rotate_left u64x2_rotate_left
-#define i64x_rotate_left i64x2_rotate_left
-#define u8x_rotate_right u8x16_rotate_right
-#define i8x_rotate_right i8x16_rotate_right
-#define u16x_rotate_right u16x8_rotate_right
-#define i16x_rotate_right i16x8_rotate_right
-#define u32x_rotate_right u32x4_rotate_right
-#define i32x_rotate_right i32x4_rotate_right
-#define u64x_rotate_right u64x2_rotate_right
-#define i64x_rotate_right i64x2_rotate_right
-#define u8x_ishift_left u8x16_ishift_left
-#define i8x_ishift_left i8x16_ishift_left
-#define u16x_ishift_left u16x8_ishift_left
-#define i16x_ishift_left i16x8_ishift_left
-#define u32x_ishift_left u32x4_ishift_left
-#define i32x_ishift_left i32x4_ishift_left
-#define u64x_ishift_left u64x2_ishift_left
-#define i64x_ishift_left i64x2_ishift_left
-#define u8x_ishift_right u8x16_ishift_right
-#define i8x_ishift_right i8x16_ishift_right
-#define u16x_ishift_right u16x8_ishift_right
-#define i16x_ishift_right i16x8_ishift_right
-#define u32x_ishift_right u32x4_ishift_right
-#define i32x_ishift_right i32x4_ishift_right
-#define u64x_ishift_right u64x2_ishift_right
-#define i64x_ishift_right i64x2_ishift_right
-#define u8x_irotate_left u8x16_irotate_left
-#define i8x_irotate_left i8x16_irotate_left
-#define u16x_irotate_left u16x8_irotate_left
-#define i16x_irotate_left i16x8_irotate_left
-#define u32x_irotate_left u32x4_irotate_left
-#define i32x_irotate_left i32x4_irotate_left
-#define u64x_irotate_left u64x2_irotate_left
-#define i64x_irotate_left i64x2_irotate_left
-#define u8x_irotate_right u8x16_irotate_right
-#define i8x_irotate_right i8x16_irotate_right
-#define u16x_irotate_right u16x8_irotate_right
-#define i16x_irotate_right i16x8_irotate_right
-#define u32x_irotate_right u32x4_irotate_right
-#define i32x_irotate_right i32x4_irotate_right
-#define u64x_irotate_right u64x2_irotate_right
-#define i64x_irotate_right i64x2_irotate_right
-#endif
-
-#if CLIB_VECTOR_WORD_BITS == 64
-#define u8x_shift_left u8x8_shift_left
-#define i8x_shift_left i8x8_shift_left
-#define u16x_shift_left u16x4_shift_left
-#define i16x_shift_left i16x4_shift_left
-#define u32x_shift_left u32x2_shift_left
-#define i32x_shift_left i32x2_shift_left
-#define u8x_shift_right u8x8_shift_right
-#define i8x_shift_right i8x8_shift_right
-#define u16x_shift_right u16x4_shift_right
-#define i16x_shift_right i16x4_shift_right
-#define u32x_shift_right u32x2_shift_right
-#define i32x_shift_right i32x2_shift_right
-#define u8x_rotate_left u8x8_rotate_left
-#define i8x_rotate_left i8x8_rotate_left
-#define u16x_rotate_left u16x4_rotate_left
-#define i16x_rotate_left i16x4_rotate_left
-#define u32x_rotate_left u32x2_rotate_left
-#define i32x_rotate_left i32x2_rotate_left
-#define u8x_rotate_right u8x8_rotate_right
-#define i8x_rotate_right i8x8_rotate_right
-#define u16x_rotate_right u16x4_rotate_right
-#define i16x_rotate_right i16x4_rotate_right
-#define u32x_rotate_right u32x2_rotate_right
-#define i32x_rotate_right i32x2_rotate_right
-#define u8x_ishift_left u8x8_ishift_left
-#define i8x_ishift_left i8x8_ishift_left
-#define u16x_ishift_left u16x4_ishift_left
-#define i16x_ishift_left i16x4_ishift_left
-#define u32x_ishift_left u32x2_ishift_left
-#define i32x_ishift_left i32x2_ishift_left
-#define u8x_ishift_right u8x8_ishift_right
-#define i8x_ishift_right i8x8_ishift_right
-#define u16x_ishift_right u16x4_ishift_right
-#define i16x_ishift_right i16x4_ishift_right
-#define u32x_ishift_right u32x2_ishift_right
-#define i32x_ishift_right i32x2_ishift_right
-#define u8x_irotate_left u8x8_irotate_left
-#define i8x_irotate_left i8x8_irotate_left
-#define u16x_irotate_left u16x4_irotate_left
-#define i16x_irotate_left i16x4_irotate_left
-#define u32x_irotate_left u32x2_irotate_left
-#define i32x_irotate_left i32x2_irotate_left
-#define u8x_irotate_right u8x8_irotate_right
-#define i8x_irotate_right i8x8_irotate_right
-#define u16x_irotate_right u16x4_irotate_right
-#define i16x_irotate_right i16x4_irotate_right
-#define u32x_irotate_right u32x2_irotate_right
-#define i32x_irotate_right i32x2_irotate_right
-#endif
+ @param v value to compare elements with
+ @param a array of 64 u16 elements
+ @return u64 bitmap where each bit represents the result of one comparison
+*/
-#if CLIB_VECTOR_WORD_BITS == 128
-#define u8x_splat u8x16_splat
-#define i8x_splat i8x16_splat
-#define u16x_splat u16x8_splat
-#define i16x_splat i16x8_splat
-#define u32x_splat u32x4_splat
-#define i32x_splat i32x4_splat
-#define u64x_splat u64x2_splat
-#define i64x_splat i64x2_splat
-#endif
+static_always_inline u64
+clib_compare_u16_x64 (u16 v, u16 *a)
+{
+ u64 mask = 0;
+#if defined(CLIB_HAVE_VEC512) && !defined(__aarch64__)
+ u16x32 v32 = u16x32_splat (v);
+ u16x32u *av = (u16x32u *) a;
+ mask = ((u64) u16x32_is_equal_mask (av[0], v32) |
+ (u64) u16x32_is_equal_mask (av[1], v32) << 32);
+#elif defined(CLIB_HAVE_VEC256)
+ u16x16 v16 = u16x16_splat (v);
+ u16x16u *av = (u16x16u *) a;
+ i8x32 x;
+
+ x = i16x16_pack (v16 == av[0], v16 == av[1]);
+ mask = i8x32_msb_mask ((i8x32) u64x4_permute (x, 0, 2, 1, 3));
+ x = i16x16_pack (v16 == av[2], v16 == av[3]);
+ mask |= (u64) i8x32_msb_mask ((i8x32) u64x4_permute (x, 0, 2, 1, 3)) << 32;
+#elif defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_MSB_MASK)
+ u16x8 idx8 = u16x8_splat (v);
+ u16x8u *av = (u16x8u *) a;
+ mask =
+ ((u64) i8x16_msb_mask (i16x8_pack (idx8 == av[0], idx8 == av[1])) |
+ (u64) i8x16_msb_mask (i16x8_pack (idx8 == av[2], idx8 == av[3])) << 16 |
+ (u64) i8x16_msb_mask (i16x8_pack (idx8 == av[4], idx8 == av[5])) << 32 |
+ (u64) i8x16_msb_mask (i16x8_pack (idx8 == av[6], idx8 == av[7])) << 48);
+#else
+ for (int i = 0; i < 64; i++)
+ if (a[i] == v)
+ mask |= 1ULL << i;
+#endif
+ return mask;
+}
-#if CLIB_VECTOR_WORD_BITS == 64
-#define u8x_splat u8x8_splat
-#define i8x_splat i8x8_splat
-#define u16x_splat u16x4_splat
-#define i16x_splat i16x4_splat
-#define u32x_splat u32x2_splat
-#define i32x_splat i32x2_splat
#endif
-
-#define u32x4_transpose_step(x,y) \
-do { \
- u32x4 _x = (x); \
- u32x4 _y = (y); \
- (x) = u32x4_interleave_lo (_x, _y); \
- (y) = u32x4_interleave_hi (_x, _y); \
-} while (0)
-
-/* 4x4 transpose: x_ij -> x_ji */
-#define u32x4_transpose(x0,x1,x2,x3) \
-do { \
- u32x4 _x0 = (u32x4) (x0); \
- u32x4 _x1 = (u32x4) (x1); \
- u32x4 _x2 = (u32x4) (x2); \
- u32x4 _x3 = (u32x4) (x3); \
- u32x4_transpose_step (_x0, _x2); \
- u32x4_transpose_step (_x1, _x3); \
- u32x4_transpose_step (_x0, _x1); \
- u32x4_transpose_step (_x2, _x3); \
- (x0) = (u32x4) _x0; \
- (x1) = (u32x4) _x1; \
- (x2) = (u32x4) _x2; \
- (x3) = (u32x4) _x3; \
-} while (0)
-
-#define i32x4_transpose(x0,x1,x2,x3) \
-do { \
- u32x4 _x0 = (u32x4) (x0); \
- u32x4 _x1 = (u32x4) (x1); \
- u32x4 _x2 = (u32x4) (x2); \
- u32x4 _x3 = (u32x4) (x3); \
- u32x4_transpose_step (_x0, _x2); \
- u32x4_transpose_step (_x1, _x3); \
- u32x4_transpose_step (_x0, _x1); \
- u32x4_transpose_step (_x2, _x3); \
- (x0) = (i32x4) _x0; \
- (x1) = (i32x4) _x1; \
- (x2) = (i32x4) _x2; \
- (x3) = (i32x4) _x3; \
-} while (0)
-
-#undef _
-
-#endif /* included_vector_funcs_h */
-
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
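
Taken together, the vector_funcs.h rewrite drops the legacy generic vector macros and leaves a single helper, clib_compare_u16_x64, which returns one bit per element. A hedged usage sketch (hypothetical caller name; assumes a translation unit that includes vppinfra/vector_funcs.h and a GCC/Clang toolchain for __builtin_ctzll):

#include <vppinfra/vector_funcs.h>

/* Hypothetical example: visit positions of `needle` among 64
   next-node indices, the pattern enqueue_to_next-style code uses. */
static void
scan_matches (u16 *nexts /* 64 entries */, u16 needle)
{
  u64 bmp = clib_compare_u16_x64 (needle, nexts);
  while (bmp)
    {
      int i = __builtin_ctzll (bmp); /* index of lowest matching element */
      /* ... process element i ... */
      bmp &= bmp - 1; /* clear the lowest set bit */
    }
}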
diff --git a/src/vppinfra/vector_sse42.h b/src/vppinfra/vector_sse42.h
index 06952f117dd..f86fad39b02 100644
--- a/src/vppinfra/vector_sse42.h
+++ b/src/vppinfra/vector_sse42.h
@@ -613,6 +613,12 @@ u8x16_msb_mask (u8x16 v)
return _mm_movemask_epi8 ((__m128i) v);
}
+static_always_inline u16
+i8x16_msb_mask (i8x16 v)
+{
+ return _mm_movemask_epi8 ((__m128i) v);
+}
+
#define CLIB_HAVE_VEC128_MSB_MASK
#undef _signed_binop