From d154a17989b1da7abbfdb87b98b90cc5f4d3295f Mon Sep 17 00:00:00 2001
From: Damjan Marion <damarion@cisco.com>
Date: Tue, 13 Jul 2021 21:12:41 +0200
Subject: vppinfra: put each vector function into own file

Type: refactor

Change-Id: I2dd9a18497992ac7e2686c14f5d17eccccda0cda
Signed-off-by: Damjan Marion <damarion@cisco.com>
---
 src/vppinfra/vector/compress.h          |  78 +++++++++++++++
 src/vppinfra/vector/mask_compare.h      | 166 ++++++++++++++++++++++++++++++++
 src/vppinfra/vector/test/compress.c     |  81 ++++++++++++++++
 src/vppinfra/vector/test/mask_compare.c |  95 ++++++++++++++++++
 src/vppinfra/vector/test/test.c         |  42 ++++++++
 src/vppinfra/vector/test/test.h         |  35 +++++++
 6 files changed, 497 insertions(+)
 create mode 100644 src/vppinfra/vector/compress.h
 create mode 100644 src/vppinfra/vector/mask_compare.h
 create mode 100644 src/vppinfra/vector/test/compress.c
 create mode 100644 src/vppinfra/vector/test/mask_compare.c
 create mode 100644 src/vppinfra/vector/test/test.c
 create mode 100644 src/vppinfra/vector/test/test.h

diff --git a/src/vppinfra/vector/compress.h b/src/vppinfra/vector/compress.h
new file mode 100644
index 00000000000..1d5d84e77ea
--- /dev/null
+++ b/src/vppinfra/vector/compress.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2021 Cisco Systems, Inc.
+ */
+
+#ifndef included_vector_compress_h
+#define included_vector_compress_h
+#include <vppinfra/clib.h>
+#include <vppinfra/memcpy.h>
+
+static_always_inline u32 *
+clib_compress_u32_x64 (u32 *dst, u32 *src, u64 mask)
+{
+#if defined(CLIB_HAVE_VEC512_COMPRESS)
+  u32x16u *sv = (u32x16u *) src;
+  for (int i = 0; i < 4; i++)
+    {
+      int cnt = _popcnt32 ((u16) mask);
+      u32x16_compress_store (sv[i], mask, dst);
+      dst += cnt;
+      mask >>= 16;
+    }
+
+#elif defined(CLIB_HAVE_VEC256_COMPRESS)
+  u32x8u *sv = (u32x8u *) src;
+  for (int i = 0; i < 8; i++)
+    {
+      int cnt = _popcnt32 ((u8) mask);
+      u32x8_compress_store (sv[i], mask, dst);
+      dst += cnt;
+      mask >>= 8;
+    }
+#else
+  while (mask)
+    {
+      u16 bit = count_trailing_zeros (mask);
+      mask = clear_lowest_set_bit (mask);
+      dst++[0] = src[bit];
+    }
+#endif
+  return dst;
+}
+
+/** \brief Compress array of 32-bit elements into destination array based on
+ * mask
+
+    @param dst destination array of u32 elements
+    @param src source array of u32 elements
+    @param mask array of u64 values representing compress mask
+    @param n_elts number of elements in the source array
+    @return number of elements stored in destination array
+*/
+
+static_always_inline u32
+clib_compress_u32 (u32 *dst, u32 *src, u64 *mask, u32 n_elts)
+{
+  u32 *dst0 = dst;
+  while (n_elts >= 64)
+    {
+      if (mask[0] == ~0ULL)
+	{
+	  clib_memcpy_u32 (dst, src, 64);
+	  dst += 64;
+	}
+      else
+	dst = clib_compress_u32_x64 (dst, src, mask[0]);
+
+      mask++;
+      src += 64;
+      n_elts -= 64;
+    }
+
+  if (PREDICT_TRUE (n_elts == 0))
+    return dst - dst0;
+
+  return clib_compress_u32_x64 (dst, src, mask[0] & pow2_mask (n_elts)) - dst0;
+}
+
+#endif
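For orientation, a minimal caller sketch (illustration, not part of the patch; drop_odd_elements is an invented name). clib_compress_u32 consumes one u64 mask word per 64 source elements and returns the number of elements packed into dst:

#include <vppinfra/vector/compress.h>

/* Sketch: keep only the even-indexed elements of a 128-element array.
   Bit i of the mask selects src[i]; two u64 words cover 128 elements. */
static u32
drop_odd_elements (u32 *dst, u32 *src)
{
  u64 mask[2] = { 0x5555555555555555ULL, 0x5555555555555555ULL };
  return clib_compress_u32 (dst, src, mask, 128); /* returns 64 */
}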
+ */ + +#ifndef included_vector_mask_compare_h +#define included_vector_mask_compare_h +#include +#include + +static_always_inline u64 +clib_mask_compare_u16_x64 (u16 v, u16 *a, u32 n_elts) +{ + u64 mask = 0; +#if defined(CLIB_HAVE_VEC512) + u16x32 v32 = u16x32_splat (v); + u16x32u *av = (u16x32u *) a; + mask = ((u64) u16x32_is_equal_mask (av[0], v32) | + (u64) u16x32_is_equal_mask (av[1], v32) << 32); +#elif defined(CLIB_HAVE_VEC256) + u16x16 v16 = u16x16_splat (v); + u16x16u *av = (u16x16u *) a; + i8x32 x; + + x = i8x32_pack (v16 == av[0], v16 == av[1]); + mask = i8x32_msb_mask ((i8x32) u64x4_permute (x, 0, 2, 1, 3)); + x = i8x32_pack (v16 == av[2], v16 == av[3]); + mask |= (u64) i8x32_msb_mask ((i8x32) u64x4_permute (x, 0, 2, 1, 3)) << 32; +#elif defined(CLIB_HAVE_VEC128) && defined(__ARM_NEON) + u16x8 v8 = u16x8_splat (v); + u16x8 m = { 1, 2, 4, 8, 16, 32, 64, 128 }; + u16x8u *av = (u16x8u *) a; + + /* compare each u16 elemment with v8, result gives 0xffff in each element + of the resulting vector if comparison result is true. + Bitwise AND with m will give us one bit set for true result and offset + of that bit represend element index. Finally vaddvq_u16() gives us sum + of all elements of the vector which will give us u8 bitmap. */ + + for (int i = 0; i < 8; i++) + mask |= (u64) vaddvq_u16 ((av[i] == v8) & m) << (i * 8); + +#elif defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_MSB_MASK) + u16x8 v8 = u16x8_splat (v); + u16x8u *av = (u16x8u *) a; + mask = ((u64) i8x16_msb_mask (i8x16_pack (v8 == av[0], v8 == av[1])) | + (u64) i8x16_msb_mask (i8x16_pack (v8 == av[2], v8 == av[3])) << 16 | + (u64) i8x16_msb_mask (i8x16_pack (v8 == av[4], v8 == av[5])) << 32 | + (u64) i8x16_msb_mask (i8x16_pack (v8 == av[6], v8 == av[7])) << 48); +#else + for (int i = 0; i < n_elts; i++) + if (a[i] == v) + mask |= 1ULL << i; +#endif + return mask; +} + +/** \brief Compare 16-bit elemments with provied value and return bitmap + + @param v value to compare elements with + @param a array of u16 elements + @param mask array of u64 where reuslting mask will be stored + @param n_elts number of elements in the array + @return none +*/ + +static_always_inline void +clib_mask_compare_u16 (u16 v, u16 *a, u64 *mask, u32 n_elts) +{ + while (n_elts >= 64) + { + mask++[0] = clib_mask_compare_u16_x64 (v, a, 64); + n_elts -= 64; + a += 64; + } + + if (PREDICT_TRUE (n_elts == 0)) + return; + + mask[0] = clib_mask_compare_u16_x64 (v, a, n_elts) & pow2_mask (n_elts); +} + +static_always_inline u64 +clib_mask_compare_u32_x64 (u32 v, u32 *a, u32 n_elts) +{ + u64 mask = 0; +#if defined(CLIB_HAVE_VEC512) + u32x16 v16 = u32x16_splat (v); + u32x16u *av = (u32x16u *) a; + mask = ((u64) u32x16_is_equal_mask (av[0], v16) | + (u64) u32x16_is_equal_mask (av[1], v16) << 16 | + (u64) u32x16_is_equal_mask (av[2], v16) << 32 | + (u64) u32x16_is_equal_mask (av[3], v16) << 48); +#elif defined(CLIB_HAVE_VEC256) + u32x8 v8 = u32x8_splat (v); + u32x8u *av = (u32x8u *) a; + u32x8 m = { 0, 4, 1, 5, 2, 6, 3, 7 }; + i8x32 c; + + c = i8x32_pack (i16x16_pack ((i32x8) (v8 == av[0]), (i32x8) (v8 == av[1])), + i16x16_pack ((i32x8) (v8 == av[2]), (i32x8) (v8 == av[3]))); + mask = i8x32_msb_mask ((i8x32) u32x8_permute ((u32x8) c, m)); + + c = i8x32_pack (i16x16_pack ((i32x8) (v8 == av[4]), (i32x8) (v8 == av[5])), + i16x16_pack ((i32x8) (v8 == av[6]), (i32x8) (v8 == av[7]))); + mask |= (u64) i8x32_msb_mask ((i8x32) u32x8_permute ((u32x8) c, m)) << 32; + +#elif defined(CLIB_HAVE_VEC128) && defined(__ARM_NEON) + u32x4 v4 = u32x4_splat (v); + u32x4 m = 
diff --git a/src/vppinfra/vector/test/compress.c b/src/vppinfra/vector/test/compress.c
new file mode 100644
index 00000000000..7e3eba9892d
--- /dev/null
+++ b/src/vppinfra/vector/test/compress.c
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2021 Cisco Systems, Inc.
+ */
+
+#include <vppinfra/format.h>
+#include <vppinfra/vector/test/test.h>
+#include <vppinfra/vector/compress.h>
+
+__clib_test_fn u32
+clib_compress_u32_wrapper (u32 *dst, u32 *src, u64 *mask, u32 n_elts)
+{
+  return clib_compress_u32 (dst, src, mask, n_elts);
+}
+
+typedef struct
+{
+  u64 mask[10];
+  u32 n_elts;
+} compress_test_t;
+
+static compress_test_t tests[] = {
+  { .mask = { 1 }, .n_elts = 1 },
+  { .mask = { 2 }, .n_elts = 2 },
+  { .mask = { 3 }, .n_elts = 2 },
+  { .mask = { 0, 1 }, .n_elts = 66 },
+  { .mask = { 0, 2 }, .n_elts = 69 },
+  { .mask = { 0, 3 }, .n_elts = 66 },
+  { .mask = { ~0ULL, ~0ULL, ~0ULL, ~0ULL }, .n_elts = 62 },
+  { .mask = { ~0ULL, ~0ULL, ~0ULL, ~0ULL }, .n_elts = 255 },
+  { .mask = { ~0ULL, 1, 1, ~0ULL }, .n_elts = 256 },
+};
+
+static clib_error_t *
+test_clib_compress_u32 (clib_error_t *err)
+{
+  u32 src[513];
+  u32 dst[513];
+  u32 i, j;
+
+  for (i = 0; i < ARRAY_LEN (src); i++)
+    src[i] = i;
+
+  for (i = 0; i < ARRAY_LEN (tests); i++)
+    {
+      compress_test_t *t = tests + i;
+      u32 *dp = dst;
+      u32 r;
+
+      for (j = 0; j < ARRAY_LEN (dst); j++)
+	dst[j] = 0xa5a5a5a5;
+
+      r = clib_compress_u32_wrapper (dst, src, t->mask, t->n_elts);
+
+      for (j = 0; j < t->n_elts; j++)
+	{
+	  if ((t->mask[j >> 6] & (1ULL << (j & 0x3f))) == 0)
+	    continue;
+
+	  if (dp[0] != src[j])
+	    return clib_error_return (err,
+				      "wrong data in testcase %u at "
+				      "(dst[%u] = 0x%x, src[%u] = 0x%x)",
+				      i, dp - dst, dp[0], j, src[j]);
+	  dp++;
+	}
+
+      if (dst[dp - dst + 1] != 0xa5a5a5a5)
+	return clib_error_return (err, "buffer overrun in testcase %u", i);
+
+      if (dp - dst != r)
+	return clib_error_return (err, "wrong number of elts in testcase %u",
+				  i);
+    }
+
+  return err;
+}
+
+REGISTER_TEST (clib_compress_u32) = {
+  .name = "clib_compress_u32",
+  .fn = test_clib_compress_u32,
+};
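The verification loop above walks dst in step with the set mask bits. Its logic, rewritten as a stand-alone scalar reference (illustration only, useful when adding test cases):

/* What clib_compress_u32 must produce: for each set bit j of the mask
   (word j >> 6, bit j & 0x3f), append src[j] to dst; return the count. */
static u32
compress_u32_reference (u32 *dst, u32 *src, u64 *mask, u32 n_elts)
{
  u32 *dst0 = dst;
  for (u32 j = 0; j < n_elts; j++)
    if (mask[j >> 6] & (1ULL << (j & 0x3f)))
      *dst++ = src[j];
  return dst - dst0;
}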
+ */ + +#include +#include +#include + +__clib_test_fn u32 +clib_compress_u32_wrapper (u32 *dst, u32 *src, u64 *mask, u32 n_elts) +{ + return clib_compress_u32 (dst, src, mask, n_elts); +} + +typedef struct +{ + u64 mask[10]; + u32 n_elts; +} compress_test_t; + +static compress_test_t tests[] = { + { .mask = { 1 }, .n_elts = 1 }, + { .mask = { 2 }, .n_elts = 2 }, + { .mask = { 3 }, .n_elts = 2 }, + { .mask = { 0, 1 }, .n_elts = 66 }, + { .mask = { 0, 2 }, .n_elts = 69 }, + { .mask = { 0, 3 }, .n_elts = 66 }, + { .mask = { ~0ULL, ~0ULL, ~0ULL, ~0ULL }, .n_elts = 62 }, + { .mask = { ~0ULL, ~0ULL, ~0ULL, ~0ULL }, .n_elts = 255 }, + { .mask = { ~0ULL, 1, 1, ~0ULL }, .n_elts = 256 }, +}; + +static clib_error_t * +test_clib_compress_u32 (clib_error_t *err) +{ + u32 src[513]; + u32 dst[513]; + u32 i, j; + + for (i = 0; i < ARRAY_LEN (src); i++) + src[i] = i; + + for (i = 0; i < ARRAY_LEN (tests); i++) + { + compress_test_t *t = tests + i; + u32 *dp = dst; + u32 r; + + for (j = 0; j < ARRAY_LEN (dst); j++) + dst[j] = 0xa5a5a5a5; + + r = clib_compress_u32_wrapper (dst, src, t->mask, t->n_elts); + + for (j = 0; j < t->n_elts; j++) + { + if ((t->mask[j >> 6] & (1ULL << (j & 0x3f))) == 0) + continue; + + if (dp[0] != src[j]) + return clib_error_return (err, + "wrong data in testcase %u at " + "(dst[%u] = 0x%x, src[%u] = 0x%x)", + i, dp - dst, dp[0], j, src[j]); + dp++; + } + + if (dst[dp - dst + 1] != 0xa5a5a5a5) + return clib_error_return (err, "buffer overrun in testcase %u", i); + + if (dp - dst != r) + return clib_error_return (err, "wrong number of elts in testcase %u", + i); + } + + return err; +} + +REGISTER_TEST (clib_compress_u32) = { + .name = "clib_compress_u32", + .fn = test_clib_compress_u32, +}; diff --git a/src/vppinfra/vector/test/mask_compare.c b/src/vppinfra/vector/test/mask_compare.c new file mode 100644 index 00000000000..64df0ee084a --- /dev/null +++ b/src/vppinfra/vector/test/mask_compare.c @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: Apache-2.0 + * Copyright(c) 2021 Cisco Systems, Inc. 
+ */ + +#include +#include +#include + +__clib_test_fn void +clib_mask_compare_u16_wrapper (u16 v, u16 *a, u64 *mask, u32 n_elts) +{ + clib_mask_compare_u16 (v, a, mask, n_elts); +} + +__clib_test_fn void +clib_mask_compare_u32_wrapper (u32 v, u32 *a, u64 *mask, u32 n_elts) +{ + clib_mask_compare_u32 (v, a, mask, n_elts); +} + +static clib_error_t * +test_clib_mask_compare_u16 (clib_error_t *err) +{ + u16 array[513]; + u64 mask[10]; + u32 i, j; + + for (i = 0; i < ARRAY_LEN (array); i++) + array[i] = i; + + for (i = 0; i < ARRAY_LEN (array); i++) + { + for (j = 0; j < ARRAY_LEN (mask); j++) + mask[j] = 0xa5a5a5a5a5a5a5a5; + + clib_mask_compare_u16_wrapper (i, array, mask, i + 1); + + for (j = 0; j < (i >> 6); j++) + { + if (mask[j]) + return clib_error_return (err, "mask at position %u not zero", j); + } + if (mask[j] != 1ULL << (i & 0x3f)) + return clib_error_return (err, + "mask at position %u is %lx, expected %lx", + j, mask[j], 1ULL << (i % 64)); + + if (mask[j + 1] != 0xa5a5a5a5a5a5a5a5) + return clib_error_return (err, "mask overrun at position %u", j + 1); + } + return err; +} + +REGISTER_TEST (clib_mask_compare_u16) = { + .name = "clib_mask_compare_u16", + .fn = test_clib_mask_compare_u16, +}; + +static clib_error_t * +test_clib_mask_compare_u32 (clib_error_t *err) +{ + u32 array[513]; + u64 mask[10]; + u32 i, j; + + for (i = 0; i < ARRAY_LEN (array); i++) + array[i] = i; + + for (i = 0; i < ARRAY_LEN (array); i++) + { + for (j = 0; j < ARRAY_LEN (mask); j++) + mask[j] = 0xa5a5a5a5a5a5a5a5; + + clib_mask_compare_u32_wrapper (i, array, mask, i + 1); + + for (j = 0; j < (i >> 6); j++) + { + if (mask[j]) + return clib_error_return (err, "mask at position %u not zero", j); + } + if (mask[j] != 1ULL << (i & 0x3f)) + return clib_error_return (err, + "mask at position %u is %lx, expected %lx", + j, mask[j], 1ULL << (i % 64)); + + if (mask[j + 1] != 0xa5a5a5a5a5a5a5a5) + return clib_error_return (err, "mask overrun at position %u", j + 1); + } + return err; +} + +REGISTER_TEST (clib_mask_compare_u32) = { + .name = "clib_mask_compare_u32", + .fn = test_clib_mask_compare_u32, +}; diff --git a/src/vppinfra/vector/test/test.c b/src/vppinfra/vector/test/test.c new file mode 100644 index 00000000000..0e90bacce49 --- /dev/null +++ b/src/vppinfra/vector/test/test.c @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: Apache-2.0 + * Copyright(c) 2021 Cisco Systems, Inc. + */ + +#include +#include + +test_registration_t *test_registrations[CLIB_MARCH_TYPE_N_VARIANTS] = {}; + +int +main (int argc, char *argv[]) +{ + clib_mem_init (0, 64ULL << 20); + + for (int i = 0; i < CLIB_MARCH_TYPE_N_VARIANTS; i++) + { + test_registration_t *r = test_registrations[i]; + + if (r == 0) + continue; + + fformat (stdout, "\nMultiarch Variant: %U\n", format_march_variant, i); + fformat (stdout, + "-------------------------------------------------------\n"); + while (r) + { + clib_error_t *err; + err = (r->fn) (0); + fformat (stdout, "%-50s %s\n", r->name, err ? "FAIL" : "PASS"); + if (err) + { + clib_error_report (err); + fformat (stdout, "\n"); + } + + r = r->next; + } + } + + fformat (stdout, "\n"); + return 0; +} diff --git a/src/vppinfra/vector/test/test.h b/src/vppinfra/vector/test/test.h new file mode 100644 index 00000000000..bc499fb24e8 --- /dev/null +++ b/src/vppinfra/vector/test/test.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: Apache-2.0 + * Copyright(c) 2021 Cisco Systems, Inc. 
+ */ + +#ifndef included_test_test_h +#define included_test_test_h + +#include + +typedef clib_error_t *(test_fn_t) (clib_error_t *); + +typedef struct test_registration_ +{ + char *name; + u8 multiarch : 1; + test_fn_t *fn; + struct test_registration_ *next; +} test_registration_t; + +extern test_registration_t *test_registrations[CLIB_MARCH_TYPE_N_VARIANTS]; + +#define __clib_test_fn static __clib_noinline __clib_section (".test_wrapper") + +#define REGISTER_TEST(x) \ + test_registration_t CLIB_MARCH_SFX (__test_##x); \ + static void __clib_constructor CLIB_MARCH_SFX (__test_registration_##x) ( \ + void) \ + { \ + test_registration_t *r = &CLIB_MARCH_SFX (__test_##x); \ + r->next = test_registrations[CLIB_MARCH_SFX (CLIB_MARCH_VARIANT_TYPE)]; \ + test_registrations[CLIB_MARCH_SFX (CLIB_MARCH_VARIANT_TYPE)] = r; \ + } \ + test_registration_t CLIB_MARCH_SFX (__test_##x) + +#endif -- cgit 1.2.3-korg