author    | Damjan Marion <damarion@cisco.com>       | 2023-03-15 11:42:06 +0000
committer | Ole Trøan <otroan@employees.org>         | 2023-03-23 12:04:46 +0000
commit    | b47376f0b404d2ba5526fba52b171d79b0f352f8 (patch)
tree      | 5ffcdb47d48f4db0b87483fe6f99a7249831118c /src/vppinfra/vector_avx2.h
parent    | 5527a78ed96043d2c26e3271066c50b44dd7fc0b (diff)
vppinfra: AES-CBC and AES-GCM refactor and optimizations
- crypto code moved to vppinfra for better testing and reuse
- added 256-bit VAES support (Intel Client CPUs)
- added AES_GMAC functions
Change-Id: I960c8e14ca0a0126703e8f1589d86f32e2a98361
Type: improvement
Signed-off-by: Damjan Marion <damarion@cisco.com>
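
For context on the "256-bit VAES" point in the commit message: VAES lets a single AES round instruction operate on two independent 128-bit blocks packed into one 256-bit register, which is what makes it attractive on Intel client CPUs that lack AVX-512. The sketch below only illustrates that idea and is not code from this patch; the function and variable names are hypothetical, while _mm256_aesenc_epi128 is the real VAES intrinsic (requires a VAES-capable CPU and -mavx2 -mvaes).

/* Illustrative only (not from this patch): one AES encryption round applied
 * to two independent 128-bit blocks held in a single YMM register.
 * Build with -mavx2 -mvaes on a VAES-capable CPU. */
#include <immintrin.h>

static inline __m256i
aes_round_x2 (__m256i two_blocks, __m256i round_key)
{
  /* lanes 0..15 hold block 0, lanes 16..31 hold block 1; round_key
     typically carries the same 128-bit round key in both halves */
  return _mm256_aesenc_epi128 (two_blocks, round_key);
}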
Diffstat (limited to 'src/vppinfra/vector_avx2.h')
-rw-r--r-- | src/vppinfra/vector_avx2.h | 55
1 file changed, 55 insertions, 0 deletions
diff --git a/src/vppinfra/vector_avx2.h b/src/vppinfra/vector_avx2.h
index 80c2e39bdfc..17271b8fcd0 100644
--- a/src/vppinfra/vector_avx2.h
+++ b/src/vppinfra/vector_avx2.h
@@ -223,6 +223,16 @@ u8x32_xor3 (u8x32 a, u8x32 b, u8x32 c)
   return a ^ b ^ c;
 }

+static_always_inline u8x32
+u8x32_reflect_u8x16 (u8x32 x)
+{
+  static const u8x32 mask = {
+    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
+    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
+  };
+  return (u8x32) _mm256_shuffle_epi8 ((__m256i) x, (__m256i) mask);
+}
+
 static_always_inline u16x16
 u16x16_mask_last (u16x16 v, u8 n_last)
 {
@@ -332,6 +342,11 @@ u8x32_blend (u8x32 v1, u8x32 v2, u8x32 mask)
                                        (__m256i) mask);
 }

+#define u8x32_word_shift_left(a, n) \
+  (u8x32) _mm256_bslli_epi128 ((__m256i) a, n)
+#define u8x32_word_shift_right(a, n) \
+  (u8x32) _mm256_bsrli_epi128 ((__m256i) a, n)
+
 #define u32x8_permute_lanes(a, b, m) \
   (u32x8) _mm256_permute2x128_si256 ((__m256i) a, (__m256i) b, m)
 #define u64x4_permute_lanes(a, b, m) \
@@ -407,6 +422,46 @@ u8x32_splat_u8x16 (u8x16 a)
   return (u8x32) _mm256_broadcastsi128_si256 ((__m128i) a);
 }

+static_always_inline u32x8
+u32x8_splat_u32x4 (u32x4 a)
+{
+  return (u32x8) _mm256_broadcastsi128_si256 ((__m128i) a);
+}
+
+static_always_inline u8x32
+u8x32_load_partial (u8 *data, uword n)
+{
+#if defined(CLIB_HAVE_VEC256_MASK_LOAD_STORE)
+  return u8x32_mask_load_zero (data, pow2_mask (n));
+#else
+  u8x32 r = {};
+  if (n > 16)
+    {
+      r = u8x32_insert_lo (r, *(u8x16u *) data);
+      r = u8x32_insert_hi (r, u8x16_load_partial (data + 16, n - 16));
+    }
+  else
+    r = u8x32_insert_lo (r, u8x16_load_partial (data, n));
+  return r;
+#endif
+}
+
+static_always_inline void
+u8x32_store_partial (u8x32 r, u8 *data, uword n)
+{
+#if defined(CLIB_HAVE_VEC256_MASK_LOAD_STORE)
+  u8x32_mask_store (r, data, pow2_mask (n));
+#else
+  if (n > 16)
+    {
+      *(u8x16u *) data = u8x32_extract_lo (r);
+      u8x16_store_partial (u8x32_extract_hi (r), data + 16, n - 16);
+    }
+  else
+    u8x16_store_partial (u8x32_extract_lo (r), data, n);
+#endif
+}
+
 #endif /* included_vector_avx2_h */

 /*
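
Usage note on the new partial load/store helpers added in this hunk: they touch only the first n bytes of a 32-byte vector, either through a masked load/store when CLIB_HAVE_VEC256_MASK_LOAD_STORE is defined or through the 128-bit fallback path. A minimal caller sketch follows; the function name is hypothetical and it assumes an AVX2 build where vppinfra/vector.h pulls in vector_avx2.h.

/* Hypothetical caller sketch, not part of this patch: copy an n-byte tail
 * (n <= 32) without reading or writing past the end of either buffer. */
#include <vppinfra/clib.h>
#include <vppinfra/vector.h>

static_always_inline void
copy_tail (u8 *dst, u8 *src, uword n)
{
  u8x32 r = u8x32_load_partial (src, n); /* loads only the first n bytes */
  u8x32_store_partial (r, dst, n);	 /* stores only the first n bytes */
}

Presumably this is the pattern the refactored AES code relies on for trailing partial blocks, where over-reading or over-writing the buffer would be unsafe.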