diff options
author | Damjan Marion <damarion@cisco.com> | 2019-03-19 15:38:40 +0100 |
---|---|---|
committer | Neale Ranns <nranns@cisco.com> | 2019-03-26 10:31:01 +0000 |
commit | c59b9a26ed9a6bc083db2868b6993add6fd2ba5b (patch) | |
tree | 70496bdc4ad01ab9e11cd07913f2aec681fac324 /src/vppinfra/string.h | |
parent | 8e22054209ae9c4f08dae16f1aff910d8c8d0b76 (diff) |
ipsec: esp-encrypt rework
Change-Id: Ibe7f806b9d600994e83c9f1be526fdb0a1ef1833
Signed-off-by: Damjan Marion <damarion@cisco.com>
Diffstat (limited to 'src/vppinfra/string.h')
-rw-r--r-- | src/vppinfra/string.h | 76 |
1 file changed, 76 insertions, 0 deletions
diff --git a/src/vppinfra/string.h b/src/vppinfra/string.h index d9cd8fe1af9..4755a9868d6 100644 --- a/src/vppinfra/string.h +++ b/src/vppinfra/string.h @@ -214,6 +214,82 @@ memset_s_inline (void *s, rsize_t smax, int c, rsize_t n) #define clib_memset(s,c,n) memset_s_inline(s,n,c,n) static_always_inline void +clib_memcpy_le (u8 * dst, u8 * src, u8 len, u8 max_len) +{ +#if defined (CLIB_HxAVE_VEC256) + u8x32 s, d; + u8x32 mask = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 + }; + u8x32 lv = u8x32_splat (len); + u8x32 add = u8x32_splat (32); + + s = u8x32_load_unaligned (src); + d = u8x32_load_unaligned (dst); + d = u8x32_blend (d, s, u8x32_is_greater (lv, mask)); + u8x32_store_unaligned (d, dst); + + if (max_len <= 32) + return; + + mask += add; + s = u8x32_load_unaligned (src + 32); + d = u8x32_load_unaligned (dst + 32); + d = u8x32_blend (d, s, u8x32_is_greater (lv, mask)); + u8x32_store_unaligned (d, dst + 32); + +#elif defined (CLIB_HAVE_VEC128) && !defined (__aarch64__) + u8x16 s, d; + u8x16 mask = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }; + u8x16 lv = u8x16_splat (len); + u8x16 add = u8x16_splat (16); + + s = u8x16_load_unaligned (src); + d = u8x16_load_unaligned (dst); + d = u8x16_blend (d, s, u8x16_is_greater (lv, mask)); + u8x16_store_unaligned (d, dst); + + if (max_len <= 16) + return; + + mask += add; + s = u8x16_load_unaligned (src + 16); + d = u8x16_load_unaligned (dst + 16); + d = u8x16_blend (d, s, u8x16_is_greater (lv, mask)); + u8x16_store_unaligned (d, dst + 16); + + if (max_len <= 32) + return; + + mask += add; + s = u8x16_load_unaligned (src + 32); + d = u8x16_load_unaligned (dst + 32); + d = u8x16_blend (d, s, u8x16_is_greater (lv, mask)); + u8x16_store_unaligned (d, dst + 32); + + mask += add; + s = u8x16_load_unaligned (src + 48); + d = u8x16_load_unaligned (dst + 48); + d = u8x16_blend (d, s, u8x16_is_greater (lv, mask)); + u8x16_store_unaligned (d, 
dst + 48); +#else + clib_memcpy_fast (dst, src, len); +#endif +} + +static_always_inline void +clib_memcpy_le64 (u8 * dst, u8 * src, u8 len) +{ + clib_memcpy_le (dst, src, len, 64); +} + +static_always_inline void +clib_memcpy_le32 (u8 * dst, u8 * src, u8 len) +{ + clib_memcpy_le (dst, src, len, 32); +} + +static_always_inline void clib_memset_u64 (void *p, u64 val, uword count) { u64 *ptr = p; |