From 8e66b9bf4ba90279631e6a0e8ccc2eab5f9156c2 Mon Sep 17 00:00:00 2001
From: Gabriel Ganne
Date: Thu, 14 Dec 2017 16:20:37 +0100
Subject: Use crc32 wrapper (VPP-1086)

This allows ARM platforms to also take advantage of crc32 hardware
acceleration.

* add a wrapper for crc32_u64, the only crc32 intrinsic really used.
  Using it instead of a call to clib_crc32c() makes it easier to build
  symmetrical hash functions.
* replace the #ifdef on SSE4 with a test on clib_crc32c_uses_intrinsics.
  Note: the test on i386 is kept.
* fix a typo in the lb test log call

Change-Id: I03a0897b70f6c1717e6901d93cf0fe024d5facb5
Signed-off-by: Gabriel Ganne
---
 src/plugins/lb/lbhash.h   | 12 ++++++------
 src/vnet/bfd/bfd_main.c   |  8 ++++----
 src/vppinfra/crc32.h      |  5 +++++
 src/vppinfra/cuckoo_8_8.h |  4 ++--
 test/test_lb.py           |  2 +-
 5 files changed, 18 insertions(+), 13 deletions(-)

diff --git a/src/plugins/lb/lbhash.h b/src/plugins/lb/lbhash.h
index c514fb57d75..10d3beab13b 100644
--- a/src/plugins/lb/lbhash.h
+++ b/src/plugins/lb/lbhash.h
@@ -101,16 +101,16 @@ void lb_hash_free(lb_hash_t *h)
   vec_free(mem);
 }

-#if __SSE4_2__ && !defined (__i386__)
+#if defined(clib_crc32c_uses_intrinsics) && !defined (__i386__)
 static_always_inline
 u32 lb_hash_hash(u64 k0, u64 k1, u64 k2, u64 k3, u64 k4)
 {
   u64 val = 0;
-  val = _mm_crc32_u64(val, k0);
-  val = _mm_crc32_u64(val, k1);
-  val = _mm_crc32_u64(val, k2);
-  val = _mm_crc32_u64(val, k3);
-  val = _mm_crc32_u64(val, k4);
+  val = crc32_u64(val, k0);
+  val = crc32_u64(val, k1);
+  val = crc32_u64(val, k2);
+  val = crc32_u64(val, k3);
+  val = crc32_u64(val, k4);
   return (u32) val;
 }
 #else
diff --git a/src/vnet/bfd/bfd_main.c b/src/vnet/bfd/bfd_main.c
index 5d1c5404333..668a44e89e5 100644
--- a/src/vnet/bfd/bfd_main.c
+++ b/src/vnet/bfd/bfd_main.c
@@ -39,10 +39,10 @@ static u64
 bfd_calc_echo_checksum (u32 discriminator, u64 expire_time, u32 secret)
 {
   u64 checksum = 0;
-#if __SSE4_2__ && !defined (__i386__)
-  checksum = _mm_crc32_u64 (0, discriminator);
-  checksum = _mm_crc32_u64 (checksum, expire_time);
-  checksum = _mm_crc32_u64 (checksum, secret);
+#if defined(clib_crc32c_uses_intrinsics) && !defined (__i386__)
+  checksum = crc32_u64 (0, discriminator);
+  checksum = crc32_u64 (checksum, expire_time);
+  checksum = crc32_u64 (checksum, secret);
 #else
   checksum = clib_xxhash (discriminator ^ expire_time ^ secret);
 #endif
diff --git a/src/vppinfra/crc32.h b/src/vppinfra/crc32.h
index 242278ff241..340b539558e 100644
--- a/src/vppinfra/crc32.h
+++ b/src/vppinfra/crc32.h
@@ -22,6 +22,8 @@
 #define clib_crc32c_uses_intrinsics
 #include <x86intrin.h>

+#define crc32_u64 _mm_crc32_u64
+
 static_always_inline u32
 clib_crc32c (u8 * s, int len)
 {
@@ -52,6 +54,9 @@

 #define clib_crc32c_uses_intrinsics
 #include <arm_acle.h>
+
+#define crc32_u64 __crc32cd
+
 static_always_inline u32
 clib_crc32c (u8 * s, int len)
 {
diff --git a/src/vppinfra/cuckoo_8_8.h b/src/vppinfra/cuckoo_8_8.h
index 608cb0e943c..6fe334da32d 100644
--- a/src/vppinfra/cuckoo_8_8.h
+++ b/src/vppinfra/cuckoo_8_8.h
@@ -91,8 +91,8 @@ format_cuckoo_kvp_8_8 (u8 * s, va_list * args)
 always_inline u64
 clib_cuckoo_hash_8_8 (clib_cuckoo_kv_8_8_t * v)
 {
-#if __SSE4_2__ && !defined (__i386__)
-  return _mm_crc32_u64 (0, v->key);
+#if defined(clib_crc32c_uses_intrinsics) && !defined (__i386__)
+  return crc32_u64 (0, v->key);
 #else
   return clib_xxhash (v->key);
 #endif
diff --git a/test/test_lb.py b/test/test_lb.py
index ab9a209638a..e653b60b0ab 100644
--- a/test/test_lb.py
+++ b/test/test_lb.py
@@ -142,7 +142,7 @@ class TestLB(VppTestCase):
         # is not completly biased.
         for asid in self.ass:
             if load[asid] < len(self.packets) / (len(self.ass) * 2):
-                self.log(
+                self.logger.error(
                     "ASS is not balanced: load[%d] = %d" % (asid, load[asid]))
                 raise Exception("Load Balancer algorithm is biased")

--
cgit 1.2.3-korg
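
The sketch below is a hypothetical, self-contained illustration of the
pattern this patch establishes in src/vppinfra/crc32.h and lbhash.h: map a
single crc32_u64() name onto the per-architecture intrinsic (_mm_crc32_u64
on x86_64 with SSE4.2, __crc32cd on AArch64 with the CRC32 extension) and
build a multi-key hash on top of it. It is not part of the patch: it uses
stdint types instead of vppinfra's u32/u64, guards on the compiler feature
macros directly, and a simple software mixer stands in for VPP's
clib_xxhash() fallback.

/* Hypothetical standalone sketch of the crc32_u64 wrapper pattern.
 * Build with -msse4.2 (x86_64) or -march=armv8-a+crc (aarch64) to take
 * the intrinsic path; any other build uses the software mixer below. */
#include <stdint.h>
#include <stdio.h>

#if defined(__x86_64__) && defined(__SSE4_2__)
#include <x86intrin.h>
#define crc32_u64 _mm_crc32_u64        /* SSE4.2 CRC32C instruction */
#define HAVE_CRC32_INTRINSICS 1
#elif defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
#include <arm_acle.h>
#define crc32_u64 __crc32cd            /* ARMv8 CRC32C instruction */
#define HAVE_CRC32_INTRINSICS 1
#endif

/* Multi-key hash in the spirit of lb_hash_hash() from lbhash.h. */
static uint32_t
example_hash (uint64_t k0, uint64_t k1, uint64_t k2, uint64_t k3, uint64_t k4)
{
#ifdef HAVE_CRC32_INTRINSICS
  uint64_t val = 0;
  val = crc32_u64 (val, k0);
  val = crc32_u64 (val, k1);
  val = crc32_u64 (val, k2);
  val = crc32_u64 (val, k3);
  val = crc32_u64 (val, k4);
  return (uint32_t) val;
#else
  /* Portable fallback; VPP itself falls back to clib_xxhash() here. */
  uint64_t x = k0 ^ k1 ^ k2 ^ k3 ^ k4;
  x ^= x >> 33;
  x *= 0xff51afd7ed558ccdULL;
  x ^= x >> 33;
  return (uint32_t) x;
#endif
}

int
main (void)
{
  printf ("hash = 0x%08x\n", (unsigned) example_hash (1, 2, 3, 4, 5));
  return 0;
}

Inside VPP none of this scaffolding is needed: after this patch, including
<vppinfra/crc32.h> provides crc32_u64 and clib_crc32c_uses_intrinsics on
both x86_64 and AArch64.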