From 8e66b9bf4ba90279631e6a0e8ccc2eab5f9156c2 Mon Sep 17 00:00:00 2001
From: Gabriel Ganne
Date: Thu, 14 Dec 2017 16:20:37 +0100
Subject: Use crc32 wrapper (VPP-1086)

This allows ARM platforms to also take advantage of CRC32 hardware
acceleration.

* add a wrapper for crc32_u64; it is the only intrinsic actually used.
  Using it instead of a call to clib_crc32c() makes it easier to build
  symmetrical hash functions.
* replace the #ifdef on SSE4 with a test on clib_crc32c_uses_intrinsics.
  Note: keep the test on i386.
* fix a typo in the lb test log.

Change-Id: I03a0897b70f6c1717e6901d93cf0fe024d5facb5
Signed-off-by: Gabriel Ganne
---
 src/plugins/lb/lbhash.h | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/src/plugins/lb/lbhash.h b/src/plugins/lb/lbhash.h
index c514fb57d75..10d3beab13b 100644
--- a/src/plugins/lb/lbhash.h
+++ b/src/plugins/lb/lbhash.h
@@ -101,16 +101,16 @@ void lb_hash_free(lb_hash_t *h)
   vec_free(mem);
 }
 
-#if __SSE4_2__ && !defined (__i386__)
+#if defined(clib_crc32c_uses_intrinsics) && !defined (__i386__)
 static_always_inline
 u32 lb_hash_hash(u64 k0, u64 k1, u64 k2, u64 k3, u64 k4)
 {
   u64 val = 0;
-  val = _mm_crc32_u64(val, k0);
-  val = _mm_crc32_u64(val, k1);
-  val = _mm_crc32_u64(val, k2);
-  val = _mm_crc32_u64(val, k3);
-  val = _mm_crc32_u64(val, k4);
+  val = crc32_u64(val, k0);
+  val = crc32_u64(val, k1);
+  val = crc32_u64(val, k2);
+  val = crc32_u64(val, k3);
+  val = crc32_u64(val, k4);
   return (u32) val;
 }
 #else
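
For context, below is a minimal sketch of the kind of per-architecture
crc32_u64() wrapper this patch relies on. It is an illustration under
assumptions, not the actual vppinfra code: the real wrapper and the
clib_crc32c_uses_intrinsics macro are expected to be defined in VPP's
vppinfra CRC32 header, and the exact guards and names there may differ.

/* Illustrative sketch only -- assumed names and layout, not the vppinfra
 * source.  The idea is that crc32_u64() maps onto the CRC32-C intrinsic of
 * whichever architecture is being built, so callers such as lb_hash_hash()
 * stay architecture independent. */
#include <stdint.h>

#if defined(__SSE4_2__) && !defined(__i386__)
#include <x86intrin.h>
#define clib_crc32c_uses_intrinsics
static inline uint64_t
crc32_u64 (uint64_t crc, uint64_t data)
{
  /* x86_64: SSE4.2 CRC32 instruction */
  return _mm_crc32_u64 (crc, data);
}

#elif defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
#include <arm_acle.h>
#define clib_crc32c_uses_intrinsics
static inline uint64_t
crc32_u64 (uint64_t crc, uint64_t data)
{
  /* AArch64: CRC32C instruction exposed through the ACLE intrinsic */
  return __crc32cd ((uint32_t) crc, data);
}
#endif

With such a wrapper in place, lb_hash_hash() compiles unchanged on both
x86_64 and AArch64, which is what the new
#if defined(clib_crc32c_uses_intrinsics) guard in the hunk above selects for.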