summaryrefslogtreecommitdiffstats
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/vppinfra/hash.c47
1 files changed, 28 insertions, 19 deletions
diff --git a/src/vppinfra/hash.c b/src/vppinfra/hash.c
index b3db9f82b9c..121fa385705 100644
--- a/src/vppinfra/hash.c
+++ b/src/vppinfra/hash.c
@@ -80,27 +80,33 @@ static u8 *hash_format_pair_default (u8 * s, va_list * args);
#if uword_bits == 64
static inline u64
-get_unaligned_as_u64 (void const *data, int n)
+zap64 (u64 x, word n)
{
- int i;
- u64 r = 0;
- u8 const *p = (u8 const *) data;
-
+#define _(n) (((u64) 1 << (u64) (8*(n))) - (u64) 1)
+ static u64 masks_little_endian[] = {
+ 0, _(1), _(2), _(3), _(4), _(5), _(6), _(7),
+ };
+ static u64 masks_big_endian[] = {
+ 0, ~_(7), ~_(6), ~_(5), ~_(4), ~_(3), ~_(2), ~_(1),
+ };
+#undef _
if (clib_arch_is_big_endian)
- {
- for (i = 0; i < n; i++)
- r |= ((u64) ((*(p + i)) << (u8) (1 << (8 - i))));
- }
+ return x & masks_big_endian[n];
else
- {
- for (i = 0; i < n; i++)
- r |= ((u64) ((*(p + i)) << (u8) (1 << i)));
- }
-
- return r;
+ return x & masks_little_endian[n];
}
-static inline u64
+/**
+ * make address-sanitizer skip this:
+ * clib_mem_unaligned + zap64 casts its input as u64, computes a mask
+ * according to the input length, and returns the casted masked value.
+ * Therefore all 8 bytes of the u64 are systematically read, which
+ * rightfully causes address-sanitizer to raise an error on smaller inputs.
+ *
+ * However the invalid bytes are discarded within zap64(), which is why
+ * this can be silenced safely.
+ */
+static inline u64 __attribute__ ((no_sanitize_address))
hash_memory64 (void *p, word n_bytes, u64 state)
{
u64 *q = p;
@@ -126,16 +132,19 @@ hash_memory64 (void *p, word n_bytes, u64 state)
case 2:
a += clib_mem_unaligned (q + 0, u64);
b += clib_mem_unaligned (q + 1, u64);
- c += get_unaligned_as_u64 (q + 2, n % sizeof (u64)) << 8;
+ if (n % sizeof (u64))
+ c += zap64 (clib_mem_unaligned (q + 2, u64), n % sizeof (u64)) << 8;
break;
case 1:
a += clib_mem_unaligned (q + 0, u64);
- b += get_unaligned_as_u64 (q + 1, n % sizeof (u64));
+ if (n % sizeof (u64))
+ b += zap64 (clib_mem_unaligned (q + 1, u64), n % sizeof (u64));
break;
case 0:
- a += get_unaligned_as_u64 (q + 0, n % sizeof (u64));
+ if (n % sizeof (u64))
+ a += zap64 (clib_mem_unaligned (q + 0, u64), n % sizeof (u64));
break;
}