| author | Benoît Ganne <bganne@cisco.com> | 2019-04-15 15:28:21 +0200 |
|---|---|---|
| committer | Damjan Marion <dmarion@me.com> | 2019-11-27 10:50:28 +0000 |
| commit | 9fb6d40eb3d4a2da8f45187de773498b784596e6 (patch) | |
| tree | e785ebfbe73b847146debb2dae4a4304c51aa9cf /src/vppinfra/hash.c | |
| parent | 99fbf0574f099f09b7b46dcabe5bb50d78091dce (diff) | |
misc: add address sanitizer heap instrumentation
Introduce AddressSanitizer support: https://github.com/google/sanitizers/
This starts with heap instrumentation. vlib_buffer, bihash and stack
instrumentation should follow.
Type: feature
Change-Id: I7f20e235b2f79db72efd0e756f22c75f717a9884
Signed-off-by: Benoît Ganne <bganne@cisco.com>
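Background for readers unfamiliar with AddressSanitizer: heap instrumentation of a custom allocator is built on ASan's manual poisoning interface. The sketch below is not VPP code; it only shows the raw `<sanitizer/asan_interface.h>` calls (compile and link with `-fsanitize=address`) that such instrumentation typically wraps.

```c
/* Minimal sketch (not VPP code): ASan's manual poisoning interface.
 * A custom allocator can poison bytes it has not handed out and
 * unpoison them again once they become valid for the application. */
#include <sanitizer/asan_interface.h>
#include <stdlib.h>
#include <string.h>

int
main (void)
{
  char *buf = malloc (64);

  /* Mark the second half of the buffer as off-limits to the application. */
  __asan_poison_memory_region (buf + 32, 32);

  memset (buf, 0, 32);      /* fine: these bytes are unpoisoned */
  /* buf[40] = 1; */        /* would be reported as a use of poisoned memory */

  /* Make the bytes addressable again before reuse or free. */
  __asan_unpoison_memory_region (buf + 32, 32);
  free (buf);
  return 0;
}
```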
Diffstat (limited to 'src/vppinfra/hash.c')
-rw-r--r-- | src/vppinfra/hash.c | 16 |
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/src/vppinfra/hash.c b/src/vppinfra/hash.c
index b6f0901dd68..6115b0cffd6 100644
--- a/src/vppinfra/hash.c
+++ b/src/vppinfra/hash.c
@@ -109,7 +109,7 @@ zap64 (u64 x, word n)
  * The above is true *unless* the extra bytes cross a page boundary
  * into unmapped or no-access space, hence the boundary crossing check.
  */
-static inline u64 __attribute__ ((no_sanitize_address))
+static inline u64
 hash_memory64 (void *p, word n_bytes, u64 state)
 {
   u64 *q = p;
@@ -154,7 +154,9 @@ hash_memory64 (void *p, word n_bytes, u64 state)
 	{
 	  if (PREDICT_TRUE (page_boundary_crossing == 0))
 	    c +=
-	      zap64 (clib_mem_unaligned (q + 2, u64), n % sizeof (u64)) << 8;
+	      zap64 (CLIB_MEM_OVERFLOW
+		     (clib_mem_unaligned (q + 2, u64), q + 2, sizeof (u64)),
+		     n % sizeof (u64)) << 8;
 	  else
 	    {
 	      clib_memcpy_fast (tmp.as_u8, q + 2, n % sizeof (u64));
@@ -168,7 +170,10 @@ hash_memory64 (void *p, word n_bytes, u64 state)
       if (n % sizeof (u64))
 	{
 	  if (PREDICT_TRUE (page_boundary_crossing == 0))
-	    b += zap64 (clib_mem_unaligned (q + 1, u64), n % sizeof (u64));
+	    b +=
+	      zap64 (CLIB_MEM_OVERFLOW
+		     (clib_mem_unaligned (q + 1, u64), q + 1, sizeof (u64)),
+		     n % sizeof (u64));
 	  else
 	    {
 	      clib_memcpy_fast (tmp.as_u8, q + 1, n % sizeof (u64));
@@ -181,7 +186,10 @@ hash_memory64 (void *p, word n_bytes, u64 state)
       if (n % sizeof (u64))
 	{
 	  if (PREDICT_TRUE (page_boundary_crossing == 0))
-	    a += zap64 (clib_mem_unaligned (q + 0, u64), n % sizeof (u64));
+	    a +=
+	      zap64 (CLIB_MEM_OVERFLOW
+		     (clib_mem_unaligned (q + 0, u64), q + 0, sizeof (u64)),
+		     n % sizeof (u64));
 	  else
 	    {
 	      clib_memcpy_fast (tmp.as_u8, q, n % sizeof (u64));
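The hunks above drop the function-wide `no_sanitize_address` attribute and instead annotate only the deliberate tail over-reads: `hash_memory64` loads a full u64 even when fewer bytes remain and lets `zap64` mask the excess, which ASan would otherwise report, while the rest of the function stays instrumented. The real definition of `CLIB_MEM_OVERFLOW` lives elsewhere in vppinfra and may differ; the sketch below (the `MEM_OVERFLOW_SKETCH` name and its exact shape are my assumption) only illustrates the general pattern of temporarily unpoisoning the accessed range around a single expression.

```c
/* Hedged sketch of an "intentional over-read" wrapper; the actual
 * CLIB_MEM_OVERFLOW macro in vppinfra may be implemented differently.
 * Relies on GCC/Clang statement expressions and __typeof__. */
#include <sanitizer/asan_interface.h>
#include <stddef.h>
#include <stdint.h>

/* Evaluate `expr` while the `len` bytes at `ptr` are addressable,
 * then restore poisoning for whatever part was poisoned before. */
#define MEM_OVERFLOW_SKETCH(expr, ptr, len)                                 \
  ({                                                                        \
    void *ptr__ = (void *) (ptr);                                           \
    size_t len__ = (len);                                                   \
    /* First poisoned byte in [ptr__, ptr__ + len__), or NULL if none. */   \
    void *poisoned__ = __asan_region_is_poisoned (ptr__, len__);            \
    __asan_unpoison_memory_region (ptr__, len__);                           \
    __typeof__ (expr) ret__ = (expr);                                       \
    if (poisoned__)                                                         \
      __asan_poison_memory_region (poisoned__,                              \
                                   len__ - ((uint8_t *) poisoned__          \
                                            - (uint8_t *) ptr__));          \
    ret__;                                                                  \
  })
```

With a wrapper of this shape, a tail load would read as `zap64 (MEM_OVERFLOW_SKETCH (clib_mem_unaligned (q, u64), q, sizeof (u64)), n % sizeof (u64))`, mirroring the annotated hunks above.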