author     Benoît Ganne <bganne@cisco.com>  2021-09-08 15:36:56 +0200
committer  Damjan Marion <dmarion@me.com>  2021-10-07 15:36:04 +0000
commit     9685624a50613344ad6cc1405fa561ea86888f06
tree       e6c473c77117ea8f775743d489c4b8d39bc5cc22 /src/vppinfra/hash.c
parent     9888fdad4170627b0b26c902cc22d9df23ba56c2
vppinfra: asan: improve overflow semantic
Type: improvement
Change-Id: Ia63899b82e34f179f9efa921e4630b598f2a86cb
Signed-off-by: Benoît Ganne <bganne@cisco.com>
Diffstat (limited to 'src/vppinfra/hash.c')
-rw-r--r--  src/vppinfra/hash.c  28
1 file changed, 16 insertions, 12 deletions
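
The hunks below replace the expression-wrapping macro CLIB_MEM_OVERFLOW (expr, ptr, len) with a CLIB_MEM_OVERFLOW_PUSH (ptr, len) / CLIB_MEM_OVERFLOW_POP () pair that brackets the intentional out-of-bounds read. A minimal sketch of the new usage pattern, written as if it sat next to hash_memory64 () in hash.c so the vppinfra types and macros are already in scope; the helper name read_tail_u64 and its masking are illustrative, not part of the change:

/* Sketch: read the trailing n_tail bytes (1..7) of a buffer as a u64
 * without tripping AddressSanitizer, mirroring the pattern in the hunks
 * below.  Only CLIB_MEM_OVERFLOW_PUSH / CLIB_MEM_OVERFLOW_POP and
 * clib_mem_unaligned () are taken from the diff itself. */
static inline u64
read_tail_u64 (u8 * q, uword n_tail)
{
  u64 v;
  /* Mark the (possibly overflowing) 8-byte window starting at q as
   * intentionally readable for the duration of the access... */
  CLIB_MEM_OVERFLOW_PUSH (q, sizeof (u64));
  v = clib_mem_unaligned (q, u64);
  /* ...then restore the previous sanitizer state. */
  CLIB_MEM_OVERFLOW_POP ();
  /* Keep only the valid low-order bytes (little-endian analogue of the
   * zap64 () masking that hash_memory64 () applies). */
  return v & (((u64) 1 << (8 * n_tail)) - 1);
}

Compared with the old CLIB_MEM_OVERFLOW (expr, ptr, len) form, the push/pop pair scopes the sanitizer exemption over a whole statement block rather than a single expression, which appears to be the "improve overflow semantic" the subject line refers to.
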
diff --git a/src/vppinfra/hash.c b/src/vppinfra/hash.c
index fc6c4518048..d40083fdb5b 100644
--- a/src/vppinfra/hash.c
+++ b/src/vppinfra/hash.c
@@ -153,10 +153,12 @@ hash_memory64 (void *p, word n_bytes, u64 state)
if (n % sizeof (u64))
{
if (PREDICT_TRUE (page_boundary_crossing == 0))
- c +=
- zap64 (CLIB_MEM_OVERFLOW
- (clib_mem_unaligned (q + 2, u64), q + 2, sizeof (u64)),
- n % sizeof (u64)) << 8;
+ {
+ CLIB_MEM_OVERFLOW_PUSH (q + 2, sizeof (u64));
+ c += zap64 (clib_mem_unaligned (q + 2, u64), n % sizeof (u64))
+ << 8;
+ CLIB_MEM_OVERFLOW_POP ();
+ }
else
{
clib_memcpy_fast (tmp.as_u8, q + 2, n % sizeof (u64));
@@ -170,10 +172,11 @@ hash_memory64 (void *p, word n_bytes, u64 state)
if (n % sizeof (u64))
{
if (PREDICT_TRUE (page_boundary_crossing == 0))
- b +=
- zap64 (CLIB_MEM_OVERFLOW
- (clib_mem_unaligned (q + 1, u64), q + 1, sizeof (u64)),
- n % sizeof (u64));
+ {
+ CLIB_MEM_OVERFLOW_PUSH (q + 1, sizeof (u64));
+ b += zap64 (clib_mem_unaligned (q + 1, u64), n % sizeof (u64));
+ CLIB_MEM_OVERFLOW_POP ();
+ }
else
{
clib_memcpy_fast (tmp.as_u8, q + 1, n % sizeof (u64));
@@ -186,10 +189,11 @@ hash_memory64 (void *p, word n_bytes, u64 state)
if (n % sizeof (u64))
{
if (PREDICT_TRUE (page_boundary_crossing == 0))
- a +=
- zap64 (CLIB_MEM_OVERFLOW
- (clib_mem_unaligned (q + 0, u64), q + 0, sizeof (u64)),
- n % sizeof (u64));
+ {
+ CLIB_MEM_OVERFLOW_PUSH (q + 0, sizeof (u64));
+ a += zap64 (clib_mem_unaligned (q + 0, u64), n % sizeof (u64));
+ CLIB_MEM_OVERFLOW_POP ();
+ }
else
{
clib_memcpy_fast (tmp.as_u8, q, n % sizeof (u64));