diff options
author | Gabriel Ganne <gabriel.ganne@enea.com> | 2017-10-24 09:58:45 +0200 |
---|---|---|
committer | Dave Barach <openvpp@barachs.net> | 2017-11-01 10:06:19 +0000 |
commit | 53ae29e0608868be4f6a9cced21c39e72e294d0b (patch) | |
tree | 3bd13676023e9974231eb51e345a9ea6f8ef36fc /src | |
parent | 2e2a0ebf0b3b30a8e8d0e39de5b0fdc3b82ab14c (diff) |
fix clib_mem_unaligned() invalid read
clib_mem_unaligned + zap64 casts its input as u64, computes a mask
according to the input length, and returns the casted masked value.
Therefore all 8 bytes of the u64 are systematically read, and
the invalid ones are discarded.
For example, for a 5-Bytes string, we will do an invalid read of size 3,
even though those 3 Bytes are never used.
This patch proposes to only read what we have at the cost of reading as
a u64 in one call, but that way, we do not trigger an invalid read
error.
Change-Id: I3e0b31c4113d9c8e53aa5fa3d3d396ec80f06a27
Signed-off-by: Gabriel Ganne <gabriel.ganne@enea.com>
Diffstat (limited to 'src')
-rw-r--r-- | src/vppinfra/hash.c | 35 |
1 files changed, 18 insertions, 17 deletions
diff --git a/src/vppinfra/hash.c b/src/vppinfra/hash.c
index 062ad8823e1..b3db9f82b9c 100644
--- a/src/vppinfra/hash.c
+++ b/src/vppinfra/hash.c
@@ -80,20 +80,24 @@ static u8 *hash_format_pair_default (u8 * s, va_list * args);
 #if uword_bits == 64
 
 static inline u64
-zap64 (u64 x, word n)
+get_unaligned_as_u64 (void const *data, int n)
 {
-#define _(n) (((u64) 1 << (u64) (8*(n))) - (u64) 1)
-  static u64 masks_little_endian[] = {
-    0, _(1), _(2), _(3), _(4), _(5), _(6), _(7),
-  };
-  static u64 masks_big_endian[] = {
-    0, ~_(7), ~_(6), ~_(5), ~_(4), ~_(3), ~_(2), ~_(1),
-  };
-#undef _
+  int i;
+  u64 r = 0;
+  u8 const *p = (u8 const *) data;
+
   if (clib_arch_is_big_endian)
-    return x & masks_big_endian[n];
+    {
+      for (i = 0; i < n; i++)
+	r |= ((u64) * (p + i)) << (8 * (7 - i));
+    }
   else
-    return x & masks_little_endian[n];
+    {
+      for (i = 0; i < n; i++)
+	r |= ((u64) * (p + i)) << (8 * i);
+    }
+
+  return r;
 }
 
 static inline u64
@@ -122,19 +126,16 @@ hash_memory64 (void *p, word n_bytes, u64 state)
     case 2:
       a += clib_mem_unaligned (q + 0, u64);
       b += clib_mem_unaligned (q + 1, u64);
-      if (n % sizeof (u64))
-	c += zap64 (clib_mem_unaligned (q + 2, u64), n % sizeof (u64)) << 8;
+      c += get_unaligned_as_u64 (q + 2, n % sizeof (u64)) << 8;
       break;
 
     case 1:
       a += clib_mem_unaligned (q + 0, u64);
-      if (n % sizeof (u64))
-	b += zap64 (clib_mem_unaligned (q + 1, u64), n % sizeof (u64));
+      b += get_unaligned_as_u64 (q + 1, n % sizeof (u64));
       break;
 
     case 0:
-      if (n % sizeof (u64))
-	a += zap64 (clib_mem_unaligned (q + 0, u64), n % sizeof (u64));
+      a += get_unaligned_as_u64 (q + 0, n % sizeof (u64));
       break;
     }