about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorDave Barach <dave@barachs.net>2019-10-09 12:57:13 -0400
committerDamjan Marion <dmarion@me.com>2019-10-11 12:30:58 +0000
commit7e2cea3d26701ff1d80fda7d8ca907890e3e7baa (patch)
tree49dfbace1a56e09ef46402573090f9880fa57f2f
parent8563cb389a7e8d6d4e042e146c0d94b8af98ca7a (diff)
vppinfra: fix page boundary crossing bug in hash_memory64
Fix a day-1 bug, possibly dating back as far as 2002. The zap64() game involves fetching 8 byte chunks, and clearing octets not to be included in the key. That's fine *unless* the 8-byte fetch happens to cross a page boundary into unmapped or no-access space.

Type: fix
Signed-off-by: Dave Barach <dave@barachs.net>
Change-Id: I4607e9840032257c96ba7387f86c931c0921749d
-rw-r--r--src/plugins/unittest/util_test.c62
-rw-r--r--src/vppinfra/hash.c51
2 files changed, 109 insertions, 4 deletions
diff --git a/src/plugins/unittest/util_test.c b/src/plugins/unittest/util_test.c
index d2f2715d763..67fe0093ab8 100644
--- a/src/plugins/unittest/util_test.c
+++ b/src/plugins/unittest/util_test.c
@@ -14,6 +14,7 @@
*/
#include <vlib/vlib.h>
+#include <sys/mman.h>
static clib_error_t *
test_crash_command_fn (vlib_main_t * vm,
@@ -45,6 +46,67 @@ VLIB_CLI_COMMAND (test_crash_command, static) =
};
/* *INDENT-ON* */
+/*
+ * CLI test: verify hash_memory() yields the same digest for a key placed
+ * flush against a PROT_NONE page as for one placed well inside a page.
+ */
+static clib_error_t *
+test_hash_command_fn (vlib_main_t * vm,
+                      unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+  uword hash1, hash2;
+  u8 *baseaddr;
+  u8 *key_loc;
+
+  baseaddr = mmap (NULL, 8192, PROT_READ | PROT_WRITE,
+                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0 /* offset */ );
+
+  /* mmap reports failure with MAP_FAILED ((void *) -1), never 0 */
+  if (baseaddr == MAP_FAILED)
+    {
+      clib_unix_warning ("mmap");
+      return 0;
+    }
+
+  /* Make the second 4k page inaccessible to catch boundary over-reads */
+  if (mprotect (baseaddr + (4 << 10), (4 << 10), PROT_NONE) < 0)
+    {
+      clib_unix_warning ("mprotect");
+      (void) munmap (baseaddr, 8192);
+      return 0;
+    }
+
+  /* 4-octet key ending exactly at the no-access page boundary */
+  key_loc = baseaddr + (4 << 10) - 4;
+  key_loc[0] = 0xde;
+  key_loc[1] = 0xad;
+  key_loc[2] = 0xbe;
+  key_loc[3] = 0xef;
+
+  hash1 = hash_memory (key_loc, 4, 0ULL);
+
+  vlib_cli_output (vm, "hash1 is %llx", hash1);
+
+  /* Same key, nowhere near a page boundary */
+  key_loc = baseaddr;
+
+  key_loc[0] = 0xde;
+  key_loc[1] = 0xad;
+  key_loc[2] = 0xbe;
+  key_loc[3] = 0xef;
+
+  hash2 = hash_memory (key_loc, 4, 0ULL);
+
+  vlib_cli_output (vm, "hash2 is %llx", hash2);
+
+  if (hash1 == hash2)
+    vlib_cli_output (vm, "PASS...");
+  else
+    vlib_cli_output (vm, "FAIL...");
+
+  /* Don't leak the test mapping on repeated invocations */
+  (void) munmap (baseaddr, 8192);
+  return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (test_hash_command, static) =
+{
+ .path = "test hash_memory",
+ .short_help = "page boundary crossing test",
+ .function = test_hash_command_fn,
+};
+/* *INDENT-ON* */
+
/*
* fd.io coding-style-patch-verification: ON
*
diff --git a/src/vppinfra/hash.c b/src/vppinfra/hash.c
index eae79d48592..b6f0901dd68 100644
--- a/src/vppinfra/hash.c
+++ b/src/vppinfra/hash.c
@@ -103,14 +103,32 @@ zap64 (u64 x, word n)
* Therefore all the 8 Bytes of the u64 are systematically read, which
* rightfully causes address-sanitizer to raise an error on smaller inputs.
*
- * However the invalid Bytes are discarded within zap64(), whicj is why
+ * However the invalid Bytes are discarded within zap64(), which is why
* this can be silenced safely.
+ *
+ * The above is true *unless* the extra bytes cross a page boundary
+ * into unmapped or no-access space, hence the boundary crossing check.
*/
static inline u64 __attribute__ ((no_sanitize_address))
hash_memory64 (void *p, word n_bytes, u64 state)
{
u64 *q = p;
u64 a, b, c, n;
+ int page_boundary_crossing;
+ u64 start_addr, end_addr;
+ union
+ {
+ u8 as_u8[8];
+ u64 as_u64;
+ } tmp;
+
+ /*
+ * If the request crosses a 4k boundary, it's not OK to assume
+ * that the zap64 game is safe. 4k is the minimum known page size.
+ */
+ start_addr = (u64) p;
+ end_addr = start_addr + n_bytes + 7;
+ page_boundary_crossing = (start_addr >> 12) != (end_addr >> 12);
a = b = 0x9e3779b97f4a7c13LL;
c = state;
@@ -133,18 +151,43 @@ hash_memory64 (void *p, word n_bytes, u64 state)
a += clib_mem_unaligned (q + 0, u64);
b += clib_mem_unaligned (q + 1, u64);
if (n % sizeof (u64))
- c += zap64 (clib_mem_unaligned (q + 2, u64), n % sizeof (u64)) << 8;
+ {
+ if (PREDICT_TRUE (page_boundary_crossing == 0))
+ c +=
+ zap64 (clib_mem_unaligned (q + 2, u64), n % sizeof (u64)) << 8;
+ else
+ {
+ clib_memcpy_fast (tmp.as_u8, q + 2, n % sizeof (u64));
+ c += zap64 (tmp.as_u64, n % sizeof (u64)) << 8;
+ }
+ }
break;
case 1:
a += clib_mem_unaligned (q + 0, u64);
if (n % sizeof (u64))
- b += zap64 (clib_mem_unaligned (q + 1, u64), n % sizeof (u64));
+ {
+ if (PREDICT_TRUE (page_boundary_crossing == 0))
+ b += zap64 (clib_mem_unaligned (q + 1, u64), n % sizeof (u64));
+ else
+ {
+ clib_memcpy_fast (tmp.as_u8, q + 1, n % sizeof (u64));
+ b += zap64 (tmp.as_u64, n % sizeof (u64));
+ }
+ }
break;
case 0:
if (n % sizeof (u64))
- a += zap64 (clib_mem_unaligned (q + 0, u64), n % sizeof (u64));
+ {
+ if (PREDICT_TRUE (page_boundary_crossing == 0))
+ a += zap64 (clib_mem_unaligned (q + 0, u64), n % sizeof (u64));
+ else
+ {
+ clib_memcpy_fast (tmp.as_u8, q, n % sizeof (u64));
+ a += zap64 (tmp.as_u64, n % sizeof (u64));
+ }
+ }
break;
}