Diffstat (limited to 'src/vppinfra')
-rw-r--r--  src/vppinfra/CMakeLists.txt  |   1
-rw-r--r--  src/vppinfra/dlmalloc.c      |  23
-rw-r--r--  src/vppinfra/hash.c          |  16
-rw-r--r--  src/vppinfra/mem.h           |  43
-rw-r--r--  src/vppinfra/mem_dlmalloc.c  |   2
-rw-r--r--  src/vppinfra/pool.h          |  13
-rw-r--r--  src/vppinfra/sanitizer.h     |  62
-rw-r--r--  src/vppinfra/vec.c           |  14
-rw-r--r--  src/vppinfra/vec.h           |   3
-rw-r--r--  src/vppinfra/vec_bootstrap.h |   1
10 files changed, 145 insertions(+), 33 deletions(-)
diff --git a/src/vppinfra/CMakeLists.txt b/src/vppinfra/CMakeLists.txt
index accce6d004f..1c234cce234 100644
--- a/src/vppinfra/CMakeLists.txt
+++ b/src/vppinfra/CMakeLists.txt
@@ -88,6 +88,7 @@ set(VPPINFRA_SRCS
)
set(VPPINFRA_HEADERS
+ sanitizer.h
bihash_16_8.h
bihash_24_8.h
bihash_40_8.h
diff --git a/src/vppinfra/dlmalloc.c b/src/vppinfra/dlmalloc.c
index 524c57b210b..451666e4c9c 100644
--- a/src/vppinfra/dlmalloc.c
+++ b/src/vppinfra/dlmalloc.c
@@ -6,6 +6,7 @@
*/
#include <vppinfra/dlmalloc.h>
+#include <vppinfra/sanitizer.h>
/*------------------------------ internal #includes ---------------------- */
@@ -459,6 +460,7 @@ static FORCEINLINE void x86_clear_lock(int* sl) {
#if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0
/* Plain spin locks use single word (embedded in malloc_states) */
+CLIB_NOSANITIZE_ADDR
static int spin_acquire_lock(int *sl) {
int spins = 0;
while (*(volatile int *)sl != 0 || CAS_LOCK(sl)) {
@@ -1284,6 +1286,7 @@ static struct malloc_state _gm_;
((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
/* Return segment holding given address */
+CLIB_NOSANITIZE_ADDR
static msegmentptr segment_holding(mstate m, char* addr) {
msegmentptr sp = &m->seg;
for (;;) {
@@ -1295,6 +1298,7 @@ static msegmentptr segment_holding(mstate m, char* addr) {
}
/* Return true if segment contains a segment link */
+CLIB_NOSANITIZE_ADDR
static int has_segment_link(mstate m, msegmentptr ss) {
msegmentptr sp = &m->seg;
for (;;) {
@@ -1612,6 +1616,7 @@ static size_t traverse_and_check(mstate m);
#if (FOOTERS && !INSECURE)
/* Check if (alleged) mstate m has expected magic field */
+CLIB_NOSANITIZE_ADDR
static inline int
ok_magic (const mstate m)
{
@@ -2078,6 +2083,7 @@ static void do_check_malloc_state(mstate m) {
/* ----------------------------- statistics ------------------------------ */
#if !NO_MALLINFO
+CLIB_NOSANITIZE_ADDR
static struct dlmallinfo internal_mallinfo(mstate m) {
struct dlmallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
ensure_initialization();
@@ -2842,6 +2848,7 @@ static void* sys_alloc(mstate m, size_t nb) {
/* ----------------------- system deallocation -------------------------- */
/* Unmap and unlink any mmapped segments that don't contain used chunks */
+CLIB_NOSANITIZE_ADDR
static size_t release_unused_segments(mstate m) {
size_t released = 0;
int nsegs = 0;
@@ -2889,6 +2896,7 @@ static size_t release_unused_segments(mstate m) {
return released;
}
+CLIB_NOSANITIZE_ADDR
static int sys_trim(mstate m, size_t pad) {
size_t released = 0;
ensure_initialization();
@@ -2957,6 +2965,7 @@ static int sys_trim(mstate m, size_t pad) {
/* Consolidate and bin a chunk. Differs from exported versions
of free mainly in that the chunk need not be marked as inuse.
*/
+CLIB_NOSANITIZE_ADDR
static void dispose_chunk(mstate m, mchunkptr p, size_t psize) {
mchunkptr next = chunk_plus_offset(p, psize);
if (!pinuse(p)) {
@@ -3028,6 +3037,7 @@ static void dispose_chunk(mstate m, mchunkptr p, size_t psize) {
/* ---------------------------- malloc --------------------------- */
/* allocate a large request from the best fitting chunk in a treebin */
+CLIB_NOSANITIZE_ADDR
static void* tmalloc_large(mstate m, size_t nb) {
tchunkptr v = 0;
size_t rsize = -nb; /* Unsigned negation */
@@ -3099,6 +3109,7 @@ static void* tmalloc_large(mstate m, size_t nb) {
}
/* allocate a small request from the best fitting chunk in a treebin */
+CLIB_NOSANITIZE_ADDR
static void* tmalloc_small(mstate m, size_t nb) {
tchunkptr t, v;
size_t rsize;
@@ -3484,6 +3495,7 @@ static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb,
return newp;
}
+CLIB_NOSANITIZE_ADDR
static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
void* mem = 0;
if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
@@ -4101,6 +4113,7 @@ void mspace_get_address_and_size (mspace msp, char **addrp, size_t *sizep)
*sizep = this_seg->size;
}
+CLIB_NOSANITIZE_ADDR
int mspace_is_heap_object (mspace msp, void *p)
{
msegment *this_seg;
@@ -4155,6 +4168,7 @@ int mspace_enable_disable_trace (mspace msp, int enable)
return (was_enabled);
}
+CLIB_NOSANITIZE_ADDR
int mspace_is_traced (mspace msp)
{
mstate ms = (mstate)msp;
@@ -4164,6 +4178,7 @@ int mspace_is_traced (mspace msp)
return 0;
}
+CLIB_NOSANITIZE_ADDR
void* mspace_get_aligned (mspace msp,
unsigned long n_user_data_bytes,
unsigned long align,
@@ -4264,6 +4279,7 @@ void* mspace_get_aligned (mspace msp,
return (void *) searchp;
}
+CLIB_NOSANITIZE_ADDR
void mspace_put (mspace msp, void *p_arg)
{
char *object_header;
@@ -4287,7 +4303,7 @@ void mspace_put (mspace msp, void *p_arg)
mheap_put_trace ((unsigned long)p_arg, psize);
}
-#if CLIB_DEBUG > 0
+#if CLIB_DEBUG > 0 && !defined(CLIB_SANITIZE_ADDR)
/* Poison the object */
{
size_t psize = mspace_usable_size (object_header);
@@ -4313,6 +4329,7 @@ void mspace_put_no_offset (mspace msp, void *p_arg)
mspace_free (msp, p_arg);
}
+CLIB_NOSANITIZE_ADDR
size_t mspace_usable_size_with_delta (const void *p)
{
size_t usable_size;
@@ -4338,6 +4355,7 @@ size_t mspace_usable_size_with_delta (const void *p)
versions. This is not so nice but better than the alternatives.
*/
+CLIB_NOSANITIZE_ADDR
void* mspace_malloc(mspace msp, size_t bytes) {
mstate ms = (mstate)msp;
if (!ok_magic(ms)) {
@@ -4452,6 +4470,7 @@ void* mspace_malloc(mspace msp, size_t bytes) {
return 0;
}
+CLIB_NOSANITIZE_ADDR
void mspace_free(mspace msp, void* mem) {
if (mem != 0) {
mchunkptr p = mem2chunk(mem);
@@ -4789,6 +4808,7 @@ size_t mspace_set_footprint_limit(mspace msp, size_t bytes) {
}
#if !NO_MALLINFO
+CLIB_NOSANITIZE_ADDR
struct dlmallinfo mspace_mallinfo(mspace msp) {
mstate ms = (mstate)msp;
if (!ok_magic(ms)) {
@@ -4798,6 +4818,7 @@ struct dlmallinfo mspace_mallinfo(mspace msp) {
}
#endif /* NO_MALLINFO */
+CLIB_NOSANITIZE_ADDR
size_t mspace_usable_size(const void* mem) {
if (mem != 0) {
mchunkptr p = mem2chunk(mem);
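
The dlmalloc changes above are all of one kind: internal walkers such as segment_holding(), sys_trim() or tmalloc_large() legitimately touch chunk metadata inside regions the wrappers have poisoned, so they are compiled without instrumentation via CLIB_NOSANITIZE_ADDR. A minimal standalone sketch of that pattern, assuming an ASan-enabled build (-fsanitize=address); all names here are illustrative, not VPP's:

#include <sanitizer/asan_interface.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

__attribute__ ((no_sanitize_address)) static size_t
count_nonzero_bytes (const unsigned char *p, size_t n)
{
  /* The region is poisoned, but this function is not instrumented, so the
     loads below do not trip ASan -- same idea as segment_holding() & co. */
  size_t c = 0;
  for (size_t i = 0; i < n; i++)
    c += (p[i] != 0);
  return c;
}

int
main (void)
{
  unsigned char *buf = malloc (64);
  memset (buf, 0xaa, 64);
  ASAN_POISON_MEMORY_REGION (buf, 64);   /* model allocator-hidden memory */
  printf ("%zu nonzero bytes\n", count_nonzero_bytes (buf, 64));
  ASAN_UNPOISON_MEMORY_REGION (buf, 64); /* restore addressability (tidy) */
  free (buf);
  return 0;
}
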
diff --git a/src/vppinfra/hash.c b/src/vppinfra/hash.c
index b6f0901dd68..6115b0cffd6 100644
--- a/src/vppinfra/hash.c
+++ b/src/vppinfra/hash.c
@@ -109,7 +109,7 @@ zap64 (u64 x, word n)
* The above is true *unless* the extra bytes cross a page boundary
* into unmapped or no-access space, hence the boundary crossing check.
*/
-static inline u64 __attribute__ ((no_sanitize_address))
+static inline u64
hash_memory64 (void *p, word n_bytes, u64 state)
{
u64 *q = p;
@@ -154,7 +154,9 @@ hash_memory64 (void *p, word n_bytes, u64 state)
{
if (PREDICT_TRUE (page_boundary_crossing == 0))
c +=
- zap64 (clib_mem_unaligned (q + 2, u64), n % sizeof (u64)) << 8;
+ zap64 (CLIB_MEM_OVERFLOW
+ (clib_mem_unaligned (q + 2, u64), q + 2, sizeof (u64)),
+ n % sizeof (u64)) << 8;
else
{
clib_memcpy_fast (tmp.as_u8, q + 2, n % sizeof (u64));
@@ -168,7 +170,10 @@ hash_memory64 (void *p, word n_bytes, u64 state)
if (n % sizeof (u64))
{
if (PREDICT_TRUE (page_boundary_crossing == 0))
- b += zap64 (clib_mem_unaligned (q + 1, u64), n % sizeof (u64));
+ b +=
+ zap64 (CLIB_MEM_OVERFLOW
+ (clib_mem_unaligned (q + 1, u64), q + 1, sizeof (u64)),
+ n % sizeof (u64));
else
{
clib_memcpy_fast (tmp.as_u8, q + 1, n % sizeof (u64));
@@ -181,7 +186,10 @@ hash_memory64 (void *p, word n_bytes, u64 state)
if (n % sizeof (u64))
{
if (PREDICT_TRUE (page_boundary_crossing == 0))
- a += zap64 (clib_mem_unaligned (q + 0, u64), n % sizeof (u64));
+ a +=
+ zap64 (CLIB_MEM_OVERFLOW
+ (clib_mem_unaligned (q + 0, u64), q + 0, sizeof (u64)),
+ n % sizeof (u64));
else
{
clib_memcpy_fast (tmp.as_u8, q, n % sizeof (u64));
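
hash_memory64() deliberately loads a full u64 past the end of the key and masks the excess with zap64(); under ASan such a wide load must be bracketed by a temporary unpoison/repoison, which is what the new CLIB_MEM_OVERFLOW macro (defined in sanitizer.h below) does. A hedged standalone rendering of the same dance, little-endian masking and an ASan build assumed:

#include <sanitizer/asan_interface.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Keep only the low n bytes of x, as zap64() does (little-endian form). */
static uint64_t
zap64 (uint64_t x, int n)
{
  return x & (~(uint64_t) 0 >> (8 * (8 - n)));
}

int
main (void)
{
  static char buf[16] __attribute__ ((aligned (8))) = "hash me please!";
  ASAN_POISON_MEMORY_REGION (buf + 11, 5); /* model: only 11 bytes valid */

  /* The CLIB_MEM_OVERFLOW dance: unpoison, wide load, repoison, mask. */
  uint64_t word;
  void *clobbered = __asan_region_is_poisoned (buf + 8, 8);
  if (clobbered)
    ASAN_UNPOISON_MEMORY_REGION (buf + 11, 5);
  memcpy (&word, buf + 8, 8);            /* stands in for the raw u64 load */
  if (clobbered)
    ASAN_POISON_MEMORY_REGION (buf + 11, 5);

  printf ("masked tail: 0x%llx\n", (unsigned long long) zap64 (word, 3));
  return 0;
}
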
diff --git a/src/vppinfra/mem.h b/src/vppinfra/mem.h
index 14b2761c881..d4819b7f989 100644
--- a/src/vppinfra/mem.h
+++ b/src/vppinfra/mem.h
@@ -53,6 +53,7 @@
#include <vppinfra/os.h>
#include <vppinfra/string.h> /* memcpy, clib_memset */
+#include <vppinfra/sanitizer.h>
#define CLIB_MAX_MHEAPS 256
@@ -96,6 +97,17 @@ clib_mem_set_per_cpu_heap (u8 * new_heap)
return old;
}
+always_inline uword
+clib_mem_size_nocheck (void *p)
+{
+#if USE_DLMALLOC == 0
+ mheap_elt_t *e = mheap_user_pointer_to_elt (p);
+ return mheap_elt_data_bytes (e);
+#else
+ return mspace_usable_size_with_delta (p);
+#endif
+}
+
/* Memory allocator which may call os_out_of_memory() if it fails */
always_inline void *
clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
@@ -119,29 +131,21 @@ clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
uword offset;
heap = mheap_get_aligned (heap, size, align, align_offset, &offset);
clib_per_cpu_mheaps[cpu] = heap;
-
- if (offset != ~0)
- {
- p = heap + offset;
- return p;
- }
- else
- {
- if (os_out_of_memory_on_failure)
- os_out_of_memory ();
- return 0;
- }
+ if (PREDICT_TRUE (offset != ~0))
+ p = heap + offset;
#else
p = mspace_get_aligned (heap, size, align, align_offset);
- if (PREDICT_FALSE (p == 0))
+#endif /* USE_DLMALLOC */
+
+ if (PREDICT_FALSE (0 == p))
{
if (os_out_of_memory_on_failure)
os_out_of_memory ();
return 0;
}
+ CLIB_MEM_UNPOISON (p, size);
return p;
-#endif /* USE_DLMALLOC */
}
/* Memory allocator which calls os_out_of_memory() when it fails */
@@ -226,6 +230,8 @@ clib_mem_free (void *p)
/* Make sure object is in the correct heap. */
ASSERT (clib_mem_is_heap_object (p));
+ CLIB_MEM_POISON (p, clib_mem_size_nocheck (p));
+
#if USE_DLMALLOC == 0
mheap_put (heap, (u8 *) p - heap);
#else
@@ -254,20 +260,15 @@ clib_mem_realloc (void *p, uword new_size, uword old_size)
always_inline uword
clib_mem_size (void *p)
{
-#if USE_DLMALLOC == 0
- mheap_elt_t *e = mheap_user_pointer_to_elt (p);
ASSERT (clib_mem_is_heap_object (p));
- return mheap_elt_data_bytes (e);
-#else
- ASSERT (clib_mem_is_heap_object (p));
- return mspace_usable_size_with_delta (p);
-#endif
+ return clib_mem_size_nocheck (p);
}
always_inline void
clib_mem_free_s (void *p)
{
uword size = clib_mem_size (p);
+ CLIB_MEM_UNPOISON (p, size);
memset_s_inline (p, size, 0, size);
clib_mem_free (p);
}
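
The mem.h wrappers carry the core discipline: each allocation is unpoisoned before being handed out, and clib_mem_free() repoisons the object first, while clib_mem_init() (next diff) poisons the entire mspace up front. A toy bump allocator showing the same three steps, standalone and with illustrative names only (no bounds checks, ASan build assumed):

#include <sanitizer/asan_interface.h>
#include <stddef.h>
#include <stdio.h>

static char arena[1024] __attribute__ ((aligned (8)));
static size_t arena_used;

static void *
toy_alloc (size_t n)
{
  n = (n + 7) & ~(size_t) 7;            /* keep 8-byte shadow granularity */
  void *p = arena + arena_used;
  arena_used += n;
  ASAN_UNPOISON_MEMORY_REGION (p, n);   /* like CLIB_MEM_UNPOISON in alloc */
  return p;
}

static void
toy_free (void *p, size_t n)
{
  ASAN_POISON_MEMORY_REGION (p, n);     /* like CLIB_MEM_POISON in free */
}

int
main (void)
{
  ASAN_POISON_MEMORY_REGION (arena, sizeof (arena)); /* clib_mem_init step */
  char *p = toy_alloc (32);
  p[0] = 'x';                           /* fine: unpoisoned */
  toy_free (p, 32);
  /* p[0] = 'y'; would now be reported, like a use-after-free */
  printf ("ok\n");
  return 0;
}
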
diff --git a/src/vppinfra/mem_dlmalloc.c b/src/vppinfra/mem_dlmalloc.c
index 7a53a8bb43b..5628e2714c7 100644
--- a/src/vppinfra/mem_dlmalloc.c
+++ b/src/vppinfra/mem_dlmalloc.c
@@ -19,6 +19,7 @@
#include <vppinfra/lock.h>
#include <vppinfra/hash.h>
#include <vppinfra/elf_clib.h>
+#include <vppinfra/sanitizer.h>
void *clib_per_cpu_mheaps[CLIB_MAX_MHEAPS];
@@ -219,6 +220,7 @@ clib_mem_init (void *memory, uword memory_size)
if (mheap_trace_main.lock == 0)
clib_spinlock_init (&mheap_trace_main.lock);
+ CLIB_MEM_POISON (mspace_least_addr (heap), mspace_footprint (heap));
return heap;
}
diff --git a/src/vppinfra/pool.h b/src/vppinfra/pool.h
index 747a7170800..e6ffe1e874a 100644
--- a/src/vppinfra/pool.h
+++ b/src/vppinfra/pool.h
@@ -205,6 +205,7 @@ do { \
clib_bitmap_andnoti_notrim (_pool_var (p)->free_bitmap, \
_pool_var (i)); \
_vec_len (_pool_var (p)->free_indices) = _pool_var (l) - 1; \
+ CLIB_MEM_UNPOISON((E), sizeof((E)[0])); \
} \
else \
{ \
@@ -285,10 +286,12 @@ do { \
/** Free an object E in pool P. */
#define pool_put(P,E) \
do { \
- pool_header_t * _pool_var (p) = pool_header (P); \
- uword _pool_var (l) = (E) - (P); \
- ASSERT (vec_is_member (P, E)); \
- ASSERT (! pool_is_free (P, E)); \
+ typeof (P) _pool_var(p__) = (P); \
+ typeof (E) _pool_var(e__) = (E); \
+ pool_header_t * _pool_var (p) = pool_header (_pool_var(p__)); \
+ uword _pool_var (l) = _pool_var(e__) - _pool_var(p__); \
+ ASSERT (vec_is_member (_pool_var(p__), _pool_var(e__))); \
+ ASSERT (! pool_is_free (_pool_var(p__), _pool_var(e__))); \
\
/* Add element to free bitmap and to free list. */ \
_pool_var (p)->free_bitmap = \
@@ -305,6 +308,8 @@ do { \
} \
else \
vec_add1 (_pool_var (p)->free_indices, _pool_var (l)); \
+ \
+ CLIB_MEM_POISON(_pool_var(e__), sizeof(_pool_var(e__)[0])); \
} while (0)
/** Free pool element with given index. */
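
pool_put() poisons the element in place (it stays inside the pool vector) and pool_get() unpoisons the recycled slot, so stale pointers into freed slots get reported much like a use-after-free. A minimal sketch with a toy fixed-size pool, ASan build assumed, names illustrative:

#include <sanitizer/asan_interface.h>
#include <stdio.h>

typedef struct { long a, b; } elt_t;
static elt_t pool[8];
static int free_stack[8], n_free;

static elt_t *
toy_pool_get (void)
{
  elt_t *e = &pool[free_stack[--n_free]];
  ASAN_UNPOISON_MEMORY_REGION (e, sizeof (*e)); /* slot is live again */
  return e;
}

static void
toy_pool_put (elt_t *e)
{
  free_stack[n_free++] = (int) (e - pool);
  ASAN_POISON_MEMORY_REGION (e, sizeof (*e));   /* catch dangling users */
}

int
main (void)
{
  for (int i = 7; i >= 0; i--)
    free_stack[n_free++] = i;
  ASAN_POISON_MEMORY_REGION (pool, sizeof (pool));
  elt_t *e = toy_pool_get ();
  e->a = 1;
  toy_pool_put (e);
  /* e->a = 2; would now trigger an ASan report */
  printf ("ok\n");
  return 0;
}
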
diff --git a/src/vppinfra/sanitizer.h b/src/vppinfra/sanitizer.h
new file mode 100644
index 00000000000..d099d3a941f
--- /dev/null
+++ b/src/vppinfra/sanitizer.h
@@ -0,0 +1,62 @@
+#ifndef _included_clib_sanitizer_h
+#define _included_clib_sanitizer_h
+
+#ifdef CLIB_SANITIZE_ADDR
+
+#include <sanitizer/asan_interface.h>
+#include <vppinfra/clib.h>
+
+#define CLIB_NOSANITIZE_ADDR __attribute__((no_sanitize_address))
+#define CLIB_MEM_POISON(a, s) ASAN_POISON_MEMORY_REGION((a), (s))
+#define CLIB_MEM_UNPOISON(a, s) ASAN_UNPOISON_MEMORY_REGION((a), (s))
+
+#define CLIB_MEM_OVERFLOW(f, src, n) \
+ ({ \
+ typeof (f) clib_mem_overflow_ret__; \
+ const void *clib_mem_overflow_src__ = (src); \
+ size_t clib_mem_overflow_n__ = (n); \
+ const void *clib_mem_overflow_start__ = __asan_region_is_poisoned((void *)clib_mem_overflow_src__, clib_mem_overflow_n__); \
+ clib_mem_overflow_n__ -= (size_t)(clib_mem_overflow_start__ - clib_mem_overflow_src__); \
+ if (clib_mem_overflow_start__) \
+ CLIB_MEM_UNPOISON(clib_mem_overflow_start__, clib_mem_overflow_n__); \
+ clib_mem_overflow_ret__ = f; \
+ if (clib_mem_overflow_start__) \
+ CLIB_MEM_POISON(clib_mem_overflow_start__, clib_mem_overflow_n__); \
+ clib_mem_overflow_ret__; \
+ })
+
+#define CLIB_MEM_OVERFLOW_LOAD(f, src) \
+ ({ \
+ typeof(src) clib_mem_overflow_load_src__ = (src); \
+ CLIB_MEM_OVERFLOW(f(clib_mem_overflow_load_src__), clib_mem_overflow_load_src__, sizeof(typeof(f(clib_mem_overflow_load_src__)))); \
+ })
+
+static_always_inline void
+CLIB_MEM_POISON_LEN (void *src, size_t oldlen, size_t newlen)
+{
+ if (oldlen > newlen)
+ CLIB_MEM_POISON (src + newlen, oldlen - newlen);
+ else if (newlen > oldlen)
+ CLIB_MEM_UNPOISON (src + oldlen, newlen - oldlen);
+}
+
+#else /* CLIB_SANITIZE_ADDR */
+
+#define CLIB_NOSANITIZE_ADDR
+#define CLIB_MEM_POISON(a, s) (void)(a)
+#define CLIB_MEM_UNPOISON(a, s) (void)(a)
+#define CLIB_MEM_OVERFLOW(a, b, c) a
+#define CLIB_MEM_OVERFLOW_LOAD(f, src) f(src)
+#define CLIB_MEM_POISON_LEN(a, b, c)
+
+#endif /* CLIB_SANITIZE_ADDR */
+
+#endif /* _included_clib_sanitizer_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
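
Besides the poison/unpoison wrappers, the header's CLIB_MEM_POISON_LEN() helper maps a length change onto shadow state: a shrink poisons the dropped tail, a grow unpoisons the new one, which is exactly what vec_set_len() needs (see vec_bootstrap.h below). A standalone re-rendering under the same ASan-build assumption:

#include <sanitizer/asan_interface.h>
#include <stdio.h>
#include <string.h>

static void
poison_len (char *base, size_t oldlen, size_t newlen)
{
  if (oldlen > newlen)
    ASAN_POISON_MEMORY_REGION (base + newlen, oldlen - newlen);
  else if (newlen > oldlen)
    ASAN_UNPOISON_MEMORY_REGION (base + oldlen, newlen - oldlen);
}

int
main (void)
{
  static char v[64] __attribute__ ((aligned (8)));
  size_t len = 64;
  poison_len (v, len, 16);  /* shrink: bytes 16..63 now off limits */
  len = 16;
  memset (v, 0, len);       /* fine */
  poison_len (v, len, 40);  /* grow: bytes 16..39 usable again */
  len = 40;
  v[39] = 1;
  printf ("ok\n");
  return 0;
}
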
diff --git a/src/vppinfra/vec.c b/src/vppinfra/vec.c
index 32788286a63..16372e9ef22 100644
--- a/src/vppinfra/vec.c
+++ b/src/vppinfra/vec.c
@@ -58,8 +58,10 @@ vec_resize_allocate_memory (void *v,
{
new = clib_mem_alloc_aligned_at_offset (data_bytes, data_align, header_bytes, 1 /* yes, call os_out_of_memory */
);
- data_bytes = clib_mem_size (new);
- clib_memset (new, 0, data_bytes);
+ new_alloc_bytes = clib_mem_size (new);
+ CLIB_MEM_UNPOISON (new + data_bytes, new_alloc_bytes - data_bytes);
+ clib_memset (new, 0, new_alloc_bytes);
+ CLIB_MEM_POISON (new + data_bytes, new_alloc_bytes - data_bytes);
v = new + header_bytes;
_vec_len (v) = length_increment;
return v;
@@ -75,7 +77,10 @@ vec_resize_allocate_memory (void *v,
/* Need to resize? */
if (data_bytes <= old_alloc_bytes)
- return v;
+ {
+ CLIB_MEM_UNPOISON (v, data_bytes);
+ return v;
+ }
new_alloc_bytes = (old_alloc_bytes * 3) / 2;
if (new_alloc_bytes < data_bytes)
@@ -92,6 +97,7 @@ vec_resize_allocate_memory (void *v,
("vec_resize fails, length increment %d, data bytes %d, alignment %d",
length_increment, data_bytes, data_align);
+ CLIB_MEM_UNPOISON (old, old_alloc_bytes);
clib_memcpy_fast (new, old, old_alloc_bytes);
clib_mem_free (old);
@@ -100,7 +106,9 @@ vec_resize_allocate_memory (void *v,
v = new;
/* Zero new memory. */
+ CLIB_MEM_UNPOISON (new + data_bytes, new_alloc_bytes - data_bytes);
memset (v + old_alloc_bytes, 0, new_alloc_bytes - old_alloc_bytes);
+ CLIB_MEM_POISON (new + data_bytes, new_alloc_bytes - data_bytes);
return v + header_bytes;
}
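
The vec.c pattern is subtler: clib_mem_size() may report more usable bytes than were requested, the whole usable area must be zeroed, but only the requested data_bytes should stay addressable so off-the-end vector accesses are still caught. A hedged standalone version, with plain malloc and a made-up usable size standing in for the vppinfra allocator:

#include <sanitizer/asan_interface.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main (void)
{
  size_t data_bytes = 24;
  size_t alloc_bytes = 64;  /* pretend clib_mem_size() reported 64 */
  char *new = malloc (alloc_bytes);

  /* Zero the whole usable allocation, then re-hide the spare tail. */
  ASAN_UNPOISON_MEMORY_REGION (new + data_bytes, alloc_bytes - data_bytes);
  memset (new, 0, alloc_bytes);
  ASAN_POISON_MEMORY_REGION (new + data_bytes, alloc_bytes - data_bytes);

  new[data_bytes - 1] = 1;  /* fine */
  /* new[data_bytes] = 1;   would be reported, even though malloc'd */

  ASAN_UNPOISON_MEMORY_REGION (new + data_bytes, alloc_bytes - data_bytes);
  free (new);
  printf ("ok\n");
  return 0;
}
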
diff --git a/src/vppinfra/vec.h b/src/vppinfra/vec.h
index 461c0de3347..021b2295964 100644
--- a/src/vppinfra/vec.h
+++ b/src/vppinfra/vec.h
@@ -138,6 +138,7 @@ _vec_resize_inline (void *v,
/* Typically we'll not need to resize. */
if (new_data_bytes <= clib_mem_size (p))
{
+ CLIB_MEM_UNPOISON (v, data_bytes);
vh->len += length_increment;
return v;
}
@@ -794,6 +795,7 @@ do { \
if (_v(n) > 0) \
clib_memset ((V) + _v(l) - _v(n), 0, _v(n) * sizeof ((V)[0])); \
_vec_len (V) -= _v(n); \
+ CLIB_MEM_POISON(vec_end(V), _v(n) * sizeof ((V)[0])); \
} while (0)
/** \brief Delete the element at index I
@@ -808,6 +810,7 @@ do { \
if (_vec_del_i < _vec_del_l) \
(v)[_vec_del_i] = (v)[_vec_del_l]; \
_vec_len (v) = _vec_del_l; \
+ CLIB_MEM_POISON(vec_end(v), sizeof ((v)[0])); \
} while (0)
/** \brief Append v2 after v1. Result in v1.
diff --git a/src/vppinfra/vec_bootstrap.h b/src/vppinfra/vec_bootstrap.h
index 5c42e5ea914..fbb01b685ca 100644
--- a/src/vppinfra/vec_bootstrap.h
+++ b/src/vppinfra/vec_bootstrap.h
@@ -164,6 +164,7 @@ vec_aligned_header_end (void *v, uword header_bytes, uword align)
#define vec_set_len(v, l) do { \
ASSERT(v); \
ASSERT((l) <= vec_max_len(v)); \
+ CLIB_MEM_POISON_LEN((void *)(v), _vec_len(v) * sizeof((v)[0]), (l) * sizeof((v)[0])); \
_vec_len(v) = (l); \
} while (0)
#else /* __COVERITY__ */