Diffstat (limited to 'src/vppinfra/dlmalloc.c')
 src/vppinfra/dlmalloc.c | 23 ++++++++++++++++++++++-
 1 file changed, 22 insertions(+), 1 deletion(-)
diff --git a/src/vppinfra/dlmalloc.c b/src/vppinfra/dlmalloc.c
index 524c57b210b..451666e4c9c 100644
--- a/src/vppinfra/dlmalloc.c
+++ b/src/vppinfra/dlmalloc.c
@@ -6,6 +6,7 @@
*/
#include <vppinfra/dlmalloc.h>
+#include <vppinfra/sanitizer.h>
/*------------------------------ internal #includes ---------------------- */
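The new include pulls in vppinfra's AddressSanitizer support macros. As a hedged sketch, not the verbatim header, CLIB_NOSANITIZE_ADDR presumably expands to the compiler attribute that exempts a function from ASan instrumentation when VPP is built with the sanitizer enabled:

#ifdef CLIB_SANITIZE_ADDR
#define CLIB_NOSANITIZE_ADDR __attribute__ ((no_sanitize_address))
#else
#define CLIB_NOSANITIZE_ADDR /* no-op in regular builds */
#endif

The functions annotated throughout this patch walk allocator metadata (chunk headers, segment lists, footers) that ASan would otherwise flag as out-of-bounds or use-after-free accesses.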
@@ -459,6 +460,7 @@ static FORCEINLINE void x86_clear_lock(int* sl) {
#if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0
/* Plain spin locks use single word (embedded in malloc_states) */
+CLIB_NOSANITIZE_ADDR
static int spin_acquire_lock(int *sl) {
int spins = 0;
while (*(volatile int *)sl != 0 || CAS_LOCK(sl)) {
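The acquire loop above is the classic test-and-test-and-set idiom: spin on a cheap volatile read and only attempt the atomic operation once the lock looks free. A minimal self-contained sketch, assuming CAS_LOCK is an atomic 0 -> 1 compare-and-swap that returns nonzero on failure (cas_lock below is a stand-in, not dlmalloc's actual macro):

/* Stand-in for dlmalloc's CAS_LOCK: nonzero when the CAS failed. */
static int cas_lock (int *sl) {
  return !__sync_bool_compare_and_swap (sl, 0, 1);
}

static void spin_lock (int *sl) {
  while (*(volatile int *) sl != 0 || cas_lock (sl))
    ; /* plain read filters contention; CAS only when it looks free */
}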
@@ -1284,6 +1286,7 @@ static struct malloc_state _gm_;
((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
/* Return segment holding given address */
+CLIB_NOSANITIZE_ADDR
static msegmentptr segment_holding(mstate m, char* addr) {
msegmentptr sp = &m->seg;
for (;;) {
@@ -1295,6 +1298,7 @@ static msegmentptr segment_holding(mstate m, char* addr) {
}
/* Return true if segment contains a segment link */
+CLIB_NOSANITIZE_ADDR
static int has_segment_link(mstate m, msegmentptr ss) {
msegmentptr sp = &m->seg;
for (;;) {
@@ -1612,6 +1616,7 @@ static size_t traverse_and_check(mstate m);
#if (FOOTERS && !INSECURE)
/* Check if (alleged) mstate m has expected magic field */
+CLIB_NOSANITIZE_ADDR
static inline int
ok_magic (const mstate m)
{
@@ -2078,6 +2083,7 @@ static void do_check_malloc_state(mstate m) {
/* ----------------------------- statistics ------------------------------ */
#if !NO_MALLINFO
+CLIB_NOSANITIZE_ADDR
static struct dlmallinfo internal_mallinfo(mstate m) {
struct dlmallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
ensure_initialization();
@@ -2842,6 +2848,7 @@ static void* sys_alloc(mstate m, size_t nb) {
/* ----------------------- system deallocation -------------------------- */
/* Unmap and unlink any mmapped segments that don't contain used chunks */
+CLIB_NOSANITIZE_ADDR
static size_t release_unused_segments(mstate m) {
size_t released = 0;
int nsegs = 0;
@@ -2889,6 +2896,7 @@ static size_t release_unused_segments(mstate m) {
return released;
}
+CLIB_NOSANITIZE_ADDR
static int sys_trim(mstate m, size_t pad) {
size_t released = 0;
ensure_initialization();
@@ -2957,6 +2965,7 @@ static int sys_trim(mstate m, size_t pad) {
/* Consolidate and bin a chunk. Differs from exported versions
of free mainly in that the chunk need not be marked as inuse.
*/
+CLIB_NOSANITIZE_ADDR
static void dispose_chunk(mstate m, mchunkptr p, size_t psize) {
mchunkptr next = chunk_plus_offset(p, psize);
if (!pinuse(p)) {
@@ -3028,6 +3037,7 @@ static void dispose_chunk(mstate m, mchunkptr p, size_t psize) {
/* ---------------------------- malloc --------------------------- */
/* allocate a large request from the best fitting chunk in a treebin */
+CLIB_NOSANITIZE_ADDR
static void* tmalloc_large(mstate m, size_t nb) {
tchunkptr v = 0;
size_t rsize = -nb; /* Unsigned negation */
@@ -3099,6 +3109,7 @@ static void* tmalloc_large(mstate m, size_t nb) {
}
/* allocate a small request from the best fitting chunk in a treebin */
+CLIB_NOSANITIZE_ADDR
static void* tmalloc_small(mstate m, size_t nb) {
tchunkptr t, v;
size_t rsize;
@@ -3484,6 +3495,7 @@ static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb,
return newp;
}
+CLIB_NOSANITIZE_ADDR
static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
void* mem = 0;
if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
@@ -4101,6 +4113,7 @@ void mspace_get_address_and_size (mspace msp, char **addrp, size_t *sizep)
*sizep = this_seg->size;
}
+CLIB_NOSANITIZE_ADDR
int mspace_is_heap_object (mspace msp, void *p)
{
msegment *this_seg;
@@ -4155,6 +4168,7 @@ int mspace_enable_disable_trace (mspace msp, int enable)
return (was_enabled);
}
+CLIB_NOSANITIZE_ADDR
int mspace_is_traced (mspace msp)
{
mstate ms = (mstate)msp;
@@ -4164,6 +4178,7 @@ int mspace_is_traced (mspace msp)
return 0;
}
+CLIB_NOSANITIZE_ADDR
void* mspace_get_aligned (mspace msp,
unsigned long n_user_data_bytes,
unsigned long align,
@@ -4264,6 +4279,7 @@ void* mspace_get_aligned (mspace msp,
return (void *) searchp;
}
+CLIB_NOSANITIZE_ADDR
void mspace_put (mspace msp, void *p_arg)
{
char *object_header;
@@ -4287,7 +4303,7 @@ void mspace_put (mspace msp, void *p_arg)
mheap_put_trace ((unsigned long)p_arg, psize);
}
-#if CLIB_DEBUG > 0
+#if CLIB_DEBUG > 0 && !defined(CLIB_SANITIZE_ADDR)
/* Poison the object */
{
size_t psize = mspace_usable_size (object_header);
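This hunk is the one behavioral change in the patch: the debug-time poisoning of freed objects is skipped under ASan, presumably because the manual scribble would overlap with the shadow-memory poisoning the sanitizer build performs itself. A hedged sketch of the general pattern using the public ASan interface (poison_on_free and the 0x13 fill byte are illustrative, not taken from this diff):

#include <stddef.h>
#include <string.h>
#ifdef CLIB_SANITIZE_ADDR
#include <sanitizer/asan_interface.h>
#endif

/* Hypothetical helper: make n bytes at p trap or look obviously dead. */
static void poison_on_free (void *p, size_t n) {
#ifdef CLIB_SANITIZE_ADDR
  ASAN_POISON_MEMORY_REGION (p, n); /* ASan reports any later access */
#else
  memset (p, 0x13, n); /* recognizable garbage for debugger inspection */
#endif
}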
@@ -4313,6 +4329,7 @@ void mspace_put_no_offset (mspace msp, void *p_arg)
mspace_free (msp, p_arg);
}
+CLIB_NOSANITIZE_ADDR
size_t mspace_usable_size_with_delta (const void *p)
{
size_t usable_size;
@@ -4338,6 +4355,7 @@ size_t mspace_usable_size_with_delta (const void *p)
versions. This is not so nice but better than the alternatives.
*/
+CLIB_NOSANITIZE_ADDR
void* mspace_malloc(mspace msp, size_t bytes) {
mstate ms = (mstate)msp;
if (!ok_magic(ms)) {
@@ -4452,6 +4470,7 @@ void* mspace_malloc(mspace msp, size_t bytes) {
return 0;
}
+CLIB_NOSANITIZE_ADDR
void mspace_free(mspace msp, void* mem) {
if (mem != 0) {
mchunkptr p = mem2chunk(mem);
@@ -4789,6 +4808,7 @@ size_t mspace_set_footprint_limit(mspace msp, size_t bytes) {
}
#if !NO_MALLINFO
+CLIB_NOSANITIZE_ADDR
struct dlmallinfo mspace_mallinfo(mspace msp) {
mstate ms = (mstate)msp;
if (!ok_magic(ms)) {
@@ -4798,6 +4818,7 @@ struct dlmallinfo mspace_mallinfo(mspace msp) {
}
#endif /* NO_MALLINFO */
+CLIB_NOSANITIZE_ADDR
size_t mspace_usable_size(const void* mem) {
if (mem != 0) {
mchunkptr p = mem2chunk(mem);