path: root/src/vppinfra/dlmalloc.c
author    Damjan Marion <damarion@cisco.com>    2022-03-19 00:07:52 +0100
committer Florin Coras <florin.coras@gmail.com> 2022-03-30 18:27:13 +0000
commit    299571aca34d36e637e43cfbba6275662d0d7795 (patch)
tree      a48be21950d082afb7dd93562f76f0ba554e8919 /src/vppinfra/dlmalloc.c
parent    9539647b895c456ca53892a9259e3127c6b92d35 (diff)
vppinfra: vector allocator rework
- support in-place growth of vectors (when there is free space next to the existing allocation)
- drop the need for alloc_aligned_at_offset in the memory allocator, which allows an easier swap to a different memory allocator and reduces malloc overhead
- rework pool and vec macros into inline functions to improve debuggability
- fix alignment - in many cases the macros were not using the native alignment of the particular datatype; explicitly requesting alignment with the XXX_aligned() versions of the macros is no longer needed in > 99% of cases
- fix ASAN usage
- avoid use of vectors of void; this was the root cause of several bugs found in vec_* and pool_* functions where sizeof() was used on void instead of the real vector data type
- introduce a minimal alignment, currently 8 bytes; vectors will always be aligned to at least that value (the underlying allocator actually always provides 16-byte aligned allocations)

Type: improvement
Change-Id: I20f4b081bb13bbf7bc0ace85cc4e301787f12fdf
Signed-off-by: Damjan Marion <damarion@cisco.com>
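
The in-place growth described in the first bullet maps onto the dlmalloc mspace calls touched by this diff (mspace_realloc_in_place, mspace_memalign, mspace_free). Below is a minimal sketch, not the actual vec_*/pool_* implementation, of the "grow in place if a neighbouring free chunk exists, otherwise allocate and copy" pattern; the helper name grow_in_place_or_move() and the <vppinfra/dlmalloc.h> include are assumptions for illustration only.

#include <string.h>
#include <vppinfra/dlmalloc.h> /* assumed header exposing the mspace API */

/* Hypothetical helper, not part of vppinfra: try to extend an existing
   allocation without moving it; fall back to allocate-and-copy. */
static void *
grow_in_place_or_move (mspace msp, void *old, size_t old_size,
                       size_t new_size, size_t align)
{
  /* mspace_realloc_in_place () returns the same pointer on success and
     0 when there is no free space next to the existing chunk. */
  if (old && mspace_realloc_in_place (msp, old, new_size))
    return old; /* grown in place, no data copy needed */

  /* No room next to the old allocation: take a fresh aligned chunk,
     copy the payload and release the old one. */
  void *new_mem = mspace_memalign (msp, align, new_size);
  if (new_mem && old)
    {
      memcpy (new_mem, old, old_size);
      mspace_free (msp, old);
    }
  return new_mem;
}

Relying on plain aligned allocations like the one above, instead of alloc_aligned_at_offset, is what makes the allocator swap mentioned in the commit message easier; mspace_realloc_in_place and mspace_memalign are presumably the functions gaining CLIB_NOSANITIZE_ADDR in this diff because they walk chunk metadata that ASAN would otherwise flag.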
Diffstat (limited to 'src/vppinfra/dlmalloc.c')
-rw-r--r--  src/vppinfra/dlmalloc.c  |  12
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/src/vppinfra/dlmalloc.c b/src/vppinfra/dlmalloc.c
index 36c80b09b87..03e7d874032 100644
--- a/src/vppinfra/dlmalloc.c
+++ b/src/vppinfra/dlmalloc.c
@@ -3420,7 +3420,7 @@ void* dlcalloc(size_t n_elements, size_t elem_size) {
/* ------------ Internal support for realloc, memalign, etc -------------- */
/* Try to realloc; only in-place unless can_move true */
-static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb,
+static CLIB_NOSANITIZE_ADDR mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb,
int can_move) {
mchunkptr newp = 0;
size_t oldsize = chunksize(p);
@@ -4118,7 +4118,7 @@ void mspace_get_address_and_size (mspace msp, char **addrp, size_t *sizep)
*sizep = this_seg->size;
}
-CLIB_NOSANITIZE_ADDR __clib_export
+CLIB_NOSANITIZE_ADDR
int mspace_is_heap_object (mspace msp, void *p)
{
msegment *this_seg;
@@ -4185,7 +4185,7 @@ int mspace_is_traced (mspace msp)
return 0;
}
-CLIB_NOSANITIZE_ADDR __clib_export
+CLIB_NOSANITIZE_ADDR
void* mspace_get_aligned (mspace msp,
unsigned long n_user_data_bytes,
unsigned long align,
@@ -4265,7 +4265,7 @@ void* mspace_get_aligned (mspace msp,
return (void *) searchp;
}
-CLIB_NOSANITIZE_ADDR __clib_export
+CLIB_NOSANITIZE_ADDR
void mspace_put (mspace msp, void *p_arg)
{
char *object_header;
@@ -4315,7 +4315,7 @@ void mspace_put_no_offset (mspace msp, void *p_arg)
mspace_free (msp, p_arg);
}
-CLIB_NOSANITIZE_ADDR __clib_export
+CLIB_NOSANITIZE_ADDR
size_t mspace_usable_size_with_delta (const void *p)
{
size_t usable_size;
@@ -4623,6 +4623,7 @@ void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
return mem;
}
+CLIB_NOSANITIZE_ADDR
void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) {
void* mem = 0;
if (oldmem != 0) {
@@ -4655,6 +4656,7 @@ void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) {
return mem;
}
+CLIB_NOSANITIZE_ADDR
void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
mstate ms = (mstate)msp;
if (!ok_magic(ms)) {