summaryrefslogtreecommitdiffstats
path: root/src/common
diff options
context:
space:
mode:
authorHanoh Haim <hhaim@cisco.com>2016-06-14 12:38:08 +0300
committerHanoh Haim <hhaim@cisco.com>2016-06-14 12:38:08 +0300
commit0681152c5dc0c7eda6ec23ccd903188156b4b38c (patch)
tree08f5b0cb62ec39dcad78f287786cfa1f0f9d0efd /src/common
parente56edd675d810d65e4e565561d83d39daa8354fb (diff)
add ef code, not connected yet
Diffstat (limited to 'src/common')
-rw-r--r--src/common/ef/efence.cpp930
-rw-r--r--src/common/ef/efence.h42
-rw-r--r--src/common/ef/eftest.c219
-rw-r--r--src/common/ef/page.cpp193
-rw-r--r--src/common/ef/print.cpp170
-rw-r--r--src/common/ef/tstheap.c61
6 files changed, 1615 insertions, 0 deletions
diff --git a/src/common/ef/efence.cpp b/src/common/ef/efence.cpp
new file mode 100644
index 00000000..1340a12a
--- /dev/null
+++ b/src/common/ef/efence.cpp
@@ -0,0 +1,930 @@
+/*
+ * Electric Fence - Red-Zone memory allocator.
+ * Bruce Perens, 1988, 1993
+ *
+ * This is a special version of malloc() and company for debugging software
+ * that is suspected of overrunning or underrunning the boundaries of a
+ * malloc buffer, or touching free memory.
+ *
+ * It arranges for each malloc buffer to be followed (or preceded)
+ * in the address space by an inaccessable virtual memory page,
+ * and for free memory to be inaccessable. If software touches the
+ * inaccessable page, it will get an immediate segmentation
+ * fault. It is then trivial to uncover the offending code using a debugger.
+ *
+ * An advantage of this product over most malloc debuggers is that this one
+ * detects reading out of bounds as well as writing, and this one stops on
+ * the exact instruction that causes the error, rather than waiting until the
+ * next boundary check.
+ *
+ * There is one product that debugs malloc buffer overruns
+ * better than Electric Fence: "Purify" from Purify Systems, and that's only
+ * a small part of what Purify does. I'm not affiliated with Purify, I just
+ * respect a job well done.
+ *
+ * This version of malloc() should not be linked into production software,
+ * since it tremendously increases the time and memory overhead of malloc().
+ * Each malloc buffer will consume a minimum of two virtual memory pages,
+ * this is 16 kilobytes on many systems. On some systems it will be necessary
+ * to increase the amount of swap space in order to debug large programs that
+ * perform lots of allocation, because of the per-buffer overhead.
+ */
+#include "efence.h"
+#include <stdlib.h>
+#include <unistd.h>
+#include <memory.h>
+#include <string.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdint.h>
+
+
+extern C_LINKAGE void * ef_malloc(size_t size);
+extern C_LINKAGE void ef_free(void * address);
+extern C_LINKAGE void * ef_memalign(size_t alignment, size_t userSize);
+extern C_LINKAGE void * ef_calloc(size_t nelem, size_t elsize);
+extern C_LINKAGE void * ef_valloc (size_t size);
+extern C_LINKAGE void * ef_realloc(void * oldBuffer, size_t newSize);
+extern C_LINKAGE void ef_init(void);
+
+
+
+
+
+#ifdef malloc
+#undef malloc
+#endif
+
+#ifdef calloc
+#undef calloc
+#endif
+
+static const char version[] = "\n Electric Fence 2.1"
+ " Copyright (C) 1987-1998 Bruce Perens.\n";
+
+/*
+ * MEMORY_CREATION_SIZE is the amount of memory to get from the operating
+ * system at one time. We'll break that memory down into smaller pieces for
+ * malloc buffers. One megabyte is probably a good value.
+ */
+#define MEMORY_CREATION_SIZE 10* 1024 * 1024
+
+/*
+ * Enum Mode indicates the status of a malloc buffer.
+ */
+enum _Mode {
+ NOT_IN_USE = 0, /* Available to represent a malloc buffer. */
+ FREE, /* A free buffer. */
+ ALLOCATED, /* A buffer that is in use. */
+ PROTECTED, /* A freed buffer that can not be allocated again. */
+ INTERNAL_USE /* A buffer used internally by malloc(). */
+};
+typedef enum _Mode Mode;
+
+/*
+ * Struct Slot contains all of the information about a malloc buffer except
+ * for the contents of its memory.
+ */
+struct _Slot {
+ void * userAddress;
+ void * internalAddress;
+ size_t userSize;
+ size_t internalSize;
+ Mode mode;
+};
+typedef struct _Slot Slot;
+
+ /*
+ * EF_DISABLE_BANNER is a global variable used to control whether
+ * Electric Fence prints its usual startup message. If the value is
+ * -1, it will be set from the environment (defaulting to 0) at run
+ * time. Note it is initialized to 1 below, so the banner is suppressed
+ * unless the programmer first resets it to -1 or 0.
+ */
+int EF_DISABLE_BANNER = 1;
+
+
+/*
+ * EF_ALIGNMENT is a global variable used to control the default alignment
+ * of buffers returned by malloc(), calloc(), and realloc(). It is all-caps
+ * so that its name matches the name of the environment variable that is used
+ * to set it. This gives the programmer one less name to remember.
+ * If the value is -1, it will be set from the environment or sizeof(int)
+ * at run time. Note it is initialized to 8 below, so the environment is
+ * only consulted if the programmer first resets it to -1.
+ */
+int EF_ALIGNMENT = 8;
+
+/*
+ * EF_PROTECT_FREE is a global variable used to control the disposition of
+ * memory that is released using free(). It is all-caps so that its name
+ * matches the name of the environment variable that is used to set it.
+ * If its value is greater non-zero, memory released by free is made
+ * inaccessable and never allocated again. Any software that touches free
+ * memory will then get a segmentation fault. If its value is zero, freed
+ * memory will be available for reallocation, but will still be inaccessable
+ * until it is reallocated.
+ * If the value is -1, it will be set from the environment or to 0 at run-time.
+ */
+int EF_PROTECT_FREE = -1;
+
+/*
+ * EF_PROTECT_BELOW is used to modify the behavior of the allocator. When
+ * its value is non-zero, the allocator will place an inaccessable page
+ * immediately _before_ the malloc buffer in the address space, instead
+ * of _after_ it. Use this to detect malloc buffer under-runs, rather than
+ * over-runs. It won't detect both at the same time, so you should test your
+ * software twice, once with this value clear, and once with it set.
+ * If the value is -1, it will be set from the environment or to zero at
+ * run-time
+ */
+int EF_PROTECT_BELOW = -1;
+
+/*
+ * EF_ALLOW_MALLOC_0 is set if Electric Fence is to allow malloc(0). I
+ * trap malloc(0) by default because it is a common source of bugs.
+ */
+int EF_ALLOW_MALLOC_0 = 0;
+
+/*
+ * EF_FREE_WIPES is set if Electric Fence is to wipe the memory content
+ * of freed blocks. This makes it easier to check if memory is freed or
+ * not
+ */
+int EF_FREE_WIPES = 1;
+
+
+static int malloc_init =0;
+/*
+
+ * allocationList points to the array of slot structures used to manage the
+ * malloc arena.
+ */
+static Slot * allocationList = 0;
+
+/*
+ * allocationListSize is the size of the allocation list. This will always
+ * be a multiple of the page size.
+ */
+static size_t allocationListSize = 0;
+
+/*
+ * slotCount is the number of Slot structures in allocationList.
+ */
+static size_t slotCount = 0;
+
+/*
+ * unUsedSlots is the number of Slot structures that are currently available
+ * to represent new malloc buffers. When this number gets too low, we will
+ * create new slots.
+ */
+static size_t unUsedSlots = 0;
+
+/*
+ * slotsPerPage is the number of slot structures that fit in a virtual
+ * memory page.
+ */
+static size_t slotsPerPage = 0;
+
+/*
+ * internalUse is set when allocating and freeing the allocatior-internal
+ * data structures.
+ */
+static int internalUse = 0;
+
+/*
+ * noAllocationListProtection is set to tell malloc() and free() not to
+ * manipulate the protection of the allocation list. This is only set in
+ * realloc(), which does it to save on slow system calls, and in
+ * allocateMoreSlots(), which does it because it changes the allocation list.
+ */
+static int noAllocationListProtection = 0;
+
+/*
+ * bytesPerPage is set at run-time to the number of bytes per virtual-memory
+ * page, as returned by Page_Size().
+ */
+static size_t bytesPerPage = 0;
+
+ /*
+ * mutex to enable multithreaded operation
+ */
+static pthread_mutex_t mutex ;
+
+
+/* Acquire the allocator's global mutex; recursive, so nested calls
+ * from ef_calloc()/ef_realloc() into ef_malloc() do not deadlock. */
+static void lock() {
+    /* reentrant mutex -see init */
+    pthread_mutex_lock(&mutex);
+}
+
+/* Release the allocator's global mutex. */
+static void unlock() {
+    pthread_mutex_unlock(&mutex);
+}
+
+
+
+/*
+ * internalError is called for those "shouldn't happen" errors in the
+ * allocator.
+ */
+static void
+internalError(void)
+{
+	/* EF_Abort() reports the message and terminates the process;
+	 * this never returns. */
+	EF_Abort("Internal error in allocator.");
+}
+
+/*
+ * initialize sets up the memory allocation arena and the run-time
+ * configuration information.
+ */
+/*
+ * initialize sets up the memory allocation arena and the run-time
+ * configuration information. Must run (under ef_init()) before any
+ * allocation is attempted.
+ */
+static void
+initialize(void)
+{
+	size_t	size = MEMORY_CREATION_SIZE;
+	size_t	slack;
+	char *	string;
+	Slot *	slot;
+
+	if ( EF_DISABLE_BANNER == -1 ) {
+		if ( (string = getenv("EF_DISABLE_BANNER")) != 0 )
+			EF_DISABLE_BANNER = atoi(string);
+		else
+			EF_DISABLE_BANNER = 0;
+	}
+
+	if ( EF_DISABLE_BANNER == 0 )
+		EF_Print(version);
+
+	/*
+	 * Import the user's environment specification of the default
+	 * alignment for malloc(). We want that alignment to be under
+	 * user control, since smaller alignment lets us catch more bugs,
+	 * however some software will break if malloc() returns a buffer
+	 * that is not word-aligned.
+	 *
+	 * I would like
+	 * alignment to be zero so that we could catch all one-byte
+	 * overruns, however if malloc() is asked to allocate an odd-size
+	 * buffer and returns an address that is not word-aligned, or whose
+	 * size is not a multiple of the word size, software breaks.
+	 * This was the case with the Sun string-handling routines,
+	 * which can do word fetches up to three bytes beyond the end of a
+	 * string. I handle this problem in part by providing
+	 * byte-reference-only versions of the string library functions, but
+	 * there are other functions that break, too. Some in X Windows, one
+	 * in Sam Leffler's TIFF library, and doubtless many others.
+	 */
+	if ( EF_ALIGNMENT == -1 ) {
+		if ( (string = getenv("EF_ALIGNMENT")) != 0 )
+			EF_ALIGNMENT = (size_t)atoi(string);
+		else
+			EF_ALIGNMENT = sizeof(int);
+	}
+
+	/*
+	 * See if the user wants to protect the address space below a buffer,
+	 * rather than that above a buffer.
+	 */
+	if ( EF_PROTECT_BELOW == -1 ) {
+		if ( (string = getenv("EF_PROTECT_BELOW")) != 0 )
+			EF_PROTECT_BELOW = (atoi(string) != 0);
+		else
+			EF_PROTECT_BELOW = 0;
+	}
+
+	/*
+	 * See if the user wants to protect memory that has been freed until
+	 * the program exits, rather than until it is re-allocated.
+	 */
+	if ( EF_PROTECT_FREE == -1 ) {
+		if ( (string = getenv("EF_PROTECT_FREE")) != 0 )
+			EF_PROTECT_FREE = (atoi(string) != 0);
+		else
+			EF_PROTECT_FREE = 0;
+	}
+
+	/*
+	 * See if the user wants to allow malloc(0).
+	 */
+	if ( EF_ALLOW_MALLOC_0 == -1 ) {
+		if ( (string = getenv("EF_ALLOW_MALLOC_0")) != 0 )
+			EF_ALLOW_MALLOC_0 = (atoi(string) != 0);
+		else
+			EF_ALLOW_MALLOC_0 = 0;
+	}
+
+	/*
+	 * See if the user wants us to wipe out freed memory.
+	 */
+	if ( EF_FREE_WIPES == -1 ) {
+		if ( (string = getenv("EF_FREE_WIPES")) != 0 )
+			EF_FREE_WIPES = (atoi(string) != 0);
+		else
+			EF_FREE_WIPES = 0;
+	}
+
+	/*
+	 * Get the run-time configuration of the virtual memory page size.
+	 */
+	bytesPerPage = Page_Size();
+
+	/*
+	 * Figure out how many Slot structures to allocate at one time.
+	 */
+	slotCount = slotsPerPage = bytesPerPage / sizeof(Slot);
+	allocationListSize = bytesPerPage;
+
+	if ( allocationListSize > size )
+		size = allocationListSize;
+
+	if ( (slack = size % bytesPerPage) != 0 )
+		size += bytesPerPage - slack;
+
+	/*
+	 * Allocate memory, and break it up into two malloc buffers. The
+	 * first buffer will be used for Slot structures, the second will
+	 * be marked free.
+	 */
+	slot = allocationList = (Slot *)Page_Create(size);
+	memset((char *)allocationList, 0, allocationListSize);
+
+	slot[0].internalSize = slot[0].userSize = allocationListSize;
+	slot[0].internalAddress = slot[0].userAddress = allocationList;
+	slot[0].mode = INTERNAL_USE;
+	if ( size > allocationListSize ) {
+		slot[1].internalAddress = slot[1].userAddress
+		 = ((char *)slot[0].internalAddress) + slot[0].internalSize;
+		slot[1].internalSize
+		 = slot[1].userSize = size - slot[0].internalSize;
+		slot[1].mode = FREE;
+
+		/*
+		 * Deny access to the free page, so that we will detect any
+		 * software that treads upon free memory. Only done when
+		 * slot[1] was actually populated above; previously this ran
+		 * unconditionally and could read an uninitialized slot.
+		 */
+		Page_DenyAccess(slot[1].internalAddress, slot[1].internalSize);
+
+		/*
+		 * Account for the two slot structures that we've used.
+		 */
+		unUsedSlots = slotCount - 2;
+	}
+	else {
+		/* Only slot[0] is in use. */
+		unUsedSlots = slotCount - 1;
+	}
+}
+
+/*
+ * allocateMoreSlots is called when there are only enough slot structures
+ * left to support the allocation of a single malloc buffer.
+ */
+static void
+allocateMoreSlots(void)
+{
+	size_t	newSize = allocationListSize + bytesPerPage;
+	void *	newAllocation;
+	void *	oldAllocation = allocationList;
+
+	/* Unprotect the slot table so we may copy it; the flags below stop
+	 * ef_malloc()/ef_free() from re-protecting it underneath us. */
+	Page_AllowAccess(allocationList, allocationListSize);
+	noAllocationListProtection = 1;
+	internalUse = 1;
+
+	/* Grow by one page; the new tail page of slots is zeroed so every
+	 * new Slot starts as NOT_IN_USE (mode 0). */
+	newAllocation = ef_malloc(newSize);
+	memcpy(newAllocation, allocationList, allocationListSize);
+	memset(&(((char *)newAllocation)[allocationListSize]), 0, bytesPerPage);
+
+	/* Swap in the new table before freeing the old one, so ef_free()
+	 * below operates on the new, larger list. */
+	allocationList = (Slot *)newAllocation;
+	allocationListSize = newSize;
+	slotCount += slotsPerPage;
+	unUsedSlots += slotsPerPage;
+
+	ef_free(oldAllocation);
+
+	/*
+	 * Keep access to the allocation list open at this point, because
+	 * I am returning to memalign(), which needs that access.
+	 */
+	noAllocationListProtection = 0;
+	internalUse = 0;
+}
+
+/*
+ * This is the memory allocator. When asked to allocate a buffer, allocate
+ * it in such a way that the end of the buffer is followed by an inaccessable
+ * memory page. If software overruns that buffer, it will touch the bad page
+ * and get an immediate segmentation fault. It's then easy to zero in on the
+ * offending code with a debugger.
+ *
+ * There are a few complications. If the user asks for an odd-sized buffer,
+ * we would have to have that buffer start on an odd address if the byte after
+ * the end of the buffer was to be on the inaccessable page. Unfortunately,
+ * there is lots of software that asks for odd-sized buffers and then
+ * requires that the returned address be word-aligned, or the size of the
+ * buffer be a multiple of the word size. An example are the string-processing
+ * functions on Sun systems, which do word references to the string memory
+ * and may refer to memory up to three bytes beyond the end of the string.
+ * For this reason, I take the alignment requests to memalign() and valloc()
+ * seriously, and
+ *
+ * Electric Fence wastes lots of memory. I do a best-fit allocator here
+ * so that it won't waste even more. It's slow, but thrashing because your
+ * working set is too big for a system's RAM is even slower.
+ */
+/*
+ * ef_memalign: core best-fit allocator. Returns a buffer of userSize
+ * bytes aligned to "alignment", positioned so that the page immediately
+ * after it (or before it, when EF_PROTECT_BELOW) is access-denied.
+ * Caller must hold the allocator lock (see ef_malloc()/ef_valloc()).
+ */
+extern C_LINKAGE void *
+ef_memalign(size_t alignment, size_t userSize)
+{
+	register Slot *	slot;
+	register size_t	count;
+	Slot *	fullSlot = 0;
+	Slot *	emptySlots[2];
+	size_t	internalSize;
+	size_t	slack;
+	char *	address;
+
+
+	if ( userSize == 0 && !EF_ALLOW_MALLOC_0 )
+		EF_Abort("Allocating 0 bytes, probably a bug.");
+
+	/*
+	 * If EF_PROTECT_BELOW is set, all addresses returned by malloc()
+	 * and company will be page-aligned.
+	 */
+	if ( !EF_PROTECT_BELOW && alignment > 1 ) {
+		if ( (slack = userSize % alignment) != 0 )
+			userSize += alignment - slack;
+	}
+
+	/*
+	 * The internal size of the buffer is rounded up to the next page-size
+	 * boudary, and then we add another page's worth of memory for the
+	 * dead page.
+	 */
+	internalSize = userSize + bytesPerPage;
+	if ( (slack = internalSize % bytesPerPage) != 0 )
+		internalSize += bytesPerPage - slack;
+
+	/*
+	 * These will hold the addresses of two empty Slot structures, that
+	 * can be used to hold information for any memory I create, and any
+	 * memory that I mark free.
+	 */
+	emptySlots[0] = 0;
+	emptySlots[1] = 0;
+
+	/*
+	 * The internal memory used by the allocator is currently
+	 * inaccessable, so that errant programs won't scrawl on the
+	 * allocator's arena. I'll un-protect it here so that I can make
+	 * a new allocation. I'll re-protect it before I return.
+	 */
+	if ( !noAllocationListProtection )
+		Page_AllowAccess(allocationList, allocationListSize);
+
+	/*
+	 * If I'm running out of empty slots, create some more before
+	 * I don't have enough slots left to make an allocation.
+	 */
+	if ( !internalUse && unUsedSlots < 7 ) {
+		allocateMoreSlots();
+	}
+
+	/*
+	 * Iterate through all of the slot structures. Attempt to find a slot
+	 * containing free memory of the exact right size. Accept a slot with
+	 * more memory than we want, if the exact right size is not available.
+	 * Find two slot structures that are not in use. We will need one if
+	 * we split a buffer into free and allocated parts, and the second if
+	 * we have to create new memory and mark it as free.
+	 *
+	 */
+
+	for ( slot = allocationList, count = slotCount ; count > 0; count-- ) {
+		if ( slot->mode == FREE
+		 && slot->internalSize >= internalSize ) {
+			if ( !fullSlot
+			 ||slot->internalSize < fullSlot->internalSize){
+				fullSlot = slot;
+				if ( slot->internalSize == internalSize
+				 && emptySlots[0] )
+					break;	/* All done, */
+			}
+		}
+		else if ( slot->mode == NOT_IN_USE ) {
+			if ( !emptySlots[0] )
+				emptySlots[0] = slot;
+			else if ( !emptySlots[1] )
+				emptySlots[1] = slot;
+			else if ( fullSlot
+			 && fullSlot->internalSize == internalSize )
+				break;	/* All done. */
+		}
+		slot++;
+	}
+	if ( !emptySlots[0] )
+		internalError();
+
+	if ( !fullSlot ) {
+		/*
+		 * I get here if I haven't been able to find a free buffer
+		 * with all of the memory I need. I'll have to create more
+		 * memory. I'll mark it all as free, and then split it into
+		 * free and allocated portions later.
+		 */
+		size_t	chunkSize = MEMORY_CREATION_SIZE;
+
+		if ( !emptySlots[1] )
+			internalError();
+
+		if ( chunkSize < internalSize )
+			chunkSize = internalSize;
+
+		if ( (slack = chunkSize % bytesPerPage) != 0 )
+			chunkSize += bytesPerPage - slack;
+
+		/* Use up one of the empty slots to make the full slot. */
+		fullSlot = emptySlots[0];
+		emptySlots[0] = emptySlots[1];
+		fullSlot->internalAddress = Page_Create(chunkSize);
+		fullSlot->internalSize = chunkSize;
+		fullSlot->mode = FREE;
+		unUsedSlots--;
+	}
+
+	/*
+	 * If I'm allocating memory for the allocator's own data structures,
+	 * mark it INTERNAL_USE so that no errant software will be able to
+	 * free it.
+	 */
+	if ( internalUse )
+		fullSlot->mode = INTERNAL_USE;
+	else
+		fullSlot->mode = ALLOCATED;
+
+	/*
+	 * If the buffer I've found is larger than I need, split it into
+	 * an allocated buffer with the exact amount of memory I need, and
+	 * a free buffer containing the surplus memory.
+	 */
+	if ( fullSlot->internalSize > internalSize ) {
+		emptySlots[0]->internalSize
+		 = fullSlot->internalSize - internalSize;
+		emptySlots[0]->internalAddress
+		 = ((char *)fullSlot->internalAddress) + internalSize;
+		emptySlots[0]->mode = FREE;
+		fullSlot->internalSize = internalSize;
+		unUsedSlots--;
+	}
+
+	if ( !EF_PROTECT_BELOW ) {
+		/*
+		 * Arrange the buffer so that it is followed by an inaccessable
+		 * memory page. A buffer overrun that touches that page will
+		 * cause a segmentation fault.
+		 */
+		address = (char *)fullSlot->internalAddress;
+
+		/* Set up the "live" page. */
+		if ( internalSize - bytesPerPage > 0 )
+			 Page_AllowAccess(
+			 fullSlot->internalAddress
+			,internalSize - bytesPerPage);
+
+		address += internalSize - bytesPerPage;
+
+		/* Set up the "dead" page. */
+		Page_DenyAccess(address, bytesPerPage);
+
+		/* Figure out what address to give the user. */
+		address -= userSize;
+	}
+	else {	/* EF_PROTECT_BELOW != 0 */
+		/*
+		 * Arrange the buffer so that it is preceded by an inaccessable
+		 * memory page. A buffer underrun that touches that page will
+		 * cause a segmentation fault.
+		 */
+		address = (char *)fullSlot->internalAddress;
+
+		/* Set up the "dead" page. */
+		Page_DenyAccess(address, bytesPerPage);
+
+		address += bytesPerPage;
+
+		/* Set up the "live" page. */
+		if ( internalSize - bytesPerPage > 0 )
+			Page_AllowAccess(address, internalSize - bytesPerPage);
+	}
+
+	fullSlot->userAddress = address;
+	fullSlot->userSize = userSize;
+
+	/*
+	 * Make the pool's internal memory inaccessable, so that the program
+	 * being debugged can't stomp on it.
+	 */
+	if ( !internalUse )
+		Page_DenyAccess(allocationList, allocationListSize);
+
+	return address;
+}
+
+/*
+ * Find the slot structure for a user address.
+ */
+/*
+ * Locate the Slot whose user-visible address matches "address".
+ * Returns 0 when no slot matches. The table must be unprotected
+ * by the caller before this scan.
+ */
+static Slot *
+slotForUserAddress(void * address)
+{
+	size_t	i;
+
+	for ( i = 0; i < slotCount; i++ ) {
+		if ( allocationList[i].userAddress == address )
+			return &allocationList[i];
+	}
+
+	return 0;
+}
+
+/*
+ * Find the slot structure for an internal address.
+ */
+/*
+ * Locate the Slot whose internal (page-aligned) base address matches
+ * "address"; 0 when none does.
+ */
+static Slot *
+slotForInternalAddress(void * address)
+{
+	size_t	i;
+
+	for ( i = 0; i < slotCount; i++ ) {
+		if ( allocationList[i].internalAddress == address )
+			return &allocationList[i];
+	}
+	return 0;
+}
+
+/*
+ * Given the internal address of a buffer, find the buffer immediately
+ * before that buffer in the address space. This is used by free() to
+ * coalesce two free buffers into one.
+ */
+/*
+ * Given the internal address of a buffer, find the buffer that ends
+ * exactly where that buffer begins in the address space. Used by
+ * free() to coalesce two adjacent free buffers into one.
+ */
+static Slot *
+slotForInternalAddressPreviousTo(void * address)
+{
+	size_t	i;
+
+	for ( i = 0; i < slotCount; i++ ) {
+		Slot *	s = &allocationList[i];
+
+		if ( ((char *)s->internalAddress) + s->internalSize == address )
+			return s;
+	}
+	return 0;
+}
+
+/*
+ * ef_free: release a buffer obtained from ef_malloc() and company.
+ * Aborts on a foreign or already-free address. Freed memory is
+ * optionally wiped (EF_FREE_WIPES), coalesced with free neighbors,
+ * and always access-denied on exit.
+ */
+extern C_LINKAGE void
+ef_free(void * address)
+{
+	Slot *	slot;
+	Slot *	previousSlot = 0;
+	Slot *	nextSlot = 0;
+
+	//printf(" ::free %p \n",address);
+	lock();
+
+	/* free(NULL) is a documented no-op. */
+	if ( address == 0 ) {
+		unlock();
+		return;
+	}
+
+	if ( allocationList == 0 )
+		EF_Abort("free() called before first malloc().");
+
+	if ( !noAllocationListProtection )
+		Page_AllowAccess(allocationList, allocationListSize);
+
+	slot = slotForUserAddress(address);
+
+	if ( !slot )
+		EF_Abort("free(%a): address not from malloc().", address);
+
+	/* Internal (allocator-owned) buffers may only be freed while
+	 * internalUse is set; anything else here is a double free. */
+	if ( slot->mode != ALLOCATED ) {
+		if ( internalUse && slot->mode == INTERNAL_USE )
+			/* Do nothing. */;
+		else {
+			EF_Abort(
+			 "free(%a): freeing free memory."
+			,address);
+		}
+	}
+
+	if ( EF_PROTECT_FREE )
+		slot->mode = PROTECTED;
+	else
+		slot->mode = FREE;
+
+	/* 0xbd fill makes use-after-free data easy to recognize. */
+	if ( EF_FREE_WIPES )
+		memset(slot->userAddress, 0xbd, slot->userSize);
+
+	previousSlot = slotForInternalAddressPreviousTo(slot->internalAddress);
+	nextSlot = slotForInternalAddress(
+	 ((char *)slot->internalAddress) + slot->internalSize);
+
+	if ( previousSlot
+	 && (previousSlot->mode == FREE || previousSlot->mode == PROTECTED) ) {
+		/* Coalesce previous slot with this one. */
+		previousSlot->internalSize += slot->internalSize;
+		if ( EF_PROTECT_FREE )
+			previousSlot->mode = PROTECTED;
+
+		slot->internalAddress = slot->userAddress = 0;
+		slot->internalSize = slot->userSize = 0;
+		slot->mode = NOT_IN_USE;
+		slot = previousSlot;
+		unUsedSlots++;
+	}
+	if ( nextSlot
+	 && (nextSlot->mode == FREE || nextSlot->mode == PROTECTED) ) {
+		/* Coalesce next slot with this one. */
+		slot->internalSize += nextSlot->internalSize;
+		nextSlot->internalAddress = nextSlot->userAddress = 0;
+		nextSlot->internalSize = nextSlot->userSize = 0;
+		nextSlot->mode = NOT_IN_USE;
+		unUsedSlots++;
+	}
+
+	/* A free buffer spans its whole internal region. */
+	slot->userAddress = slot->internalAddress;
+	slot->userSize = slot->internalSize;
+
+	/*
+	 * Free memory is _always_ set to deny access. When EF_PROTECT_FREE
+	 * is true, free memory is never reallocated, so it remains access
+	 * denied for the life of the process. When EF_PROTECT_FREE is false,
+	 * the memory may be re-allocated, at which time access to it will be
+	 * allowed again.
+	 */
+	Page_DenyAccess(slot->internalAddress, slot->internalSize);
+
+	if ( !noAllocationListProtection )
+		Page_DenyAccess(allocationList, allocationListSize);
+
+	unlock();
+}
+
+/*
+ * ef_realloc: allocate a new buffer of newSize, copy over the smaller
+ * of the old and new sizes, free the old buffer, and zero any grown
+ * tail. The ef_malloc() call also triggers lazy ef_init() before the
+ * lock is taken below.
+ */
+extern C_LINKAGE void *
+ef_realloc(void * oldBuffer, size_t newSize)
+{
+	void *	newBuffer = ef_malloc(newSize);
+
+	lock();
+
+	if ( oldBuffer ) {
+		size_t	size;
+		Slot *	slot;
+
+		/* Unprotect the slot table once for the whole sequence;
+		 * noAllocationListProtection stops ef_free() from
+		 * re-protecting it mid-way (saves mprotect calls). */
+		Page_AllowAccess(allocationList, allocationListSize);
+		noAllocationListProtection = 1;
+
+		slot = slotForUserAddress(oldBuffer);
+
+		if ( slot == 0 )
+			EF_Abort(
+			 "realloc(%a, %d): address not from malloc()."
+			,oldBuffer
+			,newSize);
+
+		/* Copy the lesser of old user size and requested size. */
+		if ( newSize < (size = slot->userSize) )
+			size = newSize;
+
+		if ( size > 0 )
+			memcpy(newBuffer, oldBuffer, size);
+
+		ef_free(oldBuffer);
+		noAllocationListProtection = 0;
+		Page_DenyAccess(allocationList, allocationListSize);
+
+		/* Zero the grown portion, mimicking calloc-like behavior
+		 * for the tail (classic efence behavior). */
+		if ( size < newSize )
+			memset(&(((char *)newBuffer)[size]), 0, newSize - size);
+
+		/* Internal memory was re-protected in free() */
+	}
+	unlock();
+
+	return newBuffer;
+}
+
+/*
+ * ef_malloc: public allocation entry point. Lazily initializes the
+ * allocator (mutex + arena) on first use, then delegates to
+ * ef_memalign() with the configured default alignment.
+ * NOTE(review): the malloc_init check here is not thread-safe for the
+ * very first concurrent calls -- ef_init() must effectively happen
+ * before threads are spawned; confirm with callers.
+ */
+extern C_LINKAGE void *
+ef_malloc(size_t size)
+{
+
+	if ( malloc_init == 0 ){
+		ef_init();
+	}
+
+
+	void *allocation;
+
+	lock();
+	allocation=ef_memalign(EF_ALIGNMENT, size);
+
+	/* put 0xaa into the memset to find uninit issues */
+	memset(allocation,0xaa,size);
+	#if 0
+	int i;
+	uint8_t *p=(uint8_t *)allocation;
+	for (i=0; i<size; i++) {
+	    p[i]=(rand()&0xff);
+	}
+	#endif
+
+	unlock();
+	//printf(":: alloc %p %d \n",allocation,(int)size);
+	return allocation;
+}
+
+/*
+ * ef_calloc: allocate nelem elements of elsize bytes, zero-filled.
+ * Checks the nelem * elsize product for size_t overflow (a classic
+ * calloc pitfall), and makes sure the recursive mutex exists before
+ * the first lock() -- previously, calling calloc() before any
+ * malloc() locked an uninitialized mutex.
+ */
+extern C_LINKAGE void *
+ef_calloc(size_t nelem, size_t elsize)
+{
+	size_t	size;
+	void *	allocation;
+
+	/* Reject products that would wrap around size_t. */
+	if ( elsize != 0 && nelem > (size_t)-1 / elsize )
+		EF_Abort("calloc(%d, %d): size overflow.", nelem, elsize);
+
+	size = nelem * elsize;
+
+	/* Initialize the allocator (and its mutex) before locking. */
+	if ( malloc_init == 0 )
+		ef_init();
+
+	lock();
+
+	allocation = ef_malloc(size);
+	memset(allocation, 0, size);
+	unlock();
+
+	return allocation;
+}
+
+/*
+ * This will catch more bugs if you remove the page alignment, but it
+ * will break some software.
+ */
+/*
+ * ef_valloc: page-aligned allocation. This will catch more bugs if
+ * you remove the page alignment, but it will break some software.
+ * Initializes the allocator before locking: previously a first-call
+ * valloc() locked an uninitialized mutex and used bytesPerPage == 0.
+ */
+extern C_LINKAGE void *
+ef_valloc (size_t size)
+{
+	void * allocation;
+
+	/* Make sure the mutex and bytesPerPage exist before use. */
+	if ( malloc_init == 0 )
+		ef_init();
+
+	lock();
+	allocation= ef_memalign(bytesPerPage, size);
+	unlock();
+
+	return allocation;
+}
+
+
+#define REPLACE_MALLOC
+
+#ifdef REPLACE_MALLOC
+
+/*
+ * Standard libc allocator entry points, redirected to the Electric
+ * Fence implementations above so existing code links against the
+ * red-zone allocator without any source changes.
+ */
+extern C_LINKAGE void
+free(void * address)
+{
+	ef_free(address);
+}
+
+extern C_LINKAGE void *
+realloc(void * oldBuffer, size_t newSize)
+{
+	return (ef_realloc(oldBuffer, newSize));
+}
+
+extern C_LINKAGE void *
+malloc(size_t size)
+{
+	return (ef_malloc(size));
+}
+
+extern C_LINKAGE void *
+calloc(size_t nelem, size_t elsize)
+{
+	return (ef_calloc(nelem, elsize));
+}
+
+/*
+ * This will catch more bugs if you remove the page alignment, but it
+ * will break some software.
+ */
+extern C_LINKAGE void *
+valloc (size_t size)
+{
+	return (ef_valloc(size));
+
+}
+#endif
+
+
+
+/*
+ * ef_init: one-time allocator setup. Creates the recursive mutex
+ * (so lock() may nest across ef_calloc -> ef_malloc -> ef_memalign)
+ * and builds the arena via initialize(). Exits the process if the
+ * mutex cannot be created.
+ * NOTE(review): the malloc_init guard is not atomic; concurrent first
+ * calls could race -- confirm initialization happens single-threaded.
+ */
+extern C_LINKAGE void ef_init(void ){
+
+	if ( malloc_init == 0 ){
+		malloc_init=1;
+		pthread_mutexattr_t   Attr;
+
+		pthread_mutexattr_init(&Attr);
+		pthread_mutexattr_settype(&Attr, PTHREAD_MUTEX_RECURSIVE);
+
+		if ( pthread_mutex_init(&mutex, &Attr) != 0 ){
+			exit(-1);
+		}
+		initialize();
+	}
+
+}
+
diff --git a/src/common/ef/efence.h b/src/common/ef/efence.h
new file mode 100644
index 00000000..60eb30ff
--- /dev/null
+++ b/src/common/ef/efence.h
@@ -0,0 +1,42 @@
+#include <sys/types.h>
+#include <sys/param.h>
+
+/*
+ * ef_number is the largest unsigned integer we'll need. On systems that
+ * support 64-bit pointers, this may be "unsigned long long".
+ */
+#if defined(USE_LONG_LONG)
+typedef unsigned long long ef_number;
+#else
+typedef unsigned long ef_number;
+#endif
+
+/*
+ * NBBY is the number of bits per byte. Some systems define it in
+ * <sys/param.h> .
+ */
+#ifndef NBBY
+#define NBBY 8
+#endif
+
+/*
+ * This is used to declare functions with "C" linkage if we are compiling
+ * with C++ .
+ */
+#ifdef __cplusplus
+#define C_LINKAGE "C"
+#else
+#define C_LINKAGE
+#endif
+
+void Page_AllowAccess(void * address, size_t size);
+void * Page_Create(size_t size);
+void Page_Delete(void * address, size_t size);
+void Page_DenyAccess(void * address, size_t size);
+size_t Page_Size(void);
+
+void EF_Abort(const char * message, ...);
+void EF_Exit(const char * message, ...);
+void EF_Print(const char * message, ...);
+void EF_Lock();
+void EF_UnLock();
diff --git a/src/common/ef/eftest.c b/src/common/ef/eftest.c
new file mode 100644
index 00000000..372ac596
--- /dev/null
+++ b/src/common/ef/eftest.c
@@ -0,0 +1,219 @@
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <setjmp.h>
+#include <signal.h>
+#include "efence.h"
+
+/*
+ * Electric Fence confidence tests.
+ * Make sure all of the various functions of Electric Fence work correctly.
+ */
+
+#ifndef PAGE_PROTECTION_VIOLATED_SIGNAL
+#define PAGE_PROTECTION_VIOLATED_SIGNAL SIGSEGV
+#endif
+
+struct diagnostic {
+ int (*test)(void);
+ int expectedStatus;
+ const char * explanation;
+};
+
+extern int EF_PROTECT_BELOW;
+extern int EF_ALIGNMENT;
+
+static sigjmp_buf env;
+
+/*
+ * There is still too little standardization of the arguments and return
+ * type of signal handler functions.
+ */
+/* Signal handler installed by gotSegmentationFault(): restore the
+ * default disposition and jump back to the sigsetjmp() point, which
+ * then reports "test faulted" (status 1). */
+static
+void
+segmentationFaultHandler(
+int signalNumber
+#if ( defined(_AIX) )
+, ...
+#endif
+)
+ {
+	signal(PAGE_PROTECTION_VIOLATED_SIGNAL, SIG_DFL);
+	siglongjmp(env, 1);
+}
+
+/*
+ * Run "test" with a SIGSEGV handler armed. Returns the test's own
+ * status when it completes normally, or 1 when it faulted (control
+ * returns here through siglongjmp with sigsetjmp yielding non-zero).
+ */
+static int
+gotSegmentationFault(int (*test)(void))
+{
+	if ( sigsetjmp(env,1) == 0 ) {
+		int			status;
+
+		signal(PAGE_PROTECTION_VIOLATED_SIGNAL
+		,segmentationFaultHandler);
+		status = (*test)();
+		signal(PAGE_PROTECTION_VIOLATED_SIGNAL, SIG_DFL);
+		return status;
+	}
+	else
+		return 1;
+}
+
+static char * allocation;
+/* c is global so that assignments to it won't be optimized out. */
+char c;
+
+/* Sanity check: ef_number must be wide enough to hold a pointer.
+ * Returns non-zero (failure) when it is not. */
+static int
+testSizes(void)
+{
+	/*
+	 * If ef_number can't hold all of the bits of a void *, have the user
+	 * add -DUSE_LONG_LONG to the compiler flags so that ef_number will be
+	 * declared as "unsigned long long" instead of "unsigned long".
+	 */
+	return ( sizeof(ef_number) < sizeof(void *) );
+}
+
+/* Allocate the one-byte probe buffer used by the read/write tests.
+ * Returns 0 on success, 1 when malloc() failed. */
+static int
+allocateMemory(void)
+{
+	allocation = (char *)malloc(1);
+
+	return ( allocation == 0 );
+}
+
+/* Release the probe buffer; always reports success. */
+static int
+freeMemory(void)
+{
+	free(allocation);
+	return 0;
+}
+
+/* Switch Electric Fence to under-run detection for later tests. */
+static int
+protectBelow(void)
+{
+	EF_PROTECT_BELOW = 1;
+	return 0;
+}
+
+/* Read the first byte of the buffer -- must not fault. */
+static int
+read0(void)
+{
+	c = *allocation;
+
+	return 0;
+}
+
+/* Write the first byte of the buffer -- must not fault. */
+static int
+write0(void)
+{
+	*allocation = 1;
+
+	return 0;
+}
+
+/* Read one byte past the end -- expected to fault (overrun test). */
+static int
+read1(void)
+{
+	c = allocation[1];
+
+	return 0;
+}
+
+/* Read one byte before the start -- expected to fault (underrun test). */
+static int
+readMinus1(void)
+{
+	c = allocation[-1];
+	return 0;
+}
+
+/*
+ * Table of confidence tests, run in order by main(). Each entry pairs
+ * a test function with the status gotSegmentationFault() is expected
+ * to report (1 means "should fault") and a message printed on failure.
+ * The first message now names the macro the header actually tests,
+ * USE_LONG_LONG (see efence.h), not the nonexistent LONG_LONG.
+ */
+static struct diagnostic diagnostics[] = {
+	{
+		testSizes, 0,
+		"Please add -DUSE_LONG_LONG to the compiler flags and recompile."
+	},
+	{
+		allocateMemory, 0,
+		"Allocation 1: This test allocates a single byte of memory."
+	},
+	{
+		read0, 0,
+		"Read valid memory 1: This test reads the allocated memory."
+	},
+	{
+		write0, 0,
+		"Write valid memory 1: This test writes the allocated memory."
+	},
+	{
+		read1, 1,
+		"Read overrun: This test reads beyond the end of the buffer."
+	},
+	{
+		freeMemory, 0,
+		"Free memory: This test frees the allocated memory."
+	},
+	{
+		protectBelow, 0,
+		"Protect below: This sets Electric Fence to protect\n"
+		"the lower boundary of a malloc buffer, rather than the\n"
+		"upper boundary."
+	},
+	{
+		allocateMemory, 0,
+		"Allocation 2: This allocates memory with the lower boundary"
+		" protected."
+	},
+	{
+		read0, 0,
+		"Read valid memory 2: This test reads the allocated memory."
+	},
+	{
+		write0, 0,
+		"Write valid memory 2: This test writes the allocated memory."
+	},
+	{
+		readMinus1, 1,
+		"Read underrun: This test reads before the beginning of the"
+		" buffer."
+	},
+	{
+		0, 0, 0
+	}
+};
+
+static const char failedTest[]
+ = "Electric Fence confidence test failed.\n";
+
+static const char newline = '\n';
+
+/*
+ * Drive every entry of the diagnostics table. On the first mismatch
+ * between observed and expected status, report via raw write(2) and
+ * _exit() -- stdio and exit() are avoided on purpose because they use
+ * the very malloc() the failing test just proved broken.
+ */
+int
+main(int argc, char * * argv)
+{
+	static const struct diagnostic *	diag = diagnostics;
+
+
+	EF_PROTECT_BELOW = 0;
+	EF_ALIGNMENT = 0;
+
+	while ( diag->explanation != 0 ) {
+		int	status = gotSegmentationFault(diag->test);
+
+		if ( status != diag->expectedStatus ) {
+			/*
+			 * Don't use stdio to print here, because stdio
+			 * uses malloc() and we've just proven that malloc()
+			 * is broken. Also, use _exit() instead of exit(),
+			 * because _exit() doesn't flush stdio.
+			 */
+			write(2, failedTest, sizeof(failedTest) - 1);
+			write(2, diag->explanation, strlen(diag->explanation));
+			write(2, &newline, 1);
+			_exit(-1);
+		}
+		diag++;
+	}
+	return 0;
+}
diff --git a/src/common/ef/page.cpp b/src/common/ef/page.cpp
new file mode 100644
index 00000000..8a5a8f1c
--- /dev/null
+++ b/src/common/ef/page.cpp
@@ -0,0 +1,193 @@
+#include "efence.h"
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+
+/*
+ * Lots of systems are missing the definition of PROT_NONE.
+ */
+#ifndef PROT_NONE
+#define PROT_NONE 0
+#endif
+
+/*
+ * 386 BSD has MAP_ANON instead of MAP_ANONYMOUS.
+ */
+#if ( !defined(MAP_ANONYMOUS) && defined(MAP_ANON) )
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+/*
+ * For some reason, I can't find mprotect() in any of the headers on
+ * IRIX or SunOS 4.1.2
+ */
+/* extern C_LINKAGE int mprotect(void * addr, size_t len, int prot); */
+
+
+
+//#ifdef _64BIT_PLATFORM
+ /* Address *hint* for the first mmap(); 0xc00000000000 keeps fence
+  * mappings away from the usual heap/library ranges on 64-bit hosts.
+  * NOTE(review): the 32-bit branch is commented out, so this value is
+  * used unconditionally — confirm all targets are 64-bit. */
+ static caddr_t startAddr = (caddr_t) 0xc00000000000;
+//#else
+ //static caddr_t startAddr = (caddr_t) 0;
+//#endif
+
+
+#if ( !defined(sgi) && !defined(_AIX) )
+/* Legacy libc error-table symbol; currently unused (lookup disabled below). */
+extern int sys_nerr;
+/*extern char * sys_errlist[];*/
+#endif
+
+/*
+ * Return a human-readable string for the current OS error, for use in
+ * EF_Exit()/EF_Abort() messages. Uses strerror() on sgi and AIX; on
+ * all other systems the sys_errlist lookup has been disabled, so a
+ * generic "Unknown error" message is returned regardless of errno.
+ */
+static const char *
+stringErrorReport(void)
+{
+#if ( defined(sgi) )
+ return strerror(oserror());
+#elif ( defined(_AIX) )
+ return strerror(errno);
+#else
+ //if ( errno > 0 && errno < sys_nerr )
+ return "Unknown error.\n";
+ //return sys_errlist[errno];
+ //else
+ //return "Unknown error.\n";
+#endif
+}
+
+/*
+ * Create memory.
+ */
+#if defined(MAP_ANONYMOUS)
+/*
+ * Allocate `size` bytes of zero-filled, read/write virtual memory via
+ * anonymous private mmap(). Never returns NULL: on failure the process
+ * is terminated through EF_Exit(). Callers later revoke access with
+ * Page_DenyAccess() to build the guard pages.
+ */
+void *
+Page_Create(size_t size)
+{
+ caddr_t allocation;
+
+ /*
+ * In this version, "startAddr" is a _hint_, not a demand.
+ * When the memory I map here is contiguous with other
+ * mappings, the allocator can coalesce the memory from two
+ * or more mappings into one large contiguous chunk, and thus
+ * might be able to find a fit that would not otherwise have
+ * been possible. I could _force_ it to be contiguous by using
+ * the MMAP_FIXED flag, but I don't want to stomp on memory mappings
+ * generated by other software, etc.
+ */
+ allocation = (caddr_t) mmap(
+ startAddr
+ ,size
+ ,PROT_READ|PROT_WRITE
+ ,MAP_PRIVATE|MAP_ANONYMOUS
+ ,-1
+ ,0);
+
+#ifndef __hpux
+ /*
+ * Set the "address hint" for the next mmap() so that it will abut
+ * the mapping we just created.
+ *
+ * HP/UX 9.01 has a kernel bug that makes mmap() fail sometimes
+ * when given a non-zero address hint, so we'll leave the hint set
+ * to zero on that system. HP recently told me this is now fixed.
+ * Someone please tell me when it is probable to assume that most
+ * of those systems that were running 9.01 have been upgraded.
+ */
+ startAddr = allocation + size;
+#endif
+
+ /* NOTE(review): compares against (caddr_t)-1 rather than MAP_FAILED;
+  * these are the same value on the targeted platforms. The bogus
+  * startAddr set above on failure is harmless since EF_Exit() does
+  * not return. */
+ if ( allocation == (caddr_t)-1 )
+ EF_Exit("mmap() failed: %s", stringErrorReport());
+
+ return (void *)allocation;
+}
+#else
+/*
+ * Fallback for systems without MAP_ANONYMOUS: obtain zero-filled pages
+ * by privately mapping /dev/zero. The descriptor is opened once on the
+ * first call and intentionally kept open for the life of the process.
+ * Never returns NULL: failures terminate the process via EF_Exit().
+ */
+void *
+Page_Create(size_t size)
+{
+ static int devZeroFd = -1;
+ caddr_t allocation;
+
+ if ( devZeroFd == -1 ) {
+ devZeroFd = open("/dev/zero", O_RDWR);
+ if ( devZeroFd < 0 )
+ EF_Exit(
+ "open() on /dev/zero failed: %s"
+ ,stringErrorReport());
+ }
+
+ /*
+ * In this version, "startAddr" is a _hint_, not a demand.
+ * When the memory I map here is contiguous with other
+ * mappings, the allocator can coalesce the memory from two
+ * or more mappings into one large contiguous chunk, and thus
+ * might be able to find a fit that would not otherwise have
+ * been possible. I could _force_ it to be contiguous by using
+ * the MMAP_FIXED flag, but I don't want to stomp on memory mappings
+ * generated by other software, etc.
+ */
+ allocation = (caddr_t) mmap(
+ startAddr
+ ,size
+ ,PROT_READ|PROT_WRITE
+ ,MAP_PRIVATE
+ ,devZeroFd
+ ,0);
+
+ /* Hint the next mapping to abut this one (see comment above). */
+ startAddr = allocation + size;
+
+ if ( allocation == (caddr_t)-1 )
+ EF_Exit("mmap() failed: %s", stringErrorReport());
+
+ return (void *)allocation;
+}
+#endif
+
+/* Common fatal-error path for the mprotect() wrappers below. */
+static void
+mprotectFailed(void)
+{
+ EF_Exit("mprotect() failed: %s", stringErrorReport());
+}
+
+/* Make [address, address+size) readable and writable again.
+ * Fatal (EF_Exit) if mprotect() fails. */
+void
+Page_AllowAccess(void * address, size_t size)
+{
+ if ( mprotect((caddr_t)address, size, PROT_READ|PROT_WRITE) < 0 )
+ mprotectFailed();
+}
+
+/* Revoke all access to [address, address+size); any subsequent touch of
+ * these pages raises SIGSEGV — this is how the guard pages are armed.
+ * Fatal (EF_Exit) if mprotect() fails. */
+void
+Page_DenyAccess(void * address, size_t size)
+{
+ if ( mprotect((caddr_t)address, size, PROT_NONE) < 0 )
+ mprotectFailed();
+}
+
+/*
+ * "Delete" a page range. The address space is deliberately never
+ * unmapped — the pages are only made inaccessible — so stale pointers
+ * into freed memory fault instead of silently aliasing a new mapping.
+ */
+void
+Page_Delete(void * address, size_t size)
+{
+ Page_DenyAccess(address, size);
+}
+
+/*
+ * Return the system virtual-memory page size in bytes, preferring the
+ * POSIX sysconf() names and falling back to the BSD getpagesize() on
+ * systems that define neither _SC_PAGESIZE nor _SC_PAGE_SIZE.
+ */
+#if defined(_SC_PAGESIZE)
+size_t
+Page_Size(void)
+{
+ return (size_t)sysconf(_SC_PAGESIZE);
+}
+#elif defined(_SC_PAGE_SIZE)
+size_t
+Page_Size(void)
+{
+ return (size_t)sysconf(_SC_PAGE_SIZE);
+}
+#else
+/* extern int getpagesize(); */
+size_t
+Page_Size(void)
+{
+ return getpagesize();
+}
+#endif
diff --git a/src/common/ef/print.cpp b/src/common/ef/print.cpp
new file mode 100644
index 00000000..c28189e5
--- /dev/null
+++ b/src/common/ef/print.cpp
@@ -0,0 +1,170 @@
+#include "efence.h"
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <string.h>
+#include <signal.h>
+
+/*
+ * These routines do their printing without using stdio. Stdio can't
+ * be used because it calls malloc(). Internal routines of a malloc()
+ * debugger should not re-enter malloc(), so stdio is out.
+ */
+
+/*
+ * NUMBER_BUFFER_SIZE is the longest character string that could be needed
+ * to represent an unsigned integer, assuming we might print in base 2.
+ */
+#define NUMBER_BUFFER_SIZE (sizeof(ef_number) * NBBY)
+
+/*
+ * Write `number` to stderr (fd 2) in the given base, using only
+ * write(2) — no stdio, no malloc. Digits are generated least
+ * significant first into the tail of a stack buffer, then emitted in a
+ * single write. Aborts via EF_Abort() if the buffer would overflow
+ * (cannot happen for base >= 2 given NUMBER_BUFFER_SIZE).
+ */
+static void
+printNumber(ef_number number, ef_number base)
+{
+ char buffer[NUMBER_BUFFER_SIZE];
+ char * s = &buffer[NUMBER_BUFFER_SIZE];
+ int size;
+
+ do {
+ ef_number digit;
+
+ if ( --s == buffer )
+ EF_Abort("Internal error printing number.");
+
+ digit = number % base;
+
+ if ( digit < 10 )
+ *s = '0' + digit;
+ else
+ *s = 'a' + digit - 10;
+
+ } while ( (number /= base) > 0 );
+
+ /* Emit only the digits actually produced (s points at the first). */
+ size = &buffer[NUMBER_BUFFER_SIZE] - s;
+
+ if ( size > 0 )
+ write(2, s, size);
+}
+
+/*
+ * Minimal printf-style engine writing to stderr via write(2) only.
+ * Recognized specifiers:
+ *   %%  literal percent sign
+ *   %a  address (void *), printed in hex via ef_number
+ *   %s  NUL-terminated string
+ *   %d  signed int (leading '-' for negatives)
+ *   %x  unsigned int, hex
+ *   %c  single character (passed as int due to vararg promotion)
+ * Any other specifier recursively reports itself through EF_Print().
+ */
+static void
+vprint(const char * pattern, va_list args)
+{
+ static const char bad_pattern[] =
+ "\nBad pattern specifier %%%c in EF_Print().\n";
+ const char * s = pattern;
+ char c;
+
+ while ( (c = *s++) != '\0' ) {
+ if ( c == '%' ) {
+ c = *s++;
+ switch ( c ) {
+ case '%':
+ (void) write(2, &c, 1);
+ break;
+ case 'a':
+ /*
+ * Print an address passed as a void pointer.
+ * The type of ef_number must be set so that
+ * it is large enough to contain all of the
+ * bits of a void pointer.
+ */
+ printNumber(
+ (ef_number)va_arg(args, void *)
+ ,0x10);
+ break;
+ case 's':
+ {
+ const char * string;
+ size_t length;
+
+ string = va_arg(args, char *);
+ length = strlen(string);
+
+ (void) write(2, string, length);
+ }
+ break;
+ case 'd':
+ {
+ int n = va_arg(args, int);
+
+ /* NOTE(review): n = -n overflows (UB) when
+  * n == INT_MIN; acceptable for a debug
+  * printer, but worth confirming. */
+ if ( n < 0 ) {
+ char c = '-';
+ write(2, &c, 1);
+ n = -n;
+ }
+ printNumber(n, 10);
+ }
+ break;
+ case 'x':
+ printNumber(va_arg(args, u_int), 0x10);
+ break;
+ case 'c':
+ { /*Cast used, since char gets promoted to int in ... */
+ char c = (char) va_arg(args, int);
+
+ (void) write(2, &c, 1);
+ }
+ break;
+ default:
+ {
+ EF_Print(bad_pattern, c);
+ }
+
+ }
+ }
+ else
+ (void) write(2, &c, 1);
+ }
+}
+
+/*
+ * Print a fatal diagnostic (printf-style, see vprint()) and terminate
+ * abnormally. Raises SIGILL rather than calling abort(), and falls
+ * through to _exit(-1) if the signal is somehow handled. Never returns.
+ */
+void
+EF_Abort(const char * pattern, ...)
+{
+ va_list args;
+
+ va_start(args, pattern);
+
+ EF_Print("\nElectricFence Aborting: ");
+ vprint(pattern, args);
+ EF_Print("\n");
+
+ va_end(args);
+
+ /*
+ * I use kill(getpid(), SIGILL) instead of abort() because some
+ * mis-guided implementations of abort() flush stdio, which can
+ * cause malloc() or free() to be called.
+ */
+ kill(getpid(), SIGILL);
+ /* Just in case something handles SIGILL and returns, exit here. */
+ _exit(-1);
+}
+
+/*
+ * Print a fatal diagnostic (printf-style, see vprint()) and terminate
+ * with _exit(-1). Unlike EF_Abort(), no signal is raised, so no core
+ * dump is produced. Never returns.
+ */
+void
+EF_Exit(const char * pattern, ...)
+{
+ va_list args;
+
+ va_start(args, pattern);
+
+ EF_Print("\nElectricFence Exiting: ");
+ vprint(pattern, args);
+ EF_Print("\n");
+
+ va_end(args);
+
+ /*
+ * I use _exit() because the regular exit() flushes stdio,
+ * which may cause malloc() or free() to be called.
+ */
+ _exit(-1);
+}
+
+/*
+ * Public printf-style printer to stderr; forwards to vprint(), which
+ * uses write(2) only, so it is safe inside the malloc debugger itself.
+ */
+void
+EF_Print(const char * pattern, ...)
+{
+ va_list args;
+
+ va_start(args, pattern);
+ vprint(pattern, args);
+ va_end(args);
+}
diff --git a/src/common/ef/tstheap.c b/src/common/ef/tstheap.c
new file mode 100644
index 00000000..c712fed5
--- /dev/null
+++ b/src/common/ef/tstheap.c
@@ -0,0 +1,61 @@
+#include <stdlib.h>
+#include <stdio.h>
+#include <math.h>
+#include <limits.h>
+#include "efence.h"
+
+/*
+ * This is a simple program to exercise the allocator. It allocates and frees
+ * memory in a pseudo-random fashion. It should run silently, using up time
+ * and resources on your system until you stop it or until it has gone
+ * through TEST_DURATION (or the argument) iterations of the loop.
+ */
+
+extern C_LINKAGE double drand48(void); /* For pre-ANSI C systems */
+
+#define POOL_SIZE 1024 /* number of slots that may hold a live allocation */
+#define LARGEST_BUFFER 30000 /* maximum random allocation size, in bytes */
+#define TEST_DURATION 1000000 /* default loop iterations (see main) */
+
+/* Slot table: NULL means empty; non-NULL holds a live malloc() block. */
+void * pool[POOL_SIZE];
+
+#ifdef FAKE_DRAND48
+/*
+ * Add -DFAKE_DRAND48 to your compile flags if your system doesn't
+ * provide drand48().
+ */
+
+#ifndef ULONG_MAX
+/* NOTE(review): ~(1L) is -2 as a signed long, not the maximum unsigned
+ * value — this looks like it was meant to be (~0UL). Only compiled when
+ * <limits.h> fails to define ULONG_MAX; confirm before relying on the
+ * distribution of the fake drand48(). */
+#define ULONG_MAX ~(1L)
+#endif
+
+/* Crude [0,1) substitute for drand48() built on random(). */
+double
+drand48(void)
+{
+ return (random() / (double)ULONG_MAX);
+}
+#endif
+
+/*
+ * Allocator stress test. Each iteration picks a random pool slot:
+ * if it holds a block, the block is freed and the slot cleared;
+ * otherwise a block of random size (1..LARGEST_BUFFER) is allocated
+ * into it. Runs TEST_DURATION iterations, or argv[1] iterations if
+ * given. Silent on success; Electric Fence faults on any corruption.
+ * NOTE(review): malloc() return values are deliberately unchecked —
+ * an OOM would store NULL and simply count the slot as empty.
+ */
+int
+main(int argc, char * * argv)
+{
+ int count = 0;
+ int duration = TEST_DURATION;
+
+ if ( argc >= 2 )
+ duration = atoi(argv[1]);
+
+ for ( ; count < duration; count++ ) {
+ void * * element = &pool[(int)(drand48() * POOL_SIZE)];
+ size_t size = (size_t)(drand48() * (LARGEST_BUFFER + 1));
+
+ if ( *element ) {
+ free( *element );
+ *element = 0;
+ }
+ else if ( size > 0 ) {
+ *element = malloc(size);
+ }
+ }
+ return 0;
+}