-rw-r--r--  src/vppinfra/test_valloc.c  267
-rw-r--r--  src/vppinfra/valloc.c       362
-rw-r--r--  src/vppinfra/valloc.h        71
3 files changed, 700 insertions, 0 deletions
diff --git a/src/vppinfra/test_valloc.c b/src/vppinfra/test_valloc.c
new file mode 100644
index 00000000000..15bf9aa3fca
--- /dev/null
+++ b/src/vppinfra/test_valloc.c
@@ -0,0 +1,267 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vppinfra/valloc.h>
+
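+/*
+ * GDB callable function: vl - call vec_len - number of elements in a vector
+ */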
+u32
+vl (void *p)
+{
+ return vec_len (p);
+}
+
+/*
+ * GDB callable function: pe - call pool_elts - number of elements in a pool
+ */
+uword
+pe (void *v)
+{
+ return (pool_elts (v));
+}
+
+typedef struct
+{
+ u32 seed;
+ uword baseva;
+ uword size;
+ uword *basevas;
+ u8 *item_in_table;
+ u32 nitems;
+ u32 niter;
+ u32 item_size;
+ int check_every_add_del;
+ clib_valloc_main_t valloc_main;
+ int verbose;
+} test_main_t;
+
+test_main_t test_main;
+
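+/*
+ * Exercise the allocator: create an arena, allocate nitems chunks, then
+ * randomly free and reallocate entries niter times, optionally
+ * cross-checking the baseva hash table and busy flags after each
+ * operation.
+ */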
+clib_error_t *
+test_valloc (test_main_t * tm)
+{
+ clib_valloc_chunk_t _ip, *ip = &_ip;
+ uword baseva;
+ uword *p;
+ int i, j, index;
+ u32 currently_in_table;
+ u32 found;
+
+ ip->baseva = 0x20000000;
+ ip->size = 1024;
+
+ clib_valloc_init (&tm->valloc_main, ip, 1 /* lock */ );
+
+ ip->baseva = 0x20000000 + 1024;
+ ip->size = 1024 * 1024 * 1024 - 1024;
+ clib_valloc_add_chunk (&tm->valloc_main, ip);
+
+ fformat (stdout, "Allocate %d items...\n", tm->nitems);
+ for (i = 0; i < tm->nitems; i++)
+ {
+      baseva = clib_valloc_alloc (&tm->valloc_main, tm->item_size,
+				   1 /* fail:os_out_of_memory */ );
+ vec_add1 (tm->basevas, baseva);
+ vec_add1 (tm->item_in_table, 1);
+ }
+
+ fformat (stdout, "Perform %d random add/delete operations...\n", tm->niter);
+
+ for (i = 0; i < tm->niter; i++)
+ {
+ index = random_u32 (&tm->seed) % tm->nitems;
+ /* Swap state of random entry */
+ if (tm->item_in_table[index])
+ {
+	  if (tm->verbose)
+ fformat (stdout, "free [%d] %llx\n", index, tm->basevas[index]);
+ clib_valloc_free (&tm->valloc_main, tm->basevas[index]);
+ tm->item_in_table[index] = 0;
+ tm->basevas[index] = ~0;
+ }
+ else
+ {
+	  baseva = clib_valloc_alloc (&tm->valloc_main, tm->item_size,
+				      1 /* fail:os_out_of_memory */ );
+ tm->basevas[index] = baseva;
+ tm->item_in_table[index] = 1;
+	  if (tm->verbose)
+ fformat (stdout, "alloc [%d] %llx\n", index, tm->basevas[index]);
+ }
+
+ /* Check our work... */
+ if (tm->check_every_add_del)
+ {
+ for (j = 0; j < tm->nitems; j++)
+ {
+ if (tm->item_in_table[j])
+ {
+ p = hash_get ((&tm->valloc_main)->chunk_index_by_baseva,
+ tm->basevas[j]);
+ if (p)
+ {
+ ip =
+ pool_elt_at_index ((&tm->valloc_main)->chunks, p[0]);
+ ASSERT (ip->baseva == tm->basevas[j]);
+ ASSERT (ip->flags & CLIB_VALLOC_BUSY);
+ }
+ }
+ else
+ {
+ p = hash_get ((&tm->valloc_main)->chunk_index_by_baseva,
+ tm->basevas[j]);
+ /* Have to check, it's OK for the block to have been fused */
+ if (p)
+ {
+ ip =
+ pool_elt_at_index ((&tm->valloc_main)->chunks, p[0]);
+ if ((ip->flags & CLIB_VALLOC_BUSY))
+ {
+ fformat (stdout, "BUG: baseva %llx chunk %d busy\n",
+ tm->basevas[j], p[0]);
+ fformat (stdout, "%U\n", format_valloc,
+ &tm->valloc_main, 1 /* verbose */ );
+ ASSERT ((ip->flags & CLIB_VALLOC_BUSY) == 0);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ currently_in_table = 0;
+
+ for (i = 0; i < tm->nitems; i++)
+ {
+ currently_in_table += tm->item_in_table[i];
+ }
+
+ fformat (stdout, "Check that %d items in table can be found...\n",
+ currently_in_table);
+
+ found = 0;
+
+ for (i = 0; i < tm->nitems; i++)
+ {
+ if (tm->item_in_table[i])
+ {
+ p = hash_get ((&tm->valloc_main)->chunk_index_by_baseva,
+ tm->basevas[i]);
+	  if (p)
+	    {
+	      ip = pool_elt_at_index ((&tm->valloc_main)->chunks, p[0]);
+	      ASSERT (ip->baseva == tm->basevas[i]);
+	      ASSERT (ip->flags & CLIB_VALLOC_BUSY);
+	      found++;
+	    }
+ }
+ else
+ {
+ p = hash_get ((&tm->valloc_main)->chunk_index_by_baseva,
+ tm->basevas[i]);
+ /* Have to check, it's OK for the block to have been fused */
+ if (p)
+ {
+ ip = pool_elt_at_index ((&tm->valloc_main)->chunks, p[0]);
+ if ((ip->flags & CLIB_VALLOC_BUSY))
+ {
+ fformat (stdout, "BUG: baseva %llx chunk %d busy\n",
+ tm->basevas[i], p[0]);
+ fformat (stdout, "%U\n", format_valloc,
+ &tm->valloc_main, 1 /* verbose */ );
+ ASSERT ((ip->flags & CLIB_VALLOC_BUSY) == 0);
+ }
+ }
+ }
+ }
+
+ fformat (stdout, "Found %d items in table...\n", found);
+
+ for (i = 0; i < tm->nitems; i++)
+ {
+ if (tm->item_in_table[i])
+ clib_valloc_free (&tm->valloc_main, tm->basevas[i]);
+ }
+
+ fformat (stdout, "%U", format_valloc, &tm->valloc_main, 1 /* verbose */ );
+
+ return 0;
+}
+
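+/*
+ * Parse test parameters (seed, nitems, niter, item-size,
+ * check-every-add_del, verbose), then run the allocator exerciser.
+ */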
+clib_error_t *
+test_valloc_main (unformat_input_t * i)
+{
+ test_main_t *tm = &test_main;
+ clib_error_t *error;
+
+ tm->seed = 0xdeaddabe;
+ tm->nitems = 5;
+ tm->niter = 100;
+ tm->item_size = 1024;
+
+ while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (i, "seed %u", &tm->seed))
+ ;
+ else if (unformat (i, "nitems %u", &tm->nitems))
+ ;
+ else if (unformat (i, "niter %u", &tm->niter))
+ ;
+ else if (unformat (i, "item-size %u", &tm->item_size))
+ ;
+ else if (unformat (i, "check-every-add_del"))
+ tm->check_every_add_del = 1;
+ else if (unformat (i, "verbose %d", &tm->verbose))
+ ;
+ else if (unformat (i, "verbose"))
+ tm->verbose = 1;
+ else
+ return clib_error_return (0, "unknown input '%U'",
+ format_unformat_error, i);
+ }
+
+ error = test_valloc (tm);
+
+ return error;
+}
+
+#ifdef CLIB_UNIX
+int
+main (int argc, char *argv[])
+{
+ unformat_input_t i;
+ int rv = 0;
+ clib_error_t *error;
+
+ clib_mem_init (0, 3ULL << 30);
+
+ unformat_init_command_line (&i, argv);
+ error = test_valloc_main (&i);
+ if (error)
+ {
+ clib_error_report (error);
+ rv = 1;
+ }
+ unformat_free (&i);
+
+ return rv;
+}
+#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/valloc.c b/src/vppinfra/valloc.c
new file mode 100644
index 00000000000..cd1d89bb9a1
--- /dev/null
+++ b/src/vppinfra/valloc.c
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vppinfra/valloc.h>
+
+/** @file
+ @brief Simple first-fit virtual space allocator
+*/
+
+/** Add a chunk of memory to a virtual allocation arena
+ @param vam - clib_valloc_main_t * pointer to the allocation arena
+ @param template - clib_valloc_chunk_t * pointer to a template chunk which
+ describes the virtual address range to add
+
+    @note Only the baseva and size members of the template chunk are
+    significant. It's perfectly OK for the new chunk to be discontinuous
+    with previous chunks; the chunk fusion algorithm won't merge them.
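+
+    A hedged sketch of growing an arena with a second, discontinuous
+    range (the base address and size below are illustrative only):
+    @code
+    clib_valloc_chunk_t ch;
+
+    ch.baseva = 0x40000000;
+    ch.size = 1 << 20;
+    clib_valloc_add_chunk (vam, &ch);
+    @endcode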
+ */
+
+void
+clib_valloc_add_chunk (clib_valloc_main_t * vam,
+ clib_valloc_chunk_t * template)
+{
+ clib_valloc_chunk_t *ch, *new_ch;
+ u32 index;
+
+ ASSERT (vam->flags & CLIB_VALLOC_INITIALIZED);
+
+ clib_spinlock_lock_if_init (&vam->lock);
+
+ /* Add at the beginning, or at the end... */
+ index = vam->first_index;
+
+ /*
+   * Make sure we're not trying to add an overlapping chunk.
+ * It's worth checking, because someone will eventually do that.
+ */
+ if (CLIB_DEBUG > 0 && index != ~0)
+ {
+ while (index != ~0)
+ {
+ ch = pool_elt_at_index (vam->chunks, index);
+	  /* Chunks must be disjoint: the template must end at or before
+	     this chunk, or begin at or after this chunk ends. */
+	  ASSERT (template->baseva + template->size <= ch->baseva ||
+		  template->baseva >= ch->baseva + ch->size);
+ index = ch->next;
+ }
+ index = vam->first_index;
+ }
+
+ if (index != ~0)
+ ch = pool_elt_at_index (vam->chunks, index);
+
+ if (index == ~0 || template->baseva < ch->baseva)
+ {
+ pool_get (vam->chunks, new_ch);
+ memset (new_ch, 0, sizeof (*new_ch));
+
+ if (index != ~0)
+ {
+ ch = pool_elt_at_index (vam->chunks, index);
+
+ new_ch->next = index;
+ new_ch->prev = ~0;
+ ch->prev = new_ch - vam->chunks;
+ }
+ else
+ {
+ new_ch->next = new_ch->prev = ~0;
+ }
+
+ new_ch->baseva = template->baseva;
+ new_ch->size = template->size;
+
+ vam->first_index = new_ch - vam->chunks;
+
+ hash_set (vam->chunk_index_by_baseva, new_ch->baseva, vam->first_index);
+ }
+ else
+ {
+ /* Walk to the end of the chunk chain */
+ while (index != ~0)
+ {
+ ch = pool_elt_at_index (vam->chunks, index);
+ index = ch->next;
+ }
+ /* we want the last chunk index */
+ index = ch - vam->chunks;
+
+ pool_get (vam->chunks, new_ch);
+ memset (new_ch, 0, sizeof (*new_ch));
+
+ ch = pool_elt_at_index (vam->chunks, index);
+
+ new_ch->next = ~0;
+ new_ch->prev = index;
+ ch->next = new_ch - vam->chunks;
+
+ new_ch->baseva = template->baseva;
+ new_ch->size = template->size;
+
+ hash_set (vam->chunk_index_by_baseva, new_ch->baseva,
+ new_ch - vam->chunks);
+ }
+
+ clib_spinlock_unlock_if_init (&vam->lock);
+}
+
+/** Initialize a virtual memory allocation arena
+ @param vam - clib_valloc_main_t * pointer to the arena to initialize
+ @param template - clib_valloc_chunk_t * pointer to a template chunk which
+ describes the initial virtual address range
+*/
+void
+clib_valloc_init (clib_valloc_main_t * vam, clib_valloc_chunk_t * template,
+ int need_lock)
+{
+ ASSERT (template && template->baseva && template->size);
+ memset (vam, 0, sizeof (*vam));
+ if (need_lock)
+ clib_spinlock_init (&vam->lock);
+
+ vam->chunk_index_by_baseva = hash_create (0, sizeof (uword));
+ vam->first_index = ~0;
+ vam->flags |= CLIB_VALLOC_INITIALIZED;
+
+ clib_valloc_add_chunk (vam, template);
+}
+
+/** Allocate virtual space
+ @param vam - clib_valloc_main_t * pointer to the allocation arena
+    @param size - uword number of bytes to allocate
+    @param os_out_of_memory_on_failure - 1 => call os_out_of_memory on failure
+    @return uword base address of the allocated space, 0 => failure
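+
+    A minimal sketch of a non-fatal allocation attempt, where the caller
+    handles arena exhaustion itself (the size here is illustrative):
+    @code
+    uword va = clib_valloc_alloc (vam, 1 << 12, 0);
+    if (va == 0)
+      clib_warning ("arena exhausted");
+    @endcode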
+*/
+uword
+clib_valloc_alloc (clib_valloc_main_t * vam, uword size,
+ int os_out_of_memory_on_failure)
+{
+ clib_valloc_chunk_t *ch, *new_ch, *next_ch;
+ u32 index;
+
+ clib_spinlock_lock_if_init (&vam->lock);
+
+ index = vam->first_index;
+
+ while (index != ~0)
+ {
+ ch = pool_elt_at_index (vam->chunks, index);
+ /* If the chunk is free... */
+ if ((ch->flags & CLIB_VALLOC_BUSY) == 0)
+ {
+ /* Too small? */
+ if (ch->size < size)
+ goto next_chunk;
+ /* Exact match? */
+ if (ch->size == size)
+ {
+ ch->flags |= CLIB_VALLOC_BUSY;
+ clib_spinlock_unlock_if_init (&vam->lock);
+ return ch->baseva;
+ }
+ /*
+ * The current free chunk is larger than necessary, split the block.
+ */
+ pool_get (vam->chunks, new_ch);
+ /* ch might have just moved */
+ ch = pool_elt_at_index (vam->chunks, index);
+ memset (new_ch, 0, sizeof (*new_ch));
+ new_ch->next = new_ch->prev = ~0;
+ new_ch->baseva = ch->baseva + size;
+ new_ch->size = ch->size - size;
+ ch->size = size;
+
+ /* Insert into doubly-linked list */
+ new_ch->next = ch->next;
+ new_ch->prev = ch - vam->chunks;
+
+ if (ch->next != ~0)
+ {
+ next_ch = pool_elt_at_index (vam->chunks, ch->next);
+ next_ch->prev = new_ch - vam->chunks;
+ }
+ ch->next = new_ch - vam->chunks;
+
+ hash_set (vam->chunk_index_by_baseva, new_ch->baseva,
+ new_ch - vam->chunks);
+
+ ch->flags |= CLIB_VALLOC_BUSY;
+ clib_spinlock_unlock_if_init (&vam->lock);
+ return ch->baseva;
+ }
+
+ next_chunk:
+ index = ch->next;
+ }
+ clib_spinlock_unlock_if_init (&vam->lock);
+
+ if (os_out_of_memory_on_failure)
+ os_out_of_memory ();
+
+ return 0;
+}
+
+
+/** Free virtual space
+ @param vam - clib_valloc_main_t * pointer to the allocation arena
+    @param baseva - uword base virtual address of the space to free
+    @return uword - size of the freed virtual chunk
+    @note The size is returned since the allocator knows it, in case the
+    caller doesn't track chunk sizes.
+*/
+uword
+clib_valloc_free (clib_valloc_main_t * vam, uword baseva)
+{
+ clib_valloc_chunk_t *ch, *prev_ch, *next_ch, *n2_ch;
+ u32 index;
+ uword return_size = 0;
+ uword *p;
+
+ clib_spinlock_lock_if_init (&vam->lock);
+
+ p = hash_get (vam->chunk_index_by_baseva, baseva);
+
+ /* Check even in production images */
+ if (p == 0)
+ os_panic ();
+
+ index = p[0];
+
+ ch = pool_elt_at_index (vam->chunks, index);
+
+ return_size = ch->size;
+
+ ASSERT (ch->baseva == baseva);
+ ASSERT ((ch->flags & CLIB_VALLOC_BUSY) != 0);
+
+ ch->flags &= ~CLIB_VALLOC_BUSY;
+
+ /* combine with previous entry? */
+ if (ch->prev != ~0)
+ {
+ prev_ch = pool_elt_at_index (vam->chunks, ch->prev);
+ /*
+ * Previous item must be free as well, and
+ * tangent to this block.
+ */
+ if ((prev_ch->flags & CLIB_VALLOC_BUSY) == 0
+ && ((prev_ch->baseva + prev_ch->size) == ch->baseva))
+ {
+ hash_unset (vam->chunk_index_by_baseva, baseva);
+ prev_ch->size += ch->size;
+ prev_ch->next = ch->next;
+ if (ch->next != ~0)
+ {
+ next_ch = pool_elt_at_index (vam->chunks, ch->next);
+ next_ch->prev = ch->prev;
+ }
+ ASSERT (ch - vam->chunks != vam->first_index);
+ memset (ch, 0xfe, sizeof (*ch));
+ pool_put (vam->chunks, ch);
+ /* See about combining with next elt */
+ ch = prev_ch;
+ }
+ }
+
+ /* Combine with next entry? */
+ if (ch->next != ~0)
+ {
+ next_ch = pool_elt_at_index (vam->chunks, ch->next);
+
+ if ((next_ch->flags & CLIB_VALLOC_BUSY) == 0
+ && ((ch->baseva + ch->size) == next_ch->baseva))
+ {
+ hash_unset (vam->chunk_index_by_baseva, next_ch->baseva);
+ ch->size += next_ch->size;
+ ch->next = next_ch->next;
+ if (ch->next != ~0)
+ {
+ n2_ch = pool_elt_at_index (vam->chunks, next_ch->next);
+ n2_ch->prev = ch - vam->chunks;
+ }
+ ASSERT (next_ch - vam->chunks != vam->first_index);
+	  memset (next_ch, 0xfe, sizeof (*next_ch));
+ pool_put (vam->chunks, next_ch);
+ }
+ }
+
+ clib_spinlock_unlock_if_init (&vam->lock);
+ return return_size;
+}
+
+/** Format a virtual allocation arena (varargs)
+    @param vam - clib_valloc_main_t * pointer to the arena to format
+    @param verbose - int - verbosity level
+    @return u8 vector
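+
+    Typically invoked via the %U directive, as the unit test does; the
+    trailing argument is the verbose flag:
+    @code
+    fformat (stdout, "%U", format_valloc, vam, 1);
+    @endcode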
+*/
+u8 *
+format_valloc (u8 * s, va_list * va)
+{
+ clib_valloc_main_t *vam = va_arg (*va, clib_valloc_main_t *);
+ int verbose = va_arg (*va, int);
+ clib_valloc_chunk_t *ch;
+ u32 index;
+ uword *p;
+
+ clib_spinlock_lock_if_init (&vam->lock);
+
+ s = format (s, "%d chunks, first index %d\n",
+ pool_elts (vam->chunks), vam->first_index);
+
+ if (verbose)
+ {
+ index = vam->first_index;
+ while (index != ~0)
+ {
+ ch = pool_elt_at_index (vam->chunks, index);
+
+ s = format (s, "[%d] base %llx size %llx (%lld) prev %d %s\n",
+ index, ch->baseva, ch->size, ch->size, ch->prev,
+ (ch->flags & CLIB_VALLOC_BUSY) ? "busy" : "free");
+
+ p = hash_get (vam->chunk_index_by_baseva, ch->baseva);
+ if (p == 0)
+ {
+ s = format (s, " BUG: baseva not in hash table!\n");
+ }
+ else if (p[0] != index)
+ {
+ s = format (s, " BUG: baseva in hash table %d not %d!\n",
+ p[0], index);
+ }
+ index = ch->next;
+ }
+ }
+
+ clib_spinlock_unlock_if_init (&vam->lock);
+
+ return s;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/valloc.h b/src/vppinfra/valloc.h
new file mode 100644
index 00000000000..993844eb1bd
--- /dev/null
+++ b/src/vppinfra/valloc.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_valloc_h
+#define included_valloc_h
+#include <vppinfra/clib.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/bitmap.h>
+#include <vppinfra/lock.h>
+#include <vppinfra/hash.h>
+
+/** @file
+ @brief Simple first-fit virtual space allocator
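+
+    A minimal end-to-end sketch, assuming a hypothetical single-range
+    arena (base address and sizes are illustrative only; the trailing
+    init argument requests a spinlock, and the trailing alloc argument
+    declines the out-of-memory panic):
+    @code
+    clib_valloc_main_t vam;
+    clib_valloc_chunk_t ch;
+    uword va;
+
+    ch.baseva = 0x20000000;
+    ch.size = 1 << 20;
+    clib_valloc_init (&vam, &ch, 1);
+    va = clib_valloc_alloc (&vam, 4096, 0);
+    if (va)
+      clib_valloc_free (&vam, va);
+    @endcode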
+*/
+
+typedef struct
+{
+ u32 next; /**< next chunk pool index */
+ u32 prev; /**< previous chunk pool index */
+ uword baseva; /**< base VA for this chunk */
+ uword size; /**< size in bytes of this chunk */
+ uword flags; /**< flags (free/busy) */
+} clib_valloc_chunk_t;
+
+#define CLIB_VALLOC_BUSY (1<<0) /**< chunk is in use */
+
+typedef struct
+{
+ clib_valloc_chunk_t *chunks; /**< pool of virtual chunks */
+  uword *chunk_index_by_baseva;	/**< chunk pool index by baseva hash */
+ clib_spinlock_t lock; /**< spinlock */
+ uword flags; /**< flags */
+ u32 first_index; /**< pool index of first chunk in list */
+} clib_valloc_main_t;
+
+#define CLIB_VALLOC_INITIALIZED (1<<0) /**< object has been initialized */
+
+/* doxygen tags in valloc.c */
+void clib_valloc_init (clib_valloc_main_t * vam,
+ clib_valloc_chunk_t * template, int need_lock);
+void
+clib_valloc_add_chunk (clib_valloc_main_t * vam,
+ clib_valloc_chunk_t * template);
+
+format_function_t format_valloc;
+
+uword clib_valloc_free (clib_valloc_main_t * vam, uword baseva);
+uword clib_valloc_alloc (clib_valloc_main_t * vam, uword size,
+ int os_out_of_memory_on_failure);
+
+#endif /* included_valloc_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */