author     Damjan Marion <damarion@cisco.com>       2020-09-16 17:06:45 +0200
committer  Neale Ranns <nranns@cisco.com>           2020-09-21 14:04:19 +0000
commit     8157a161c613c3cc83c1c4507ed141b21b9627b5 (patch)
tree       63e6072058e2427702d85c3d5fbaa44942b301af /src/vnet/ip
parent     88caf28354133160d2446e939f1e54adb71cc144 (diff)
ip: use main heap for mtrie
The main heap can be hugepage backed, so it is more efficient to use the main heap instead of allocating a special heap just for the mtrie.

Type: improvement
Change-Id: I210912ab8567c043205ddfc10fdcfde9a0fa7757
Signed-off-by: Damjan Marion <damarion@cisco.com>
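The pattern being removed is the classic private-heap dance that appears in the hunks below: save the current heap, switch to the mtrie's own mspace, allocate, then switch back. A minimal before/after sketch, assuming VPP's vppinfra pool and memory APIs (clib_mem_set_heap, pool_get) exactly as they appear in the diff; the element type, pool, and heap variable names here are illustrative stand-ins, not the real mtrie definitions:

#include <vppinfra/mem.h>
#include <vppinfra/pool.h>

/* Illustrative element type and pool, not the real mtrie definitions. */
typedef struct { u32 leaves[256]; } example_ply_t;
static example_ply_t *example_pool;
static void *example_private_heap; /* stands in for the removed ip4_main.mtrie_mheap */

/* Before the patch: every allocation hops onto the private heap and back. */
static example_ply_t *
alloc_on_private_heap (void)
{
  example_ply_t *p;
  void *old_heap = clib_mem_set_heap (example_private_heap);
  pool_get (example_pool, p);
  clib_mem_set_heap (old_heap);
  return p;
}

/* After the patch: the pool simply grows on the current (main) heap, which
   can itself be hugepage backed, so no heap switching is needed. */
static example_ply_t *
alloc_on_main_heap (void)
{
  example_ply_t *p;
  pool_get (example_pool, p);
  return p;
}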
Diffstat (limited to 'src/vnet/ip')
-rw-r--r--  src/vnet/ip/ip4.h        |  3 -
-rw-r--r--  src/vnet/ip/ip4_mtrie.c  | 48 -
2 files changed, 0 insertions(+), 51 deletions(-)
diff --git a/src/vnet/ip/ip4.h b/src/vnet/ip/ip4.h
index f5ed9385232..8fba33f8d6e 100644
--- a/src/vnet/ip/ip4.h
+++ b/src/vnet/ip/ip4.h
@@ -170,9 +170,6 @@ typedef struct ip4_main_t
   /** Use hugetlb pages for the Mtries */
   int mtrie_hugetlb;
 
-  /** The memory heap for the mtries */
-  void *mtrie_mheap;
-
   /** ARP throttling */
   throttle_t arp_throttle;
 
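For reference, this is how the affected fragment of ip4_main_t reads after the patch, reconstructed from the context lines of the hunk above. Surrounding members are elided, and the stand-in throttle_t definition is only there so the fragment compiles on its own; the real type is defined elsewhere in vnet:

#include <vppinfra/types.h>

/* Stand-in so this excerpt is self-contained; not the real vnet throttle_t. */
typedef struct { u64 seed; } throttle_t;

typedef struct ip4_main_example_t
{
  /* ... earlier members elided ... */

  /** Use hugetlb pages for the Mtries */
  int mtrie_hugetlb;

  /** ARP throttling */
  throttle_t arp_throttle;

  /* ... later members elided ... */
} ip4_main_example_t;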
diff --git a/src/vnet/ip/ip4_mtrie.c b/src/vnet/ip/ip4_mtrie.c
index b5d0a890a7c..7bfcf986e6f 100644
--- a/src/vnet/ip/ip4_mtrie.c
+++ b/src/vnet/ip/ip4_mtrie.c
@@ -176,12 +176,9 @@ ply_create (ip4_fib_mtrie_t * m,
             u32 leaf_prefix_len, u32 ply_base_len)
 {
   ip4_fib_mtrie_8_ply_t *p;
-  void *old_heap;
 
   /* Get cache aligned ply. */
-  old_heap = clib_mem_set_heap (ip4_main.mtrie_mheap);
   pool_get_aligned (ip4_ply_pool, p, CLIB_CACHE_LINE_BYTES);
-  clib_mem_set_heap (old_heap);
 
   ply_8_init (p, init_leaf, leaf_prefix_len, ply_base_len);
   return ip4_fib_mtrie_leaf_set_next_ply_index (p - ip4_ply_pool);
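Put together with the removals, the post-patch allocation path is just pool_get_aligned() on whatever heap is current, normally the main heap. A sketch of the resulting function, assembled from the context lines above; the declared return type and the init_leaf parameter are not visible in the hunk, so they are inferred here from the return expression and the ply_8_init() call rather than taken from the file:

/* Reconstructed from the surviving context lines of the hunk above.
   Return type and the init_leaf parameter are inferred, not shown there. */
static ip4_fib_mtrie_leaf_t
ply_create (ip4_fib_mtrie_t * m,
            ip4_fib_mtrie_leaf_t init_leaf,
            u32 leaf_prefix_len, u32 ply_base_len)
{
  ip4_fib_mtrie_8_ply_t *p;

  /* Get cache aligned ply, now taken from the main heap. */
  pool_get_aligned (ip4_ply_pool, p, CLIB_CACHE_LINE_BYTES);

  ply_8_init (p, init_leaf, leaf_prefix_len, ply_base_len);
  return ip4_fib_mtrie_leaf_set_next_ply_index (p - ip4_ply_pool);
}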
@@ -798,55 +795,10 @@ static clib_error_t *
 ip4_mtrie_module_init (vlib_main_t * vm)
 {
   CLIB_UNUSED (ip4_fib_mtrie_8_ply_t * p);
-  ip4_main_t *im = &ip4_main;
   clib_error_t *error = NULL;
-  uword *old_heap;
-
-  if (im->mtrie_heap_size == 0)
-    im->mtrie_heap_size = IP4_FIB_DEFAULT_MTRIE_HEAP_SIZE;
-
-again:
-  if (im->mtrie_hugetlb)
-    {
-      void *rv;
-      int mmap_flags, mmap_flags_huge;
-      uword htlb_pagesize = clib_mem_get_default_hugepage_size ();
-      if (htlb_pagesize == 0)
-        {
-          clib_warning ("WARNING: htlb pagesize == 0");
-          im->mtrie_hugetlb = 0;
-          goto again;
-        }
-      /* Round the allocation request to an even number of huge pages */
-      im->mtrie_heap_size = (im->mtrie_heap_size + (htlb_pagesize - 1)) &
-        ~(htlb_pagesize - 1);
-      mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
-      mmap_flags_huge = (mmap_flags | MAP_HUGETLB | MAP_LOCKED |
-                         min_log2 (htlb_pagesize) << MAP_HUGE_SHIFT);
-      rv = mmap (0, im->mtrie_heap_size,
-                 PROT_READ | PROT_WRITE, mmap_flags_huge, -1, 0);
-      if (rv == MAP_FAILED)
-        {
-          /* Failure when running as root should be logged... */
-          if (geteuid () == 0)
-            clib_warning ("ip4 mtrie htlb map failed: not enough pages?");
-          im->mtrie_hugetlb = 0;
-          goto again;
-        }
-      if (mlock (rv, im->mtrie_heap_size))
-        clib_warning ("WARNING: couldn't lock mtrie heap at %llx", rv);
-      im->mtrie_mheap = create_mspace_with_base (rv, im->mtrie_heap_size,
-                                                 1 /* locked */ );
-    }
-  else
-    {
-      im->mtrie_mheap = create_mspace (im->mtrie_heap_size, 1 /* locked */ );
-    }
 
   /* Burn one ply so index 0 is taken */
-  old_heap = clib_mem_set_heap (ip4_main.mtrie_mheap);
   pool_get (ip4_ply_pool, p);
-  clib_mem_set_heap (old_heap);
 
   return (error);
 }
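After the patch the whole module initializer reduces to burning pool index 0 on the main heap; the heap sizing, mmap(), mlock(), and mspace creation logic is gone entirely. The resulting function, assembled from the surviving context lines of the hunk (nothing beyond blank lines is added):

static clib_error_t *
ip4_mtrie_module_init (vlib_main_t * vm)
{
  CLIB_UNUSED (ip4_fib_mtrie_8_ply_t * p);
  clib_error_t *error = NULL;

  /* Burn one ply so index 0 is taken */
  pool_get (ip4_ply_pool, p);

  return (error);
}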