author    Damjan Marion <damarion@cisco.com>  2020-09-16 17:06:45 +0200
committer Neale Ranns <nranns@cisco.com>      2020-09-21 14:04:19 +0000
commit    8157a161c613c3cc83c1c4507ed141b21b9627b5 (patch)
tree      63e6072058e2427702d85c3d5fbaa44942b301af /src/vnet
parent    88caf28354133160d2446e939f1e54adb71cc144 (diff)
ip: use main heap for mtrie
The main heap can be hugepage backed, so it is more efficient to use the main heap
instead of allocating a special heap just for the mtrie.

Type: improvement
Change-Id: I210912ab8567c043205ddfc10fdcfde9a0fa7757
Signed-off-by: Damjan Marion <damarion@cisco.com>
Diffstat (limited to 'src/vnet')
-rw-r--r--  src/vnet/fib/ip4_fib.c   21
-rw-r--r--  src/vnet/ip/ip4.h         3
-rw-r--r--  src/vnet/ip/ip4_mtrie.c  48
3 files changed, 3 insertions, 69 deletions
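
The change is mechanical throughout: every allocation that used to be bracketed by
clib_mem_set_heap (ip4_main.mtrie_mheap) / clib_mem_set_heap (old_heap) now simply runs
against the caller's current heap, i.e. the main heap (which, as the commit message says,
can itself be hugepage backed). Below is a minimal before/after sketch of that pattern,
simplified from the ply_create() hunk further down. The ply_alloc_old()/ply_alloc_new()
helpers are hypothetical names used only for illustration, and the "old" variant assumes
the pre-patch ip4_main.mtrie_mheap field still exists.

#include <vppinfra/mem.h>
#include <vppinfra/pool.h>
#include <vnet/ip/ip4.h>
#include <vnet/ip/ip4_mtrie.h>

/* Before this patch: temporarily switch to the dedicated mtrie mheap,
 * allocate the ply pool element there, then switch back. */
static ip4_fib_mtrie_8_ply_t *
ply_alloc_old (void)
{
  ip4_fib_mtrie_8_ply_t *p;
  void *old_heap = clib_mem_set_heap (ip4_main.mtrie_mheap);
  pool_get_aligned (ip4_ply_pool, p, CLIB_CACHE_LINE_BYTES);
  clib_mem_set_heap (old_heap);
  return p;
}

/* After this patch: allocate straight from the current (main) heap. */
static ip4_fib_mtrie_8_ply_t *
ply_alloc_new (void)
{
  ip4_fib_mtrie_8_ply_t *p;
  pool_get_aligned (ip4_ply_pool, p, CLIB_CACHE_LINE_BYTES);
  return p;
}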
diff --git a/src/vnet/fib/ip4_fib.c b/src/vnet/fib/ip4_fib.c
index 5b12aa67ce3..9142c636cdf 100644
--- a/src/vnet/fib/ip4_fib.c
+++ b/src/vnet/fib/ip4_fib.c
@@ -106,14 +106,11 @@ ip4_create_fib_with_table_id (u32 table_id,
{
fib_table_t *fib_table;
ip4_fib_t *v4_fib;
- void *old_heap;
pool_get(ip4_main.fibs, fib_table);
clib_memset(fib_table, 0, sizeof(*fib_table));
- old_heap = clib_mem_set_heap (ip4_main.mtrie_mheap);
pool_get_aligned(ip4_main.v4_fibs, v4_fib, CLIB_CACHE_LINE_BYTES);
- clib_mem_set_heap (old_heap);
ASSERT((fib_table - ip4_main.fibs) ==
(v4_fib - ip4_main.v4_fibs));
@@ -333,8 +330,6 @@ ip4_fib_table_entry_insert (ip4_fib_t *fib,
/*
* adding a new entry
*/
- uword *old_heap;
- old_heap = clib_mem_set_heap (ip4_main.mtrie_mheap);
if (NULL == hash) {
hash = hash_create (32 /* elts */, sizeof (uword));
@@ -343,7 +338,6 @@ ip4_fib_table_entry_insert (ip4_fib_t *fib,
}
hash = hash_set(hash, key, fib_entry_index);
fib->fib_entry_by_dst_address[len] = hash;
- clib_mem_set_heap (old_heap);
}
else
{
@@ -371,11 +365,7 @@ ip4_fib_table_entry_remove (ip4_fib_t *fib,
}
else
{
- uword *old_heap;
-
- old_heap = clib_mem_set_heap (ip4_main.mtrie_mheap);
hash_unset(hash, key);
- clib_mem_set_heap (old_heap);
}
fib->fib_entry_by_dst_address[len] = hash;
@@ -571,10 +561,9 @@ ip4_fib_table_show_one (ip4_fib_t *fib,
u8 *
format_ip4_fib_table_memory (u8 * s, va_list * args)
{
- s = format(s, "%=30s %=6d %=12ld\n",
+ s = format(s, "%=30s %=6d\n",
"IPv4 unicast",
- pool_elts(ip4_main.fibs),
- mspace_footprint(ip4_main.mtrie_mheap));
+ pool_elts(ip4_main.fibs));
return (s);
}
@@ -640,13 +629,12 @@ ip4_show_fib (vlib_main_t * vm,
if (memory)
{
- uword mtrie_size, hash_size, *old_heap;
+ uword mtrie_size, hash_size;
mtrie_size = ip4_fib_mtrie_memory_usage(&fib->mtrie);
hash_size = 0;
- old_heap = clib_mem_set_heap (ip4_main.mtrie_mheap);
for (i = 0; i < ARRAY_LEN (fib->fib_entry_by_dst_address); i++)
{
uword * hash = fib->fib_entry_by_dst_address[i];
@@ -655,7 +643,6 @@ ip4_show_fib (vlib_main_t * vm,
hash_size += hash_bytes(hash);
}
}
- clib_mem_set_heap (old_heap);
if (verbose)
vlib_cli_output (vm, "%U mtrie:%d hash:%d",
@@ -725,8 +712,6 @@ ip4_show_fib (vlib_main_t * vm,
total_mtrie_memory,
total_hash_memory,
total_mtrie_memory + total_hash_memory);
- vlib_cli_output (vm, "\nMtrie Mheap Usage: %U\n",
- format_mheap, ip4_main.mtrie_mheap, 1);
}
return 0;
}
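
The ip4_fib.c hunks above apply the same idea to the per-prefix-length dst-address
hashes: hash_create()/hash_set()/hash_unset() allocate from whatever heap is current, so
once the dedicated mtrie heap is gone there is nothing to switch around. A compressed
sketch of the insert/remove flow, with the fib bookkeeping and error handling elided
(the 33-slot array mirrors fib_entry_by_dst_address, one hash per prefix length 0..32;
the fib_hash_insert/fib_hash_remove names are made up for this example):

#include <vppinfra/hash.h>

/* hashes[len] maps dst address key -> fib_entry_index for /len prefixes */
static void
fib_hash_insert (uword * hashes[33], u32 len, u64 key, u32 fib_entry_index)
{
  uword *hash = hashes[len];

  if (NULL == hash)
    /* first prefix of this length: the hash is created on the current
     * (main) heap, no clib_mem_set_heap dance needed any more */
    hash = hash_create (32 /* elts */, sizeof (uword));

  hash = hash_set (hash, key, fib_entry_index);
  hashes[len] = hash;
}

static void
fib_hash_remove (uword * hashes[33], u32 len, u64 key)
{
  uword *hash = hashes[len];

  if (NULL != hash)
    hash_unset (hash, key);

  hashes[len] = hash;
}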
diff --git a/src/vnet/ip/ip4.h b/src/vnet/ip/ip4.h
index f5ed9385232..8fba33f8d6e 100644
--- a/src/vnet/ip/ip4.h
+++ b/src/vnet/ip/ip4.h
@@ -170,9 +170,6 @@ typedef struct ip4_main_t
/** Use hugetlb pages for the Mtries */
int mtrie_hugetlb;
- /** The memory heap for the mtries */
- void *mtrie_mheap;
-
/** ARP throttling */
throttle_t arp_throttle;
diff --git a/src/vnet/ip/ip4_mtrie.c b/src/vnet/ip/ip4_mtrie.c
index b5d0a890a7c..7bfcf986e6f 100644
--- a/src/vnet/ip/ip4_mtrie.c
+++ b/src/vnet/ip/ip4_mtrie.c
@@ -176,12 +176,9 @@ ply_create (ip4_fib_mtrie_t * m,
u32 leaf_prefix_len, u32 ply_base_len)
{
ip4_fib_mtrie_8_ply_t *p;
- void *old_heap;
/* Get cache aligned ply. */
- old_heap = clib_mem_set_heap (ip4_main.mtrie_mheap);
pool_get_aligned (ip4_ply_pool, p, CLIB_CACHE_LINE_BYTES);
- clib_mem_set_heap (old_heap);
ply_8_init (p, init_leaf, leaf_prefix_len, ply_base_len);
return ip4_fib_mtrie_leaf_set_next_ply_index (p - ip4_ply_pool);
@@ -798,55 +795,10 @@ static clib_error_t *
ip4_mtrie_module_init (vlib_main_t * vm)
{
CLIB_UNUSED (ip4_fib_mtrie_8_ply_t * p);
- ip4_main_t *im = &ip4_main;
clib_error_t *error = NULL;
- uword *old_heap;
-
- if (im->mtrie_heap_size == 0)
- im->mtrie_heap_size = IP4_FIB_DEFAULT_MTRIE_HEAP_SIZE;
-
-again:
- if (im->mtrie_hugetlb)
- {
- void *rv;
- int mmap_flags, mmap_flags_huge;
- uword htlb_pagesize = clib_mem_get_default_hugepage_size ();
- if (htlb_pagesize == 0)
- {
- clib_warning ("WARNING: htlb pagesize == 0");
- im->mtrie_hugetlb = 0;
- goto again;
- }
- /* Round the allocation request to an even number of huge pages */
- im->mtrie_heap_size = (im->mtrie_heap_size + (htlb_pagesize - 1)) &
- ~(htlb_pagesize - 1);
- mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
- mmap_flags_huge = (mmap_flags | MAP_HUGETLB | MAP_LOCKED |
- min_log2 (htlb_pagesize) << MAP_HUGE_SHIFT);
- rv = mmap (0, im->mtrie_heap_size,
- PROT_READ | PROT_WRITE, mmap_flags_huge, -1, 0);
- if (rv == MAP_FAILED)
- {
- /* Failure when running as root should be logged... */
- if (geteuid () == 0)
- clib_warning ("ip4 mtrie htlb map failed: not enough pages?");
- im->mtrie_hugetlb = 0;
- goto again;
- }
- if (mlock (rv, im->mtrie_heap_size))
- clib_warning ("WARNING: couldn't lock mtrie heap at %llx", rv);
- im->mtrie_mheap = create_mspace_with_base (rv, im->mtrie_heap_size,
- 1 /* locked */ );
- }
- else
- {
- im->mtrie_mheap = create_mspace (im->mtrie_heap_size, 1 /* locked */ );
- }
/* Burn one ply so index 0 is taken */
- old_heap = clib_mem_set_heap (ip4_main.mtrie_mheap);
pool_get (ip4_ply_pool, p);
- clib_mem_set_heap (old_heap);
return (error);
}
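
Most of what the last hunk deletes is the hugetlb setup for the private mtrie heap:
rounding the heap size up to a whole number of huge pages, mmap()ing it with MAP_HUGETLB,
and wrapping the result in an mspace. The rounding step is the standard power-of-two
alignment trick; here is a standalone illustration of that arithmetic (the round_up()
helper and the sample sizes are hypothetical, chosen just to show the expression):

#include <stdio.h>
#include <stdint.h>

/* (size + (pagesize - 1)) & ~(pagesize - 1) rounds "size" up to the next
 * multiple of "pagesize"; valid only when pagesize is a power of two.
 * This is the expression the removed hugetlb branch used before mmap(). */
static uint64_t
round_up (uint64_t size, uint64_t pagesize)
{
  return (size + (pagesize - 1)) & ~(pagesize - 1);
}

int
main (void)
{
  uint64_t huge_2m = 2ULL << 20;   /* 2 MB huge page */

  /* 33 MB rounds up to 34 MB, 32 MB is already aligned */
  printf ("%llu\n", (unsigned long long) round_up (33ULL << 20, huge_2m));
  printf ("%llu\n", (unsigned long long) round_up (32ULL << 20, huge_2m));
  return 0;
}

After this patch none of that sizing or mapping logic lives in the mtrie code; if
hugepage backing is wanted, it is the main heap that should be configured for it at
startup (the exact knob is a VPP configuration detail outside this diff).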