author	Dave Barach <dbarach@cisco.com>	2016-04-28 16:24:15 -0400
committer	Damjan Marion <damarion@cisco.com>	2016-04-29 11:36:09 +0000
commit	848191d3e1b1b2febb1f67e5121487f871e67b56 (patch)
tree	c70269ca7019b2d62cef4b3aeb1f4b43e6d63599 /vlib
parent	7226b84d7418fcbb9b8dd42045aa7a5f18da9679 (diff)
Clean up per-thread mheap setup.
The stats thread was sharing the main mheap when we started at least
one worker or I/O thread, but ran on its own mheap when no worker or
I/O threads were started.
Net of this change: if a VLIB_REGISTER_THREAD instance specifies a
per-thread mheap, a per-thread mheap will be provided. Otherwise,
threads share the main heap.
The stats thread now uses the main heap. Simpler is better.
Change-Id: I1fff0dd66ae8f7dfe44923f702734e2832b55b09
Signed-off-by: Dave Barach <dbarach@cisco.com>
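
For context, here is a minimal sketch of how a thread registration might opt into a private heap under the new behavior. Only .function and .mheap_size are grounded in the patch (visible as tr->function and tr->mheap_size); the registration name, the .name field, and the stub thread function are illustrative assumptions, not taken from this commit.

#include <vlib/vlib.h>
#include <vlib/threads.h>

/* Hypothetical thread entry point, for illustration only. */
static void
my_io_thread_fn (void *arg)
{
  vlib_worker_thread_t *w = arg;
  (void) w;
  while (1)
    ;                            /* real code would do per-thread work here */
}

/* A non-zero .mheap_size asks start_workers() to mheap_alloc() a
 * private heap for each instance of this thread; leaving it zero
 * (or omitting it) makes the thread share the main heap instead. */
VLIB_REGISTER_THREAD (my_io_thread_reg, static) = {
  .name = "my-io",               /* assumed field, as in existing registrations */
  .function = my_io_thread_fn,
  .mheap_size = 64 << 20,        /* request a private 64 MB mheap */
};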
Diffstat (limited to 'vlib')
-rw-r--r--	vlib/vlib/threads.c	44
1 files changed, 22 insertions, 22 deletions
diff --git a/vlib/vlib/threads.c b/vlib/vlib/threads.c
index 32ebdfe761d..3b815be42e1 100644
--- a/vlib/vlib/threads.c
+++ b/vlib/vlib/threads.c
@@ -535,7 +535,9 @@ static clib_error_t * start_workers (vlib_main_t * vm)
   vlib_node_runtime_t * rt;
   u32 n_vlib_mains = tm->n_vlib_mains;
   u32 worker_thread_index;
-
+  u8 * main_heap = clib_mem_get_per_cpu_heap();
+  mheap_t * main_heap_header = mheap_header (main_heap);
+
   vec_reset_length (vlib_worker_threads);
 
   /* Set up the main thread */
@@ -558,21 +560,19 @@ static clib_error_t * start_workers (vlib_main_t * vm)
     }
 #endif
 
+  /*
+   * Truth of the matter: we always use at least two
+   * threads. So, make the main heap thread-safe
+   * and make the event log thread-safe.
+   */
+  main_heap_header->flags |= MHEAP_FLAG_THREAD_SAFE;
+  vm->elog_main.lock =
+    clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
+                            CLIB_CACHE_LINE_BYTES);
+  vm->elog_main.lock[0] = 0;
+
   if (n_vlib_mains > 1)
     {
-      u8 * heap = clib_mem_get_per_cpu_heap();
-      mheap_t * h = mheap_header (heap);
-
-      /* make the main heap thread-safe */
-      h->flags |= MHEAP_FLAG_THREAD_SAFE;
-
-      /* Make the event-log MP-safe */
-      vm->elog_main.lock =
-        clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
-                                CLIB_CACHE_LINE_BYTES);
-
-      vm->elog_main.lock[0] = 0;
-
       vec_validate (vlib_mains, tm->n_vlib_mains - 1);
       _vec_len (vlib_mains) = 0;
       vec_add1 (vlib_mains, vm);
@@ -609,13 +609,10 @@ static clib_error_t * start_workers (vlib_main_t * vm)
           for (k = 0; k < tr->count; k++)
             {
               vec_add2 (vlib_worker_threads, w, 1);
-              /*
-               * Share the main heap which is now thread-safe.
-               *
-               * To allocate separate heaps, code:
-               * mheap_alloc (0 / * use VM * /, tr->mheap_size);
-               */
-              w->thread_mheap = heap;
+              if (tr->mheap_size)
+                w->thread_mheap = mheap_alloc (0 /* use VM */, tr->mheap_size);
+              else
+                w->thread_mheap = main_heap;
               w->thread_stack = vlib_thread_stacks[w - vlib_worker_threads];
               w->thread_function = tr->function;
               w->thread_function_arg = w;
@@ -744,7 +741,10 @@ static clib_error_t * start_workers (vlib_main_t * vm)
       for (j = 0; j < tr->count; j++)
         {
           vec_add2 (vlib_worker_threads, w, 1);
-          w->thread_mheap = mheap_alloc (0 /* use VM */, tr->mheap_size);
+          if (tr->mheap_size)
+            w->thread_mheap = mheap_alloc (0 /* use VM */, tr->mheap_size);
+          else
+            w->thread_mheap = main_heap;
           w->thread_stack = vlib_thread_stacks[w - vlib_worker_threads];
           w->thread_function = tr->function;
           w->thread_function_arg = w;
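
Distilled from the two hunks above, this is a sketch of the heap-selection step as a standalone helper. The helper name and its factoring are illustrative; the calls (clib_mem_get_per_cpu_heap, mheap_alloc) and fields (w->thread_mheap, tr->mheap_size) come from the patch, while the type names vlib_worker_thread_t and vlib_thread_registration_t are assumed from vlib/vlib/threads.h.

#include <vlib/vlib.h>
#include <vlib/threads.h>

/* Illustrative helper (not part of the patch): pick a worker's heap
 * according to its registration, mirroring the logic added in both
 * hunks of start_workers () above. */
static void
assign_thread_mheap (vlib_worker_thread_t * w,
                     vlib_thread_registration_t * tr, u8 * main_heap)
{
  if (tr->mheap_size)
    /* The registration asked for a private heap: allocate one of the
     * requested size (first argument 0 => let mheap map its own VM). */
    w->thread_mheap = mheap_alloc (0 /* use VM */, tr->mheap_size);
  else
    /* Otherwise share the main heap, which start_workers () now marks
     * MHEAP_FLAG_THREAD_SAFE unconditionally. */
    w->thread_mheap = main_heap;
}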