author      Dave Barach <dave@barachs.net>        2019-05-02 15:48:15 -0400
committer   Dave Wallace <dwallacelf@gmail.com>   2019-05-24 13:56:01 +0000
commit      511ce2572a9710835c6722a09a93b4dd80ab7fae (patch)
tree        e91aa851cab7e9410aaa1a285cee53801bf51310
parent      070b810a883d789c97a9d89f56f952e14ed9999a (diff)
Clean up multi-thread barrier-sync hold-down timers (stable/1807)
Main thread: don't bother with the barrier sync hold-down timer if
none of the worker threads are busy.
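
In sketch form (condensed from the threads.c hunk below; the 10.0
vectors-per-main-loop threshold is the patch's working definition of
"busy", and the clock-change guard inside the wait loop is omitted here):

    f64 max_vector_rate = 0.0;
    int i;

    /* Highest vector rate reported by any worker over its last main loop */
    for (i = 1; i < vec_len (vlib_mains); i++)
      max_vector_rate =
        clib_max (max_vector_rate,
                  vlib_last_vectors_per_main_loop_as_f64 (vlib_mains[i]));

    /* Only spin on the barrier hold-down timer if somebody looks busy */
    if (max_vector_rate > 10.0)
      while (vlib_time_now (vm) < vm->barrier_no_close_before)
        ;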
Worker threads: avoid epoll_pwait (10ms timeout) when the
control-plane has been active in the last half-second.
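
Condensed from the unix/input.c hunk below, using the same locals as
linux_epoll_input_inline (vm, nm, now, vector_rate, timeout_ms,
max_timeout_ms): arm the 10 ms timeout only when the control plane has
been quiet for half a second, then sleep in 100 us slices so a pending
barrier request is noticed quickly:

    if (is_main == 0 && vector_rate < 2
        && (vlib_global_main.time_last_barrier_release + 0.5 < now)
        && nm->input_node_counts_by_state[VLIB_NODE_STATE_POLLING] == 0)
      timeout_ms = max_timeout_ms;        /* 10 ms */

    if (is_main == 0 && timeout_ms)
      {
        struct timespec ts, tsrem;
        f64 limit = now + (f64) timeout_ms * 1e-3;

        while (vlib_time_now (vm) < limit)
          {
            /* Sleep 100us at a time, then check for a barrier request */
            ts.tv_sec = 0;
            ts.tv_nsec = 100 * 1000;
            while (nanosleep (&ts, &tsrem) < 0)
              ts = tsrem;
            if (*vlib_worker_threads->wait_at_barrier)
              break;
          }
      }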
Cherry-pick a recent dangling-reference fix: pool_elt_at_index is
required after e.g. the rx callback, in case the unix file pool expands
(and moves) during the callback.
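
The pattern behind that fix, sketched with hypothetical names (my_main_t,
my_elt_t and dispatch_one are illustrative only; pool_elt_at_index is the
vppinfra accessor used by the real fix):

    #include <vppinfra/pool.h>

    typedef struct
    {
      int (*read_function) (void *elt);
    } my_elt_t;

    typedef struct
    {
      my_elt_t *elt_pool;       /* pool; may be reallocated as it grows */
    } my_main_t;

    static int
    dispatch_one (my_main_t * mm, u32 index)
    {
      my_elt_t *e = pool_elt_at_index (mm->elt_pool, index);

      /* The callback may add elements to mm->elt_pool; pool expansion
         can reallocate the underlying memory and leave 'e' dangling. */
      int rv = e->read_function (e);

      /* Re-fetch the element by index before any further use. */
      e = pool_elt_at_index (mm->elt_pool, index);
      (void) e;

      return rv;
    }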
Manual feature backport to 18.07
Change-Id: I745fbb8a12aeda34b0ec7b6dcda66c0e25c3eee1
Signed-off-by: Dave Barach <dave@barachs.net>
-rw-r--r--   src/vlib/main.h       |  5
-rw-r--r--   src/vlib/threads.c    | 83
-rw-r--r--   src/vlib/threads.h    | 17
-rw-r--r--   src/vlib/unix/input.c | 36
4 files changed, 132 insertions, 9 deletions
diff --git a/src/vlib/main.h b/src/vlib/main.h
index 16e4120067d..894fe9efd6e 100644
--- a/src/vlib/main.h
+++ b/src/vlib/main.h
@@ -61,6 +61,9 @@ typedef struct vlib_main_t
   CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
   /* Instruction level timing state. */
   clib_time_t clib_time;
+  /* Offset from main thread time */
+  f64 time_offset;
+  f64 time_last_barrier_release;
 
   /* Time stamp of last node dispatch. */
   u64 cpu_time_last_node_dispatch;
@@ -224,7 +227,7 @@ void vlib_worker_loop (vlib_main_t * vm);
 always_inline f64
 vlib_time_now (vlib_main_t * vm)
 {
-  return clib_time_now (&vm->clib_time);
+  return clib_time_now (&vm->clib_time) + vm->time_offset;
 }
 
 always_inline f64
diff --git a/src/vlib/threads.c b/src/vlib/threads.c
index a946326f239..1119d82bfb8 100644
--- a/src/vlib/threads.c
+++ b/src/vlib/threads.c
@@ -1448,7 +1448,9 @@ vlib_worker_thread_barrier_sync_int (vlib_main_t * vm)
   f64 t_entry;
   f64 t_open;
   f64 t_closed;
+  f64 max_vector_rate;
   u32 count;
+  int i;
 
   if (vec_len (vlib_mains) < 2)
     return;
@@ -1468,13 +1470,43 @@ vlib_worker_thread_barrier_sync_int (vlib_main_t * vm)
       return;
     }
 
+  /*
+   * Need data to decide if we're working hard enough to honor
+   * the barrier hold-down timer.
+   */
+  max_vector_rate = 0.0;
+  for (i = 1; i < vec_len (vlib_mains); i++)
+    max_vector_rate =
+      clib_max (max_vector_rate,
+                vlib_last_vectors_per_main_loop_as_f64 (vlib_mains[i]));
+
   vlib_worker_threads[0].barrier_sync_count++;
 
   /* Enforce minimum barrier open time to minimize packet loss */
   ASSERT (vm->barrier_no_close_before <= (now + BARRIER_MINIMUM_OPEN_LIMIT));
-  while ((now = vlib_time_now (vm)) < vm->barrier_no_close_before)
-    ;
+  /*
+   * If any worker thread seems busy, which we define
+   * as a vector rate above 10, we enforce the barrier hold-down timer
+   */
+  if (max_vector_rate > 10.0)
+    {
+      while (1)
+        {
+          now = vlib_time_now (vm);
+          /* Barrier hold-down timer expired? */
+          if (now >= vm->barrier_no_close_before)
+            break;
+          if ((vm->barrier_no_close_before - now)
+              > (2.0 * BARRIER_MINIMUM_OPEN_LIMIT))
+            {
+              clib_warning
+                ("clock change: would have waited for %.4f seconds",
+                 (vm->barrier_no_close_before - now));
+              break;
+            }
+        }
+    }
 
   /* Record time of closure */
   t_open = now - vm->barrier_epoch;
   vm->barrier_epoch = now;
@@ -1559,6 +1591,14 @@ vlib_worker_thread_barrier_release (vlib_main_t * vm)
 
   deadline = now + BARRIER_SYNC_TIMEOUT;
 
+  /*
+   * Note when we let go of the barrier.
+   * Workers can use this to derive a reasonably accurate
+   * time offset. See vlib_time_now(...)
+   */
+  vm->time_last_barrier_release = vlib_time_now (vm);
+  CLIB_MEMORY_STORE_BARRIER ();
+
   *vlib_worker_threads->wait_at_barrier = 0;
 
   while (*vlib_worker_threads->workers_at_barrier > 0)
@@ -1844,6 +1884,45 @@ threads_init (vlib_main_t * vm)
 
 VLIB_INIT_FUNCTION (threads_init);
 
+
+static clib_error_t *
+show_clock_command_fn (vlib_main_t * vm,
+                       unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+  int i;
+  f64 now;
+
+  now = vlib_time_now (vm);
+
+  vlib_cli_output (vm, "Time now %.9f", now);
+
+  if (vec_len (vlib_mains) == 1)
+    return 0;
+
+  vlib_cli_output (vm, "Time last barrier release %.9f",
+                   vm->time_last_barrier_release);
+
+  for (i = 1; i < vec_len (vlib_mains); i++)
+    {
+      if (vlib_mains[i] == 0)
+        continue;
+      vlib_cli_output (vm, "Thread %d offset %.9f error %.9f", i,
+                       vlib_mains[i]->time_offset,
+                       vm->time_last_barrier_release -
+                       vlib_mains[i]->time_last_barrier_release);
+    }
+  return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (f_command, static) =
+{
+  .path = "show clock",
+  .short_help = "show clock",
+  .function = show_clock_command_fn,
+};
+/* *INDENT-ON* */
+
 /*
  * fd.io coding-style-patch-verification: ON
  *
diff --git a/src/vlib/threads.h b/src/vlib/threads.h
index f78ec1b9fa5..dd0ed20ca0c 100644
--- a/src/vlib/threads.h
+++ b/src/vlib/threads.h
@@ -400,7 +400,7 @@ vlib_worker_thread_barrier_check (void)
 {
   if (PREDICT_FALSE (*vlib_worker_threads->wait_at_barrier))
     {
-      vlib_main_t *vm;
+      vlib_main_t *vm = vlib_get_main ();
       clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, 1);
       if (CLIB_DEBUG > 0)
         {
@@ -409,6 +409,21 @@ vlib_worker_thread_barrier_check (void)
         }
       while (*vlib_worker_threads->wait_at_barrier)
         ;
+
+      /*
+       * Recompute the offset from thread-0 time.
+       * Note that vlib_time_now adds vm->time_offset, so
+       * clear it first. Save the resulting idea of "now", to
+       * see how well we're doing. See show_clock_command_fn(...)
+       */
+      {
+        f64 now;
+        vm->time_offset = 0.0;
+        now = vlib_time_now (vm);
+        vm->time_offset = vlib_global_main.time_last_barrier_release - now;
+        vm->time_last_barrier_release = vlib_time_now (vm);
+      }
+
       if (CLIB_DEBUG > 0)
         vm->parked_at_barrier = 0;
       clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, -1);
diff --git a/src/vlib/unix/input.c b/src/vlib/unix/input.c
index 8be0770bfd3..20a98f7a31e 100644
--- a/src/vlib/unix/input.c
+++ b/src/vlib/unix/input.c
@@ -145,9 +145,13 @@ linux_epoll_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
   vlib_node_main_t *nm = &vm->node_main;
   u32 ticks_until_expiration;
   f64 timeout;
+  f64 now;
   int timeout_ms = 0, max_timeout_ms = 10;
   f64 vector_rate = vlib_last_vectors_per_main_loop (vm);
 
+  if (is_main == 0)
+    now = vlib_time_now (vm);
+
   /*
    * If we've been asked for a fixed-sleep between main loop polls,
    * do so right away.
@@ -194,8 +198,9 @@ linux_epoll_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
        }
       node->input_main_loops_per_call = 0;
     }
-  else if (is_main == 0 && vector_rate < 2 &&
-          nm->input_node_counts_by_state[VLIB_NODE_STATE_POLLING] == 0)
+  else if (is_main == 0 && vector_rate < 2
+          && (vlib_global_main.time_last_barrier_release + 0.5 < now)
+          && nm->input_node_counts_by_state[VLIB_NODE_STATE_POLLING] == 0)
     {
       timeout = 10e-3;
       timeout_ms = max_timeout_ms;
@@ -223,12 +228,32 @@ linux_epoll_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                                  em->epoll_events, vec_len (em->epoll_events),
                                  timeout_ms);
        }
+    }
   else
     {
+      /*
+       * Worker thread, no epoll fd's, sleep for 100us at a time
+       * and check for a barrier sync request
+       */
       if (timeout_ms)
-       usleep (timeout_ms * 1000);
-      return 0;
+       {
+         struct timespec ts, tsrem;
+         f64 limit = now + (f64) timeout_ms * 1e-3;
+
+         while (vlib_time_now (vm) < limit)
+           {
+             /* Sleep for 100us at a time */
+             ts.tv_sec = 0;
+             ts.tv_nsec = 1000 * 100;
+
+             while (nanosleep (&ts, &tsrem) < 0)
+               ts = tsrem;
+             if (*vlib_worker_threads->wait_at_barrier)
+               goto done;
+           }
+       }
+      goto done;
     }
 }
@@ -238,7 +263,7 @@ linux_epoll_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
        vlib_panic_with_error (vm, clib_error_return_unix (0, "epoll_wait"));
 
       /* non fatal error (e.g. EINTR). */
-      return 0;
+      goto done;
     }
 
   em->epoll_waits += 1;
@@ -314,6 +339,7 @@ linux_epoll_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
        }
     }
 
+done:
   return 0;
 }