diff options
author | Dave Barach <dave@barachs.net> | 2019-04-23 10:07:24 -0400 |
---|---|---|
committer | Florin Coras <florin.coras@gmail.com> | 2019-04-24 15:17:58 +0000 |
commit | 9ae190e9d228bfc1358482d4f07da1f4cfd41c90 (patch) | |
tree | 9e22b9797fbabd92f1b290fff0cc6b543a3d2bdb | |
parent | 502785b65c40351f62e510a245ccee56084a07f4 (diff) |
Clean up multi-thread barrier-sync hold-down timer
Main thread: don't bother with the barrier sync hold-down timer if
none of the worker threads are busy.
Worker threads: avoid epoll_pwait (10ms timeout) when the
control-plane has been active in the last half-second.
Change-Id: I82008d09968c65e2a4af0ebb7887389992e60603
Signed-off-by: Dave Barach <dave@barachs.net>
-rw-r--r-- | src/vlib/threads.c | 40 |
-rw-r--r-- | src/vlib/unix/input.c | 30 |
2 files changed, 57 insertions, 13 deletions
diff --git a/src/vlib/threads.c b/src/vlib/threads.c index 7d17c7b37cc..52886df37e0 100644 --- a/src/vlib/threads.c +++ b/src/vlib/threads.c @@ -1389,7 +1389,9 @@ vlib_worker_thread_barrier_sync_int (vlib_main_t * vm, const char *func_name) f64 t_entry; f64 t_open; f64 t_closed; + f64 max_vector_rate; u32 count; + int i; if (vec_len (vlib_mains) < 2) return; @@ -1410,23 +1412,41 @@ vlib_worker_thread_barrier_sync_int (vlib_main_t * vm, const char *func_name) return; } + /* + * Need data to decide if we're working hard enough to honor + * the barrier hold-down timer. + */ + max_vector_rate = 0.0; + for (i = 1; i < vec_len (vlib_mains); i++) + max_vector_rate = + clib_max (max_vector_rate, + vlib_last_vectors_per_main_loop_as_f64 (vlib_mains[i])); + vlib_worker_threads[0].barrier_sync_count++; /* Enforce minimum barrier open time to minimize packet loss */ ASSERT (vm->barrier_no_close_before <= (now + BARRIER_MINIMUM_OPEN_LIMIT)); - while (1) + /* + * If any worker thread seems busy, which we define + * as a vector rate above 10, we enforce the barrier hold-down timer + */ + if (max_vector_rate > 10.0) { - now = vlib_time_now (vm); - /* Barrier hold-down timer expired? */ - if (now >= vm->barrier_no_close_before) - break; - if ((vm->barrier_no_close_before - now) - > (2.0 * BARRIER_MINIMUM_OPEN_LIMIT)) + while (1) { - clib_warning ("clock change: would have waited for %.4f seconds", - (vm->barrier_no_close_before - now)); - break; + now = vlib_time_now (vm); + /* Barrier hold-down timer expired? 
*/ + if (now >= vm->barrier_no_close_before) + break; + if ((vm->barrier_no_close_before - now) + > (2.0 * BARRIER_MINIMUM_OPEN_LIMIT)) + { + clib_warning + ("clock change: would have waited for %.4f seconds", + (vm->barrier_no_close_before - now)); + break; + } } } /* Record time of closure */ diff --git a/src/vlib/unix/input.c b/src/vlib/unix/input.c index 7f49b954cc2..1c1cb1aa79c 100644 --- a/src/vlib/unix/input.c +++ b/src/vlib/unix/input.c @@ -145,9 +145,13 @@ linux_epoll_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_node_main_t *nm = &vm->node_main; u32 ticks_until_expiration; f64 timeout; + f64 now; int timeout_ms = 0, max_timeout_ms = 10; f64 vector_rate = vlib_last_vectors_per_main_loop (vm); + if (is_main == 0) + now = vlib_time_now (vm); + /* * If we've been asked for a fixed-sleep between main loop polls, * do so right away. @@ -194,8 +198,9 @@ linux_epoll_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, } node->input_main_loops_per_call = 0; } - else if (is_main == 0 && vector_rate < 2 && - nm->input_node_counts_by_state[VLIB_NODE_STATE_POLLING] == 0) + else if (is_main == 0 && vector_rate < 2 + && (vlib_global_main.time_last_barrier_release + 0.5 < now) + && nm->input_node_counts_by_state[VLIB_NODE_STATE_POLLING] == 0) { timeout = 10e-3; timeout_ms = max_timeout_ms; @@ -227,8 +232,27 @@ linux_epoll_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, } else { + /* + * Worker thread, no epoll fd's, sleep for 100us at a time + * and check for a barrier sync request + */ if (timeout_ms) - usleep (timeout_ms * 1000); + { + struct timespec ts, tsrem; + f64 limit = now + (f64) timeout_ms * 1e-3; + + while (vlib_time_now (vm) < limit) + { + /* Sleep for 100us at a time */ + ts.tv_sec = 0; + ts.tv_nsec = 1000 * 100; + + while (nanosleep (&ts, &tsrem) < 0) + ts = tsrem; + if (*vlib_worker_threads->wait_at_barrier) + goto done; + } + } goto done; } } |