| author | Dave Barach <dave@barachs.net> | 2020-06-03 08:05:15 -0400 |
| --- | --- | --- |
| committer | Andrew Yourtchenko <ayourtch@gmail.com> | 2020-08-18 19:47:21 +0000 |
| commit | 1c73742cb3c90f11a7b9e9c01100c464a82951fd (patch) | |
| tree | c63b341f270d52c5f1f534e415912ca9ef8a2668 /src/vnet/dpo | |
| parent | 77335de45a0904fd6d6e2a5597f1fd0fcbaf0917 (diff) | |
fib: add barrier sync, pool/vector expand cases
load_balance_alloc_i(...) is not thread-safe when the
load_balance_pool or the combined counter vectors expand.
Type: fix
Signed-off-by: Dave Barach <dave@barachs.net>
Change-Id: I7f295ed77350d1df0434d5ff461eedafe79131de
(cherry picked from commit 8341f76fd1cd4351961cd8161cfed2814fc55103)
Diffstat (limited to 'src/vnet/dpo')
-rw-r--r-- | src/vnet/dpo/load_balance.c | 26 |
1 file changed, 25 insertions(+), 1 deletion(-)
```diff
diff --git a/src/vnet/dpo/load_balance.c b/src/vnet/dpo/load_balance.c
index 7acccca61bf..c029341f147 100644
--- a/src/vnet/dpo/load_balance.c
+++ b/src/vnet/dpo/load_balance.c
@@ -93,12 +93,33 @@ static load_balance_t *
 load_balance_alloc_i (void)
 {
     load_balance_t *lb;
+    u8 need_barrier_sync = 0;
+    vlib_main_t *vm = vlib_get_main();
+    ASSERT (vm->thread_index == 0);
+
+    pool_get_aligned_will_expand (load_balance_pool, need_barrier_sync,
+                                  CLIB_CACHE_LINE_BYTES);
+    if (need_barrier_sync)
+        vlib_worker_thread_barrier_sync (vm);
 
     pool_get_aligned(load_balance_pool, lb, CLIB_CACHE_LINE_BYTES);
     clib_memset(lb, 0, sizeof(*lb));
 
     lb->lb_map = INDEX_INVALID;
     lb->lb_urpf = INDEX_INVALID;
+
+    if (need_barrier_sync == 0)
+    {
+        need_barrier_sync += vlib_validate_combined_counter_will_expand
+            (&(load_balance_main.lbm_to_counters),
+             load_balance_get_index(lb));
+        need_barrier_sync += vlib_validate_combined_counter_will_expand
+            (&(load_balance_main.lbm_via_counters),
+             load_balance_get_index(lb));
+        if (need_barrier_sync)
+            vlib_worker_thread_barrier_sync (vm);
+    }
+
     vlib_validate_combined_counter(&(load_balance_main.lbm_to_counters),
                                    load_balance_get_index(lb));
     vlib_validate_combined_counter(&(load_balance_main.lbm_via_counters),
@@ -108,6 +129,9 @@ load_balance_alloc_i (void)
     vlib_zero_combined_counter(&(load_balance_main.lbm_via_counters),
                                load_balance_get_index(lb));
 
+    if (need_barrier_sync)
+        vlib_worker_thread_barrier_release (vm);
+
     return (lb);
 }
 
@@ -1121,7 +1145,7 @@ load_balance_inline (vlib_main_t * vm,
                 vnet_buffer(b0)->ip.flow_hash = bier_compute_flow_hash(bh0);
             }
 
-            dpo0 = load_balance_get_bucket_i(lb0, 
+            dpo0 = load_balance_get_bucket_i(lb0,
                                              vnet_buffer(b0)->ip.flow_hash &
                                              (lb0->lb_n_buckets_minus_1));
 
```
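For readers unfamiliar with the idiom, the patch follows a check-then-sync pattern: the main thread first asks whether the upcoming allocation will reallocate shared memory, and only pays for a worker barrier when it will. Below is a minimal sketch of that pattern using the same VPP helpers the patch uses; the pool name `example_pool`, the element type, and the function name are hypothetical, for illustration only.

```c
#include <vlib/vlib.h>
#include <vlib/threads.h>
#include <vppinfra/pool.h>

/* Hypothetical pool element type and pool, for illustration only. */
typedef struct { u32 value; } example_elt_t;
static example_elt_t *example_pool;

static example_elt_t *
example_alloc_on_main_thread (void)
{
    example_elt_t *e;
    u8 need_barrier_sync = 0;
    vlib_main_t *vm = vlib_get_main();

    /* Allocation runs on the main thread only; workers keep reading
     * the pool concurrently unless we stop them. */
    ASSERT (vm->thread_index == 0);

    /* Will pool_get_aligned() have to reallocate the pool vector?
     * If so, park the workers first so none of them dereferences a
     * pointer into memory that is about to be freed and remapped. */
    pool_get_aligned_will_expand (example_pool, need_barrier_sync,
                                  CLIB_CACHE_LINE_BYTES);
    if (need_barrier_sync)
        vlib_worker_thread_barrier_sync (vm);

    pool_get_aligned (example_pool, e, CLIB_CACHE_LINE_BYTES);
    clib_memset (e, 0, sizeof (*e));

    if (need_barrier_sync)
        vlib_worker_thread_barrier_release (vm);

    return e;
}
```

The patch applies the same test-before-sync idea to the two combined counter vectors via vlib_validate_combined_counter_will_expand(), so in the common case where nothing needs to grow the workers are never stopped.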