| author | Dave Barach <dave@barachs.net> | 2020-06-03 08:05:15 -0400 |
| --- | --- | --- |
| committer | Florin Coras <florin.coras@gmail.com> | 2020-06-04 14:42:26 +0000 |
| commit | 8341f76fd1cd4351961cd8161cfed2814fc55103 (patch) | |
| tree | 420010b919dacc54dde85e9f5d5b6fc305775aa2 | |
| parent | c39c79c5aa7b5410f3aad4a770a741ab04f7dcc5 (diff) | |
fib: add barrier sync, pool/vector expand cases
load_balance_alloc_i(...) is not thread safe when the
load_balance_pool or combined counter vectors expand.
Type: fix
Signed-off-by: Dave Barach <dave@barachs.net>
Change-Id: I7f295ed77350d1df0434d5ff461eedafe79131de
| -rw-r--r-- | src/vlib/counter.c | 38 |
| -rw-r--r-- | src/vlib/counter.h | 3 |
| -rw-r--r-- | src/vnet/dpo/load_balance.c | 26 |
3 files changed, 66 insertions(+), 1 deletion(-)
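In short, the patch makes load_balance_alloc_i() detect, before it touches shared state, whether the allocation will reallocate (and therefore move) the load-balance pool or the per-thread combined counter vectors, and it takes the worker thread barrier only in that case. A minimal sketch of that pattern, assuming a hypothetical aligned pool `my_pool` with element type `my_object_t` (only the VPP macros and barrier calls visible in the diff below are taken from the patch; everything else is illustrative):

```c
#include <vlib/vlib.h>
#include <vppinfra/pool.h>

/* Hypothetical pool and element type, for illustration only */
typedef struct { u32 value; } my_object_t;
static my_object_t *my_pool;

static my_object_t *
my_alloc_on_main_thread (void)
{
  my_object_t *obj;
  u8 need_barrier_sync = 0;
  vlib_main_t *vm = vlib_get_main ();

  /* Allocation happens on the main thread only, as the patch asserts */
  ASSERT (vm->thread_index == 0);

  /* Will pool_get_aligned() have to reallocate (and move) the pool? */
  pool_get_aligned_will_expand (my_pool, need_barrier_sync,
                                CLIB_CACHE_LINE_BYTES);
  if (need_barrier_sync)
    vlib_worker_thread_barrier_sync (vm);

  pool_get_aligned (my_pool, obj, CLIB_CACHE_LINE_BYTES);
  clib_memset (obj, 0, sizeof (*obj));

  if (need_barrier_sync)
    vlib_worker_thread_barrier_release (vm);
  return obj;
}
```

load_balance_alloc_i() in the diff below follows this shape, additionally checking whether the two combined counter vectors would expand before validating them.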
diff --git a/src/vlib/counter.c b/src/vlib/counter.c
index edba3754da4..adf667f4051 100644
--- a/src/vlib/counter.c
+++ b/src/vlib/counter.c
@@ -119,6 +119,44 @@ vlib_validate_combined_counter (vlib_combined_counter_main_t * cm, u32 index)
                        3 /*STAT_DIR_TYPE_COUNTER_VECTOR_COMBINED */ );
 }
 
+int
+  vlib_validate_combined_counter_will_expand
+  (vlib_combined_counter_main_t * cm, u32 index)
+{
+  vlib_thread_main_t *tm = vlib_get_thread_main ();
+  int i;
+  void *oldheap = vlib_stats_push_heap (cm->counters);
+
+  /* Possibly once in recorded history */
+  if (PREDICT_FALSE (vec_len (cm->counters) == 0))
+    {
+      vlib_stats_pop_heap (cm, oldheap, index,
+                           3 /*STAT_DIR_TYPE_COUNTER_VECTOR_COMBINED */ );
+      return 1;
+    }
+
+  for (i = 0; i < tm->n_vlib_mains; i++)
+    {
+      /* Trivially OK, and proves that index >= vec_len(...) */
+      if (index < vec_len (cm->counters[i]))
+        continue;
+      if (_vec_resize_will_expand
+          (cm->counters[i],
+           index - vec_len (cm->counters[i]) /* length_increment */ ,
+           sizeof (cm->counters[i]) /* data_bytes */ ,
+           0 /* header_bytes */ ,
+           CLIB_CACHE_LINE_BYTES /* data_alignment */ ))
+        {
+          vlib_stats_pop_heap (cm, oldheap, index,
+                               3 /*STAT_DIR_TYPE_COUNTER_VECTOR_COMBINED */ );
+          return 1;
+        }
+    }
+  vlib_stats_pop_heap (cm, oldheap, index,
+                       3 /*STAT_DIR_TYPE_COUNTER_VECTOR_COMBINED */ );
+  return 0;
+}
+
 void
 vlib_free_combined_counter (vlib_combined_counter_main_t * cm)
 {
diff --git a/src/vlib/counter.h b/src/vlib/counter.h
index 7c9094727e2..8a5aed4c194 100644
--- a/src/vlib/counter.h
+++ b/src/vlib/counter.h
@@ -314,6 +314,9 @@ void vlib_free_simple_counter (vlib_simple_counter_main_t * cm);
 
 void vlib_validate_combined_counter (vlib_combined_counter_main_t * cm,
                                      u32 index);
+int vlib_validate_combined_counter_will_expand
+  (vlib_combined_counter_main_t * cm, u32 index);
+
 void vlib_free_combined_counter (vlib_combined_counter_main_t * cm);
 
 /** Obtain the number of simple or combined counters allocated.
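The new vlib_validate_combined_counter_will_expand() reports whether validating a given counter index would force the per-thread counter vectors to be reallocated, without actually growing them. A caller-side sketch, assuming a hypothetical wrapper name and arguments (the real caller is the load_balance.c hunk below):

```c
#include <vlib/vlib.h>

/* Validate a combined counter index, taking the worker barrier only if
 * the per-thread counter vectors would have to be reallocated to fit it.
 * The wrapper itself is illustrative, not part of the patch. */
static void
validate_counter_with_barrier (vlib_main_t * vm,
                               vlib_combined_counter_main_t * cm, u32 index)
{
  int will_expand;

  ASSERT (vm->thread_index == 0);       /* main thread only */

  will_expand = vlib_validate_combined_counter_will_expand (cm, index);
  if (will_expand)
    vlib_worker_thread_barrier_sync (vm);

  vlib_validate_combined_counter (cm, index);

  if (will_expand)
    vlib_worker_thread_barrier_release (vm);
}
```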
diff --git a/src/vnet/dpo/load_balance.c b/src/vnet/dpo/load_balance.c
index 7acccca61bf..c029341f147 100644
--- a/src/vnet/dpo/load_balance.c
+++ b/src/vnet/dpo/load_balance.c
@@ -93,12 +93,33 @@ static load_balance_t *
 load_balance_alloc_i (void)
 {
     load_balance_t *lb;
+    u8 need_barrier_sync = 0;
+    vlib_main_t *vm = vlib_get_main();
+    ASSERT (vm->thread_index == 0);
+
+    pool_get_aligned_will_expand (load_balance_pool, need_barrier_sync,
+                                  CLIB_CACHE_LINE_BYTES);
+    if (need_barrier_sync)
+        vlib_worker_thread_barrier_sync (vm);
 
     pool_get_aligned(load_balance_pool, lb, CLIB_CACHE_LINE_BYTES);
     clib_memset(lb, 0, sizeof(*lb));
 
     lb->lb_map = INDEX_INVALID;
     lb->lb_urpf = INDEX_INVALID;
+
+    if (need_barrier_sync == 0)
+    {
+        need_barrier_sync += vlib_validate_combined_counter_will_expand
+            (&(load_balance_main.lbm_to_counters),
+             load_balance_get_index(lb));
+        need_barrier_sync += vlib_validate_combined_counter_will_expand
+            (&(load_balance_main.lbm_via_counters),
+             load_balance_get_index(lb));
+        if (need_barrier_sync)
+            vlib_worker_thread_barrier_sync (vm);
+    }
+
     vlib_validate_combined_counter(&(load_balance_main.lbm_to_counters),
                                    load_balance_get_index(lb));
     vlib_validate_combined_counter(&(load_balance_main.lbm_via_counters),
@@ -108,6 +129,9 @@ load_balance_alloc_i (void)
     vlib_zero_combined_counter(&(load_balance_main.lbm_via_counters),
                                load_balance_get_index(lb));
 
+    if (need_barrier_sync)
+        vlib_worker_thread_barrier_release (vm);
+
     return (lb);
 }
 
@@ -1121,7 +1145,7 @@ load_balance_inline (vlib_main_t * vm,
                 vnet_buffer(b0)->ip.flow_hash = bier_compute_flow_hash(bh0);
             }
 
-            dpo0 = load_balance_get_bucket_i(lb0,
+            dpo0 = load_balance_get_bucket_i(lb0,
                              vnet_buffer(b0)->ip.flow_hash &
                                     (lb0->lb_n_buckets_minus_1));
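Note that load_balance_alloc_i() takes the barrier at most once per allocation: when the pool itself is about to expand, the barrier is already held before the counters are validated, so the extra will-expand checks run only while need_barrier_sync is still zero, and the barrier is released only after both counters have been validated and zeroed. The trailing hunk in load_balance_inline() appears to be a whitespace-only cleanup of the load_balance_get_bucket_i() call.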