author    Florin Coras <fcoras@cisco.com>  2024-03-20 16:31:38 -0700
committer Dave Barach <vpp@barachs.net>  2024-03-21 18:01:32 +0000
commit    5bd96b7733c82c074216c0e9fe78bedc54d86eb5 (patch)
tree      7a48742dc4b3064472ddf69ae6f98f496f35cc92 /src/vnet
parent    2f4586d9b3507243918c11ce99b9d151d5bde7a0 (diff)
session: fix workers race to allocate lookup table
Type: fix

Signed-off-by: Florin Coras <fcoras@cisco.com>
Change-Id: I60600452c91184da571d4630bf2f0d9c24a3e85e
Diffstat (limited to 'src/vnet')
-rw-r--r--  src/vnet/session/session_lookup.c | 63
1 file changed, 39 insertions(+), 24 deletions(-)
diff --git a/src/vnet/session/session_lookup.c b/src/vnet/session/session_lookup.c
index 0755d29f915..9d028dbb28c 100644
--- a/src/vnet/session/session_lookup.c
+++ b/src/vnet/session/session_lookup.c
@@ -161,49 +161,64 @@ session_table_alloc_needs_sync (void)
   return !vlib_thread_is_main_w_barrier () && (vlib_num_workers () > 1);
 }
 
+static_always_inline u8
+session_table_is_alloced (u8 fib_proto, u32 fib_index)
+{
+  return (vec_len (fib_index_to_table_index[fib_proto]) > fib_index &&
+          fib_index_to_table_index[fib_proto][fib_index] != ~0);
+}
+
 static session_table_t *
 session_table_get_or_alloc (u8 fib_proto, u32 fib_index)
 {
   session_table_t *st;
   u32 table_index;
+
   ASSERT (fib_index != ~0);
-  if (vec_len (fib_index_to_table_index[fib_proto]) > fib_index &&
-      fib_index_to_table_index[fib_proto][fib_index] != ~0)
+
+  if (session_table_is_alloced (fib_proto, fib_index))
     {
       table_index = fib_index_to_table_index[fib_proto][fib_index];
       return session_table_get (table_index);
     }
-  else
-    {
-      u8 needs_sync = session_table_alloc_needs_sync ();
-      session_lookup_main_t *slm = &sl_main;
 
-      /* Stop workers, otherwise consumers might be affected. This is
-       * acceptable because new tables should seldom be allocated */
-      if (needs_sync)
-        {
-          vlib_workers_sync ();
+  u8 needs_sync = session_table_alloc_needs_sync ();
+  session_lookup_main_t *slm = &sl_main;
 
-          /* We might have a race, only one worker allowed at once */
-          clib_spinlock_lock (&slm->st_alloc_lock);
-        }
+  /* Stop workers, otherwise consumers might be affected. This is
+   * acceptable because new tables should seldom be allocated */
+  if (needs_sync)
+    {
+      vlib_workers_sync ();
+      /* We might have a race, only one worker allowed at once */
+      clib_spinlock_lock (&slm->st_alloc_lock);
+    }
+
+  /* Another worker just allocated this table */
+  if (session_table_is_alloced (fib_proto, fib_index))
+    {
+      table_index = fib_index_to_table_index[fib_proto][fib_index];
+      st = session_table_get (table_index);
+    }
+  else
+    {
       st = session_table_alloc ();
-      table_index = session_table_index (st);
+      st->active_fib_proto = fib_proto;
+      session_table_init (st, fib_proto);
       vec_validate_init_empty (fib_index_to_table_index[fib_proto], fib_index,
                                ~0);
+      table_index = session_table_index (st);
       fib_index_to_table_index[fib_proto][fib_index] = table_index;
-      st->active_fib_proto = fib_proto;
-      session_table_init (st, fib_proto);
-
-      if (needs_sync)
-        {
-          clib_spinlock_unlock (&slm->st_alloc_lock);
-          vlib_workers_continue ();
-        }
+    }
 
-      return st;
+  if (needs_sync)
+    {
+      clib_spinlock_unlock (&slm->st_alloc_lock);
+      vlib_workers_continue ();
     }
+
+  return st;
 }
 
 static session_table_t *
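
The change restructures session_table_get_or_alloc () into a double-checked
allocation: look the table up lock-free, synchronize, then look it up again
before allocating, so that of several workers racing past the first check
only one creates the table. It also fully initializes the table
(session_table_init ()) before publishing its index in
fib_index_to_table_index, so a reader can never observe a half-initialized
entry. The sketch below distills that control flow into self-contained C;
it substitutes a plain pthread mutex for VPP's vlib_workers_sync () barrier
and st_alloc_lock spinlock, and the table_t, tables, and lookup_table_for ()
names are hypothetical, not VPP API.

#include <pthread.h>
#include <stdlib.h>

#define N_TABLES 16

/* Hypothetical stand-in for session_table_t */
typedef struct
{
  int fib_proto;
} table_t;

static table_t *tables[N_TABLES]; /* index -> table, NULL = not allocated */
static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER;

/* Double-checked allocation: the first check is the lock-free fast path;
 * the second check, under the lock, guarantees that of several threads
 * racing past the first check only one allocates. */
static table_t *
lookup_table_for (int fib_proto, unsigned index)
{
  table_t *t = tables[index];
  if (t)                        /* fast path: table already exists */
    return t;

  pthread_mutex_lock (&alloc_lock);
  t = tables[index];
  if (!t)                       /* re-check: another thread may have won the race */
    {
      t = calloc (1, sizeof (*t));
      t->fib_proto = fib_proto; /* initialize fully ... */
      tables[index] = t;        /* ... before publishing */
    }
  pthread_mutex_unlock (&alloc_lock);
  return t;
}

Two details the sketch glosses over: in portable C the lock-free fast path
would also need atomic loads and stores with acquire/release ordering, which
the patch sidesteps by parking all workers at a barrier
(vlib_workers_sync ()) before touching the vector; and the spinlock is still
required after the barrier because several workers may reach the sync point
at once, which is precisely the race the added re-check closes.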