author    Florin Coras <fcoras@cisco.com>   2022-03-30 13:50:19 -0700
committer Damjan Marion <dmarion@me.com>    2022-03-31 20:24:50 +0000
commit    4b20830b496ade3f1e77cd5fcbdf6d6c7bb5336d (patch)
tree      1c42e6c55f199011f651781da28417ce8e3e4208
parent    b3a5b39efb48a67fa93d4329282345c0fc8a1f29 (diff)
vlib: add support for workers sync
Adds an API that allows workers to synchronize through the main thread.

Type: improvement

Signed-off-by: Florin Coras <fcoras@cisco.com>
Change-Id: I1e75e2fb5144d397d19b13c4dfc7e937f11c044c
-rw-r--r--  src/vlib/threads.c          | 50
-rw-r--r--  src/vlib/threads.h          | 14
-rw-r--r--  src/vnet/session/session.h  | 40
3 files changed, 66 insertions(+), 38 deletions(-)
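
As a quick illustration of the new API, here is a minimal worker-side sketch; my_shared_config and my_worker_update_config are hypothetical names, only vlib_workers_sync () and vlib_workers_continue () come from this patch:

#include <vlib/vlib.h>
#include <vlib/threads.h>

/* Hypothetical state shared by all workers. */
static u32 my_shared_config;

/* Meant to be called from a worker thread. */
static void
my_worker_update_config (u32 new_value)
{
  /* Ask the main thread (via RPC) to raise the barrier, then wait until
   * every other worker is parked at or before it. */
  vlib_workers_sync ();

  /* Other workers are stopped, so this cannot race with packet processing. */
  my_shared_config = new_value;

  /* Release the workers; each vlib_workers_sync () must be paired with a
   * vlib_workers_continue (). */
  vlib_workers_continue ();
}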
diff --git a/src/vlib/threads.c b/src/vlib/threads.c
index b470976d3d3..e34ef7c30cb 100644
--- a/src/vlib/threads.c
+++ b/src/vlib/threads.c
@@ -1480,6 +1480,56 @@ vlib_worker_thread_barrier_release (vlib_main_t * vm)
vm->clib_time.last_cpu_time, 1 /* leave */ );
}
+static void
+vlib_worker_sync_rpc (void *args)
+{
+  ASSERT (vlib_thread_is_main_w_barrier ());
+  vlib_worker_threads->wait_before_barrier = 0;
+}
+
+void
+vlib_workers_sync (void)
+{
+  if (PREDICT_FALSE (!vlib_num_workers ()))
+    return;
+
+  if (!(*vlib_worker_threads->wait_at_barrier) &&
+      !clib_atomic_swap_rel_n (&vlib_worker_threads->wait_before_barrier, 1))
+    {
+      u32 thread_index = vlib_get_thread_index ();
+      vlib_rpc_call_main_thread (vlib_worker_sync_rpc, (u8 *) &thread_index,
+                                 sizeof (thread_index));
+    }
+
+  /* Wait until main thread asks for barrier */
+  while (!(*vlib_worker_threads->wait_at_barrier))
+    ;
+
+  /* Stop before barrier and make sure all threads are either
+   * at worker barrier or the barrier before it */
+  clib_atomic_fetch_add (&vlib_worker_threads->workers_before_barrier, 1);
+  while (vlib_num_workers () > (*vlib_worker_threads->workers_at_barrier +
+                                vlib_worker_threads->workers_before_barrier))
+    ;
+}
+
+void
+vlib_workers_continue (void)
+{
+  if (PREDICT_FALSE (!vlib_num_workers ()))
+    return;
+
+  clib_atomic_fetch_add (&vlib_worker_threads->done_work_before_barrier, 1);
+
+  /* Wait until all workers are done with work before barrier */
+  while (vlib_worker_threads->done_work_before_barrier <
+         vlib_worker_threads->workers_before_barrier)
+    ;
+
+  clib_atomic_fetch_add (&vlib_worker_threads->done_work_before_barrier, -1);
+  clib_atomic_fetch_add (&vlib_worker_threads->workers_before_barrier, -1);
+}
+
/**
 * Wait until each of the workers has been once around the track
 */
diff --git a/src/vlib/threads.h b/src/vlib/threads.h
index e406dde5b07..b25d4764168 100644
--- a/src/vlib/threads.h
+++ b/src/vlib/threads.h
@@ -101,6 +101,9 @@ typedef struct
const char *barrier_caller;
const char *barrier_context;
volatile u32 *node_reforks_required;
+  volatile u32 wait_before_barrier;
+  volatile u32 workers_before_barrier;
+  volatile u32 done_work_before_barrier;
long lwp;
int cpu_id;
@@ -484,6 +487,17 @@ void vlib_rpc_call_main_thread (void *function, u8 * args, u32 size);
void vlib_get_thread_core_numa (vlib_worker_thread_t * w, unsigned cpu_id);
vlib_thread_main_t *vlib_get_thread_main_not_inline (void);
+/**
+ * Force workers sync from within worker
+ *
+ * Must be paired with @ref vlib_workers_continue
+ */
+void vlib_workers_sync (void);
+/**
+ * Release barrier after workers sync
+ */
+void vlib_workers_continue (void);
+
#endif /* included_vlib_threads_h */
/*
diff --git a/src/vnet/session/session.h b/src/vnet/session/session.h
index 56e30e6533b..0cf6cd084fe 100644
--- a/src/vnet/session/session.h
+++ b/src/vnet/session/session.h
@@ -803,41 +803,6 @@ pool_program_safe_realloc (void *p, u32 thread_index,
uword_to_pointer (thread_index, void *));
}
-#define pool_realloc_all_at_barrier(_not) \
- (*vlib_worker_threads->workers_at_barrier >= (vlib_num_workers () - _not))
-
-always_inline void
-pool_realloc_wait_at_barrier (void)
-{
- session_main_t *sm = &session_main;
-
- /* Wait until main thread asks for barrier */
- while (!(*vlib_worker_threads->wait_at_barrier))
- ;
-
- /* Stop at realloc barrier and make sure all threads are either
- * at worker barrier or at pool realloc barrier */
- clib_atomic_fetch_add (&sm->pool_realloc_at_barrier, 1);
- while (!pool_realloc_all_at_barrier (sm->pool_realloc_at_barrier))
- ;
-
- /* Track all workers that are doing work */
- clib_atomic_fetch_add (&sm->pool_realloc_doing_work, 1);
-}
-
-always_inline void
-pool_realloc_done_wait_at_barrier (void)
-{
- session_main_t *sm = &session_main;
-
- /* Wait until all workers at pool realloc barrier have started reallocs */
- while (sm->pool_realloc_doing_work < sm->pool_realloc_at_barrier)
- ;
-
- clib_atomic_fetch_add (&sm->pool_realloc_doing_work, -1);
- clib_atomic_fetch_add (&sm->pool_realloc_at_barrier, -1);
-}
-
#define pool_needs_realloc(P) \
((!P) || \
(vec_len (pool_header (P)->free_indices) < POOL_REALLOC_SAFE_ELT_THRESH && \
@@ -857,10 +822,9 @@ pool_realloc_done_wait_at_barrier (void)
} \
else if (PREDICT_FALSE (!pool_free_elts (P))) \
{ \
- pool_program_safe_realloc (P, thread_index, rpc_fn); \
- pool_realloc_wait_at_barrier (); \
+ vlib_workers_sync (); \
pool_alloc_aligned (P, pool_max_len (P), align); \
- pool_realloc_done_wait_at_barrier (); \
+ vlib_workers_continue (); \
ALWAYS_ASSERT (pool_free_elts (P) > 0); \
} \
else \
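
The session.h hunk above drops the session layer's private realloc barrier helpers in favor of the new generic calls. Distilled out of the macro, the grow path now looks roughly like the sketch below; my_elt_t, my_pool and my_pool_grow_safely are hypothetical, and the grow-by-pool_max_len step mirrors the updated macro body:

#include <vlib/vlib.h>
#include <vlib/threads.h>
#include <vppinfra/pool.h>

typedef struct { u32 data; } my_elt_t; /* hypothetical element type */
static my_elt_t *my_pool;              /* hypothetical pool indexed by workers */

/* Runs on a worker thread once the pool has no free elements left. */
static void
my_pool_grow_safely (void)
{
  /* Park the other workers through the main thread. */
  vlib_workers_sync ();

  /* Grow the pool while no other worker can touch it. */
  pool_alloc_aligned (my_pool, pool_max_len (my_pool), CLIB_CACHE_LINE_BYTES);

  /* Let the other workers resume. */
  vlib_workers_continue ();
}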