author    Dave Barach <dave@barachs.net>    2019-02-19 17:05:30 -0500
committer Damjan Marion <dmarion@me.com>    2019-02-20 16:27:47 +0000
commit    a4324a996f34902579338033110d27575a654c8e (patch)
tree      cdc4a58a8d055053cf8ef90b5ddf7d0fcd1a5af2 /src/vlib/main.h
parent    51a423dc2b0d4aad0bbd8f7e757d2ca49221ff83 (diff)
calculate per-thread time offset
The main thread squirrels away vlib_time_now (&vlib_global_main); worker threads use it to calculate an offset in f64 seconds from their own vlib_time_now (vm) value. We use that offset until the next barrier sync. Thanks to Damjan for the suggestion.

Change-Id: If56cdfe68e5ad8ac3b0d0fc885dc3ba556cd1215
Signed-off-by: Dave Barach <dave@barachs.net>
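In other words, at each barrier release the main thread publishes its current timestamp; a worker compares that value against its own raw clock and records the difference, which vlib_time_now () then folds into every reading until the next barrier sync. A minimal sketch of the worker-side calculation, assuming the main thread's timestamp has already been copied into the worker's time_last_barrier_release field (the helper name worker_set_time_offset is hypothetical and not part of this patch):

#include <vlib/vlib.h>

/* Hypothetical helper: recompute this worker's offset from the main
 * thread's clock.  Assumes vm->time_last_barrier_release already holds
 * the timestamp the main thread squirreled away at barrier release. */
static inline void
worker_set_time_offset (vlib_main_t * vm)
{
  /* Raw per-thread clock reading, no offset applied. */
  f64 my_now = clib_time_now (&vm->clib_time);

  /* Difference in f64 seconds between the main thread's clock and ours;
   * vlib_time_now () adds this until the next barrier sync. */
  vm->time_offset = vm->time_last_barrier_release - my_now;
}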
Diffstat (limited to 'src/vlib/main.h')
-rw-r--r--  src/vlib/main.h | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/src/vlib/main.h b/src/vlib/main.h
index f89ecd3299f..4192c3f2720 100644
--- a/src/vlib/main.h
+++ b/src/vlib/main.h
@@ -63,6 +63,9 @@ typedef struct vlib_main_t
   CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
   /* Instruction level timing state. */
   clib_time_t clib_time;
+  /* Offset from main thread time */
+  f64 time_offset;
+  f64 time_last_barrier_release;
 
   /* Time stamp of last node dispatch. */
   u64 cpu_time_last_node_dispatch;
@@ -232,7 +235,7 @@ void vlib_worker_loop (vlib_main_t * vm);
 always_inline f64
 vlib_time_now (vlib_main_t * vm)
 {
-  return clib_time_now (&vm->clib_time);
+  return clib_time_now (&vm->clib_time) + vm->time_offset;
 }
 
 always_inline f64