-rw-r--r--  src/vlib/trace_funcs.h  19
1 file changed, 15 insertions, 4 deletions
diff --git a/src/vlib/trace_funcs.h b/src/vlib/trace_funcs.h
index 357079c4e3d..d9d8d7933ee 100644
--- a/src/vlib/trace_funcs.h
+++ b/src/vlib/trace_funcs.h
@@ -58,7 +58,7 @@ vlib_add_trace_inline (vlib_main_t * vm,
{
vlib_trace_main_t *tm = &vm->trace_main;
vlib_trace_header_t *h;
- u32 n_data_words;
+ u32 n_data_words, trace_index;
ASSERT (vnet_trace_placeholder);
@@ -83,12 +83,23 @@ vlib_add_trace_inline (vlib_main_t * vm,
if (PREDICT_FALSE (!vlib_add_handoff_trace (vm, b)))
return vnet_trace_placeholder;
- vlib_validate_trace (tm, b);
+ /*
+ * there is a small chance of a race condition with 'clear trace' here: if a
+ * buffer was set to be traced before the 'clear trace' and is still going
+ * through the graph after the 'clear trace', its trace_index is stale
+ * because the pool was destroyed.
+ * The pool may have been re-allocated because of a new traced buffer, and
+ * the trace_index might be valid by pure (bad) luck. In that case the trace
+ * will be a mix of both buffer traces, but this should be acceptable.
+ */
+ trace_index = vlib_buffer_get_trace_index (b);
+ if (PREDICT_FALSE (pool_is_free_index (tm->trace_buffer_pool, trace_index)))
+ return vnet_trace_placeholder;
n_data_bytes = round_pow2 (n_data_bytes, sizeof (h[0]));
n_data_words = n_data_bytes / sizeof (h[0]);
- vec_add2_aligned (tm->trace_buffer_pool[vlib_buffer_get_trace_index (b)], h,
- 1 + n_data_words, sizeof (h[0]));
+ vec_add2_aligned (tm->trace_buffer_pool[trace_index], h, 1 + n_data_words,
+ sizeof (h[0]));
h->time = vm->cpu_time_last_node_dispatch;
h->n_data = n_data_words;
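
For readers unfamiliar with VPP's pool macros, below is a minimal, self-contained C
sketch of the guard this hunk introduces: read the buffer's trace index, check it
against the pool's free-slot state, and fall back to the placeholder when the index
is stale (for example after 'clear trace' destroyed and re-created the pool). The
toy_* names and the array-plus-flag "pool" are hypothetical stand-ins for
pool_is_free_index() and tm->trace_buffer_pool, not the real vlib API.

/*
 * Standalone model of the stale-trace-index guard above. The toy_* names
 * are hypothetical stand-ins for VPP's pool_*() machinery, used only so
 * the example compiles on its own.
 */
#include <stdio.h>

#define POOL_SIZE 8

typedef struct
{
  int in_use[POOL_SIZE];     /* 1 if the slot holds a live trace */
  int n_entries[POOL_SIZE];  /* per-trace entry count (stands in for the trace vector) */
} toy_pool_t;

/* Mirrors the semantics of pool_is_free_index(): a slot that is not
 * in use must not be dereferenced. */
static int
toy_pool_is_free_index (toy_pool_t * p, unsigned index)
{
  return index >= POOL_SIZE || !p->in_use[index];
}

/* Guarded equivalent of the new check in vlib_add_trace_inline(): bail
 * out (returning a placeholder) when the buffer carries a stale index. */
static int *
toy_add_trace (toy_pool_t * pool, unsigned trace_index, int *placeholder)
{
  if (toy_pool_is_free_index (pool, trace_index))
    return placeholder;      /* stale index: do not touch the pool */

  pool->n_entries[trace_index]++;  /* safe to record the trace entry */
  return &pool->n_entries[trace_index];
}

int
main (void)
{
  static int placeholder;
  toy_pool_t pool = { 0 };

  pool.in_use[3] = 1;        /* one live trace, as if freshly allocated */

  /* Live index: the entry is recorded. */
  toy_add_trace (&pool, 3, &placeholder);

  /* Stale index (e.g. buffer marked before 'clear trace'): guarded. */
  int *r = toy_add_trace (&pool, 5, &placeholder);
  printf ("stale index returned placeholder: %s\n",
          r == &placeholder ? "yes" : "no");
  printf ("entries recorded for live trace 3: %d\n", pool.n_entries[3]);
  return 0;
}

Running the sketch shows the stale index being redirected to the placeholder while
the live trace still accumulates its entry, which is the trade-off the comment in
the patch describes: a possibly stale index is dropped (or, by bad luck, mixed into
a newly allocated trace) rather than risking a write through a freed pool slot.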