| author    | Benoît Ganne <bganne@cisco.com>                                  | 2020-10-02 19:36:57 +0200 |
|-----------|------------------------------------------------------------------|---------------------------|
| committer | Dave Barach <openvpp@barachs.net>                                | 2020-11-09 11:51:34 +0000 |
| commit    | 9a3973e3a36bfd4dd8dbffe130a92649fc1b73d3 (patch)                 |                           |
| tree      | 1ed9e9c7a3b13edd68f7e78d66dbb995cbe79a2a /src/vlib/trace_funcs.h |                           |
| parent    | f6b02e0d0bfd7e0f1d79e8ee426f48ca37ae5ff3 (diff)                  |                           |
vlib: fix trace number accounting
When using the classifier to filter traces, not all packets will be traced.
In that case, we should only count traced packets.
Type: fix
Change-Id: I87d1e217b580ebff8c6ade7860eb43950420ae78
Signed-off-by: Benoît Ganne <bganne@cisco.com>
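
For context, this change means call sites that allocate trace records should only spend their per-node trace budget when `vlib_trace_buffer` reports success. Below is a minimal sketch of the updated caller-side pattern; the node, its trace struct, and the helper name `example_maybe_trace` are illustrative and not part of this commit, but the vlib helpers used (`vlib_get_trace_count`, `vlib_set_trace_count`, `vlib_add_trace`) are the standard ones from `trace_funcs.h`:

```c
#include <vlib/vlib.h>

/* Hypothetical per-packet trace record for an illustrative node;
 * real nodes define their own trace structs. */
typedef struct
{
  u32 next_index;
} example_trace_t;

static_always_inline void
example_maybe_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
		     vlib_buffer_t * b, u32 next_index)
{
  /* Remaining trace budget for this node (set via "trace add <node> N"). */
  u32 n_trace = vlib_get_trace_count (vm, node);

  /* With the int-returning vlib_trace_buffer(), the budget is only
   * consumed when the packet was actually traced, i.e. tracing is
   * enabled and the packet passed the classifier filter. */
  if (PREDICT_FALSE (n_trace &&
		     vlib_trace_buffer (vm, node, next_index, b,
					0 /* follow_chain */)))
    {
      example_trace_t *t = vlib_add_trace (vm, node, b, sizeof (*t));
      t->next_index = next_index;
      vlib_set_trace_count (vm, node, n_trace - 1);
    }
}
```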
Diffstat (limited to 'src/vlib/trace_funcs.h')
| -rw-r--r-- | src/vlib/trace_funcs.h | 20 |
|------------|------------------------|----|

1 file changed, 14 insertions(+), 6 deletions(-)
```diff
diff --git a/src/vlib/trace_funcs.h b/src/vlib/trace_funcs.h
index fba55bfc3d3..8dd5310ae76 100644
--- a/src/vlib/trace_funcs.h
+++ b/src/vlib/trace_funcs.h
@@ -49,7 +49,7 @@ vlib_validate_trace (vlib_trace_main_t * tm, vlib_buffer_t * b)
 	    vlib_buffer_get_trace_index (b)));
 }
 
-void vlib_add_handoff_trace (vlib_main_t * vm, vlib_buffer_t * b);
+int vlib_add_handoff_trace (vlib_main_t * vm, vlib_buffer_t * b);
 
 always_inline void *
 vlib_add_trace_inline (vlib_main_t * vm,
@@ -80,7 +80,8 @@ vlib_add_trace_inline (vlib_main_t * vm,
 
   /* Are we trying to trace a handoff case? */
   if (PREDICT_FALSE (vlib_buffer_get_trace_thread (b) != vm->thread_index))
-    vlib_add_handoff_trace (vm, b);
+    if (PREDICT_FALSE (!vlib_add_handoff_trace (vm, b)))
+      return vnet_trace_placeholder;
 
   vlib_validate_trace (tm, b);
 
@@ -131,8 +132,13 @@ int vnet_is_packet_traced (vlib_buffer_t * b,
 			   u32 classify_table_index, int func);
 
-/* Mark buffer as traced and allocate trace buffer. */
-always_inline void
+/*
+ * Mark buffer as traced and allocate trace buffer.
+ * return 1 if the buffer is successfully traced, 0 if not
+ * A buffer might not be traced if tracing is off or if the packet did not
+ * match the filter.
+ */
+always_inline __clib_warn_unused_result int
 vlib_trace_buffer (vlib_main_t * vm,
 		   vlib_node_runtime_t * r, u32 next_index,
 		   vlib_buffer_t * b, int follow_chain)
@@ -141,7 +147,7 @@ vlib_trace_buffer (vlib_main_t * vm,
   vlib_trace_header_t **h;
 
   if (PREDICT_FALSE (tm->trace_enable == 0))
-    return;
+    return 0;
 
   /* Classifier filter in use? */
   if (PREDICT_FALSE (vlib_global_main.trace_filter.trace_filter_enable))
@@ -150,7 +156,7 @@ vlib_trace_buffer (vlib_main_t * vm,
       if (vnet_is_packet_traced
 	  (b, vlib_global_main.trace_filter.trace_classify_table_index,
 	   0 /* full classify */ ) != 1)
-	return;
+	return 0;
     }
 
   /*
@@ -178,6 +184,8 @@ vlib_trace_buffer (vlib_main_t * vm,
 	(vm->thread_index, h - tm->trace_buffer_pool);
     }
   while (follow_chain && (b = vlib_get_next_buffer (vm, b)));
+
+  return 1;
 }
 
 always_inline void
```
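
The `__clib_warn_unused_result` qualifier added to `vlib_trace_buffer` makes the compiler flag callers that still ignore the new return value, so stale call sites using the old void-style accounting are caught at build time. A minimal sketch of the mechanism, assuming the macro wraps the GCC/Clang `warn_unused_result` attribute (the vppinfra definition itself is not part of this diff):

```c
/* Assumed expansion; vppinfra is expected to define something like this. */
#define __clib_warn_unused_result __attribute__ ((warn_unused_result))

__clib_warn_unused_result int must_check (void);

void
caller (void)
{
  int rv;

  must_check ();	/* warning: ignoring return value of 'must_check' */
  rv = must_check ();	/* OK: the result is consumed */
  (void) rv;
}
```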