From 0a67b48f1d1065d250191657810bd5cd0b718c39 Mon Sep 17 00:00:00 2001
From: Dave Barach
Date: Mon, 8 Jun 2020 11:17:19 -0400
Subject: vlib: stop inlining vlib_add_trace(...)

Packet tracing performance doesn't justify inlining vlib_add_trace(...)
over 500 times. It makes a 15% text-segment size difference in a
representative use-case:

Inline:

$ size .../vnet_skx.dir/ipsec/ipsec_input.c.o
   text    data     bss     dec     hex  filename
   6831      80       0    6911    1aff  .../vnet_skx.dir/ipsec/ipsec_input.c.o

Not inline:

$ size .../vnet_skx.dir/ipsec/ipsec_input.c.o
   text    data     bss     dec     hex  filename
   5776      80       0    5856    16e0  .../vnet_skx.dir/ipsec/ipsec_input.c.o

Retain the original code as vlib_add_trace_inline, instantiate once as
vlib_add_trace.

Type: refactor

Signed-off-by: Dave Barach
Change-Id: Iaf431dbf00c4aad03663d86f9dd1322e84d03962
---
 src/vlib/trace_funcs.h | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/src/vlib/trace_funcs.h b/src/vlib/trace_funcs.h
index 4261f675aec..b25237bd7e8 100644
--- a/src/vlib/trace_funcs.h
+++ b/src/vlib/trace_funcs.h
@@ -52,8 +52,9 @@ vlib_validate_trace (vlib_trace_main_t * tm, vlib_buffer_t * b)
 void vlib_add_handoff_trace (vlib_main_t * vm, vlib_buffer_t * b);
 
 always_inline void *
-vlib_add_trace (vlib_main_t * vm,
-		vlib_node_runtime_t * r, vlib_buffer_t * b, u32 n_data_bytes)
+vlib_add_trace_inline (vlib_main_t * vm,
+		       vlib_node_runtime_t * r, vlib_buffer_t * b,
+		       u32 n_data_bytes)
 {
   vlib_trace_main_t *tm = &vm->trace_main;
   vlib_trace_header_t *h;
@@ -95,6 +96,11 @@ vlib_add_trace (vlib_main_t * vm,
   return h->data;
 }
 
+/* Non-inline (typical use-case) version of the above */
+void *vlib_add_trace (vlib_main_t * vm,
+		      vlib_node_runtime_t * r, vlib_buffer_t * b,
+		      u32 n_data_bytes);
+
 always_inline vlib_trace_header_t *
 vlib_trace_header_next (vlib_trace_header_t * h)
 {
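
Note: the hunk above only adds the out-of-line declaration; per the
commit message, the retained vlib_add_trace_inline body is instantiated
exactly once as vlib_add_trace. A minimal sketch of what that single
instantiation looks like, assuming it lives in a companion .c file such
as src/vlib/trace.c (the actual location is not shown in this view,
which covers only src/vlib/trace_funcs.h):

#include <vlib/vlib.h>

/* The one shared, out-of-line copy of the trace helper. Every call
 * site now emits a call to this symbol instead of expanding the
 * function body inline, which is where the text-segment savings
 * come from. */
void *
vlib_add_trace (vlib_main_t * vm,
		vlib_node_runtime_t * r, vlib_buffer_t * b, u32 n_data_bytes)
{
  return vlib_add_trace_inline (vm, r, b, n_data_bytes);
}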
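
Call sites are unchanged by this refactor. For illustration, a sketch of
the common VPP idiom of checking the buffer's traced flag before writing
a trace record; the record type (example_trace_t) and the locals b0 and
next0 are invented for this example:

/* Hypothetical per-node trace record */
typedef struct
{
  u32 next_index;
  u32 sw_if_index;
} example_trace_t;

/* Inside a node dispatch loop, b0 is the current vlib_buffer_t */
if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
  {
    example_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
    t->next_index = next0;
    t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
  }

The trade-off: each traced packet now pays a function call rather than
an inline expansion, which the commit message judges acceptable since
packet tracing is not a performance-critical path.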