path: root/src/vlib/trace.c
author    Dave Barach <dave@barachs.net>    2020-11-03 09:59:06 -0500
committer Florin Coras <florin.coras@gmail.com>    2020-11-04 17:39:10 +0000
commit    27d978c9136e903244113a7ab57acea4b496898e (patch)
tree      1520252a663db663b0634633149e5f83a9c23f53 /src/vlib/trace.c
parent    a210433d534fe0039ddc2a9aa9840895aef0405d (diff)
vlib: add postmortem pcap dispatch trace
Inspired by a real-life conundrum: scenario X involves a vpp crash in ip4-load-balance because vnet_buffer(b)->ip.adj_index[VLIB_TX] is (still) set to ~0. The problem takes most of a day to occur, and we need to see the broken packet's graph trajectory, metadata, etc. to understand the problem.

Fix a signed/unsigned ASSERT bug in vlib_get_trace_count().

Rename elog_post_mortem_dump() -> vlib_post_mortem_dump(), add dispatch trace post-mortem dump.

Add FILTER_FLAG_POST_MORTEM so we can (putatively) capture a ludicrous number of buffer traces, without actually using more than one dispatch cycle's worth of memory.

Type: improvement
Signed-off-by: Dave Barach <dave@barachs.net>
Change-Id: If093202ef071df46e290370bd9b33bf6560d30e6
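For context, the "signed/unsigned ASSERT bug" mentioned above is a common C pitfall: a difference of unsigned counters can never be negative, so an assertion written against the unsigned result is vacuous. The sketch below is a generic illustration of that bug class under assumed names (traces_remaining, limit, count); it is not the actual vlib_get_trace_count() code from this commit.

#include <assert.h>
#include <stdint.h>

/* Generic illustration of a signed/unsigned ASSERT pitfall, not the
 * real vlib code.  With unsigned operands, 'limit - count' wraps to a
 * huge positive value when count > limit, so the buggy check below
 * can never catch the underflow:
 *
 *   uint32_t n = limit - count;
 *   assert (n >= 0);              // always true: n is unsigned
 */
static uint32_t
traces_remaining (uint32_t limit, uint32_t count)
{
  /* Corrected form: compute the difference in signed arithmetic first,
   * so an underflow actually trips the assertion. */
  int64_t n = (int64_t) limit - (int64_t) count;
  assert (n >= 0);
  return (uint32_t) n;
}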
Diffstat (limited to 'src/vlib/trace.c')
-rw-r--r--  src/vlib/trace.c | 9
1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/src/vlib/trace.c b/src/vlib/trace.c
index abd116622c7..f90f275fa87 100644
--- a/src/vlib/trace.c
+++ b/src/vlib/trace.c
@@ -201,6 +201,15 @@ filter_accept (vlib_trace_main_t * tm, vlib_trace_header_t * h)
if (tm->filter_flag == 0)
return 1;
+ /*
+ * When capturing a post-mortem dispatch trace,
+ * toss all existing traces once per dispatch cycle.
+ * So we can trace 4 billion pkts without running out of
+ * memory...
+ */
+ if (tm->filter_flag == FILTER_FLAG_POST_MORTEM)
+ return 0;
+
if (tm->filter_flag == FILTER_FLAG_INCLUDE)
{
while (h < e)
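For illustration, here is a minimal sketch of how a caller might apply the filter_accept() predicate shown in the hunk above once per dispatch cycle. The keep_accepted_traces() helper and the pointer-vector layout are hypothetical and are not the surrounding trace.c code; the point is only that with FILTER_FLAG_POST_MORTEM set, the predicate rejects everything, so the batch gathered in each cycle is tossed and memory stays bounded no matter how many packets are traced.

#include <vlib/vlib.h>

/* Hypothetical caller of filter_accept(): keep only the traces the
 * filter accepts.  In post-mortem mode filter_accept() returns 0 for
 * every trace, so 'kept' stays empty and the caller can free the old
 * batch every dispatch cycle. */
static vlib_trace_header_t **
keep_accepted_traces (vlib_trace_main_t * tm, vlib_trace_header_t ** traces)
{
  vlib_trace_header_t **kept = 0;
  int i;

  for (i = 0; i < vec_len (traces); i++)
    if (filter_accept (tm, traces[i]))
      vec_add1 (kept, traces[i]);

  return kept;
}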