path: root/src/vlib/trace_funcs.h
author     Dave Barach <dave@barachs.net>         2020-11-03 09:59:06 -0500
committer  Florin Coras <florin.coras@gmail.com>  2020-11-04 17:39:10 +0000
commit     27d978c9136e903244113a7ab57acea4b496898e (patch)
tree       1520252a663db663b0634633149e5f83a9c23f53 /src/vlib/trace_funcs.h
parent     a210433d534fe0039ddc2a9aa9840895aef0405d (diff)
vlib: add postmortem pcap dispatch trace
Inspired by a real-life conundrum: scenario X involves a vpp crash in
ip4-load-balance because vnet_buffer(b)->ip.adj_index[VLIB_TX] is
(still) set to ~0. The problem takes most of a day to occur, and we
need to see the broken packet's graph trajectory, metadata, etc. to
understand the problem.

Fix a signed/unsigned ASSERT bug in vlib_get_trace_count().

Rename elog_post_mortem_dump() -> vlib_post_mortem_dump(), add
dispatch trace post-mortem dump.

Add FILTER_FLAG_POST_MORTEM so we can (putatively) capture a
ludicrous number of buffer traces, without actually using more than
one dispatch cycle's worth of memory.

Type: improvement

Signed-off-by: Dave Barach <dave@barachs.net>
Change-Id: If093202ef071df46e290370bd9b33bf6560d30e6
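To illustrate the signed/unsigned ASSERT issue this patch fixes in
vlib_get_trace_count(), here is a minimal standalone sketch. This is
not VPP code; the variable names are illustrative only.

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint32_t limit = 16, count = 20;	/* count has overrun limit */

  /* Old pattern: the u32 subtraction wraps to 0xFFFFFFFC, and storing
   * that in a signed int relies on implementation-defined conversion
   * before the sign check ever runs. */
  int n = limit - count;
  printf ("n = %d, (n >= 0) = %d\n", n, n >= 0);

  /* New pattern: state the invariant directly on the unsigned values,
   * with no intermediate signed temporary. */
  printf ("(count <= limit) = %d\n", count <= limit);

  return 0;
}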
Diffstat (limited to 'src/vlib/trace_funcs.h')
-rw-r--r--  src/vlib/trace_funcs.h  6
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/src/vlib/trace_funcs.h b/src/vlib/trace_funcs.h
index 98bdb7e0a06..fba55bfc3d3 100644
--- a/src/vlib/trace_funcs.h
+++ b/src/vlib/trace_funcs.h
@@ -194,15 +194,13 @@ vlib_get_trace_count (vlib_main_t * vm, vlib_node_runtime_t * rt)
 {
   vlib_trace_main_t *tm = &vm->trace_main;
   vlib_trace_node_t *tn;
-  int n;
 
   if (rt->node_index >= vec_len (tm->nodes))
     return 0;
   tn = tm->nodes + rt->node_index;
-  n = tn->limit - tn->count;
-  ASSERT (n >= 0);
+  ASSERT (tn->count <= tn->limit);
 
-  return n;
+  return tn->limit - tn->count;
 }
 
 always_inline void
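For reference, here is how vlib_get_trace_count() reads after this
patch, assembled from the hunk above; the u32 return type and the
comments are assumptions, not part of the diff.

always_inline u32
vlib_get_trace_count (vlib_main_t * vm, vlib_node_runtime_t * rt)
{
  vlib_trace_main_t *tm = &vm->trace_main;
  vlib_trace_node_t *tn;

  /* No per-node trace state exists for this node: report zero traces
   * remaining. */
  if (rt->node_index >= vec_len (tm->nodes))
    return 0;
  tn = tm->nodes + rt->node_index;

  /* Express the invariant on the unsigned fields directly instead of
   * routing the difference through a signed temporary. */
  ASSERT (tn->count <= tn->limit);

  return tn->limit - tn->count;
}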