author    | Bud Grise <griseb@cisco.com>                    | 2016-02-19 12:10:33 -0500
committer | Gerrit Code Review <gerrit@fd.io>               | 2016-02-26 21:01:17 +0000
commit    | d56a6f59e99b7adec6742cd25f0d84bacf594537 (patch) |
tree      | 868a86b3fd2554ed11744a457e18610de4ddcfd8         |
parent    | 060c6fc0b54c909db56856b4d3e3fa65265f5871 (diff)  |
Add packet tracing hint
This avoids checking the per-buffer trace flag (VLIB_BUFFER_IS_TRACED) when tracing is not enabled.
Change-Id: I32e1a90b5fd10318254c611344488bc2a441c71e
Signed-off-by: Todd Foggoa (tfoggoa) <tfoggoa@cisco.com>
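
The pattern behind the change, as a minimal standalone sketch: a cheap, almost-always-false global hint is consulted before the per-buffer trace flag, so the common "tracing disabled" path never has to load each buffer's flags word. The type and function names below are illustrative stand-ins, not the real vlib/vnet API.

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative stand-ins for the VPP structures involved. */
    typedef struct
    {
      uint32_t trace_active_hint;   /* set when any trace is armed, cleared on "clear trace" */
    } trace_main_t;

    typedef struct
    {
      uint32_t flags;
    #define BUFFER_IS_TRACED (1 << 0)
    } buffer_t;

    /* Branch-prediction hint, in the spirit of VPP's PREDICT_FALSE(). */
    #define PREDICT_FALSE(x) __builtin_expect ((x) != 0, 0)

    static void
    record_trace (buffer_t * b)
    {
      /* placeholder for the vlib_trace_buffer()/vlib_add_trace() work */
      (void) b;
    }

    static void
    process_burst (trace_main_t * tm, buffer_t ** bufs, size_t n_bufs)
    {
      for (size_t i = 0; i < n_bufs; i++)
        {
          buffer_t *b = bufs[i];

          /* One well-predicted branch guards the per-buffer flag test, so the
           * hot path never touches b->flags while tracing is off. */
          if (PREDICT_FALSE (tm->trace_active_hint))
            {
              if (PREDICT_FALSE (b->flags & BUFFER_IS_TRACED))
                record_trace (b);
            }

          /* ... normal per-packet forwarding work ... */
        }
    }

Behaviour is unchanged once tracing is armed: the per-buffer flag check inside the guarded block still decides which packets are actually recorded.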
-rw-r--r-- | vlib/vlib/trace.c             |  4
-rw-r--r-- | vlib/vlib/trace.h             |  3
-rw-r--r-- | vnet/vnet/devices/dpdk/node.c | 60

3 files changed, 40 insertions, 27 deletions
diff --git a/vlib/vlib/trace.c b/vlib/vlib/trace.c
index b3008644e5d..4a611ee97b1 100644
--- a/vlib/vlib/trace.c
+++ b/vlib/vlib/trace.c
@@ -121,6 +121,8 @@ clear_trace_buffer (void)
     tm = &this_vlib_main->trace_main;
     mainheap = clib_mem_set_heap (this_vlib_main->heap_base);
 
+    tm->trace_active_hint = 0;
+
     for (i = 0; i < vec_len (tm->trace_buffer_pool); i++)
       if (! pool_is_free_index (tm->trace_buffer_pool, i))
         vec_free (tm->trace_buffer_pool[i]);
@@ -369,6 +371,8 @@ cli_add_trace_buffer (vlib_main_t * vm,
     void *oldheap;
     tm = &this_vlib_main->trace_main;
 
+    tm->trace_active_hint = 1;
+
     oldheap = clib_mem_set_heap (this_vlib_main->heap_base);
 
     vec_validate (tm->nodes, node_index);
diff --git a/vlib/vlib/trace.h b/vlib/vlib/trace.h
index 50e51638fdb..fe5d7377861 100644
--- a/vlib/vlib/trace.h
+++ b/vlib/vlib/trace.h
@@ -76,6 +76,9 @@ typedef struct {
 #define FILTER_FLAG_EXCLUDE 2
   u32 filter_count;
 
+  /* set on trace add, cleared on clear trace */
+  u32 trace_active_hint;
+
   /* Per node trace counts. */
   vlib_trace_node_t * nodes;
 } vlib_trace_main_t;
diff --git a/vnet/vnet/devices/dpdk/node.c b/vnet/vnet/devices/dpdk/node.c
index 575f7f4e972..b9815706169 100644
--- a/vnet/vnet/devices/dpdk/node.c
+++ b/vnet/vnet/devices/dpdk/node.c
@@ -154,25 +154,28 @@ handoff_dispatch_node_fn (vlib_main_t * vm,
           next0 = vnet_buffer(b0)->io_handoff.next_index;
           next1 = vnet_buffer(b1)->io_handoff.next_index;
 
-          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+          if (PREDICT_FALSE(vm->trace_main.trace_active_hint))
            {
-            vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 0);
-            handoff_dispatch_trace_t *t =
-               vlib_add_trace (vm, node, b0, sizeof (*t));
-            sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
-            t->sw_if_index = sw_if_index0;
-            t->next_index = next0;
-            t->buffer_index = bi0;
-           }
-          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
-           {
-            vlib_trace_buffer (vm, node, next1, b1, /* follow_chain */ 0);
-            handoff_dispatch_trace_t *t =
-               vlib_add_trace (vm, node, b1, sizeof (*t));
-            sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_RX];
-            t->sw_if_index = sw_if_index1;
-            t->next_index = next1;
-            t->buffer_index = bi1;
+            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+             {
+              vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 0);
+              handoff_dispatch_trace_t *t =
+                 vlib_add_trace (vm, node, b0, sizeof (*t));
+              sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
+              t->sw_if_index = sw_if_index0;
+              t->next_index = next0;
+              t->buffer_index = bi0;
+             }
+            if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
+             {
+              vlib_trace_buffer (vm, node, next1, b1, /* follow_chain */ 0);
+              handoff_dispatch_trace_t *t =
+                 vlib_add_trace (vm, node, b1, sizeof (*t));
+              sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_RX];
+              t->sw_if_index = sw_if_index1;
+              t->next_index = next1;
+              t->buffer_index = bi1;
+             }
            }
 
           /* verify speculative enqueues, maybe switch current next frame */
@@ -200,16 +203,19 @@ handoff_dispatch_node_fn (vlib_main_t * vm,
 
           next0 = vnet_buffer(b0)->io_handoff.next_index;
 
-          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+          if (PREDICT_FALSE(vm->trace_main.trace_active_hint))
            {
-            vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 0);
-            handoff_dispatch_trace_t *t =
-               vlib_add_trace (vm, node, b0, sizeof (*t));
-            sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
-            t->sw_if_index = sw_if_index0;
-            t->next_index = next0;
-            t->buffer_index = bi0;
-           }
+            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+             {
+              vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 0);
+              handoff_dispatch_trace_t *t =
+                 vlib_add_trace (vm, node, b0, sizeof (*t));
+              sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
+              t->sw_if_index = sw_if_index0;
+              t->next_index = next0;
+              t->buffer_index = bi0;
+             }
+           }
 
           /* verify speculative enqueue, maybe switch current next frame */
           vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
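
For completeness, a condensed view of the hint's lifecycle as the trace.c half of the diff defines it: arming a trace sets the hint, clearing the trace buffers resets it, so the data-plane check above stays in step with the CLI state. The names below are hypothetical; only the field name and the set/clear values come from the diff.

    #include <stdint.h>

    /* Hypothetical mirror of the vlib_trace_main_t change: one word records
     * whether any tracing has been requested since the last clear. */
    typedef struct
    {
      uint32_t trace_active_hint;
      /* ... trace buffer pool, per-node counts, filters ... */
    } trace_main_sketch_t;

    /* Analogous to cli_add_trace_buffer(): arming any node's trace turns the hint on. */
    static void
    arm_tracing (trace_main_sketch_t * tm)
    {
      tm->trace_active_hint = 1;
      /* ... validate per-node trace state, allocate trace buffers ... */
    }

    /* Analogous to clear_trace_buffer(): dropping the captured traces turns it off. */
    static void
    reset_tracing (trace_main_sketch_t * tm)
    {
      tm->trace_active_hint = 0;
      /* ... free accumulated trace records ... */
    }

Keeping the per-buffer VLIB_BUFFER_IS_TRACED test inside the guarded block means the saving is confined to the tracing-disabled fast path; once a trace is armed, the node behaves exactly as before.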