aboutsummaryrefslogtreecommitdiffstats
path: root/src/vppinfra/string.h
AgeCommit message (Expand)AuthorFilesLines
2019-05-13Fix typoIgor Mikhailov (imichail)1-1/+1
2019-05-01Enable NEON instructions in memcpy_leLijian.Zhang1-1/+1
2019-03-28Avoid overwrite in clib_memcpy_le{32,64}Damjan Marion1-27/+29
2019-03-26ipsec: esp-encrypt reworkDamjan Marion1-0/+76
2019-01-18deprecate clib_memcpy64_x4Damjan Marion1-68/+0
2019-01-10strncpy_s_inline copies more bytes than necessarySteven1-2/+3
2018-12-05Improve strncpy_s src/dst overlap checkDave Barach1-2/+12
2018-12-02vppinfra: c11 safe string functionsSteven1-0/+713
2018-11-14Remove c-11 memcpy checks from perf-critical codeDave Barach1-7/+17
2018-10-23c11 safe string handling supportDave Barach1-3/+110
2018-10-10Integer underflow and out-of-bounds read (VPP-1442)Neale Ranns1-4/+4
2018-10-04clib_count_equal_*: don't read of the end of a small array and init data only...Neale Ranns1-8/+28
2018-09-13vppinfra: optmize clib_count_equal functionsDamjan Marion1-60/+136
2018-05-22vppinfra: add clib_count_equal_uXX and clib_memset_uXX functionsDamjan Marion1-0/+334
2017-12-14vppinfra: add AVX512 variant of clib_memcpyDamjan Marion1-2/+4
2017-12-08vppinfra: fix issues depending on compilerSergio Gonzalez Monroy1-34/+34
2017-11-22use intel intrinsics in clib_memcpy64_x4Damjan Marion1-47/+54
2017-11-13dpdk: introduce AVX512 variants of node functionsDamjan Marion1-0/+65
2016-12-28Reorganize source tree to use single autotools instanceDamjan Marion1-0/+83
/*
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * trace_funcs.h: VLIB trace buffer.
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef included_vlib_trace_funcs_h
#define included_vlib_trace_funcs_h

/* Shared scratch record returned when a buffer is not actually being
 * traced; callers may write up to their requested n_data_bytes into it
 * and the data is simply discarded. */
extern u8 *vnet_trace_placeholder;

/* Assert (debug builds only) that b's trace index refers to a live
 * entry in the trace buffer pool. */
always_inline void
vlib_validate_trace (vlib_trace_main_t * tm, vlib_buffer_t * b)
{
  ASSERT (!pool_is_free_index
	  (tm->trace_buffer_pool, vlib_buffer_get_trace_index (b)));
}

void vlib_add_handoff_trace (vlib_main_t * vm, vlib_buffer_t * b);

/*
 * Append a trace record for buffer b and return a pointer to
 * n_data_bytes of writable record space.
 *
 * Fast-path exits:
 *  - buffer not marked VLIB_BUFFER_IS_TRACED: return the shared
 *    placeholder (caller's writes are discarded);
 *  - a user add_trace_callback is registered: delegate entirely to it;
 *  - tracing globally disabled: return the placeholder (debug-assert it
 *    is large enough for the requested record).
 *
 * If the buffer's trace was created on a different thread (handoff
 * case), a handoff trace record is added first.  n_data_bytes is
 * rounded up to a whole number of vlib_trace_header_t words; the
 * header (timestamp, payload word count, tracing node index) is filled
 * in here and h->data is returned for the caller's payload.
 */
always_inline void *
vlib_add_trace_inline (vlib_main_t * vm,
		       vlib_node_runtime_t * r, vlib_buffer_t * b,
		       u32 n_data_bytes)
{
  vlib_trace_main_t *tm = &vm->trace_main;
  vlib_trace_header_t *h;
  u32 n_data_words;

  /* Placeholder must have been allocated before any tracing occurs. */
  ASSERT (vnet_trace_placeholder);

  if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_IS_TRACED) == 0))
    return vnet_trace_placeholder;

  if (PREDICT_FALSE (tm->add_trace_callback != 0))
    {
      return tm->add_trace_callback ((struct vlib_main_t *) vm,
				     (struct vlib_node_runtime_t *) r,
				     (struct vlib_buffer_t *) b,
				     n_data_bytes);
    }
  else if (PREDICT_FALSE (tm->trace_enable == 0))
    {
      ASSERT (vec_len (vnet_trace_placeholder) >= n_data_bytes + sizeof (*h));
      return vnet_trace_placeholder;
    }

  /* Are we trying to trace a handoff case? */
  if (PREDICT_FALSE (vlib_buffer_get_trace_thread (b) != vm->thread_index))
    vlib_add_handoff_trace (vm, b);

  vlib_validate_trace (tm, b);

  /* Round the payload up to whole trace-header words; the record is
   * 1 header word + n_data_words payload words. */
  n_data_bytes = round_pow2 (n_data_bytes, sizeof (h[0]));
  n_data_words = n_data_bytes / sizeof (h[0]);
  vec_add2_aligned (tm->trace_buffer_pool[vlib_buffer_get_trace_index (b)],
		    h, 1 + n_data_words, sizeof (h[0]));

  h->time = vm->cpu_time_last_node_dispatch;
  h->n_data = n_data_words;
  h->node_index = r->node_index;

  /* Caller writes its payload here. */
  return h->data;
}

/* Non-inline (typical use-case) version of the above */
void *vlib_add_trace (vlib_main_t * vm,
		      vlib_node_runtime_t * r, vlib_buffer_t * b,
		      u32 n_data_bytes);

/* Step to the next record in a trace buffer: skip this record's header
 * word plus its n_data payload words. */
always_inline vlib_trace_header_t *
vlib_trace_header_next (vlib_trace_header_t * h)
{
  return h + 1 + h->n_data;
}

/* Release buffer b's trace: reset the per-trace vector to empty (the
 * allocation is kept for reuse) and return the slot to the pool. */
always_inline void
vlib_free_trace (vlib_main_t * vm, vlib_buffer_t * b)
{
  vlib_trace_main_t *tm = &vm->trace_main;
  u32 trace_index = vlib_buffer_get_trace_index (b);
  vlib_validate_trace (tm, b);
  _vec_len (tm->trace_buffer_pool[trace_index]) = 0;
  pool_put_index (tm->trace_buffer_pool, trace_index);
}

/* Mark node r's next frame for next_index with VLIB_FRAME_TRACE so
 * downstream nodes know the frame may contain traced buffers. */
always_inline void
vlib_trace_next_frame (vlib_main_t * vm,
		       vlib_node_runtime_t * r, u32 next_index)
{
  vlib_next_frame_t *nf;
  nf = vlib_node_runtime_get_next_frame (vm, r, next_index);
  nf->flags |= VLIB_FRAME_TRACE;
}

void trace_apply_filter (vlib_main_t * vm);
int vnet_is_packet_traced (vlib_buffer_t * b,
			   u32 classify_table_index, int func);

/* Mark buffer as traced and allocate trace buffer. */
always_inline void
vlib_trace_buffer (vlib_main_t * vm,
		   vlib_node_runtime_t * r, u32 next_index,
		   vlib_buffer_t * b, int follow_chain)
{
  vlib_trace_main_t *tm = &vm->trace_main;
  vlib_trace_header_t **h;

  if (PREDICT_FALSE (tm->trace_enable == 0))
    return;

  /* Classifier filter in use? */
  if (PREDICT_FALSE (vlib_global_main.trace_filter.trace_filter_enable))
    {
      /* See if we're supposed to trace this packet...
       * Only an exact match (return value 1) keeps tracing this buffer. */
      if (vnet_is_packet_traced
	  (b, vlib_global_main.trace_filter.trace_classify_table_index,
	   0 /* full classify */ ) != 1)
	return;
    }

  /*
   * Apply filter to existing traces to keep number of allocated traces low.
   * Performed each time around the main loop.
   */
  if (tm->last_main_loop_count != vm->main_loop_count)
    {
      tm->last_main_loop_count = vm->main_loop_count;
      trace_apply_filter (vm);

      if (tm->trace_buffer_callback)
	(tm->trace_buffer_callback) ((struct vlib_main_t *) vm,
				     (struct vlib_trace_main_t *) tm);
    }

  vlib_trace_next_frame (vm, r, next_index);

  pool_get (tm->trace_buffer_pool, h);

  /* Stamp the flag/handle on the head buffer, and — when follow_chain
   * is set — on every chained buffer as well.  Note all buffers in the
   * chain share the single pool slot allocated above. */
  do
    {
      b->flags |= VLIB_BUFFER_IS_TRACED;
      b->trace_handle = vlib_buffer_make_trace_handle
	(vm->thread_index, h - tm->trace_buffer_pool);
    }
  while (follow_chain && (b = vlib_get_next_buffer (vm, b)));
}

/* Propagate b's trace state to the buffer with index bi_target: copy
 * the IS_TRACED flag (if set) and the trace handle, so both buffers
 * refer to the same trace record. */
always_inline void
vlib_buffer_copy_trace_flag (vlib_main_t * vm, vlib_buffer_t * b,
			     u32 bi_target)
{
  vlib_buffer_t *b_target = vlib_get_buffer (vm, bi_target);
  b_target->flags |= b->flags & VLIB_BUFFER_IS_TRACED;
  b_target->trace_handle = b->trace_handle;
}

/* Return how many more packets node rt may still trace
 * (limit - count); 0 if the node has no trace state at all. */
always_inline u32
vlib_get_trace_count (vlib_main_t * vm, vlib_node_runtime_t * rt)
{
  vlib_trace_main_t *tm = &vm->trace_main;
  vlib_trace_node_t *tn;

  if (rt->node_index >= vec_len (tm->nodes))
    return 0;
  tn = tm->nodes + rt->node_index;
  ASSERT (tn->count <= tn->limit);

  return tn->limit - tn->count;
}

/* Set node rt's remaining trace budget to count packets (stored
 * internally as used = limit - count; count must not exceed limit). */
always_inline void
vlib_set_trace_count (vlib_main_t * vm, vlib_node_runtime_t * rt, u32 count)
{
  vlib_trace_main_t *tm = &vm->trace_main;
  vlib_trace_node_t *tn = vec_elt_at_index (tm->nodes, rt->node_index);

  ASSERT (count <= tn->limit);
  tn->count = tn->limit - count;
}

/* Helper function for nodes which only trace buffer data. */
void vlib_trace_frame_buffers_only (vlib_main_t * vm,
				    vlib_node_runtime_t * node,
				    u32 * buffers,
				    uword n_buffers,
				    uword next_buffer_stride,
				    uword n_buffer_data_bytes_in_trace);

#endif /* included_vlib_trace_funcs_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */