aboutsummaryrefslogtreecommitdiffstats
path: root/resources
AgeCommit message (Expand)AuthorFilesLines
2020-08-27Trending: Add DPDK tests to list of failed testsTibor Frank1-0/+72
2020-08-27Trending, Report: nat44-deterministc test names changeTibor Frank2-46/+46
2020-08-24T-Rex: 2.82, core pin, 8 workerspmikus4-4/+5
2020-08-24Framework: Qemu alignmentspmikus2-17/+28
2020-08-08Fix: NAT44 deterministic moved to separate det44 pluginJan Gelety4-93/+195
2020-08-20Fix: Use new-style interface variablesVratko Polak2-6/+6
2020-08-20Framework: use 'stl' in trex stateless profile namesJan Gelety1-3/+2
2020-08-20PAL: Add processing of throughput in GbpsTibor Frank2-98/+171
2020-08-19Add Intel E810CQ 100G NIC configurationYulong Pei2-2/+7
2020-08-07Perf: NAT44 endpoint-dependent mode - udp, part IJan Gelety12-235/+541
2020-08-18Framework: Qemu alignmentspmikus10-8/+180
2020-08-18Framework: Alignmentspmikus1-2/+2
2020-08-17Framework: Profiles loadpmikus4-140/+264
2020-08-17Measure latency only in special casesVratko Polak2-7/+15
2020-08-17Trending: Fixes after reviewTibor Frank1-26/+48
2020-08-13Trending: NFV TestsTibor Frank2-32/+301
2020-08-12Trending: NFV TestsTibor Frank1-0/+3
2020-08-06Limit user triggers: no ORVratko Polak1-0/+7
2020-08-05Report 2005: Add data, configure rls2005.32Tibor Frank3-2/+14
2020-08-04Trending: Set longer time periodTibor Frank1-21/+38
2020-08-04Trending: Implement the latest changes in job specsTibor Frank1-2199/+1018
2020-07-23T-Rex: Add advanced stateful modeJan Gelety5-178/+383
2020-07-31Framework: Perf stat capturingPeter Mikus4-10/+84
2020-07-30Introduce per DUT configurable statspmikus2-17/+68
2020-07-30Update RCA files with new findingsVratko Polak4-28/+37
2020-07-29Ansible: Nomad 0.12.0 upgradepmikus6-2/+13
2020-07-28Soak: Avoid a possible deadlock.Vratko Polak1-3/+5
2020-07-28vsap: install ab using ansibleXiaolong Jiang3-0/+28
2020-07-24NDRPDR: Allow smaller min_rate, 9001 ppsVratko Polak1-12/+16
2020-07-24Report 2005: Add dataTibor Frank1-0/+6
2020-07-23ansible nomad: install htopDave Wallace1-0/+1
2020-07-23FIX: Log required cli_cmd not "cli_inband"Jan Gelety1-2/+2
2020-07-23perpatch: Echo MAKE_PARALLEL_* var before buildJuraj Linkeš1-1/+15
2020-07-23Report 2005: Configure rls2005.31, add dataTibor Frank3-2/+5
2020-07-22Report 2005: Add dataTibor Frank1-0/+11
2020-07-21Report 2005: Add dataTibor Frank1-0/+6
2020-07-20Report 2005: Add dataTibor Frank1-0/+5
2020-07-19Report 2005: Add dataTibor Frank1-0/+83
2020-07-17Report 2005: Add dataTibor Frank1-0/+19
2020-07-16Remove remains of WRK testsTibor Frank24-1717/+0
2020-07-16FIX: CSIT annoyances I.pmikus1-2/+2
2020-07-16Report 2005: Add dataTibor Frank1-0/+4
2020-07-16Report 2005: Configure 2005.30, add dataTibor Frank3-138/+103
2020-07-15Report 2005: Add pdf versionTibor Frank1-2/+6
2020-07-14report: 2n-clx, 2n-skx, 3n-skx RCA edits in comparison tablesMaciek Konstantynowicz4-102/+92
2020-07-14Report 2005: Fix legend in comparison tablesTibor Frank1-54/+54
2020-07-14Report 2005: Add soak testsTibor Frank1-56/+42
2020-07-14Report 2005: Add NFV testsTibor Frank1-24/+31
2020-07-13Report: remove wrk testsTibor Frank5-1179/+1
2020-07-13Docs: Update TRex informationVratko Polak1-1/+1
/*
 * NOTE(review): this chunk appears to be vlib's packet-trace CLI
 * implementation ("trace add" / "trace filter" / "show trace" /
 * "clear trace").  The fragment immediately below is the tail of a
 * tracing loop whose head is not visible in this chunk (the leading
 * "pan>" is truncation residue); it is kept byte-for-byte as found.
 */
pan>flags & VLIB_BUFFER_IS_TRACED)
	{
	  /* Allocate a trace record and copy the leading buffer bytes in. */
	  t0 = vlib_add_trace (vm, node, b0, n_buffer_data_bytes_in_trace);
	  clib_memcpy_fast (t0, b0->data + b0->current_data,
			    n_buffer_data_bytes_in_trace);
	}
      from += 1;
      n_left -= 1;
    }
}

/* Free up all trace buffer memory. */
always_inline void
clear_trace_buffer (void)
{
  int i;
  vlib_trace_main_t *tm;

  /* On every thread: disable tracing, free each allocated trace record,
     then free the record pool itself. */
  /* *INDENT-OFF* */
  foreach_vlib_main (
  ({
    tm = &this_vlib_main->trace_main;
    tm->trace_enable = 0;

    for (i = 0; i < vec_len (tm->trace_buffer_pool); i++)
      if (! pool_is_free_index (tm->trace_buffer_pool, i))
        vec_free (tm->trace_buffer_pool[i]);
    pool_free (tm->trace_buffer_pool);
  }));
  /* *INDENT-ON* */
}

/*
 * Format one packet's trace (a vector of vlib_trace_header_t records)
 * into the format vector 's'.
 *
 * A "node-name: timestamp" banner (relative to main-loop start) is
 * emitted each time the traced node changes.  Each record is rendered by
 * the node's format_trace callback when it has one, otherwise by the
 * node's format_buffer callback.
 */
u8 *
format_vlib_trace (u8 * s, va_list * va)
{
  vlib_main_t *vm = va_arg (*va, vlib_main_t *);
  vlib_trace_header_t *h = va_arg (*va, vlib_trace_header_t *);
  vlib_trace_header_t *e = vec_end (h);
  vlib_node_t *node, *prev_node;
  clib_time_t *ct = &vm->clib_time;
  f64 t;

  prev_node = 0;
  while (h < e)
    {
      node = vlib_get_node (vm, h->node_index);

      if (node != prev_node)
	{
	  /* Convert the record's cycle count to seconds since the main
	     loop started. */
	  t =
	    (h->time - vm->cpu_time_main_loop_start) * ct->seconds_per_clock;
	  s =
	    format (s, "\n%U: %v", format_time_interval, "h:m:s:u", t,
		    node->name);
	}
      prev_node = node;

      if (node->format_trace)
	s = format (s, "\n %U", node->format_trace, vm, node, h->data);
      else
	s = format (s, "\n %U", node->format_buffer, h->data);

      h = vlib_trace_header_next (h);
    }

  return s;
}

/* Root of all trace cli commands. */
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (trace_cli_command,static) = {
  .path = "trace",
  .short_help = "Packet tracer commands",
};
/* *INDENT-ON* */

/*
 * Comparator for vec_sort_with_function: orders trace records by
 * capture time, ascending.  Arguments are vlib_trace_header_t **.
 */
static int
trace_cmp (void *a1, void *a2)
{
  vlib_trace_header_t **t1 = a1;
  vlib_trace_header_t **t2 = a2;
  i64 dt = t1[0]->time - t2[0]->time;
  return dt < 0 ? -1 : (dt > 0 ? +1 : 0);
}

/*
 * Return 1 if this packet passes the trace filter, or 0 otherwise
 *
 * filter_flag == 0 (FILTER_FLAG_NONE) accepts everything.  INCLUDE
 * accepts a trace only if filter_node_index appears somewhere in its
 * record chain; EXCLUDE accepts only if it does not.
 */
u32
filter_accept (vlib_trace_main_t * tm, vlib_trace_header_t * h)
{
  vlib_trace_header_t *e = vec_end (h);

  if (tm->filter_flag == 0)
    return 1;

  if (tm->filter_flag == FILTER_FLAG_INCLUDE)
    {
      while (h < e)
	{
	  if (h->node_index == tm->filter_node_index)
	    return 1;
	  h = vlib_trace_header_next (h);
	}
      return 0;
    }
  else				/* FILTER_FLAG_EXCLUDE */
    {
      while (h < e)
	{
	  if (h->node_index == tm->filter_node_index)
	    return 0;
	  h = vlib_trace_header_next (h);
	}
      return 1;
    }

  /* NOTE(review): unreachable -- both branches above return. */
  return 0;
}

/*
 * Remove traces from the trace buffer pool that don't pass the filter
 *
 * At most tm->filter_count accepted traces are retained; any surplus
 * accepted traces and all rejected ones are returned to the pool (their
 * record vectors are kept allocated, just reset to zero length).
 */
void
trace_apply_filter (vlib_main_t * vm)
{
  vlib_trace_main_t *tm = &vm->trace_main;
  vlib_trace_header_t **h;
  vlib_trace_header_t ***traces_to_remove = 0;
  u32 index;
  u32 trace_index;
  u32 n_accepted;

  u32 accept;

  if (tm->filter_flag == FILTER_FLAG_NONE)
    return;

  /*
   * Ideally we would retain the first N traces that pass the filter instead
   * of any N traces.
   */
  n_accepted = 0;
  /* *INDENT-OFF* */
  pool_foreach (h, tm->trace_buffer_pool,
   ({
      accept = filter_accept(tm, h[0]);

      if ((n_accepted == tm->filter_count) || !accept)
          vec_add1 (traces_to_remove, h);
      else
          n_accepted++;
  }));
  /* *INDENT-ON* */

  /* remove all traces that we don't want to keep */
  for (index = 0; index < vec_len (traces_to_remove); index++)
    {
      trace_index = traces_to_remove[index] - tm->trace_buffer_pool;
      /* Truncate rather than free: the vector is reused when the pool
         slot is handed out again. */
      _vec_len (tm->trace_buffer_pool[trace_index]) = 0;
      pool_put_index (tm->trace_buffer_pool, trace_index);
    }

  vec_free (traces_to_remove);
}

/*
 * CLI handler for "show trace [max COUNT]": apply the trace filter on
 * each thread, sort that thread's traces by time, and print up to 'max'
 * of them (default 50) under a per-thread banner.
 */
static clib_error_t *
cli_show_trace_buffer (vlib_main_t * vm,
		       unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vlib_trace_main_t *tm;
  vlib_trace_header_t **h, **traces;
  u32 i, index = 0;
  char *fmt;
  u8 *s = 0;
  u32 max;

  /*
   * By default display only this many traces. To display more, explicitly
   * specify a max. This prevents unexpectedly huge outputs.
   */
  max = 50;
  while (unformat_check_input (input) != (uword) UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "max %d", &max))
	;
      else
	return clib_error_create ("expected 'max COUNT', got `%U'",
				  format_unformat_error, input);
    }

  /* Get active traces from pool. */
  /* *INDENT-OFF* */
  foreach_vlib_main (
  ({
    fmt = "------------------- Start of thread %d %s -------------------\n";
    s = format (s, fmt, index, vlib_worker_threads[index].name);

    tm = &this_vlib_main->trace_main;

    trace_apply_filter(this_vlib_main);

    traces = 0;
    pool_foreach (h, tm->trace_buffer_pool,
    ({
      vec_add1 (traces, h[0]);
    }));

    if (vec_len (traces) == 0)
      {
        s = format (s, "No packets in trace buffer\n");
        goto done;
      }

    /* Sort them by increasing time. */
    vec_sort_with_function (traces, trace_cmp);

    for (i = 0; i < vec_len (traces); i++)
      {
        if (i == max)
          {
            vlib_cli_output (vm, "Limiting display to %d packets."
                                 " To display more specify max.", max);
            goto done;
          }

        s = format (s, "Packet %d\n%U\n\n",
                    i + 1, format_vlib_trace, vm, traces[i]);
      }

  done:
    vec_free (traces);
    index++;
  }));
  /* *INDENT-ON* */

  vlib_cli_output (vm, "%v", s);
  vec_free (s);
  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (show_trace_cli,static) = {
  .path = "show trace",
  .short_help = "Show trace buffer [max COUNT]",
  .function = cli_show_trace_buffer,
};
/* *INDENT-ON* */

/* Weak default: overridden elsewhere when a packet trace filter
   implementation is linked in; the stub reports success (0). */
int vlib_enable_disable_pkt_trace_filter (int enable) __attribute__ ((weak));
int
vlib_enable_disable_pkt_trace_filter (int enable)
{
  return 0;
}

/*
 * CLI handler for "trace add NODE COUNT [verbose] [filter]": raise the
 * per-node trace limit by COUNT on every thread and enable tracing.
 * "filter" additionally enables the packet trace filter and fails if
 * none is configured.
 */
static clib_error_t *
cli_add_trace_buffer (vlib_main_t * vm,
		      unformat_input_t * input, vlib_cli_command_t * cmd)
{
  unformat_input_t _line_input, *line_input = &_line_input;
  vlib_trace_main_t *tm;
  vlib_node_t *node;
  vlib_trace_node_t *tn;
  u32 node_index, add;
  u8 verbose = 0;
  int filter = 0;
  clib_error_t *error = 0;

  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  if (vnet_trace_dummy == 0)
    vec_validate_aligned (vnet_trace_dummy, 2048, CLIB_CACHE_LINE_BYTES);

  while (unformat_check_input (line_input) != (uword) UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "%U %d",
		    unformat_vlib_node, vm, &node_index, &add))
	;
      else if (unformat (line_input, "verbose"))
	verbose = 1;
      else if (unformat (line_input, "filter"))
	filter = 1;
      else
	{
	  error = clib_error_create ("expected NODE COUNT, got `%U'",
				     format_unformat_error, line_input);
	  goto done;
	}
    }

  node = vlib_get_node (vm, node_index);

  if ((node->flags & VLIB_NODE_FLAG_TRACE_SUPPORTED) == 0)
    {
      error = clib_error_create ("node '%U' doesn't support per-node "
				 "tracing. There may be another way to "
				 "initiate trace on this node.",
				 format_vlib_node_name, vm, node_index);
      goto done;
    }

  if (filter)
    {
      if (vlib_enable_disable_pkt_trace_filter (1 /* enable */ ))
	{
	  error = clib_error_create ("No packet trace filter configured...");
	  goto done;
	}
    }

  /* *INDENT-OFF* */
  foreach_vlib_main ((
    {
      tm = &this_vlib_main->trace_main;
      tm->verbose = verbose;
      vec_validate (tm->nodes, node_index);
      tn = tm->nodes + node_index;
      /* Accumulate rather than overwrite, so repeated "trace add"
         invocations extend the budget. */
      tn->limit += add;
      tm->trace_enable = 1;
    }));
  /* *INDENT-ON* */

done:
  unformat_free (line_input);
  return error;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (add_trace_cli,static) = {
  .path = "trace add",
  .short_help = "Trace given number of packets",
  .function = cli_add_trace_buffer,
};
/* *INDENT-ON* */

/*
 * Configure a filter for packet traces.
 *
 * This supplements the packet trace feature so that only packets matching
 * the filter are included in the trace. Currently the only filter is to
 * keep packets that include a certain node in the trace or exclude a certain
 * node in the trace.
 *
 * The count of traced packets in the "trace add" command is still used to
 * create a certain number of traces. The "trace filter" command specifies
 * how many of those packets should be retained in the trace.
 *
 * For example, 1Mpps of traffic is arriving and one of those packets is being
 * dropped. To capture the trace for only that dropped packet, you can do:
 *     trace filter include error-drop 1
 *     trace add dpdk-input 1000000
 *     <wait one second>
 *     show trace
 *
 * Note that the filter could be implemented by capturing all traces and just
 * reducing traces displayed by the "show trace" function. But that would
 * require a lot of memory for storing the traces, making that infeasible.
 *
 * To remove traces from the trace pool that do not include a certain node
 * requires that the trace be "complete" before applying the filter. To
 * accomplish this, the trace pool is filtered upon each iteration of the
 * main vlib loop. Doing so keeps the number of allocated traces down to a
 * reasonably low number. This requires that tracing for a buffer is not
 * performed after the vlib main loop iteration completes. i.e. you can't
 * save away a buffer temporarily then inject it back into the graph and
 * expect that the trace_index is still valid (such as a traffic manager might
 * do). A new trace buffer should be allocated for those types of packets.
 *
 * The filter can be extended to support multiple nodes and other match
 * criteria (e.g. input sw_if_index, mac address) but for now just checks if
 * a specified node is in the trace or not in the trace.
 */
static clib_error_t *
cli_filter_trace (vlib_main_t * vm,
		  unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vlib_trace_main_t *tm = &vm->trace_main;
  u32 filter_node_index;
  u32 filter_flag;
  u32 filter_count;

  if (unformat (input, "include %U %d",
		unformat_vlib_node, vm, &filter_node_index, &filter_count))
    {
      filter_flag = FILTER_FLAG_INCLUDE;
    }
  else if (unformat (input, "exclude %U %d",
		     unformat_vlib_node, vm, &filter_node_index,
		     &filter_count))
    {
      filter_flag = FILTER_FLAG_EXCLUDE;
    }
  else if (unformat (input, "none"))
    {
      filter_flag = FILTER_FLAG_NONE;
      filter_node_index = 0;
      filter_count = 0;
    }
  else
    return
      clib_error_create
      ("expected 'include NODE COUNT' or 'exclude NODE COUNT' or 'none', got `%U'",
       format_unformat_error, input);

  /* Broadcast the new filter settings to every thread's trace main. */
  /* *INDENT-OFF* */
  foreach_vlib_main (
    ({
      tm = &this_vlib_main->trace_main;
      tm->filter_node_index = filter_node_index;
      tm->filter_flag = filter_flag;
      tm->filter_count = filter_count;

      /*
       * Clear the trace limits to stop any in-progress tracing
       * Prevents runaway trace allocations when the filter changes
       * (or is removed)
       */
      vec_free (tm->nodes);
    }));
  /* *INDENT-ON* */

  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (filter_trace_cli,static) = {
  .path = "trace filter",
  .short_help = "filter trace output - include NODE COUNT | exclude NODE COUNT | none",
  .function = cli_filter_trace,
};
/* *INDENT-ON* */

/*
 * CLI handler for "clear trace": disable any packet trace filter, then
 * free all trace buffer memory on every thread.
 */
static clib_error_t *
cli_clear_trace_buffer (vlib_main_t * vm,
			unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vlib_enable_disable_pkt_trace_filter (0 /* enable */ );
  clear_trace_buffer ();
  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (clear_trace_cli,static) = {
  .path = "clear trace",
  .short_help = "Clear trace buffer and free memory",
  .function = cli_clear_trace_buffer,
};
/* *INDENT-ON* */

/* Dummy function to get us linked in. */
void
vlib_trace_cli_reference (void)
{
}

/* Weak default classifier hook: overridden when a real implementation is
   linked in.  The stub warns (it should never be reached) and traces
   everything. */
int
vnet_is_packet_traced (vlib_buffer_t * b,
		       u32 classify_table_index, int func)
__attribute__ ((weak));
int
vnet_is_packet_traced (vlib_buffer_t * b, u32 classify_table_index, int func)
{
  clib_warning ("BUG: STUB called");
  return 1;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */