/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * interface_output.c: interface output node
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <vnet/vnet.h>
#include <vnet/ip/icmp46_packet.h>
#include <vnet/ethernet/packet.h>
#include <vnet/ip/format.h>
#include <vnet/ip/ip4.h>
#include <vnet/ip/ip6.h>
#include <vnet/udp/udp_packet.h>
#include <vnet/feature/feature.h>
#include <vnet/classify/pcap_classify.h>
#include <vnet/hash/hash.h>
#include <vnet/interface_output.h>

typedef struct
{
  u32 sw_if_index;
  u32 flags;
  u8 data[128 - 2 * sizeof (u32)];
} interface_output_trace_t;

#ifndef CLIB_MARCH_VARIANT
u8 *
format_vnet_interface_output_trace (u8 * s, va_list * va)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
  vlib_node_t *node = va_arg (*va, vlib_node_t *);
  interface_output_trace_t *t = va_arg (*va, interface_output_trace_t *);
  vnet_main_t *vnm = vnet_get_main ();
  vnet_sw_interface_t *si;
  u32 indent;

  if (t->sw_if_index != (u32) ~0)
    {
      indent = format_get_indent (s);

      if (pool_is_free_index (vnm->interface_main.sw_interfaces,
                              t->sw_if_index))
        {
          /* the interface may have been deleted by the time the trace is
             printed */
          s = format (s, "sw_if_index: %d ", t->sw_if_index);
        }
      else
        {
          si = vnet_get_sw_interface (vnm, t->sw_if_index);
          s = format (s, "%U ", format_vnet_sw_interface_name, vnm, si,
                      t->flags);
        }
      s = format (s, "\n%U%U", format_white_space, indent,
                  node->format_buffer ? node->format_buffer : format_hex_bytes,
                  t->data, sizeof (t->data));
    }
  return s;
}
#endif /* CLIB_MARCH_VARIANT */

static void
vnet_interface_output_trace (vlib_main_t * vm,
                             vlib_node_runtime_t * node,
                             vlib_frame_t * frame, uword n_buffers)
{
  u32 n_left, *from;

  n_left = n_buffers;
  from = vlib_frame_vector_args (frame);

  while (n_left >= 4)
    {
      u32 bi0, bi1;
      vlib_buffer_t *b0, *b1;
      interface_output_trace_t *t0, *t1;

      /* Prefetch next iteration. */
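      /* Two packets are traced per pass while the buffer headers two slots
         ahead are pulled into cache, so the next pass does not stall on
         them. */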
      vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
      vlib_prefetch_buffer_with_index (vm, from[3], LOAD);

      bi0 = from[0];
      bi1 = from[1];

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);

      if (b0->flags & VLIB_BUFFER_IS_TRACED)
        {
          t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
          t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
          t0->flags = b0->flags;
          clib_memcpy_fast (t0->data, vlib_buffer_get_current (b0),
                            sizeof (t0->data));
        }
      if (b1->flags & VLIB_BUFFER_IS_TRACED)
        {
          t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
          t1->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_TX];
          t1->flags = b1->flags;
          clib_memcpy_fast (t1->data, vlib_buffer_get_current (b1),
                            sizeof (t1->data));
        }
      from += 2;
      n_left -= 2;
    }

  while (n_left >= 1)
    {
      u32 bi0;
      vlib_buffer_t *b0;
      interface_output_trace_t *t0;

      bi0 = from[0];
      b0 = vlib_get_buffer (vm, bi0);

      if (b0->flags & VLIB_BUFFER_IS_TRACED)
        {
          t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
          t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
          t0->flags = b0->flags;
          clib_memcpy_fast (t0->data, vlib_buffer_get_current (b0),
                            sizeof (t0->data));
        }
      from += 1;
      n_left -= 1;
    }
}

static_always_inline void
vnet_interface_output_handle_offload (vlib_main_t *vm, vlib_buffer_t *b)
{
  vnet_calc_checksums_inline (vm, b, b->flags & VNET_BUFFER_F_IS_IP4,
                              b->flags & VNET_BUFFER_F_IS_IP6);
}

static_always_inline uword
vnet_interface_output_node_inline (vlib_main_t *vm, u32 sw_if_index,
                                   vlib_combined_counter_main_t *ccm,
                                   vlib_buffer_t **b, u32 config_index,
                                   u8 arc, u32 n_left, int do_tx_offloads,
                                   int arc_or_subif)
{
  u32 n_bytes = 0;
  u32 n_bytes0, n_bytes1, n_bytes2, n_bytes3;
  u32 ti = vm->thread_index;

  while (n_left >= 8)
    {
      u32 or_flags;

      /* Prefetch next iteration. */
      vlib_prefetch_buffer_header (b[4], LOAD);
      vlib_prefetch_buffer_header (b[5], LOAD);
      vlib_prefetch_buffer_header (b[6], LOAD);
      vlib_prefetch_buffer_header (b[7], LOAD);
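      /* OR the flags of all four buffers so the checksum/offload fixup
         below is skipped entirely when none of them requests an offload. */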
      if (do_tx_offloads)
        or_flags = b[0]->flags | b[1]->flags | b[2]->flags | b[3]->flags;

      /* Be grumpy about zero length buffers for benefit of
         driver tx function. */
      ASSERT (b[0]->current_length > 0);
      ASSERT (b[1]->current_length > 0);
      ASSERT (b[2]->current_length > 0);
      ASSERT (b[3]->current_length > 0);

      n_bytes += n_bytes0 = vlib_buffer_length_in_chain (vm, b[0]);
      n_bytes += n_bytes1 = vlib_buffer_length_in_chain (vm, b[1]);
      n_bytes += n_bytes2 = vlib_buffer_length_in_chain (vm, b[2]);
      n_bytes += n_bytes3 = vlib_buffer_length_in_chain (vm, b[3]);

      if (arc_or_subif)
        {
          u32 tx_swif0, tx_swif1, tx_swif2, tx_swif3;
          tx_swif0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
          tx_swif1 = vnet_buffer (b[1])->sw_if_index[VLIB_TX];
          tx_swif2 = vnet_buffer (b[2])->sw_if_index[VLIB_TX];
          tx_swif3 = vnet_buffer (b[3])->sw_if_index[VLIB_TX];

          /* update vlan subif tx counts, if required */
          if (PREDICT_FALSE (tx_swif0 != sw_if_index))
            vlib_increment_combined_counter (ccm, ti, tx_swif0, 1, n_bytes0);

          if (PREDICT_FALSE (tx_swif1 != sw_if_index))
            vlib_increment_combined_counter (ccm, ti, tx_swif1, 1, n_bytes1);

          if (PREDICT_FALSE (tx_swif2 != sw_if_index))
            vlib_increment_combined_counter (ccm, ti, tx_swif2, 1, n_bytes2);

          if (PREDICT_FALSE (tx_swif3 != sw_if_index))
            vlib_increment_combined_counter (ccm, ti, tx_swif3, 1, n_bytes3);

          if (PREDICT_FALSE (config_index != ~0))
            {
              vnet_buffer (b[0])->feature_arc_index = arc;
              b[0]->current_config_index = config_index;
              vnet_buffer (b[1])->feature_arc_index = arc;
              b[1]->current_config_index = config_index;
              vnet_buffer (b[2])->feature_arc_index = arc;
              b[2]->current_config_index = config_index;
              vnet_buffer (b[3])->feature_arc_index = arc;
              b[3]->current_config_index = config_index;
            }
        }

      if (do_tx_offloads && (or_flags & VNET_BUFFER_F_OFFLOAD))
        {
          vnet_interface_output_handle_offload (vm, b[0]);
          vnet_interface_output_handle_offload (vm, b[1]);
          vnet_interface_output_handle_offload (vm, b[2]);
          vnet_interface_output_handle_offload (vm, b[3]);
        }

      n_left -= 4;
      b += 4;
    }

  while (n_left)
    {
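      /* Scalar tail: same accounting as the unrolled loop above, one buffer
         at a time. */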
      /* Be grumpy about zero length buffers for benefit of
         driver tx function. */
      ASSERT (b[0]->current_length > 0);

      n_bytes += n_bytes0 = vlib_buffer_length_in_chain (vm, b[0]);

      if (arc_or_subif)
        {
          u32 tx_swif0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];

          if (PREDICT_FALSE (config_index != ~0))
            {
              vnet_buffer (b[0])->feature_arc_index = arc;
              b[0]->current_config_index = config_index;
            }

          if (PREDICT_FALSE (tx_swif0 != sw_if_index))
            vlib_increment_combined_counter (ccm, ti, tx_swif0, 1, n_bytes0);
        }

      if (do_tx_offloads)
        vnet_interface_output_handle_offload (vm, b[0]);

      n_left -= 1;
      b += 1;
    }

  return n_bytes;
}

static_always_inline void
vnet_interface_pcap_tx_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
                              vlib_frame_t * frame,
                              int sw_if_index_from_buffer)
{
  vnet_main_t *vnm = vnet_get_main ();
  u32 n_left_from, *from;
  u32 sw_if_index;
  vnet_pcap_t *pp = &vnm->pcap;

  if (PREDICT_TRUE (pp->pcap_tx_enable == 0))
    return;

  if (sw_if_index_from_buffer == 0)
    {
      vnet_interface_output_runtime_t *rt = (void *) node->runtime_data;
      sw_if_index = rt->sw_if_index;
    }
  else
    sw_if_index = ~0;

  n_left_from = frame->n_vectors;
  from = vlib_frame_vector_args (frame);

  while (n_left_from > 0)
    {
      u32 bi0 = from[0];
      vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
      from++;
      n_left_from--;

      if (sw_if_index_from_buffer)
        sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];

      if (vnet_is_packet_pcaped (pp, b0, sw_if_index))
        pcap_add_buffer (&pp->pcap_main, vm, bi0, pp->max_bytes_per_pkt);
    }
}

static_always_inline void
store_tx_frame_scalar_data (vnet_hw_if_output_node_runtime_t *r,
                            vnet_hw_if_tx_frame_t *tf)
{
  if (r)
    clib_memcpy_fast (tf, &r->frame, sizeof (vnet_hw_if_tx_frame_t));
}

static_always_inline void
enqueu_to_tx_node (vlib_main_t *vm, vlib_node_runtime_t *node,
                   vnet_hw_interface_t *hi, u32 *from, u32 n_vectors)
{
  u32 next_index = VNET_INTERFACE_OUTPUT_NEXT_TX;
  vnet_hw_if_output_node_runtime_t *r = 0;
  u32 n_free, n_copy, *to;
  vnet_hw_if_tx_frame_t *tf;
  vlib_frame_t *f;

  ASSERT (n_vectors <= VLIB_FRAME_SIZE);

  if (hi->output_node_thread_runtimes)
    r = vec_elt_at_index (hi->output_node_thread_runtimes, vm->thread_index);

  f = vlib_get_next_frame_internal (vm, node, next_index, 0);
  tf = vlib_frame_scalar_args (f);

  if (f->n_vectors > 0 && (r == 0 || tf->queue_id == r->frame.queue_id))
    {
      /* append current next frame */
      n_free = VLIB_FRAME_SIZE - f->n_vectors;
      n_copy = clib_min (n_vectors, n_free);
      n_vectors -= n_copy;
      to = vlib_frame_vector_args (f);
      to += f->n_vectors;
    }
  else
    {
      if (f->n_vectors > 0)
        {
          /* current frame doesn't fit - grab empty one */
          f = vlib_get_next_frame_internal (vm, node, next_index, 1);
          tf = vlib_frame_scalar_args (f);
        }

      /* empty frame - store scalar data */
      store_tx_frame_scalar_data (r, tf);
      to = vlib_frame_vector_args (f);
      n_free = VLIB_FRAME_SIZE;
      n_copy = n_vectors;
      n_vectors = 0;
    }

  vlib_buffer_copy_indices (to, from, n_copy);
  vlib_put_next_frame (vm, node, next_index, n_free - n_copy);

  if (n_vectors == 0)
    return;

  /* we have more indices to store, take empty frame */
  from += n_copy;
  f = vlib_get_next_frame_internal (vm, node, next_index, 1);
  store_tx_frame_scalar_data (r, vlib_frame_scalar_args (f));
  vlib_buffer_copy_indices (vlib_frame_vector_args (f), from, n_vectors);
  vlib_put_next_frame (vm, node, next_index, VLIB_FRAME_SIZE - n_vectors);
}
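/* Per-interface output node: updates interface (and sub-interface) tx
   counters, applies tx checksum offload fixups when requested, handles
   pcap tx capture and packet tracing, then hands the frame towards the
   interface tx node. */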
VLIB_NODE_FN (vnet_interface_output_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  vnet_main_t *vnm = vnet_get_main ();
  vnet_interface_main_t *im = &vnm->interface_main;
  vlib_combined_counter_main_t *ccm;
  vnet_hw_interface_t *hi;
  vnet_sw_interface_t *si;
  vnet_interface_output_runtime_t *rt = (void *) node->runtime_data;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
  u32 n_bytes, n_buffers = frame->n_vectors;
  u32 config_index = ~0;
  u32 sw_if_index = rt->sw_if_index;
  u32 next_index = VNET_INTERFACE_OUTPUT_NEXT_TX;
  u32 ti = vm->thread_index;
  u8 arc = im->output_feature_arc_index;
  int arc_or_subif = 0;
  int do_tx_offloads = 0;
  u32 *from;

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    vnet_interface_output_trace (vm, node, frame, n_buffers);

  from = vlib_frame_vector_args (frame);

  if (rt->is_deleted)
    return vlib_error_drop_buffers (
      vm, node, from,
      /* buffer stride */ 1, n_buffers, VNET_INTERFACE_OUTPUT_NEXT_DROP,
      node->node_index, VNET_INTERFACE_OUTPUT_ERROR_INTERFACE_DELETED);

  vnet_interface_pcap_tx_trace (vm, node, frame,
                                0 /* sw_if_index_from_buffer */);

  vlib_get_buffers (vm, from, bufs, n_buffers);

  si = vnet_get_sw_interface (vnm, sw_if_index);