/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * ip/ip4_input.c: IP v4 input node
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/* The bracketed include paths were lost in extraction; these are the
   headers this translation unit is assumed to depend on. */
#include <vnet/ip/ip.h>
#include <vnet/ip/ip4_input.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/ppp/ppp.h>
#include <vnet/hdlc/hdlc.h>

typedef struct
{
  u8 packet_data[64];
} ip4_input_trace_t;

static u8 *
format_ip4_input_trace (u8 * s, va_list * va)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
  ip4_input_trace_t *t = va_arg (*va, ip4_input_trace_t *);

  s = format (s, "%U",
	      format_ip4_header, t->packet_data, sizeof (t->packet_data));

  return s;
}

static_always_inline u32
ip4_input_set_next (u32 sw_if_index, vlib_buffer_t * b, int arc_enabled)
{
  ip4_main_t *im = &ip4_main;
  ip_lookup_main_t *lm = &im->lookup_main;
  u32 next;
  u8 arc;

  ip4_header_t *ip = vlib_buffer_get_current (b);
  if (PREDICT_FALSE (ip4_address_is_multicast (&ip->dst_address)))
    {
      next = IP4_INPUT_NEXT_LOOKUP_MULTICAST;
      arc = lm->mcast_feature_arc_index;
    }
  else
    {
      next = IP4_INPUT_NEXT_LOOKUP;
      arc = lm->ucast_feature_arc_index;
    }

  if (arc_enabled)
    vnet_feature_arc_start (arc, sw_if_index, &next, b);

  return next;
}

static_always_inline void
ip4_input_check_sw_if_index (vlib_main_t * vm,
			     vlib_simple_counter_main_t * cm,
			     u32 sw_if_index, u32 * last_sw_if_index,
			     u32 * cnt, int *arc_enabled)
{
  ip4_main_t *im = &ip4_main;
  ip_lookup_main_t *lm = &im->lookup_main;
  u32 thread_index;
  if (*last_sw_if_index == sw_if_index)
    {
      (*cnt)++;
      return;
    }

  thread_index = vm->thread_index;
  if (*cnt)
    vlib_increment_simple_counter (cm, thread_index, *last_sw_if_index,
				   *cnt);
  *cnt = 1;
  *last_sw_if_index = sw_if_index;

  if (vnet_have_features (lm->ucast_feature_arc_index, sw_if_index) ||
      vnet_have_features (lm->mcast_feature_arc_index, sw_if_index))
    *arc_enabled = 1;
  else
    *arc_enabled = 0;
}

/* Validate IP v4 packets and pass them either to forwarding code
   or drop/punt exception packets. */
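
/* Sketch of the dispatch strategy used by ip4_input_inline below: the frame
   is walked four packets at a time, prefetching buffer headers and packet
   data for later iterations; the IP4 RX interface counter is batched across
   runs of consecutive packets from the same sw_if_index via
   ip4_input_check_sw_if_index; and each packet's next node is chosen by
   ip4_input_set_next, which starts the unicast or multicast feature arc when
   features are enabled on the receiving interface.  The verify_checksum
   argument selects whether the IP header checksum is validated as part of
   the per-packet checks. */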
always_inline uword
ip4_input_inline (vlib_main_t * vm,
		  vlib_node_runtime_t * node,
		  vlib_frame_t * frame, int verify_checksum)
{
  vnet_main_t *vnm = vnet_get_main ();
  u32 n_left_from, *from;
  u32 thread_index = vm->thread_index;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_input_node.index);
  vlib_simple_counter_main_t *cm;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
  ip4_header_t *ip[4];
  u16 nexts[VLIB_FRAME_SIZE], *next;
  u32 sw_if_index[4];
  u32 last_sw_if_index = ~0;
  u32 cnt = 0;
  int arc_enabled = 0;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    vlib_trace_frame_buffers_only (vm, node, from, frame->n_vectors,
				   /* stride */ 1,
				   sizeof (ip4_input_trace_t));

  cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
			 VNET_INTERFACE_COUNTER_IP4);

  vlib_get_buffers (vm, from, bufs, n_left_from);
  b = bufs;
  next = nexts;
  while (n_left_from >= 4)
    {
      u32 x = 0;

      /* Prefetch next iteration. */
      if (n_left_from >= 12)
	{
	  vlib_prefetch_buffer_header (b[8], LOAD);
	  vlib_prefetch_buffer_header (b[9], LOAD);
	  vlib_prefetch_buffer_header (b[10], LOAD);
	  vlib_prefetch_buffer_header (b[11], LOAD);

	  vlib_prefetch_buffer_data (b[4], LOAD);
	  vlib_prefetch_buffer_data (b[5], LOAD);
	  vlib_prefetch_buffer_data (b[6], LOAD);
	  vlib_prefetch_buffer_data (b[7], LOAD);
	}

      vnet_buffer (b[0])->ip.adj_index[VLIB_RX] = ~0;
      vnet_buffer (b[1])->ip.adj_index[VLIB_RX] = ~0;
      vnet_buffer (b[2])->ip.adj_index[VLIB_RX] = ~0;
      vnet_buffer (b[3])->ip.adj_index[VLIB_RX] = ~0;

      sw_if_index[0] = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
      sw_if_index[1] = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
      sw_if_index[2] = vnet_buffer (b[2])->sw_if_index[VLIB_RX];
      sw_if_index[3] = vnet_buffer (b[3])->sw_if_index[VLIB_RX];

      x |= sw_if_index[0] ^ last_sw_if_index;
      x |= sw_if_index[1] ^ last_sw_if