diff options
Diffstat (limited to 'vnet/vnet/lisp-gpe/encap.c')
-rw-r--r-- | vnet/vnet/lisp-gpe/encap.c | 319 |
1 files changed, 126 insertions, 193 deletions
diff --git a/vnet/vnet/lisp-gpe/encap.c b/vnet/vnet/lisp-gpe/encap.c index b3a52c464be..a8158782d8c 100644 --- a/vnet/vnet/lisp-gpe/encap.c +++ b/vnet/vnet/lisp-gpe/encap.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 Cisco and/or its affiliates. + * Copyright (c) 2016 Cisco and/or its affiliates. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: @@ -12,10 +12,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ + #include <vppinfra/error.h> #include <vppinfra/hash.h> #include <vnet/vnet.h> #include <vnet/ip/ip.h> +#include <vnet/ip/udp.h> #include <vnet/ethernet/ethernet.h> #include <vnet/lisp-gpe/lisp_gpe.h> @@ -31,45 +33,41 @@ static char * lisp_gpe_encap_error_strings[] = { typedef enum { #define _(sym,str) LISP_GPE_ENCAP_ERROR_##sym, - foreach_lisp_gpe_encap_error + foreach_lisp_gpe_encap_error #undef _ - LISP_GPE_ENCAP_N_ERROR, + LISP_GPE_ENCAP_N_ERROR, } lisp_gpe_encap_error_t; -typedef enum { - LISP_GPE_ENCAP_NEXT_IP4_LOOKUP, - LISP_GPE_ENCAP_NEXT_DROP, - LISP_GPE_ENCAP_N_NEXT, +typedef enum +{ + LISP_GPE_ENCAP_NEXT_DROP, + LISP_GPE_ENCAP_NEXT_IP4_LOOKUP, + LISP_GPE_ENCAP_N_NEXT, } lisp_gpe_encap_next_t; -typedef struct { +typedef struct +{ u32 tunnel_index; } lisp_gpe_encap_trace_t; -u8 * format_lisp_gpe_encap_trace (u8 * s, va_list * args) +u8 * +format_lisp_gpe_encap_trace (u8 * s, va_list * args) { CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); - lisp_gpe_encap_trace_t * t - = va_arg (*args, lisp_gpe_encap_trace_t *); + lisp_gpe_encap_trace_t * t = va_arg (*args, lisp_gpe_encap_trace_t *); s = format (s, "LISP-GPE-ENCAP: tunnel %d", t->tunnel_index); return s; } -#define foreach_fixed_header_offset \ -_(0) _(1) _(2) _(3) - static uword -lisp_gpe_encap (vlib_main_t * vm, - vlib_node_runtime_t * 
node, - vlib_frame_t * from_frame) +lisp_gpe_encap (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * from_frame) { u32 n_left_from, next_index, * from, * to_next; - lisp_gpe_main_t * ngm = &lisp_gpe_main; - vnet_main_t * vnm = ngm->vnet_main; + lisp_gpe_main_t * lgm = &lisp_gpe_main; u32 pkts_encapsulated = 0; - u16 old_l0 = 0; from = vlib_frame_vector_args (from_frame); n_left_from = from_frame->n_vectors; @@ -81,196 +79,131 @@ lisp_gpe_encap (vlib_main_t * vm, u32 n_left_to_next; vlib_get_next_frame (vm, node, next_index, - to_next, n_left_to_next); + to_next, n_left_to_next); -#if 0 while (n_left_from >= 4 && n_left_to_next >= 2) - { - u32 bi0, bi1; - vlib_buffer_t * b0, * b1; - nsh_unicast_header_t * h0, * h1; - u32 label0, label1; - u32 next0, next1; - uword * p0, * p1; - - /* Prefetch next iteration. */ - { - vlib_buffer_t * p2, * p3; - - p2 = vlib_get_buffer (vm, from[2]); - p3 = vlib_get_buffer (vm, from[3]); - - vlib_prefetch_buffer_header (p2, LOAD); - vlib_prefetch_buffer_header (p3, LOAD); - - CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD); - CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD); - } - - bi0 = from[0]; - bi1 = from[1]; - to_next[0] = bi0; - to_next[1] = bi1; - from += 2; - to_next += 2; - n_left_to_next -= 2; - n_left_from -= 2; - - b0 = vlib_get_buffer (vm, bi0); - b1 = vlib_get_buffer (vm, bi1); - - h0 = vlib_buffer_get_current (b0); - h1 = vlib_buffer_get_current (b1); - - next0 = next1 = NSH_INPUT_NEXT_IP4_INPUT; - - label0 = clib_net_to_host_u32 (h0->label_exp_s_ttl); - label1 = clib_net_to_host_u32 (h1->label_exp_s_ttl); - - /* - * Translate label contents into a fib index. 
- * This is a decent sanity check, and guarantees - * a sane FIB for the downstream lookup - */ - label0 = vnet_nsh_uc_get_label (label0); - label1 = vnet_nsh_uc_get_label (label1); - - /* If 2xlabels match, and match the 1-wide cache, use it */ - if (label0 == label1 && rt->last_label == label0) - { - vnet_buffer(b0)->sw_if_index[VLIB_TX] = rt->last_fib_index; - vnet_buffer(b1)->sw_if_index[VLIB_TX] = rt->last_fib_index; - } - else + { + u32 bi0, bi1; + vlib_buffer_t * b0, * b1; + u32 next0, next1; + u32 adj_index0, adj_index1, tunnel_index0, tunnel_index1; + ip_adjacency_t * adj0, * adj1; + lisp_gpe_tunnel_t * t0, * t1; + + next0 = next1 = LISP_GPE_ENCAP_NEXT_IP4_LOOKUP; + + /* Prefetch next iteration. */ { - p0 = hash_get (rt->mm->fib_index_by_nsh_label, label0); - if (PREDICT_FALSE (p0 == 0)) - { - next0 = NSH_INPUT_NEXT_DROP; - b0->error = node->errors[NSH_ERROR_BAD_LABEL]; - } - else - vnet_buffer(b0)->sw_if_index[VLIB_TX] = p0[0]; - - p1 = hash_get (rt->mm->fib_index_by_nsh_label, label1); - if (PREDICT_FALSE (p1 == 0)) - { - next1 = NSH_INPUT_NEXT_DROP; - b1->error = node->errors[NSH_ERROR_BAD_LABEL]; - } - else - { - vnet_buffer(b1)->sw_if_index[VLIB_TX] = p1[0]; - rt->last_fib_index = p1[0]; - rt->last_label = label1; - } + vlib_buffer_t * p2, *p3; + + p2 = vlib_get_buffer (vm, from[2]); + p3 = vlib_get_buffer (vm, from[3]); + + vlib_prefetch_buffer_header(p2, LOAD); + vlib_prefetch_buffer_header(p3, LOAD); + + CLIB_PREFETCH(p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD); + CLIB_PREFETCH(p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD); } - if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) + bi0 = from[0]; + bi1 = from[1]; + to_next[0] = bi0; + to_next[1] = bi1; + from += 2; + to_next += 2; + n_left_to_next -= 2; + n_left_from -= 2; + + b0 = vlib_get_buffer (vm, bi0); + b1 = vlib_get_buffer (vm, bi1); + + /* Get adjacency and from it the tunnel_index */ + adj_index0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX]; + adj_index1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX]; + + 
adj0 = ip_get_adjacency (lgm->lookup_main, adj_index0); + adj1 = ip_get_adjacency (lgm->lookup_main, adj_index1); + + tunnel_index0 = adj0->rewrite_header.sw_if_index; + tunnel_index1 = adj1->rewrite_header.sw_if_index; + + t0 = pool_elt_at_index (lgm->tunnels, tunnel_index0); + t1 = pool_elt_at_index (lgm->tunnels, tunnel_index1); + + ASSERT(t0 != 0); + ASSERT(t1 != 0); + + ASSERT (sizeof(ip4_udp_lisp_gpe_header_t) == 36); + ip4_udp_encap_two (vm, b0, b1, t0->rewrite, t1->rewrite, 36); + + /* Reset to look up tunnel partner in the configured FIB */ + vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index; + vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->encap_fib_index; + + if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) { - nsh_rx_trace_t *tr = vlib_add_trace (vm, node, - b0, sizeof (*tr)); - tr->label_exp_s_ttl = label0; + lisp_gpe_encap_trace_t *tr = vlib_add_trace (vm, node, b0, + sizeof(*tr)); + tr->tunnel_index = t0 - lgm->tunnels; } - if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED)) + if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED)) { - nsh_rx_trace_t *tr = vlib_add_trace (vm, node, - b1, sizeof (*tr)); - tr->label_exp_s_ttl = label1; + lisp_gpe_encap_trace_t *tr = vlib_add_trace (vm, node, b1, + sizeof(*tr)); + tr->tunnel_index = t1 - lgm->tunnels; } - vlib_buffer_advance (b0, sizeof (*h0)); - vlib_buffer_advance (b1, sizeof (*h1)); + pkts_encapsulated += 2; + + vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, + n_left_to_next, bi0, bi1, next0, + next1); + } - vlib_validate_buffer_enqueue_x2 (vm, node, next_index, - to_next, n_left_to_next, - bi0, bi1, next0, next1); - } -#endif - while (n_left_from > 0 && n_left_to_next > 0) - { - u32 bi0; - vlib_buffer_t * b0; - u32 next0 = LISP_GPE_ENCAP_NEXT_IP4_LOOKUP; - vnet_hw_interface_t * hi0; - ip4_header_t * ip0; - udp_header_t * udp0; - u64 * copy_src0, * copy_dst0; - u32 * copy_src_last0, * copy_dst_last0; - lisp_gpe_tunnel_t * t0; - u16 new_l0; - ip_csum_t sum0; - - bi0 = 
from[0]; - to_next[0] = bi0; - from += 1; - to_next += 1; - n_left_from -= 1; - n_left_to_next -= 1; - - b0 = vlib_get_buffer (vm, bi0); - - /* 1-wide cache? */ - hi0 = vnet_get_sup_hw_interface - (vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]); - - t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance); - - ASSERT(vec_len(t0->rewrite) >= 24); - - /* Apply the rewrite string. $$$$ vnet_rewrite? */ - vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite)); - - ip0 = vlib_buffer_get_current(b0); - /* Copy the fixed header */ - copy_dst0 = (u64 *) ip0; - copy_src0 = (u64 *) t0->rewrite; - - ASSERT (sizeof (ip4_udp_lisp_gpe_header_t) == 36); - - /* Copy first 32 octets 8-bytes at a time */ -#define _(offs) copy_dst0[offs] = copy_src0[offs]; - foreach_fixed_header_offset; -#undef _ - /* Last 4 octets. Hopefully gcc will be our friend */ - copy_dst_last0 = (u32 *)(&copy_dst0[4]); - copy_src_last0 = (u32 *)(&copy_src0[4]); - - copy_dst_last0[0] = copy_src_last0[0]; - - /* fix the <bleep>ing outer-IP checksum */ - sum0 = ip0->checksum; - /* old_l0 always 0, see the rewrite setup */ - new_l0 = - clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)); - - sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t, - length /* changed member */); - ip0->checksum = ip_csum_fold (sum0); - ip0->length = new_l0; - - /* Fix UDP length */ - udp0 = (udp_header_t *)(ip0+1); - new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) - - sizeof (*ip0)); - - udp0->length = new_l0; + { + vlib_buffer_t * b0; + u32 bi0, adj_index0, tunnel_index0; + u32 next0 = LISP_GPE_ENCAP_NEXT_IP4_LOOKUP; + lisp_gpe_tunnel_t * t0 = 0; + ip_adjacency_t * adj0; + + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + + /* Get adjacency and from it the tunnel_index */ + adj_index0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX]; + adj0 = ip_get_adjacency (lgm->lookup_main, adj_index0); + + tunnel_index0 = 
adj0->rewrite_header.sw_if_index; + t0 = pool_elt_at_index (lgm->tunnels, tunnel_index0); + + ASSERT(t0 != 0); + + ASSERT (sizeof(ip4_udp_lisp_gpe_header_t) == 36); + ip4_udp_encap_one (vm, b0, t0->rewrite, 36); /* Reset to look up tunnel partner in the configured FIB */ vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index; - pkts_encapsulated ++; + + pkts_encapsulated++; if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) { - lisp_gpe_encap_trace_t *tr = - vlib_add_trace (vm, node, b0, sizeof (*tr)); - tr->tunnel_index = t0 - ngm->tunnels; + lisp_gpe_encap_trace_t *tr = vlib_add_trace (vm, node, b0, + sizeof(*tr)); + tr->tunnel_index = t0 - lgm->tunnels; } - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, - to_next, n_left_to_next, - bi0, next0); - } + vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, + n_left_to_next, bi0, next0); + } vlib_put_next_frame (vm, node, next_index, n_left_to_next); } @@ -293,7 +226,7 @@ VLIB_REGISTER_NODE (lisp_gpe_encap_node) = { .n_next_nodes = LISP_GPE_ENCAP_N_NEXT, .next_nodes = { - [LISP_GPE_ENCAP_NEXT_IP4_LOOKUP] = "ip4-lookup", - [LISP_GPE_ENCAP_NEXT_DROP] = "error-drop", + [LISP_GPE_ENCAP_NEXT_DROP] = "error-drop", + [LISP_GPE_ENCAP_NEXT_IP4_LOOKUP] = "ip4-lookup", }, }; |