From 94b1442a6731bb879797c016a8febaec2f2ae7c9 Mon Sep 17 00:00:00 2001 From: "Keith Burns (alagalah)" Date: Thu, 5 May 2016 18:16:50 -0700 Subject: VPP43 - NSH refactoring: Added nsh-map nodes - removed vnet/vnet/nsh-gre - removed all nsh from vnet/vnet/nsh_vxlan_gpe to vnet/vnet/nsh - moved vnet/vnet/nsh_vxlan_gpe to vnet/vnet/vxlan_gpe - added cli and binary api for VXLAN GPE tunnels - plan to move vnet/vnet/nsh to new repo (sfc_nsh) and make plugin - added cli for NSH (binary API will be done in sfc_nsh) - vnet/vnet/gre will be extended in VPP-54 Change-Id: I1d27def916532321577ccd68cb982ae0d0a07e6f Signed-off-by: Keith Burns (alagalah) --- vnet/Makefile.am | 35 +- vnet/vnet/gre/node.c | 3 +- vnet/vnet/nsh-gre/decap.c | 445 ------------ vnet/vnet/nsh-gre/encap.c | 376 ---------- vnet/vnet/nsh-gre/nsh_gre.c | 552 --------------- vnet/vnet/nsh-gre/nsh_gre.h | 110 --- vnet/vnet/nsh-vxlan-gpe/decap.c | 537 --------------- vnet/vnet/nsh-vxlan-gpe/encap.c | 422 ------------ vnet/vnet/nsh-vxlan-gpe/nsh_vxlan_gpe.c | 577 ---------------- vnet/vnet/nsh-vxlan-gpe/nsh_vxlan_gpe.h | 138 ---- vnet/vnet/nsh-vxlan-gpe/nsh_vxlan_gpe_error.def | 16 - vnet/vnet/nsh-vxlan-gpe/vxlan-gpe-rfc.txt | 868 ------------------------ vnet/vnet/nsh-vxlan-gpe/vxlan_gpe_packet.h | 74 -- vnet/vnet/nsh/nsh.c | 770 +++++++++++++++++++++ vnet/vnet/nsh/nsh.h | 108 +++ vnet/vnet/nsh/nsh_error.def | 6 +- vnet/vnet/nsh/nsh_gre_error.def | 17 + vnet/vnet/nsh/nsh_packet.h | 11 +- vnet/vnet/vxlan-gpe/decap.c | 450 ++++++++++++ vnet/vnet/vxlan-gpe/encap.c | 425 ++++++++++++ vnet/vnet/vxlan-gpe/vxlan-gpe-rfc.txt | 868 ++++++++++++++++++++++++ vnet/vnet/vxlan-gpe/vxlan_gpe.c | 467 +++++++++++++ vnet/vnet/vxlan-gpe/vxlan_gpe.h | 139 ++++ vnet/vnet/vxlan-gpe/vxlan_gpe_error.def | 16 + vnet/vnet/vxlan-gpe/vxlan_gpe_packet.h | 82 +++ vpp-api-test/vat/api_format.c | 373 +--------- vpp/api/api.c | 121 +--- vpp/api/custom_dump.c | 104 +-- vpp/api/vpe.api | 24 +- 29 files changed, 3447 insertions(+), 4687 deletions(-) delete mode 100644 vnet/vnet/nsh-gre/decap.c delete mode 100644 vnet/vnet/nsh-gre/encap.c delete mode 100644 vnet/vnet/nsh-gre/nsh_gre.c delete mode 100644 vnet/vnet/nsh-gre/nsh_gre.h delete mode 100644 vnet/vnet/nsh-vxlan-gpe/decap.c delete mode 100644 vnet/vnet/nsh-vxlan-gpe/encap.c delete mode 100644 vnet/vnet/nsh-vxlan-gpe/nsh_vxlan_gpe.c delete mode 100644 vnet/vnet/nsh-vxlan-gpe/nsh_vxlan_gpe.h delete mode 100644 vnet/vnet/nsh-vxlan-gpe/nsh_vxlan_gpe_error.def delete mode 100644 vnet/vnet/nsh-vxlan-gpe/vxlan-gpe-rfc.txt delete mode 100644 vnet/vnet/nsh-vxlan-gpe/vxlan_gpe_packet.h create mode 100644 vnet/vnet/nsh/nsh.c create mode 100644 vnet/vnet/nsh/nsh.h create mode 100644 vnet/vnet/nsh/nsh_gre_error.def create mode 100644 vnet/vnet/vxlan-gpe/decap.c create mode 100644 vnet/vnet/vxlan-gpe/encap.c create mode 100644 vnet/vnet/vxlan-gpe/vxlan-gpe-rfc.txt create mode 100644 vnet/vnet/vxlan-gpe/vxlan_gpe.c create mode 100644 vnet/vnet/vxlan-gpe/vxlan_gpe.h create mode 100644 vnet/vnet/vxlan-gpe/vxlan_gpe_error.def create mode 100644 vnet/vnet/vxlan-gpe/vxlan_gpe_packet.h diff --git a/vnet/Makefile.am b/vnet/Makefile.am index 1239a8b8df4..8a9f214ca70 100644 --- a/vnet/Makefile.am +++ b/vnet/Makefile.am @@ -399,37 +399,40 @@ nobase_include_HEADERS += \ ######################################## # NSH Map: nsh ######################################## + libvnet_la_SOURCES += \ + vnet/nsh/nsh.c -nobase_include_HEADERS += \ - vnet/nsh/nsh_packet.h \ - vnet/nsh/nsh_error.def + nobase_include_HEADERS += \ + vnet/nsh/nsh_packet.h \ 
+ vnet/nsh/nsh.h \ + vnet/nsh/nsh_error.def ######################################## # Tunnel protocol: nsh-gre ######################################## -libvnet_la_SOURCES += \ - vnet/nsh-gre/nsh_gre.c \ - vnet/nsh-gre/encap.c \ - vnet/nsh-gre/decap.c +# libvnet_la_SOURCES += \ +# vnet/nsh-gre/nsh_gre.c \ +# vnet/nsh-gre/encap.c \ +# vnet/nsh-gre/decap.c -nobase_include_HEADERS += \ - vnet/nsh-gre/nsh_gre.h +# nobase_include_HEADERS += \ +# vnet/nsh-gre/nsh_gre.h ######################################## -# Tunnel protocol: nsh-vxlan-gpe +# Tunnel protocol: vxlan-gpe ######################################## libvnet_la_SOURCES += \ - vnet/nsh-vxlan-gpe/nsh_vxlan_gpe.c \ - vnet/nsh-vxlan-gpe/encap.c \ - vnet/nsh-vxlan-gpe/decap.c + vnet/vxlan-gpe/vxlan_gpe.c \ + vnet/vxlan-gpe/encap.c \ + vnet/vxlan-gpe/decap.c nobase_include_HEADERS += \ - vnet/nsh-vxlan-gpe/nsh_vxlan_gpe.h \ - vnet/nsh-vxlan-gpe/vxlan_gpe_packet.h \ - vnet/nsh-vxlan-gpe/nsh_vxlan_gpe_error.def + vnet/vxlan-gpe/vxlan_gpe.h \ + vnet/vxlan-gpe/vxlan_gpe_packet.h \ + vnet/vxlan-gpe/vxlan_gpe_error.def ######################################## # LISP control plane: lisp-cp diff --git a/vnet/vnet/gre/node.c b/vnet/vnet/gre/node.c index 7d07223fc71..5809c5d3e6f 100644 --- a/vnet/vnet/gre/node.c +++ b/vnet/vnet/gre/node.c @@ -24,7 +24,8 @@ _(PUNT, "error-punt") \ _(DROP, "error-drop") \ _(IP4_INPUT, "ip4-input") \ -_(IP6_INPUT, "ip6-input") +_(IP6_INPUT, "ip6-input") \ +_(NSH_INPUT, "nsh-input") typedef enum { #define _(s,n) GRE_INPUT_NEXT_##s, diff --git a/vnet/vnet/nsh-gre/decap.c b/vnet/vnet/nsh-gre/decap.c deleted file mode 100644 index c10b11b3070..00000000000 --- a/vnet/vnet/nsh-gre/decap.c +++ /dev/null @@ -1,445 +0,0 @@ -/* - * nsh.c: nsh packet processing - * - * Copyright (c) 2013 Cisco and/or its affiliates. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include -#include -#include -#include - -vlib_node_registration_t nsh_input_node; - -typedef struct { - u32 next_index; - u32 tunnel_index; - u32 error; - nsh_header_t h; -} nsh_rx_trace_t; - - -u8 * format_nsh_header_with_length (u8 * s, va_list * args) -{ - nsh_header_t * h = va_arg (*args, nsh_header_t *); - u32 max_header_bytes = va_arg (*args, u32); - u32 tmp, header_bytes; - - header_bytes = sizeof (h[0]); - if (max_header_bytes != 0 && header_bytes > max_header_bytes) - return format (s, "gre-nsh header truncated"); - - s = format (s, "ver %d ", h->ver_o_c>>6); - - if (h->ver_o_c & NSH_O_BIT) - s = format (s, "O-set "); - - if (h->ver_o_c & NSH_C_BIT) - s = format (s, "C-set "); - - s = format (s, "len %d (%d bytes) md_type %d next_protocol %d\n", - h->length, h->length * 4, h->md_type, h->next_protocol); - - tmp = clib_net_to_host_u32 (h->spi_si); - - s = format (s, " spi %d si %d ", - (tmp>>NSH_SPI_SHIFT) & NSH_SPI_MASK, - tmp & NSH_SINDEX_MASK); - - s = format (s, "c1 %u c2 %u c3 %u c4 %u", - clib_net_to_host_u32 (h->c1), - clib_net_to_host_u32 (h->c2), - clib_net_to_host_u32 (h->c3), - clib_net_to_host_u32 (h->c4)); - - return s; -} - - -u8 * format_nsh_rx_trace (u8 * s, va_list * args) -{ - CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); - CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); - nsh_rx_trace_t * t = va_arg (*args, nsh_rx_trace_t *); - - if (t->tunnel_index != ~0) - { - s = format (s, "NSH: tunnel %d next %d error %d", t->tunnel_index, - t->next_index, t->error); - } - else - { - s = format (s, "NSH: no tunnel next %d error %d\n", t->next_index, - t->error); - } - s = format (s, "\n %U", format_nsh_header_with_length, &t->h, - (u32) sizeof (t->h) /* max size */); - return s; -} - -static uword -nsh_gre_input (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * from_frame) -{ - u32 n_left_from, next_index, * from, * to_next; - nsh_gre_main_t * ngm = &nsh_gre_main; - vnet_main_t * vnm = ngm->vnet_main; - vnet_interface_main_t * im = &vnm->interface_main; - u32 last_tunnel_index = ~0; - u64 last_key = ~0ULL; - u32 pkts_decapsulated = 0; - u32 cpu_index = os_get_cpu_number(); - u32 stats_sw_if_index, stats_n_packets, stats_n_bytes; - - from = vlib_frame_vector_args (from_frame); - n_left_from = from_frame->n_vectors; - - next_index = node->cached_next_index; - stats_sw_if_index = node->runtime_data[0]; - stats_n_packets = stats_n_bytes = 0; - - while (n_left_from > 0) - { - u32 n_left_to_next; - - vlib_get_next_frame (vm, node, next_index, - to_next, n_left_to_next); - - while (n_left_from >= 4 && n_left_to_next >= 2) - { - u32 bi0, bi1; - vlib_buffer_t * b0, * b1; - u32 next0, next1; - nsh_header_t * h0, * h1; - uword * p0, * p1; - u32 tunnel_index0, tunnel_index1; - nsh_gre_tunnel_t * t0, * t1; - u64 key0, key1; - u32 error0, error1; - u32 sw_if_index0, sw_if_index1, len0, len1; - - /* Prefetch next iteration. 
*/ - { - vlib_buffer_t * p2, * p3; - - p2 = vlib_get_buffer (vm, from[2]); - p3 = vlib_get_buffer (vm, from[3]); - - vlib_prefetch_buffer_header (p2, LOAD); - vlib_prefetch_buffer_header (p3, LOAD); - - CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD); - CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD); - } - - bi0 = from[0]; - bi1 = from[1]; - to_next[0] = bi0; - to_next[1] = bi1; - from += 2; - to_next += 2; - n_left_to_next -= 2; - n_left_from -= 2; - - b0 = vlib_get_buffer (vm, bi0); - b1 = vlib_get_buffer (vm, bi1); - - h0 = vlib_buffer_get_current (b0); - h1 = vlib_buffer_get_current (b1); - - /* gre stashed the src ip4 address for us... */ - key0 = (((u64)(vnet_buffer(b0)->gre.src))<<32) | h0->spi_si; - key1 = (((u64)(vnet_buffer(b1)->gre.src))<<32) | h1->spi_si; - - /* "pop" nsh header */ - vlib_buffer_advance (b0, sizeof (*h0)); - vlib_buffer_advance (b1, sizeof (*h1)); - - tunnel_index0 = ~0; - tunnel_index1 = ~0; - error0 = 0; - error1 = 0; - next0 = NSH_GRE_INPUT_NEXT_DROP; - next1 = NSH_GRE_INPUT_NEXT_DROP; - - if (PREDICT_FALSE(key0 != last_key)) - { - p0 = hash_get (ngm->nsh_gre_tunnel_by_src_address, key0); - - if (p0 == 0) - { - error0 = NSH_GRE_ERROR_NO_SUCH_TUNNEL; - goto trace0; - } - - last_key = key0; - tunnel_index0 = last_tunnel_index = p0[0]; - } - else - tunnel_index0 = last_tunnel_index; - - t0 = pool_elt_at_index (ngm->tunnels, tunnel_index0); - - next0 = t0->decap_next_index; - sw_if_index0 = t0->sw_if_index; - len0 = vlib_buffer_length_in_chain(vm, b0); - - /* Required to make the l2 tag push / pop code work on l2 subifs */ - vnet_update_l2_len (b0); - - next0 = t0->decap_next_index; - - /* ip[46] lookup in the configured FIB, otherwise an opaque */ - vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index; - - pkts_decapsulated++; - stats_n_packets += 1; - stats_n_bytes += len0; - - if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index)) - { - stats_n_packets -= 1; - stats_n_bytes -= len0; - if (stats_n_packets) - vlib_increment_combined_counter( - im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); - stats_n_packets = 1; - stats_n_bytes = len0; - stats_sw_if_index = sw_if_index0; - } - - trace0: - b0->error = error0 ? 
node->errors[error0] : 0; - - if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) - { - nsh_rx_trace_t *tr = vlib_add_trace (vm, node, - b0, sizeof (*tr)); - tr->next_index = next0; - tr->error = error0; - tr->tunnel_index = tunnel_index0; - tr->h = h0[0]; - } - - if (PREDICT_FALSE(key1 != last_key)) - { - p1 = hash_get (ngm->nsh_gre_tunnel_by_src_address, key1); - - if (p1 == 0) - { - error1 = NSH_GRE_ERROR_NO_SUCH_TUNNEL; - goto trace1; - } - - last_key = key1; - tunnel_index1 = last_tunnel_index = p1[0]; - } - else - tunnel_index1 = last_tunnel_index; - - t1 = pool_elt_at_index (ngm->tunnels, tunnel_index1); - - next1 = t1->decap_next_index; - sw_if_index1 = t1->sw_if_index; - len1 = vlib_buffer_length_in_chain(vm, b1); - - /* Required to make the l2 tag push / pop code work on l2 subifs */ - vnet_update_l2_len (b1); - - next1 = t1->decap_next_index; - - /* ip[46] lookup in the configured FIB, otherwise an opaque */ - vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index; - - pkts_decapsulated++; - stats_n_packets += 1; - stats_n_bytes += len1; - /* Batch stats increment on the same nsh-gre tunnel so counter - is not incremented per packet */ - if (PREDICT_FALSE(sw_if_index1 != stats_sw_if_index)) - { - stats_n_packets -= 1; - stats_n_bytes -= len1; - if (stats_n_packets) - vlib_increment_combined_counter( - im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); - stats_n_packets = 1; - stats_n_bytes = len1; - stats_sw_if_index = sw_if_index1; - } - vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index; - - trace1: - b1->error = error1 ? node->errors[error1] : 0; - - if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED)) - { - nsh_rx_trace_t *tr = vlib_add_trace (vm, node, - b1, sizeof (*tr)); - tr->next_index = next1; - tr->error = error1; - tr->tunnel_index = tunnel_index1; - tr->h = h1[0]; - } - - vlib_validate_buffer_enqueue_x2 (vm, node, next_index, - to_next, n_left_to_next, - bi0, bi1, next0, next1); - } - - while (n_left_from > 0 && n_left_to_next > 0) - { - u32 bi0; - vlib_buffer_t * b0; - u32 next0; - nsh_header_t * h0; - uword * p0; - u32 tunnel_index0; - nsh_gre_tunnel_t * t0; - u64 key0; - u32 error0; - u32 sw_if_index0, len0; - - bi0 = from[0]; - to_next[0] = bi0; - from += 1; - to_next += 1; - n_left_from -= 1; - n_left_to_next -= 1; - - b0 = vlib_get_buffer (vm, bi0); - h0 = vlib_buffer_get_current (b0); - - /* gre stashed the src ip4 address for us... 
*/ - key0 = (((u64)(vnet_buffer(b0)->gre.src))<<32) | h0->spi_si; - - /* "pop" nsh header */ - vlib_buffer_advance (b0, sizeof (*h0)); - - tunnel_index0 = ~0; - error0 = 0; - next0 = NSH_GRE_INPUT_NEXT_DROP; - - if (PREDICT_FALSE(key0 != last_key)) - { - p0 = hash_get (ngm->nsh_gre_tunnel_by_src_address, key0); - - if (p0 == 0) - { - error0 = NSH_GRE_ERROR_NO_SUCH_TUNNEL; - goto trace00; - } - - last_key = key0; - tunnel_index0 = last_tunnel_index = p0[0]; - } - else - tunnel_index0 = last_tunnel_index; - - t0 = pool_elt_at_index (ngm->tunnels, tunnel_index0); - - next0 = t0->decap_next_index; - sw_if_index0 = t0->sw_if_index; - len0 = vlib_buffer_length_in_chain(vm, b0); - - /* Required to make the l2 tag push / pop code work on l2 subifs */ - vnet_update_l2_len (b0); - - next0 = t0->decap_next_index; - - /* ip[46] lookup in the configured FIB, otherwise an opaque */ - vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index; - pkts_decapsulated ++; - - stats_n_packets += 1; - stats_n_bytes += len0; - - /* Batch stats increment on the same nsh-gre tunnel so counter - is not incremented per packet */ - if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index)) - { - stats_n_packets -= 1; - stats_n_bytes -= len0; - if (stats_n_packets) - vlib_increment_combined_counter( - im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); - stats_n_packets = 1; - stats_n_bytes = len0; - stats_sw_if_index = sw_if_index0; - } - - trace00: - b0->error = error0 ? node->errors[error0] : 0; - - if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) - { - nsh_rx_trace_t *tr = vlib_add_trace (vm, node, - b0, sizeof (*tr)); - tr->next_index = next0; - tr->error = error0; - tr->tunnel_index = tunnel_index0; - tr->h = h0[0]; - } - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, - to_next, n_left_to_next, - bi0, next0); - } - - vlib_put_next_frame (vm, node, next_index, n_left_to_next); - } - vlib_node_increment_counter (vm, nsh_gre_input_node.index, - NSH_GRE_ERROR_DECAPSULATED, - pkts_decapsulated); - /* Increment any remaining batch stats */ - if (stats_n_packets) - { - vlib_increment_combined_counter( - im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, cpu_index, - stats_sw_if_index, stats_n_packets, stats_n_bytes); - node->runtime_data[0] = stats_sw_if_index; - } - return from_frame->n_vectors; -} - -static char * nsh_error_strings[] = { -#define nsh_gre_error(n,s) s, -#include -#undef nsh_gre_error -#undef _ -}; - -VLIB_REGISTER_NODE (nsh_gre_input_node) = { - .function = nsh_gre_input, - .name = "nsh-gre-input", - /* Takes a vector of packets. */ - .vector_size = sizeof (u32), - - .n_errors = NSH_GRE_N_ERROR, - .error_strings = nsh_error_strings, - - .n_next_nodes = NSH_GRE_INPUT_N_NEXT, - .next_nodes = { -#define _(s,n) [NSH_GRE_INPUT_NEXT_##s] = n, - foreach_nsh_gre_input_next -#undef _ - }, - - .format_buffer = format_nsh_header_with_length, - .format_trace = format_nsh_rx_trace, - // $$$$ .unformat_buffer = unformat_nsh_gre_header, -}; diff --git a/vnet/vnet/nsh-gre/encap.c b/vnet/vnet/nsh-gre/encap.c deleted file mode 100644 index 78b02178263..00000000000 --- a/vnet/vnet/nsh-gre/encap.c +++ /dev/null @@ -1,376 +0,0 @@ -/* - * Copyright (c) 2015 Cisco and/or its affiliates. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include -#include -#include -#include -#include -#include - -/* Statistics (not really errors) */ -#define foreach_nsh_gre_encap_error \ -_(ENCAPSULATED, "good packets encapsulated") - -static char * nsh_gre_encap_error_strings[] = { -#define _(sym,string) string, - foreach_nsh_gre_encap_error -#undef _ -}; - -typedef enum { -#define _(sym,str) NSH_GRE_ENCAP_ERROR_##sym, - foreach_nsh_gre_encap_error -#undef _ - NSH_GRE_ENCAP_N_ERROR, -} nsh_gre_encap_error_t; - -typedef enum { - NSH_GRE_ENCAP_NEXT_IP4_LOOKUP, - NSH_GRE_ENCAP_NEXT_DROP, - NSH_GRE_ENCAP_N_NEXT, -} nsh_gre_encap_next_t; - -typedef struct { - u32 tunnel_index; -} nsh_gre_encap_trace_t; - -u8 * format_nsh_gre_encap_trace (u8 * s, va_list * args) -{ - CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); - CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); - nsh_gre_encap_trace_t * t = va_arg (*args, nsh_gre_encap_trace_t *); - - s = format (s, "NSH-GRE-ENCAP: tunnel %d", t->tunnel_index); - return s; -} - -static uword -nsh_gre_encap (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * from_frame) -{ - u32 n_left_from, next_index, * from, * to_next; - nsh_gre_main_t * ngm = &nsh_gre_main; - vnet_main_t * vnm = ngm->vnet_main; - vnet_interface_main_t * im = &vnm->interface_main; - u32 pkts_encapsulated = 0; - u16 old_l0 = 0, old_l1 = 0; - u32 cpu_index = os_get_cpu_number(); - u32 stats_sw_if_index, stats_n_packets, stats_n_bytes; - - from = vlib_frame_vector_args (from_frame); - n_left_from = from_frame->n_vectors; - - next_index = node->cached_next_index; - stats_sw_if_index = node->runtime_data[0]; - stats_n_packets = stats_n_bytes = 0; - - while (n_left_from > 0) - { - u32 n_left_to_next; - - vlib_get_next_frame (vm, node, next_index, - to_next, n_left_to_next); - - while (n_left_from >= 4 && n_left_to_next >= 2) - { - u32 bi0, bi1; - vlib_buffer_t * b0, * b1; - u32 next0 = NSH_GRE_ENCAP_NEXT_IP4_LOOKUP; - u32 next1 = NSH_GRE_ENCAP_NEXT_IP4_LOOKUP; - u32 sw_if_index0, sw_if_index1, len0, len1; - vnet_hw_interface_t * hi0, * hi1; - ip4_header_t * ip0, * ip1; - u64 * copy_src0, * copy_dst0; - u64 * copy_src1, * copy_dst1; - nsh_gre_tunnel_t * t0, * t1; - u16 new_l0, new_l1; - ip_csum_t sum0, sum1; - - /* Prefetch next iteration. */ - { - vlib_buffer_t * p2, * p3; - - p2 = vlib_get_buffer (vm, from[2]); - p3 = vlib_get_buffer (vm, from[3]); - - vlib_prefetch_buffer_header (p2, LOAD); - vlib_prefetch_buffer_header (p3, LOAD); - - CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD); - CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD); - } - - bi0 = from[0]; - bi1 = from[1]; - to_next[0] = bi0; - to_next[1] = bi1; - from += 2; - to_next += 2; - n_left_to_next -= 2; - n_left_from -= 2; - - b0 = vlib_get_buffer (vm, bi0); - b1 = vlib_get_buffer (vm, bi1); - - /* 1-wide cache? 
*/
- sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
- sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_TX];
- hi0 = vnet_get_sup_hw_interface
- (vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]);
- hi1 = vnet_get_sup_hw_interface
- (vnm, vnet_buffer(b1)->sw_if_index[VLIB_TX]);
-
- t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);
- t1 = pool_elt_at_index (ngm->tunnels, hi1->dev_instance);
-
- ASSERT(vec_len(t0->rewrite) >= 24);
- ASSERT(vec_len(t1->rewrite) >= 24);
-
- /* Apply the rewrite string. $$$$ vnet_rewrite? */
- vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));
- vlib_buffer_advance (b1, -(word)_vec_len(t1->rewrite));
-
- ip0 = vlib_buffer_get_current(b0);
- ip1 = vlib_buffer_get_current(b1);
- /* Copy the fixed header */
- copy_dst0 = (u64 *) ip0;
- copy_src0 = (u64 *) t0->rewrite;
- copy_dst1 = (u64 *) ip1;
- copy_src1 = (u64 *) t1->rewrite;
-
- copy_dst0[0] = copy_src0[0];
- copy_dst0[1] = copy_src0[1];
- copy_dst0[2] = copy_src0[2];
-
- copy_dst1[0] = copy_src1[0];
- copy_dst1[1] = copy_src1[1];
- copy_dst1[2] = copy_src1[2];
-
- /* If there are TLVs to copy, do so */
- if (PREDICT_FALSE (_vec_len(t0->rewrite) > 24))
- clib_memcpy (&copy_dst0[3], t0->rewrite + 24 ,
- _vec_len (t0->rewrite)-24);
-
- if (PREDICT_FALSE (_vec_len(t1->rewrite) > 24))
- clib_memcpy (&copy_dst1[3], t1->rewrite + 24 ,
- _vec_len (t1->rewrite)-24);
-
- /* fix the outgoing outer-IP checksums */
- sum0 = ip0->checksum;
- /* old_l0 always 0, see the rewrite setup */
- new_l0 =
- clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
-
- sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
- length /* changed member */);
- ip0->checksum = ip_csum_fold (sum0);
- ip0->length = new_l0;
-
- sum1 = ip1->checksum;
- /* old_l1 always 0, see the rewrite setup */
- new_l1 =
- clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1));
-
- sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t,
- length /* changed member */);
- ip1->checksum = ip_csum_fold (sum1);
- ip1->length = new_l1;
-
- /* Reset to look up tunnel partner in the configured FIB */
- vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
- vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->encap_fib_index;
- vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
- vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
- pkts_encapsulated += 2;
-
- len0 = vlib_buffer_length_in_chain(vm, b0);
- len1 = vlib_buffer_length_in_chain(vm, b1);
- stats_n_packets += 2;
- stats_n_bytes += len0 + len1;
-
- /* Batch stats increment on the same vxlan tunnel so counter is not
- incremented per packet. Note stats are still incremented for deleted
- and admin-down tunnel where packets are dropped. It is not worthwhile
- to check for this rare case and affect normal path performance.
*/
- if (PREDICT_FALSE(
- (sw_if_index0 != stats_sw_if_index)
- || (sw_if_index1 != stats_sw_if_index))) {
- stats_n_packets -= 2;
- stats_n_bytes -= len0 + len1;
- if (sw_if_index0 == sw_if_index1) {
- if (stats_n_packets)
- vlib_increment_combined_counter(
- im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
- cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
- stats_sw_if_index = sw_if_index0;
- stats_n_packets = 2;
- stats_n_bytes = len0 + len1;
- } else {
- vlib_increment_combined_counter(
- im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
- cpu_index, sw_if_index0, 1, len0);
- vlib_increment_combined_counter(
- im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
- cpu_index, sw_if_index1, 1, len1);
- }
- }
-
- if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
- {
- nsh_gre_encap_trace_t *tr =
- vlib_add_trace (vm, node, b0, sizeof (*tr));
- tr->tunnel_index = t0 - ngm->tunnels;
- }
-
- if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
- {
- nsh_gre_encap_trace_t *tr =
- vlib_add_trace (vm, node, b1, sizeof (*tr));
- tr->tunnel_index = t1 - ngm->tunnels;
- }
-
- vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, bi1, next0, next1);
- }
-
- while (n_left_from > 0 && n_left_to_next > 0)
- {
- u32 bi0;
- vlib_buffer_t * b0;
- u32 next0 = NSH_GRE_ENCAP_NEXT_IP4_LOOKUP;
- u32 sw_if_index0, len0;
- vnet_hw_interface_t * hi0;
- ip4_header_t * ip0;
- u64 * copy_src0, * copy_dst0;
- nsh_gre_tunnel_t * t0;
- u16 new_l0;
- ip_csum_t sum0;
-
- bi0 = from[0];
- to_next[0] = bi0;
- from += 1;
- to_next += 1;
- n_left_from -= 1;
- n_left_to_next -= 1;
-
- b0 = vlib_get_buffer (vm, bi0);
-
- /* 1-wide cache? */
- sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
- hi0 = vnet_get_sup_hw_interface
- (vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]);
-
- t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);
-
- ASSERT(vec_len(t0->rewrite) >= 24);
-
- /* Apply the rewrite string. $$$$ vnet_rewrite? */
- vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));
-
- ip0 = vlib_buffer_get_current(b0);
- /* Copy the fixed header */
- copy_dst0 = (u64 *) ip0;
- copy_src0 = (u64 *) t0->rewrite;
- copy_dst0[0] = copy_src0[0];
- copy_dst0[1] = copy_src0[1];
- copy_dst0[2] = copy_src0[2];
-
- /* If there are TLVs to copy, do so */
- if (PREDICT_FALSE (_vec_len(t0->rewrite) > 24))
- clib_memcpy (&copy_dst0[3], t0->rewrite + 24 ,
- _vec_len (t0->rewrite)-24);
-
- /* fix the outgoing outer-IP checksum */
- sum0 = ip0->checksum;
- /* old_l0 always 0, see the rewrite setup */
- new_l0 =
- clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
-
- sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
- length /* changed member */);
- ip0->checksum = ip_csum_fold (sum0);
- ip0->length = new_l0;
-
- /* Reset to look up tunnel partner in the configured FIB */
- vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
- vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
- pkts_encapsulated ++;
-
- len0 = vlib_buffer_length_in_chain(vm, b0);
- stats_n_packets += 1;
- stats_n_bytes += len0;
-
- /* Batch stats increment on the same vxlan tunnel so counter is not
- incremented per packet. Note stats are still incremented for deleted
- and admin-down tunnel where packets are dropped. It is not worthwhile
- to check for this rare case and affect normal path performance.
*/ - if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index)) { - stats_n_packets -= 1; - stats_n_bytes -= len0; - if (stats_n_packets) - vlib_increment_combined_counter( - im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); - stats_n_packets = 1; - stats_n_bytes = len0; - stats_sw_if_index = sw_if_index0; - } - if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) - { - nsh_gre_encap_trace_t *tr = - vlib_add_trace (vm, node, b0, sizeof (*tr)); - tr->tunnel_index = t0 - ngm->tunnels; - } - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, - to_next, n_left_to_next, - bi0, next0); - } - - vlib_put_next_frame (vm, node, next_index, n_left_to_next); - } - vlib_node_increment_counter (vm, node->node_index, - NSH_GRE_ENCAP_ERROR_ENCAPSULATED, - pkts_encapsulated); - /* Increment any remaining batch stats */ - if (stats_n_packets) { - vlib_increment_combined_counter( - im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, cpu_index, - stats_sw_if_index, stats_n_packets, stats_n_bytes); - node->runtime_data[0] = stats_sw_if_index; - } - - return from_frame->n_vectors; -} - -VLIB_REGISTER_NODE (nsh_gre_encap_node) = { - .function = nsh_gre_encap, - .name = "nsh-gre-encap", - .vector_size = sizeof (u32), - .format_trace = format_nsh_gre_encap_trace, - .type = VLIB_NODE_TYPE_INTERNAL, - - .n_errors = ARRAY_LEN(nsh_gre_encap_error_strings), - .error_strings = nsh_gre_encap_error_strings, - - .n_next_nodes = NSH_GRE_ENCAP_N_NEXT, - - // add dispositions here - .next_nodes = { - [NSH_GRE_ENCAP_NEXT_IP4_LOOKUP] = "ip4-lookup", - [NSH_GRE_ENCAP_NEXT_DROP] = "error-drop", - }, -}; diff --git a/vnet/vnet/nsh-gre/nsh_gre.c b/vnet/vnet/nsh-gre/nsh_gre.c deleted file mode 100644 index e75ed9dd862..00000000000 --- a/vnet/vnet/nsh-gre/nsh_gre.c +++ /dev/null @@ -1,552 +0,0 @@ -/* - * Copyright (c) 2015 Cisco and/or its affiliates. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include -#include -#include -#include -#include -#include -#include - -nsh_gre_main_t nsh_gre_main; - -static u8 * format_decap_next (u8 * s, va_list * args) -{ - u32 next_index = va_arg (*args, u32); - - switch (next_index) - { - case NSH_GRE_INPUT_NEXT_DROP: - return format (s, "drop"); - case NSH_GRE_INPUT_NEXT_IP4_INPUT: - return format (s, "ip4"); - case NSH_GRE_INPUT_NEXT_IP6_INPUT: - return format (s, "ip6"); - case NSH_GRE_INPUT_NEXT_ETHERNET_INPUT: - return format (s, "ethernet"); - default: - return format (s, "index %d", next_index); - } - return s; -} - - -u8 * format_nsh_gre_tunnel (u8 * s, va_list * args) -{ - nsh_gre_tunnel_t * t = va_arg (*args, nsh_gre_tunnel_t *); - nsh_gre_main_t * ngm = &nsh_gre_main; - - s = format (s, "[%d] %U (src) %U (dst) fibs: (encap %d, decap %d)", - t - ngm->tunnels, - format_ip4_address, &t->src, - format_ip4_address, &t->dst, - t->encap_fib_index, - t->decap_fib_index); - - s = format (s, " decap-next %U\n", format_decap_next, t->decap_next_index); - - s = format (s, " ver %d ", (t->nsh_hdr.ver_o_c>>6)); - if (t->nsh_hdr.ver_o_c & NSH_O_BIT) - s = format (s, "O-set "); - - if (t->nsh_hdr.ver_o_c & NSH_C_BIT) - s = format (s, "C-set "); - - s = format (s, "len %d (%d bytes) md_type %d next_protocol %d\n", - t->nsh_hdr.length, t->nsh_hdr.length * 4, t->nsh_hdr.md_type, t->nsh_hdr.next_protocol); - - s = format (s, " service path %d service index %d\n", - (t->nsh_hdr.spi_si>>NSH_SPI_SHIFT) & NSH_SPI_MASK, - t->nsh_hdr.spi_si & NSH_SINDEX_MASK); - - s = format (s, " c1 %d c2 %d c3 %d c4 %d\n", - t->nsh_hdr.c1, t->nsh_hdr.c2, t->nsh_hdr.c3, t->nsh_hdr.c4); - - return s; -} - -static u8 * format_nsh_gre_name (u8 * s, va_list * args) -{ - nsh_gre_main_t * ngm = &nsh_gre_main; - u32 i = va_arg (*args, u32); - u32 show_dev_instance = ~0; - - if (i < vec_len (ngm->dev_inst_by_real)) - show_dev_instance = ngm->dev_inst_by_real[i]; - - if (show_dev_instance != ~0) - i = show_dev_instance; - - return format (s, "nsh_gre_tunnel%d", i); -} - -static int nsh_gre_name_renumber (vnet_hw_interface_t * hi, - u32 new_dev_instance) -{ - nsh_gre_main_t * ngm = &nsh_gre_main; - - vec_validate_init_empty (ngm->dev_inst_by_real, hi->dev_instance, ~0); - - ngm->dev_inst_by_real [hi->dev_instance] = new_dev_instance; - - return 0; -} - -static uword dummy_interface_tx (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * frame) -{ - clib_warning ("you shouldn't be here, leaking buffers..."); - return frame->n_vectors; -} - -VNET_DEVICE_CLASS (nsh_gre_device_class,static) = { - .name = "NSH_GRE", - .format_device_name = format_nsh_gre_name, - .format_tx_trace = format_nsh_gre_encap_trace, - .tx_function = dummy_interface_tx, - .name_renumber = nsh_gre_name_renumber, -}; - -static uword dummy_set_rewrite (vnet_main_t * vnm, - u32 sw_if_index, - u32 l3_type, - void * dst_address, - void * rewrite, - uword max_rewrite_bytes) -{ - return 0; -} - -static u8 * format_nsh_gre_header_with_length (u8 * s, va_list * args) -{ - u32 dev_instance = va_arg (*args, u32); - s = format (s, "unimplemented dev %u", dev_instance); - return s; -} - -VNET_HW_INTERFACE_CLASS (nsh_gre_hw_class) = { - .name = "NSH_GRE", - .format_header = format_nsh_gre_header_with_length, - .set_rewrite = dummy_set_rewrite, -}; - -#define foreach_copy_field \ -_(src.as_u32) \ -_(dst.as_u32) \ -_(encap_fib_index) \ -_(decap_fib_index) \ -_(decap_next_index) - - -#define foreach_copy_nshhdr_field \ -_(ver_o_c) \ -_(length) \ -_(md_type) \ -_(next_protocol) \ -_(spi_si) \ -_(c1) \ -_(c2) \ -_(c3) 
\ -_(c4) \ -_(tlvs) - -#define foreach_32bit_field \ -_(spi_si) \ -_(c1) \ -_(c2) \ -_(c3) \ -_(c4) - -static int nsh_gre_rewrite (nsh_gre_tunnel_t * t) -{ - u8 *rw = 0; - ip4_header_t * ip0; - nsh_header_t * nsh0; - ip4_gre_and_nsh_header_t * h0; - int len; - - len = sizeof (*h0) + vec_len(t->nsh_hdr.tlvs)*4; - - vec_validate_aligned (rw, len-1, CLIB_CACHE_LINE_BYTES); - - h0 = (ip4_gre_and_nsh_header_t *) rw; - - /* Fixed portion of the (outer) ip4 header */ - ip0 = &h0->ip4; - ip0->ip_version_and_header_length = 0x45; - ip0->ttl = 254; - ip0->protocol = IP_PROTOCOL_GRE; - /* we fix up the ip4 header length and checksum after-the-fact */ - ip0->src_address.as_u32 = t->src.as_u32; - ip0->dst_address.as_u32 = t->dst.as_u32; - ip0->checksum = ip4_header_checksum (ip0); - - /* GRE header, zero execpt for the NSH ethertype */ - h0->gre.protocol = clib_host_to_net_u16(GRE_PROTOCOL_nsh); - - /* NSH header */ - nsh0 = &h0->nsh; - nsh0->ver_o_c = t->nsh_hdr.ver_o_c; - nsh0->md_type = t->nsh_hdr.md_type; - nsh0->next_protocol = t->nsh_hdr.next_protocol; - nsh0->spi_si = t->nsh_hdr.spi_si; - nsh0->c1 = t->nsh_hdr.c1; - nsh0->c2 = t->nsh_hdr.c2; - nsh0->c3 = t->nsh_hdr.c3; - nsh0->c4 = t->nsh_hdr.c4; - - /* Endian swap 32-bit fields */ -#define _(x) nsh0->x = clib_host_to_net_u32(nsh0->x); - foreach_32bit_field; -#undef _ - - /* fix nsh header length */ - t->nsh_hdr.length = 6 + vec_len(t->nsh_hdr.tlvs); - nsh0->length = t->nsh_hdr.length; - - /* Copy any TLVs */ - if (vec_len(t->nsh_hdr.tlvs)) - clib_memcpy (nsh0->tlvs, t->nsh_hdr.tlvs, 4*vec_len(t->nsh_hdr.tlvs)); - - t->rewrite = rw; - return (0); -} - -int vnet_nsh_gre_add_del_tunnel (vnet_nsh_gre_add_del_tunnel_args_t *a, - u32 * sw_if_indexp) -{ - nsh_gre_main_t * ngm = &nsh_gre_main; - nsh_gre_tunnel_t *t = 0; - vnet_main_t * vnm = ngm->vnet_main; - vnet_hw_interface_t * hi; - uword * p; - u32 hw_if_index = ~0; - u32 sw_if_index = ~0; - int rv; - u64 key; - u32 spi_si_net_byte_order; - - spi_si_net_byte_order = clib_host_to_net_u32(a->nsh_hdr.spi_si); - - key = (((u64)(a->src.as_u32))<<32) | spi_si_net_byte_order; - - p = hash_get (ngm->nsh_gre_tunnel_by_src_address, key); - - if (a->is_add) - { - /* adding a tunnel: tunnel must not already exist */ - if (p) - return VNET_API_ERROR_INVALID_VALUE; - - if (a->decap_next_index >= NSH_GRE_INPUT_N_NEXT) - return VNET_API_ERROR_INVALID_DECAP_NEXT; - - pool_get_aligned (ngm->tunnels, t, CLIB_CACHE_LINE_BYTES); - memset (t, 0, sizeof (*t)); - - /* copy from arg structure */ -#define _(x) t->x = a->x; - foreach_copy_field; -#undef _ - - /* copy from arg structure */ -#define _(x) t->nsh_hdr.x = a->nsh_hdr.x; - foreach_copy_nshhdr_field; -#undef _ - - rv = nsh_gre_rewrite (t); - - if (rv) - { - pool_put (ngm->tunnels, t); - return rv; - } - - hash_set (ngm->nsh_gre_tunnel_by_src_address, key, t - ngm->tunnels); - - if (vec_len (ngm->free_nsh_gre_tunnel_hw_if_indices) > 0) - { - hw_if_index = ngm->free_nsh_gre_tunnel_hw_if_indices - [vec_len (ngm->free_nsh_gre_tunnel_hw_if_indices)-1]; - _vec_len (ngm->free_nsh_gre_tunnel_hw_if_indices) -= 1; - - hi = vnet_get_hw_interface (vnm, hw_if_index); - hi->dev_instance = t - ngm->tunnels; - hi->hw_instance = hi->dev_instance; - } - else - { - hw_if_index = vnet_register_interface - (vnm, nsh_gre_device_class.index, t - ngm->tunnels, - nsh_gre_hw_class.index, t - ngm->tunnels); - hi = vnet_get_hw_interface (vnm, hw_if_index); - hi->output_node_index = nsh_gre_encap_node.index; - } - - t->hw_if_index = hw_if_index; - t->sw_if_index = sw_if_index = 
hi->sw_if_index; - - vnet_sw_interface_set_flags (vnm, hi->sw_if_index, - VNET_SW_INTERFACE_FLAG_ADMIN_UP); - } - else - { - /* deleting a tunnel: tunnel must exist */ - if (!p) - return VNET_API_ERROR_NO_SUCH_ENTRY; - - t = pool_elt_at_index (ngm->tunnels, p[0]); - - vnet_sw_interface_set_flags (vnm, t->sw_if_index, 0 /* down */); - vec_add1 (ngm->free_nsh_gre_tunnel_hw_if_indices, t->hw_if_index); - - hash_unset (ngm->nsh_gre_tunnel_by_src_address, key); - vec_free (t->rewrite); - pool_put (ngm->tunnels, t); - } - - if (sw_if_indexp) - *sw_if_indexp = sw_if_index; - - return 0; -} - -static u32 fib_index_from_fib_id (u32 fib_id) -{ - ip4_main_t * im = &ip4_main; - uword * p; - - p = hash_get (im->fib_index_by_table_id, fib_id); - if (!p) - return ~0; - - return p[0]; -} - -static uword unformat_decap_next (unformat_input_t * input, va_list * args) -{ - u32 * result = va_arg (*args, u32 *); - u32 tmp; - - if (unformat (input, "drop")) - *result = NSH_GRE_INPUT_NEXT_DROP; - else if (unformat (input, "ip4")) - *result = NSH_GRE_INPUT_NEXT_IP4_INPUT; - else if (unformat (input, "ip6")) - *result = NSH_GRE_INPUT_NEXT_IP6_INPUT; - else if (unformat (input, "ethernet")) - *result = NSH_GRE_INPUT_NEXT_ETHERNET_INPUT; - else if (unformat (input, "%d", &tmp)) - *result = tmp; - else - return 0; - return 1; -} - -static clib_error_t * -nsh_gre_add_del_tunnel_command_fn (vlib_main_t * vm, - unformat_input_t * input, - vlib_cli_command_t * cmd) -{ - unformat_input_t _line_input, * line_input = &_line_input; - ip4_address_t src, dst; - u8 is_add = 1; - u8 src_set = 0; - u8 dst_set = 0; - u32 encap_fib_index = 0; - u32 decap_fib_index = 0; - u8 ver_o_c = 0; - u8 length = 0; - u8 md_type = 0; - u8 next_protocol = 1; /* ip4 */ - u32 spi; - u8 spi_set = 0; - u32 si; - u8 si_set = 0; - u32 spi_si; - u32 c1 = 0; - u32 c2 = 0; - u32 c3 = 0; - u32 c4 = 0; - u32 decap_next_index = 1; /* ip4_input */ - u32 *tlvs = 0; - u32 tmp; - int rv; - vnet_nsh_gre_add_del_tunnel_args_t _a, * a = &_a; - - /* Get a line of input. */ - if (! 
unformat_user (input, unformat_line_input, line_input)) - return 0; - - while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) { - if (unformat (line_input, "del")) - is_add = 0; - else if (unformat (line_input, "src %U", - unformat_ip4_address, &src)) - src_set = 1; - else if (unformat (line_input, "dst %U", - unformat_ip4_address, &dst)) - dst_set = 1; - else if (unformat (line_input, "encap-vrf-id %d", &tmp)) - { - encap_fib_index = fib_index_from_fib_id (tmp); - if (encap_fib_index == ~0) - return clib_error_return (0, "nonexistent encap fib id %d", tmp); - } - else if (unformat (line_input, "decap-vrf-id %d", &tmp)) - { - decap_fib_index = fib_index_from_fib_id (tmp); - if (decap_fib_index == ~0) - return clib_error_return (0, "nonexistent decap fib id %d", tmp); - } - else if (unformat (line_input, "decap-next %U", unformat_decap_next, - &decap_next_index)) - ; - else if (unformat (line_input, "version %d", &tmp)) - ver_o_c |= (tmp & 3) << 6; - else if (unformat (line_input, "o-bit %d", &tmp)) - ver_o_c |= (tmp & 1) << 5; - else if (unformat (line_input, "c-bit %d", &tmp)) - ver_o_c |= (tmp & 1) << 4; - else if (unformat (line_input, "md-type %d", &tmp)) - md_type = tmp; - else if (unformat(line_input, "next-ip4")) - next_protocol = 1; - else if (unformat(line_input, "next-ip6")) - next_protocol = 2; - else if (unformat(line_input, "next-ethernet")) - next_protocol = 3; - else if (unformat (line_input, "c1 %d", &c1)) - ; - else if (unformat (line_input, "c2 %d", &c2)) - ; - else if (unformat (line_input, "c3 %d", &c3)) - ; - else if (unformat (line_input, "c4 %d", &c4)) - ; - else if (unformat (line_input, "spi %d", &spi)) - spi_set = 1; - else if (unformat (line_input, "si %d", &si)) - si_set = 1; - else if (unformat (line_input, "tlv %x")) - vec_add1 (tlvs, tmp); - else - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); - } - - unformat_free (line_input); - - if (src_set == 0) - return clib_error_return (0, "tunnel src address not specified"); - - if (dst_set == 0) - return clib_error_return (0, "tunnel dst address not specified"); - - if (spi_set == 0) - return clib_error_return (0, "spi not specified"); - - if (si_set == 0) - return clib_error_return (0, "si not specified"); - - spi_si = (spi<<8) | si; - - memset (a, 0, sizeof (*a)); - - a->is_add = is_add; - -#define _(x) a->x = x; - foreach_copy_field; -#undef _ - - /* copy from arg structure */ -#define _(x) a->nsh_hdr.x = x; - foreach_copy_nshhdr_field; -#undef _ - - rv = vnet_nsh_gre_add_del_tunnel (a, 0 /* hw_if_indexp */); - - switch(rv) - { - case 0: - break; - case VNET_API_ERROR_INVALID_DECAP_NEXT: - return clib_error_return (0, "invalid decap-next..."); - - case VNET_API_ERROR_TUNNEL_EXIST: - return clib_error_return (0, "tunnel already exists..."); - - case VNET_API_ERROR_NO_SUCH_ENTRY: - return clib_error_return (0, "session does not exist..."); - - default: - return clib_error_return - (0, "vnet_nsh_gre_add_del_tunnel returned %d", rv); - } - - return 0; -} - -VLIB_CLI_COMMAND (create_nsh_gre_tunnel_command, static) = { - .path = "nsh gre tunnel", - .short_help = - "nsh gre tunnel src dst " - " c1 c2 c3 c4 spi si \n" - " [encap-vrf-id ] [decap-vrf-id ] [o-bit <1|0>] [c-bit <1|0>]\n" - " [md-type ][next-ip4][next-ip6][next-ethernet]\n" - " [tlv ][decap-next [ip4|ip6|ethernet]][del]\n", - .function = nsh_gre_add_del_tunnel_command_fn, -}; - -static clib_error_t * -show_nsh_gre_tunnel_command_fn (vlib_main_t * vm, - unformat_input_t * input, - vlib_cli_command_t * cmd) 
-{ - nsh_gre_main_t * ngm = &nsh_gre_main; - nsh_gre_tunnel_t * t; - - if (pool_elts (ngm->tunnels) == 0) - vlib_cli_output (vm, "No nsh-gre tunnels configured..."); - - pool_foreach (t, ngm->tunnels, - ({ - vlib_cli_output (vm, "%U", format_nsh_gre_tunnel, t); - })); - - return 0; -} - -VLIB_CLI_COMMAND (show_nsh_gre_tunnel_command, static) = { - .path = "show nsh gre tunnel", - .function = show_nsh_gre_tunnel_command_fn, -}; - -clib_error_t *nsh_gre_init (vlib_main_t *vm) -{ - nsh_gre_main_t *ngm = &nsh_gre_main; - - ngm->vnet_main = vnet_get_main(); - ngm->vlib_main = vm; - - ngm->nsh_gre_tunnel_by_src_address = hash_create (0, sizeof (uword)); - gre_register_input_protocol (vm, GRE_PROTOCOL_nsh, - nsh_gre_input_node.index); - return 0; -} - -VLIB_INIT_FUNCTION(nsh_gre_init); - diff --git a/vnet/vnet/nsh-gre/nsh_gre.h b/vnet/vnet/nsh-gre/nsh_gre.h deleted file mode 100644 index abe115580cb..00000000000 --- a/vnet/vnet/nsh-gre/nsh_gre.h +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright (c) 2015 Cisco and/or its affiliates. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef included_vnet_nsh_gre_h -#define included_vnet_nsh_gre_h - -#include -#include -#include -#include - -typedef CLIB_PACKED (struct { - ip4_header_t ip4; /* 20 bytes */ - gre_header_t gre; /* 4 bytes */ - nsh_header_t nsh; /* 28 bytes */ -}) ip4_gre_and_nsh_header_t; - -typedef struct { - /* Rewrite string. 
$$$$ embed vnet_rewrite header */ - u8 * rewrite; - - /* tunnel src and dst addresses */ - ip4_address_t src; - ip4_address_t dst; - - /* FIB indices */ - u32 encap_fib_index; /* tunnel partner lookup here */ - u32 decap_fib_index; /* inner IP lookup here */ - - /* when decapsulating, send pkts here */ - u32 decap_next_index; - - /* vnet intfc hw/sw_if_index */ - u32 hw_if_index; - u32 sw_if_index; - - /* NSH header fields in HOST byte order */ - nsh_header_t nsh_hdr; - -} nsh_gre_tunnel_t; - -#define foreach_nsh_gre_input_next \ - _ (DROP, "error-drop") \ - _ (IP4_INPUT, "ip4-input") \ - _ (IP6_INPUT, "ip6-input") \ - _ (ETHERNET_INPUT, "ethernet-input") - -typedef enum { -#define _(s,n) NSH_GRE_INPUT_NEXT_##s, - foreach_nsh_gre_input_next -#undef _ - NSH_GRE_INPUT_N_NEXT, -} nsh_gre_input_next_t; - -typedef enum { -#define nsh_gre_error(n,s) NSH_GRE_ERROR_##n, -#include -#undef nsh_gre_error - NSH_GRE_N_ERROR, -} nsh_gre_input_error_t; - -typedef struct { - /* vector of encap tunnel instances */ - nsh_gre_tunnel_t *tunnels; - - /* lookup tunnel by tunnel partner src address */ - uword * nsh_gre_tunnel_by_src_address; - - /* Free vlib hw_if_indices */ - u32 * free_nsh_gre_tunnel_hw_if_indices; - - /* show device instance by real device instance */ - u32 * dev_inst_by_real; - - /* convenience */ - vlib_main_t * vlib_main; - vnet_main_t * vnet_main; -} nsh_gre_main_t; - -nsh_gre_main_t nsh_gre_main; - -extern vlib_node_registration_t nsh_gre_input_node; -extern vlib_node_registration_t nsh_gre_encap_node; - -u8 * format_nsh_gre_encap_trace (u8 * s, va_list * args); - -typedef struct { - u8 is_add; - ip4_address_t src, dst; - u32 encap_fib_index; - u32 decap_fib_index; - u32 decap_next_index; - nsh_header_t nsh_hdr; -} vnet_nsh_gre_add_del_tunnel_args_t; - -int vnet_nsh_gre_add_del_tunnel (vnet_nsh_gre_add_del_tunnel_args_t *a, - u32 * sw_if_indexp); - -#endif /* included_vnet_nsh_gre_h */ diff --git a/vnet/vnet/nsh-vxlan-gpe/decap.c b/vnet/vnet/nsh-vxlan-gpe/decap.c deleted file mode 100644 index 76003e6003f..00000000000 --- a/vnet/vnet/nsh-vxlan-gpe/decap.c +++ /dev/null @@ -1,537 +0,0 @@ -/* - * nsh.c: nsh packet processing - * - * Copyright (c) 2013 Cisco and/or its affiliates. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include -#include -#include - -vlib_node_registration_t nsh_vxlan_gpe_input_node; - -/* From nsh-gre */ -u8 * format_nsh_header_with_length (u8 * s, va_list * args); - -typedef struct { - u32 next_index; - u32 tunnel_index; - u32 error; - nsh_header_t h; -} nsh_vxlan_gpe_rx_trace_t; - -static u8 * format_nsh_vxlan_gpe_rx_trace (u8 * s, va_list * args) -{ - CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); - CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); - nsh_vxlan_gpe_rx_trace_t * t = va_arg (*args, nsh_vxlan_gpe_rx_trace_t *); - - if (t->tunnel_index != ~0) - { - s = format (s, "NSH-VXLAN: tunnel %d next %d error %d", t->tunnel_index, - t->next_index, t->error); - } - else - { - s = format (s, "NSH-VXLAN: no tunnel next %d error %d\n", t->next_index, - t->error); - } - s = format (s, "\n %U", format_nsh_header_with_length, &t->h, - (u32) sizeof (t->h) /* max size */); - return s; -} - -static uword -nsh_vxlan_gpe_input (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * from_frame) -{ - u32 n_left_from, next_index, * from, * to_next; - nsh_vxlan_gpe_main_t * ngm = &nsh_vxlan_gpe_main; - vnet_main_t * vnm = ngm->vnet_main; - vnet_interface_main_t * im = &vnm->interface_main; - u32 last_tunnel_index = ~0; - nsh_vxlan_gpe_tunnel_key_t last_key; - u32 pkts_decapsulated = 0; - u32 cpu_index = os_get_cpu_number(); - u32 stats_sw_if_index, stats_n_packets, stats_n_bytes; - - memset (&last_key, 0xff, sizeof (last_key)); - - from = vlib_frame_vector_args (from_frame); - n_left_from = from_frame->n_vectors; - - next_index = node->cached_next_index; - stats_sw_if_index = node->runtime_data[0]; - stats_n_packets = stats_n_bytes = 0; - - while (n_left_from > 0) - { - u32 n_left_to_next; - - vlib_get_next_frame (vm, node, next_index, - to_next, n_left_to_next); - - while (n_left_from >= 4 && n_left_to_next >= 2) - { - u32 bi0, bi1; - vlib_buffer_t * b0, * b1; - u32 next0, next1; - ip4_vxlan_gpe_and_nsh_header_t * iuvn0, * iuvn1; - uword * p0, * p1; - u32 tunnel_index0, tunnel_index1; - nsh_vxlan_gpe_tunnel_t * t0, * t1; - nsh_vxlan_gpe_tunnel_key_t key0, key1; - u32 error0, error1; - u32 sw_if_index0, sw_if_index1, len0, len1; - - /* Prefetch next iteration. 
*/ - { - vlib_buffer_t * p2, * p3; - - p2 = vlib_get_buffer (vm, from[2]); - p3 = vlib_get_buffer (vm, from[3]); - - vlib_prefetch_buffer_header (p2, LOAD); - vlib_prefetch_buffer_header (p3, LOAD); - - CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD); - CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD); - } - - bi0 = from[0]; - bi1 = from[1]; - to_next[0] = bi0; - to_next[1] = bi1; - from += 2; - to_next += 2; - n_left_to_next -= 2; - n_left_from -= 2; - - b0 = vlib_get_buffer (vm, bi0); - b1 = vlib_get_buffer (vm, bi1); - - /* udp leaves current_data pointing at the vxlan header */ - vlib_buffer_advance - (b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t))); - vlib_buffer_advance - (b1, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t))); - - iuvn0 = vlib_buffer_get_current (b0); - iuvn1 = vlib_buffer_get_current (b1); - - /* pop (ip, udp, vxlan, nsh) */ - vlib_buffer_advance (b0, sizeof (*iuvn0)); - vlib_buffer_advance (b1, sizeof (*iuvn1)); - - tunnel_index0 = ~0; - error0 = 0; - next0 = NSH_VXLAN_GPE_INPUT_NEXT_DROP; - - tunnel_index1 = ~0; - error1 = 0; - next1 = NSH_VXLAN_GPE_INPUT_NEXT_DROP; - - key0.src = iuvn0->ip4.src_address.as_u32; - key0.vni = iuvn0->vxlan.vni_res; - key0.spi_si = iuvn0->nsh.spi_si; - key0.pad = 0; - - if (PREDICT_FALSE ((key0.as_u64[0] != last_key.as_u64[0]) - || (key0.as_u64[1] != last_key.as_u64[1]))) - { - p0 = hash_get_mem (ngm->nsh_vxlan_gpe_tunnel_by_key, &key0); - - if (p0 == 0) - { - error0 = NSH_VXLAN_GPE_ERROR_NO_SUCH_TUNNEL; - goto trace0; - } - - last_key.as_u64[0] = key0.as_u64[0]; - last_key.as_u64[1] = key0.as_u64[1]; - tunnel_index0 = last_tunnel_index = p0[0]; - } - else - tunnel_index0 = last_tunnel_index; - - t0 = pool_elt_at_index (ngm->tunnels, tunnel_index0); - - next0 = t0->decap_next_index; - sw_if_index0 = t0->sw_if_index; - len0 = vlib_buffer_length_in_chain(vm, b0); - - /* Required to make the l2 tag push / pop code work on l2 subifs */ - vnet_update_l2_len (b0); - - if (next0 == NSH_VXLAN_GPE_INPUT_NEXT_NSH_VXLAN_GPE_ENCAP) - { - /* - * Functioning as SFF (ie "half NSH tunnel mode") - * If ingress (we are in decap.c) with NSH header, and 'decap next nsh-vxlan-gpe' then "NSH switch" - * 1. Take DST, remap to SRC, remap other keys in place - * 2. Look up new t0 as per above - * 3. 
Set sw_if_index[VLIB_TX] to be t0->sw_if_index - */ - uword * next_p0; - nsh_vxlan_gpe_tunnel_t * next_t0; - nsh_vxlan_gpe_tunnel_key_t next_key0; - - next_key0.src = iuvn0->ip4.dst_address.as_u32; - next_key0.vni = iuvn0->vxlan.vni_res; - next_key0.spi_si = iuvn0->nsh.spi_si; - next_key0.pad = 0; - - next_p0 = hash_get_mem (ngm->nsh_vxlan_gpe_tunnel_by_key, &next_key0); - - if (next_p0 == 0) - { - error0 = NSH_VXLAN_GPE_ERROR_NO_SUCH_TUNNEL; - goto trace0; - } - next_t0 = pool_elt_at_index (ngm->tunnels, next_p0[0]); - vnet_buffer(b0)->sw_if_index[VLIB_TX] = next_t0->sw_if_index; - - } - else - { - /* - * ip[46] lookup in the configured FIB - * nsh-vxlan-gpe-encap, here's the encap tunnel sw_if_index - */ - vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index; - } - - pkts_decapsulated++; - stats_n_packets += 1; - stats_n_bytes += len0; - - if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index)) - { - stats_n_packets -= 1; - stats_n_bytes -= len0; - if (stats_n_packets) - vlib_increment_combined_counter( - im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); - stats_n_packets = 1; - stats_n_bytes = len0; - stats_sw_if_index = sw_if_index0; - } - - trace0: - b0->error = error0 ? node->errors[error0] : 0; - - if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) - { - nsh_vxlan_gpe_rx_trace_t *tr - = vlib_add_trace (vm, node, b0, sizeof (*tr)); - tr->next_index = next0; - tr->error = error0; - tr->tunnel_index = tunnel_index0; - tr->h = iuvn0->nsh; - } - - key1.src = iuvn1->ip4.src_address.as_u32; - key1.vni = iuvn1->vxlan.vni_res; - key1.spi_si = iuvn1->nsh.spi_si; - key1.pad = 0; - - if (PREDICT_FALSE ((key1.as_u64[0] != last_key.as_u64[0]) - || (key1.as_u64[1] != last_key.as_u64[1]))) - { - p1 = hash_get_mem (ngm->nsh_vxlan_gpe_tunnel_by_key, &key1); - - if (p1 == 0) - { - error1 = NSH_VXLAN_GPE_ERROR_NO_SUCH_TUNNEL; - goto trace1; - } - - last_key.as_u64[0] = key1.as_u64[0]; - last_key.as_u64[1] = key1.as_u64[1]; - tunnel_index1 = last_tunnel_index = p1[0]; - } - else - tunnel_index1 = last_tunnel_index; - - t1 = pool_elt_at_index (ngm->tunnels, tunnel_index1); - - next1 = t1->decap_next_index; - sw_if_index1 = t1->sw_if_index; - len1 = vlib_buffer_length_in_chain(vm, b1); - - /* Required to make the l2 tag push / pop code work on l2 subifs */ - vnet_update_l2_len (b1); - - if (next1 == NSH_VXLAN_GPE_INPUT_NEXT_NSH_VXLAN_GPE_ENCAP) - { - /* - * Functioning as SFF (ie "half NSH tunnel mode") - * If ingress (we are in decap.c) with NSH header, and 'decap next nsh-vxlan-gpe' then "NSH switch" - * 1. Take DST, remap to SRC, remap other keys in place - * 2. Look up new t0 as per above - * 3. 
Set sw_if_index[VLIB_TX] to be t0->sw_if_index - */ - uword * next_p1; - nsh_vxlan_gpe_tunnel_t * next_t1; - nsh_vxlan_gpe_tunnel_key_t next_key1; - - next_key1.src = iuvn1->ip4.dst_address.as_u32; - next_key1.vni = iuvn1->vxlan.vni_res; - next_key1.spi_si = iuvn1->nsh.spi_si; - next_key1.pad = 0; - - next_p1 = hash_get_mem (ngm->nsh_vxlan_gpe_tunnel_by_key, &next_key1); - - if (next_p1 == 0) - { - error1 = NSH_VXLAN_GPE_ERROR_NO_SUCH_TUNNEL; - goto trace1; - } - next_t1 = pool_elt_at_index (ngm->tunnels, next_p1[0]); - vnet_buffer(b1)->sw_if_index[VLIB_TX] = next_t1->sw_if_index; - - } - else - { - /* - * ip[46] lookup in the configured FIB - * nsh-vxlan-gpe-encap, here's the encap tunnel sw_if_index - */ - vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index; - } - - pkts_decapsulated++; - stats_n_packets += 1; - stats_n_bytes += len1; - /* Batch stats increment on the same vxlan tunnel so counter - is not incremented per packet */ - if (PREDICT_FALSE(sw_if_index1 != stats_sw_if_index)) - { - stats_n_packets -= 1; - stats_n_bytes -= len1; - if (stats_n_packets) - vlib_increment_combined_counter( - im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); - stats_n_packets = 1; - stats_n_bytes = len1; - stats_sw_if_index = sw_if_index1; - } - vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index; - - trace1: - b1->error = error1 ? node->errors[error1] : 0; - - if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED)) - { - nsh_vxlan_gpe_rx_trace_t *tr - = vlib_add_trace (vm, node, b1, sizeof (*tr)); - tr->next_index = next1; - tr->error = error1; - tr->tunnel_index = tunnel_index1; - tr->h = iuvn1->nsh; - } - - vlib_validate_buffer_enqueue_x2 (vm, node, next_index, - to_next, n_left_to_next, - bi0, bi1, next0, next1); - } - - while (n_left_from > 0 && n_left_to_next > 0) - { - u32 bi0; - vlib_buffer_t * b0; - u32 next0; - ip4_vxlan_gpe_and_nsh_header_t * iuvn0; - uword * p0; - u32 tunnel_index0; - nsh_vxlan_gpe_tunnel_t * t0; - nsh_vxlan_gpe_tunnel_key_t key0; - u32 error0; - u32 sw_if_index0, len0; - - bi0 = from[0]; - to_next[0] = bi0; - from += 1; - to_next += 1; - n_left_from -= 1; - n_left_to_next -= 1; - - b0 = vlib_get_buffer (vm, bi0); - - /* udp leaves current_data pointing at the vxlan header */ - vlib_buffer_advance - (b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t))); - - iuvn0 = vlib_buffer_get_current (b0); - - /* pop (ip, udp, vxlan, nsh) */ - vlib_buffer_advance (b0, sizeof (*iuvn0)); - - tunnel_index0 = ~0; - error0 = 0; - next0 = NSH_VXLAN_GPE_INPUT_NEXT_DROP; - - key0.src = iuvn0->ip4.src_address.as_u32; - key0.vni = iuvn0->vxlan.vni_res; - key0.spi_si = iuvn0->nsh.spi_si; - key0.pad = 0; - - if (PREDICT_FALSE ((key0.as_u64[0] != last_key.as_u64[0]) - || (key0.as_u64[1] != last_key.as_u64[1]))) - { - p0 = hash_get_mem (ngm->nsh_vxlan_gpe_tunnel_by_key, &key0); - - if (p0 == 0) - { - error0 = NSH_VXLAN_GPE_ERROR_NO_SUCH_TUNNEL; - goto trace00; - } - - last_key.as_u64[0] = key0.as_u64[0]; - last_key.as_u64[1] = key0.as_u64[1]; - tunnel_index0 = last_tunnel_index = p0[0]; - } - else - tunnel_index0 = last_tunnel_index; - - t0 = pool_elt_at_index (ngm->tunnels, tunnel_index0); - - next0 = t0->decap_next_index; - sw_if_index0 = t0->sw_if_index; - len0 = vlib_buffer_length_in_chain(vm, b0); - - /* Required to make the l2 tag push / pop code work on l2 subifs */ - vnet_update_l2_len (b0); - - if (next0 == NSH_VXLAN_GPE_INPUT_NEXT_NSH_VXLAN_GPE_ENCAP) - { - /* - * Functioning as SFF (ie "half NSH 
tunnel mode") - * If ingress (we are in decap.c) with NSH header, and 'decap next nsh-vxlan-gpe' then "NSH switch" - * 1. Take DST, remap to SRC, remap other keys in place - * 2. Look up new t0 as per above - * 3. Set sw_if_index[VLIB_TX] to be t0->sw_if_index - */ - uword * next_p0; - nsh_vxlan_gpe_tunnel_t * next_t0; - nsh_vxlan_gpe_tunnel_key_t next_key0; - - next_key0.src = iuvn0->ip4.dst_address.as_u32; - next_key0.vni = iuvn0->vxlan.vni_res; - next_key0.spi_si = iuvn0->nsh.spi_si; - next_key0.pad = 0; - - next_p0 = hash_get_mem (ngm->nsh_vxlan_gpe_tunnel_by_key, &next_key0); - - if (next_p0 == 0) - { - error0 = NSH_VXLAN_GPE_ERROR_NO_SUCH_TUNNEL; - goto trace00; - } - next_t0 = pool_elt_at_index (ngm->tunnels, next_p0[0]); - vnet_buffer(b0)->sw_if_index[VLIB_TX] = next_t0->sw_if_index; - - } - else - { - /* - * ip[46] lookup in the configured FIB - * nsh-vxlan-gpe-encap, here's the encap tunnel sw_if_index - */ - vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index; - } - - pkts_decapsulated ++; - - stats_n_packets += 1; - stats_n_bytes += len0; - - /* Batch stats increment on the same nsh-vxlan-gpe tunnel so counter - is not incremented per packet */ - if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index)) - { - stats_n_packets -= 1; - stats_n_bytes -= len0; - if (stats_n_packets) - vlib_increment_combined_counter( - im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); - stats_n_packets = 1; - stats_n_bytes = len0; - stats_sw_if_index = sw_if_index0; - } - - trace00: - b0->error = error0 ? node->errors[error0] : 0; - - if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) - { - nsh_vxlan_gpe_rx_trace_t *tr - = vlib_add_trace (vm, node, b0, sizeof (*tr)); - tr->next_index = next0; - tr->error = error0; - tr->tunnel_index = tunnel_index0; - tr->h = iuvn0->nsh; - } - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, - to_next, n_left_to_next, - bi0, next0); - } - - vlib_put_next_frame (vm, node, next_index, n_left_to_next); - } - vlib_node_increment_counter (vm, nsh_vxlan_gpe_input_node.index, - NSH_VXLAN_GPE_ERROR_DECAPSULATED, - pkts_decapsulated); - /* Increment any remaining batch stats */ - if (stats_n_packets) - { - vlib_increment_combined_counter( - im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, cpu_index, - stats_sw_if_index, stats_n_packets, stats_n_bytes); - node->runtime_data[0] = stats_sw_if_index; - } - return from_frame->n_vectors; -} - -static char * nsh_vxlan_gpe_error_strings[] = { -#define nsh_vxlan_gpe_error(n,s) s, -#include -#undef nsh_vxlan_gpe_error -#undef _ -}; - -VLIB_REGISTER_NODE (nsh_vxlan_gpe_input_node) = { - .function = nsh_vxlan_gpe_input, - .name = "nsh-vxlan-gpe-input", - /* Takes a vector of packets. */ - .vector_size = sizeof (u32), - - .n_errors = NSH_VXLAN_GPE_N_ERROR, - .error_strings = nsh_vxlan_gpe_error_strings, - - .n_next_nodes = NSH_VXLAN_GPE_INPUT_N_NEXT, - .next_nodes = { -#define _(s,n) [NSH_VXLAN_GPE_INPUT_NEXT_##s] = n, - foreach_nsh_vxlan_gpe_input_next -#undef _ - }, - - .format_buffer = format_nsh_header_with_length, - .format_trace = format_nsh_vxlan_gpe_rx_trace, - // $$$$ .unformat_buffer = unformat_nsh_vxlan_gpe_header, -}; diff --git a/vnet/vnet/nsh-vxlan-gpe/encap.c b/vnet/vnet/nsh-vxlan-gpe/encap.c deleted file mode 100644 index af520b2f8f0..00000000000 --- a/vnet/vnet/nsh-vxlan-gpe/encap.c +++ /dev/null @@ -1,422 +0,0 @@ -/* - * Copyright (c) 2015 Cisco and/or its affiliates. 
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include -#include -#include -#include -#include -#include - -/* Statistics (not really errors) */ -#define foreach_nsh_vxlan_gpe_encap_error \ -_(ENCAPSULATED, "good packets encapsulated") - -static char * nsh_vxlan_gpe_encap_error_strings[] = { -#define _(sym,string) string, - foreach_nsh_vxlan_gpe_encap_error -#undef _ -}; - -typedef enum { -#define _(sym,str) NSH_VXLAN_GPE_ENCAP_ERROR_##sym, - foreach_nsh_vxlan_gpe_encap_error -#undef _ - NSH_VXLAN_GPE_ENCAP_N_ERROR, -} nsh_vxlan_gpe_encap_error_t; - -typedef enum { - NSH_VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP, - NSH_VXLAN_GPE_ENCAP_NEXT_DROP, - NSH_VXLAN_GPE_ENCAP_N_NEXT, -} nsh_vxlan_gpe_encap_next_t; - -typedef struct { - u32 tunnel_index; -} nsh_vxlan_gpe_encap_trace_t; - -u8 * format_nsh_vxlan_gpe_encap_trace (u8 * s, va_list * args) -{ - CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); - CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); - nsh_vxlan_gpe_encap_trace_t * t - = va_arg (*args, nsh_vxlan_gpe_encap_trace_t *); - - s = format (s, "NSH-VXLAN-ENCAP: tunnel %d", t->tunnel_index); - return s; -} - -#define foreach_fixed_header_offset \ -_(0) _(1) _(2) _(3) _(4) _(5) _(6) - -static uword -nsh_vxlan_gpe_encap (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * from_frame) -{ - u32 n_left_from, next_index, * from, * to_next; - nsh_vxlan_gpe_main_t * ngm = &nsh_vxlan_gpe_main; - vnet_main_t * vnm = ngm->vnet_main; - vnet_interface_main_t * im = &vnm->interface_main; - u32 pkts_encapsulated = 0; - u16 old_l0 = 0, old_l1 = 0; - u32 cpu_index = os_get_cpu_number(); - u32 stats_sw_if_index, stats_n_packets, stats_n_bytes; - - from = vlib_frame_vector_args (from_frame); - n_left_from = from_frame->n_vectors; - - next_index = node->cached_next_index; - stats_sw_if_index = node->runtime_data[0]; - stats_n_packets = stats_n_bytes = 0; - - while (n_left_from > 0) - { - u32 n_left_to_next; - - vlib_get_next_frame (vm, node, next_index, - to_next, n_left_to_next); - - while (n_left_from >= 4 && n_left_to_next >= 2) - { - u32 bi0, bi1; - vlib_buffer_t * b0, * b1; - u32 next0 = NSH_VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP; - u32 next1 = NSH_VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP; - u32 sw_if_index0, sw_if_index1, len0, len1; - vnet_hw_interface_t * hi0, * hi1; - ip4_header_t * ip0, * ip1; - udp_header_t * udp0, * udp1; - u64 * copy_src0, * copy_dst0; - u64 * copy_src1, * copy_dst1; - u32 * copy_src_last0, * copy_dst_last0; - u32 * copy_src_last1, * copy_dst_last1; - nsh_vxlan_gpe_tunnel_t * t0, * t1; - u16 new_l0, new_l1; - ip_csum_t sum0, sum1; - - /* Prefetch next iteration. 
*/ - { - vlib_buffer_t * p2, * p3; - - p2 = vlib_get_buffer (vm, from[2]); - p3 = vlib_get_buffer (vm, from[3]); - - vlib_prefetch_buffer_header (p2, LOAD); - vlib_prefetch_buffer_header (p3, LOAD); - - CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD); - CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD); - } - - bi0 = from[0]; - bi1 = from[1]; - to_next[0] = bi0; - to_next[1] = bi1; - from += 2; - to_next += 2; - n_left_to_next -= 2; - n_left_from -= 2; - - b0 = vlib_get_buffer (vm, bi0); - b1 = vlib_get_buffer (vm, bi1); - - /* 1-wide cache? */ - sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX]; - sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_TX]; - hi0 = vnet_get_sup_hw_interface - (vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]); - hi1 = vnet_get_sup_hw_interface - (vnm, vnet_buffer(b1)->sw_if_index[VLIB_TX]); - - t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance); - t1 = pool_elt_at_index (ngm->tunnels, hi1->dev_instance); - - ASSERT(vec_len(t0->rewrite) >= 24); - ASSERT(vec_len(t1->rewrite) >= 24); - - /* Apply the rewrite string. $$$$ vnet_rewrite? */ - vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite)); - vlib_buffer_advance (b1, -(word)_vec_len(t1->rewrite)); - - ip0 = vlib_buffer_get_current(b0); - ip1 = vlib_buffer_get_current(b1); - /* Copy the fixed header */ - copy_dst0 = (u64 *) ip0; - copy_src0 = (u64 *) t0->rewrite; - copy_dst1 = (u64 *) ip1; - copy_src1 = (u64 *) t1->rewrite; - - ASSERT (sizeof (ip4_vxlan_gpe_and_nsh_header_t) == 60); - - /* Copy first 56 octets 8-bytes at a time */ -#define _(offs) copy_dst0[offs] = copy_src0[offs]; - foreach_fixed_header_offset; -#undef _ -#define _(offs) copy_dst1[offs] = copy_src1[offs]; - foreach_fixed_header_offset; -#undef _ - - /* Last 4 octets. Hopefully gcc will be our friend */ - copy_dst_last0 = (u32 *)(©_dst0[7]); - copy_src_last0 = (u32 *)(©_src0[7]); - copy_dst_last1 = (u32 *)(©_dst1[7]); - copy_src_last1 = (u32 *)(©_src1[7]); - - copy_dst_last0[0] = copy_src_last0[0]; - copy_dst_last1[0] = copy_src_last1[0]; - - /* If there are TLVs to copy, do so */ - if (PREDICT_FALSE (_vec_len(t0->rewrite) > 64)) - clib_memcpy (©_dst0[3], t0->rewrite + 64 , - _vec_len (t0->rewrite)-64); - - if (PREDICT_FALSE (_vec_len(t1->rewrite) > 64)) - clib_memcpy (©_dst0[3], t1->rewrite + 64 , - _vec_len (t1->rewrite)-64); - - /* fix the ing outer-IP checksum */ - sum0 = ip0->checksum; - /* old_l0 always 0, see the rewrite setup */ - new_l0 = - clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)); - - sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t, - length /* changed member */); - ip0->checksum = ip_csum_fold (sum0); - ip0->length = new_l0; - - sum1 = ip1->checksum; - /* old_l1 always 0, see the rewrite setup */ - new_l1 = - clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)); - - sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t, - length /* changed member */); - ip1->checksum = ip_csum_fold (sum1); - ip1->length = new_l1; - - /* Fix UDP length */ - udp0 = (udp_header_t *)(ip0+1); - new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) - - sizeof (*ip0)); - udp1 = (udp_header_t *)(ip1+1); - new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1) - - sizeof (*ip1)); - - udp0->length = new_l0; - udp1->length = new_l1; - - /* Reset to look up tunnel partner in the configured FIB */ - vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index; - vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->encap_fib_index; - vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0; - 
vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1; - pkts_encapsulated += 2; - - len0 = vlib_buffer_length_in_chain(vm, b0); - len1 = vlib_buffer_length_in_chain(vm, b0); - stats_n_packets += 2; - stats_n_bytes += len0 + len1; - - /* Batch stats increment on the same vxlan tunnel so counter is not - incremented per packet. Note stats are still incremented for deleted - and admin-down tunnel where packets are dropped. It is not worthwhile - to check for this rare case and affect normal path performance. */ - if (PREDICT_FALSE( - (sw_if_index0 != stats_sw_if_index) - || (sw_if_index1 != stats_sw_if_index))) { - stats_n_packets -= 2; - stats_n_bytes -= len0 + len1; - if (sw_if_index0 == sw_if_index1) { - if (stats_n_packets) - vlib_increment_combined_counter( - im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); - stats_sw_if_index = sw_if_index0; - stats_n_packets = 2; - stats_n_bytes = len0 + len1; - } else { - vlib_increment_combined_counter( - im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, sw_if_index0, 1, len0); - vlib_increment_combined_counter( - im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, sw_if_index1, 1, len1); - } - } - - if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) - { - nsh_vxlan_gpe_encap_trace_t *tr = - vlib_add_trace (vm, node, b0, sizeof (*tr)); - tr->tunnel_index = t0 - ngm->tunnels; - } - - if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED)) - { - nsh_vxlan_gpe_encap_trace_t *tr = - vlib_add_trace (vm, node, b1, sizeof (*tr)); - tr->tunnel_index = t1 - ngm->tunnels; - } - - vlib_validate_buffer_enqueue_x2 (vm, node, next_index, - to_next, n_left_to_next, - bi0, bi1, next0, next1); - } - - while (n_left_from > 0 && n_left_to_next > 0) - { - u32 bi0; - vlib_buffer_t * b0; - u32 next0 = NSH_VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP; - u32 sw_if_index0, len0; - vnet_hw_interface_t * hi0; - ip4_header_t * ip0; - udp_header_t * udp0; - u64 * copy_src0, * copy_dst0; - u32 * copy_src_last0, * copy_dst_last0; - nsh_vxlan_gpe_tunnel_t * t0; - u16 new_l0; - ip_csum_t sum0; - - bi0 = from[0]; - to_next[0] = bi0; - from += 1; - to_next += 1; - n_left_from -= 1; - n_left_to_next -= 1; - - b0 = vlib_get_buffer (vm, bi0); - - /* 1-wide cache? */ - sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX]; - hi0 = vnet_get_sup_hw_interface - (vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]); - - t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance); - - ASSERT(vec_len(t0->rewrite) >= 24); - - /* Apply the rewrite string. $$$$ vnet_rewrite? */ - vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite)); - - ip0 = vlib_buffer_get_current(b0); - /* Copy the fixed header */ - copy_dst0 = (u64 *) ip0; - copy_src0 = (u64 *) t0->rewrite; - - ASSERT (sizeof (ip4_vxlan_gpe_and_nsh_header_t) == 60); - - /* Copy first 56 octets 8-bytes at a time */ -#define _(offs) copy_dst0[offs] = copy_src0[offs]; - foreach_fixed_header_offset; -#undef _ - /* Last 4 octets. 
Hopefully gcc will be our friend */ - copy_dst_last0 = (u32 *)(©_dst0[7]); - copy_src_last0 = (u32 *)(©_src0[7]); - - copy_dst_last0[0] = copy_src_last0[0]; - - /* If there are TLVs to copy, do so */ - if (PREDICT_FALSE (_vec_len(t0->rewrite) > 64)) - clib_memcpy (©_dst0[3], t0->rewrite + 64 , - _vec_len (t0->rewrite)-64); - - /* fix the ing outer-IP checksum */ - sum0 = ip0->checksum; - /* old_l0 always 0, see the rewrite setup */ - new_l0 = - clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)); - - sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t, - length /* changed member */); - ip0->checksum = ip_csum_fold (sum0); - ip0->length = new_l0; - - /* Fix UDP length */ - udp0 = (udp_header_t *)(ip0+1); - new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) - - sizeof (*ip0)); - - udp0->length = new_l0; - - /* Reset to look up tunnel partner in the configured FIB */ - vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index; - vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0; - pkts_encapsulated ++; - - len0 = vlib_buffer_length_in_chain(vm, b0); - stats_n_packets += 1; - stats_n_bytes += len0; - - /* Batch stats increment on the same vxlan tunnel so counter is not - incremented per packet. Note stats are still incremented for deleted - and admin-down tunnel where packets are dropped. It is not worthwhile - to check for this rare case and affect normal path performance. */ - if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index)) { - stats_n_packets -= 1; - stats_n_bytes -= len0; - if (stats_n_packets) - vlib_increment_combined_counter( - im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); - stats_n_packets = 1; - stats_n_bytes = len0; - stats_sw_if_index = sw_if_index0; - } - if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) - { - nsh_vxlan_gpe_encap_trace_t *tr = - vlib_add_trace (vm, node, b0, sizeof (*tr)); - tr->tunnel_index = t0 - ngm->tunnels; - } - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, - to_next, n_left_to_next, - bi0, next0); - } - - vlib_put_next_frame (vm, node, next_index, n_left_to_next); - } - vlib_node_increment_counter (vm, node->node_index, - NSH_VXLAN_GPE_ENCAP_ERROR_ENCAPSULATED, - pkts_encapsulated); - /* Increment any remaining batch stats */ - if (stats_n_packets) { - vlib_increment_combined_counter( - im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, cpu_index, - stats_sw_if_index, stats_n_packets, stats_n_bytes); - node->runtime_data[0] = stats_sw_if_index; - } - - return from_frame->n_vectors; -} - -VLIB_REGISTER_NODE (nsh_vxlan_gpe_encap_node) = { - .function = nsh_vxlan_gpe_encap, - .name = "nsh-vxlan-gpe-encap", - .vector_size = sizeof (u32), - .format_trace = format_nsh_vxlan_gpe_encap_trace, - .type = VLIB_NODE_TYPE_INTERNAL, - - .n_errors = ARRAY_LEN(nsh_vxlan_gpe_encap_error_strings), - .error_strings = nsh_vxlan_gpe_encap_error_strings, - - .n_next_nodes = NSH_VXLAN_GPE_ENCAP_N_NEXT, - - .next_nodes = { - [NSH_VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP] = "ip4-lookup", - [NSH_VXLAN_GPE_ENCAP_NEXT_DROP] = "error-drop", - }, -}; diff --git a/vnet/vnet/nsh-vxlan-gpe/nsh_vxlan_gpe.c b/vnet/vnet/nsh-vxlan-gpe/nsh_vxlan_gpe.c deleted file mode 100644 index 88945cd8762..00000000000 --- a/vnet/vnet/nsh-vxlan-gpe/nsh_vxlan_gpe.c +++ /dev/null @@ -1,577 +0,0 @@ -/* - * Copyright (c) 2015 Cisco and/or its affiliates. 
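The encap path removed above prepends a per-tunnel, precomputed ip4 + udp + vxlan-gpe + nsh rewrite (asserted to be 60 bytes in the fixed, TLV-free case) and then patches only the outer IP total length, the IP checksum and the UDP length per packet. Here is a minimal standalone sketch of that idea, assuming simplified header structs and a full checksum recompute where the node uses an incremental ip_csum_update(); the demo_* names are illustrative and not VPP APIs.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

typedef struct {
  uint8_t  ver_ihl, tos;
  uint16_t length;
  uint16_t id, frag;
  uint8_t  ttl, protocol;
  uint16_t checksum;
  uint32_t src, dst;
} demo_ip4_t;                     /* 20 bytes */

typedef struct {
  uint16_t src_port, dst_port, length, checksum;
} demo_udp_t;                     /* 8 bytes */

/* Size of the precomputed rewrite; the node above asserts the fixed
 * (no-TLV) ip4/udp/vxlan-gpe/nsh header is 60 bytes. */
#define DEMO_REWRITE_BYTES 60

static uint16_t
demo_ip_checksum (const void *data, int len)
{
  const uint16_t *p = data;
  uint32_t sum = 0;
  for (; len > 1; len -= 2)
    sum += *p++;
  if (len)
    sum += *(const uint8_t *) p;
  while (sum >> 16)
    sum = (sum & 0xffff) + (sum >> 16);
  return (uint16_t) ~sum;
}

/* Prepend the rewrite in front of the payload and patch the per-packet
 * fields, mirroring what the dual and single loops above do after
 * vlib_buffer_advance().  'out' must be at least 4-byte aligned. */
static int
demo_encap (const uint8_t *rewrite, const uint8_t *payload, int payload_len,
            uint8_t *out)
{
  int total = DEMO_REWRITE_BYTES + payload_len;
  demo_ip4_t *ip;
  demo_udp_t *udp;

  memcpy (out, rewrite, DEMO_REWRITE_BYTES);            /* fixed header copy */
  memcpy (out + DEMO_REWRITE_BYTES, payload, payload_len);

  ip = (demo_ip4_t *) out;
  udp = (demo_udp_t *) (out + sizeof (demo_ip4_t));

  /* New outer lengths; the node updates the IP checksum incrementally
   * (the old length is always 0 in the template), a full recompute is
   * used here for simplicity. */
  ip->length = htons ((uint16_t) total);
  ip->checksum = 0;
  ip->checksum = demo_ip_checksum (ip, sizeof (*ip));
  udp->length = htons ((uint16_t) (total - (int) sizeof (demo_ip4_t)));

  return total;
}

int
main (void)
{
  uint8_t rewrite[DEMO_REWRITE_BYTES] = { 0 };
  uint8_t payload[40] = { 0 };
  union { uint32_t align; uint8_t b[DEMO_REWRITE_BYTES + 40]; } out;

  /* A real rewrite is built once per tunnel (nsh_vxlan_gpe_rewrite() in a
   * later hunk); an all-zero template is enough to exercise the fix-ups. */
  printf ("encapsulated %d bytes\n",
          demo_encap (rewrite, payload, (int) sizeof (payload), out.b));
  return 0;
}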
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include - -nsh_vxlan_gpe_main_t nsh_vxlan_gpe_main; - -static u8 * format_decap_next (u8 * s, va_list * args) -{ - u32 next_index = va_arg (*args, u32); - - switch (next_index) - { - case NSH_VXLAN_GPE_INPUT_NEXT_DROP: - return format (s, "drop"); - case NSH_VXLAN_GPE_INPUT_NEXT_IP4_INPUT: - return format (s, "ip4"); - case NSH_VXLAN_GPE_INPUT_NEXT_IP6_INPUT: - return format (s, "ip6"); - case NSH_VXLAN_GPE_INPUT_NEXT_ETHERNET_INPUT: - return format (s, "ethernet"); - case NSH_VXLAN_GPE_INPUT_NEXT_NSH_VXLAN_GPE_ENCAP: - return format (s, "nsh-vxlan-gpe"); - default: - return format (s, "unknown %d", next_index); - } - return s; -} - -u8 * format_nsh_vxlan_gpe_tunnel (u8 * s, va_list * args) -{ - nsh_vxlan_gpe_tunnel_t * t = va_arg (*args, nsh_vxlan_gpe_tunnel_t *); - nsh_vxlan_gpe_main_t * ngm = &nsh_vxlan_gpe_main; - - s = format (s, - "[%d] %U (src) %U (dst) fibs: encap %d, decap %d", - t - ngm->tunnels, - format_ip4_address, &t->src, - format_ip4_address, &t->dst, - t->encap_fib_index, - t->decap_fib_index); - s = format (s, " decap next %U\n", format_decap_next, t->decap_next_index); - s = format (s, " vxlan VNI %d ", t->vni); - s = format (s, "nsh ver %d ", (t->nsh_hdr.ver_o_c>>6)); - if (t->nsh_hdr.ver_o_c & NSH_O_BIT) - s = format (s, "O-set "); - - if (t->nsh_hdr.ver_o_c & NSH_C_BIT) - s = format (s, "C-set "); - - s = format (s, "len %d (%d bytes) md_type %d next_protocol %d\n", - t->nsh_hdr.length, t->nsh_hdr.length * 4, t->nsh_hdr.md_type, t->nsh_hdr.next_protocol); - - s = format (s, " service path %d service index %d\n", - (t->nsh_hdr.spi_si>>NSH_SPI_SHIFT) & NSH_SPI_MASK, - t->nsh_hdr.spi_si & NSH_SINDEX_MASK); - - s = format (s, " c1 %d c2 %d c3 %d c4 %d\n", - t->nsh_hdr.c1, t->nsh_hdr.c2, t->nsh_hdr.c3, t->nsh_hdr.c4); - - return s; -} - -static u8 * format_nsh_vxlan_gpe_name (u8 * s, va_list * args) -{ - nsh_vxlan_gpe_main_t * ngm = &nsh_vxlan_gpe_main; - u32 i = va_arg (*args, u32); - u32 show_dev_instance = ~0; - - if (i < vec_len (ngm->dev_inst_by_real)) - show_dev_instance = ngm->dev_inst_by_real[i]; - - if (show_dev_instance != ~0) - i = show_dev_instance; - - return format (s, "nsh_vxlan_gpe_tunnel%d", i); -} - -static int nsh_vxlan_gpe_name_renumber (vnet_hw_interface_t * hi, - u32 new_dev_instance) -{ - nsh_vxlan_gpe_main_t * ngm = &nsh_vxlan_gpe_main; - - vec_validate_init_empty (ngm->dev_inst_by_real, hi->dev_instance, ~0); - - ngm->dev_inst_by_real [hi->dev_instance] = new_dev_instance; - - return 0; -} - -static uword dummy_interface_tx (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * frame) -{ - clib_warning ("you shouldn't be here, leaking buffers..."); - return frame->n_vectors; -} - -VNET_DEVICE_CLASS (nsh_vxlan_gpe_device_class,static) = { - .name = "NSH_VXLAN_GPE", - .format_device_name = format_nsh_vxlan_gpe_name, - .format_tx_trace = format_nsh_vxlan_gpe_encap_trace, - .tx_function = dummy_interface_tx, - .name_renumber = nsh_vxlan_gpe_name_renumber, -}; - -static uword 
dummy_set_rewrite (vnet_main_t * vnm, - u32 sw_if_index, - u32 l3_type, - void * dst_address, - void * rewrite, - uword max_rewrite_bytes) -{ - return 0; -} - -static u8 * format_nsh_vxlan_gpe_header_with_length (u8 * s, va_list * args) -{ - u32 dev_instance = va_arg (*args, u32); - s = format (s, "unimplemented dev %u", dev_instance); - return s; -} - -VNET_HW_INTERFACE_CLASS (nsh_vxlan_gpe_hw_class) = { - .name = "NSH_VXLAN_GPE", - .format_header = format_nsh_vxlan_gpe_header_with_length, - .set_rewrite = dummy_set_rewrite, -}; - -#define foreach_copy_field \ -_(src.as_u32) \ -_(dst.as_u32) \ -_(vni) \ -_(encap_fib_index) \ -_(decap_fib_index) \ -_(decap_next_index) - - -#define foreach_copy_nshhdr_field \ -_(ver_o_c) \ -_(length) \ -_(md_type) \ -_(next_protocol) \ -_(spi_si) \ -_(c1) \ -_(c2) \ -_(c3) \ -_(c4) \ -_(tlvs) - -#define foreach_32bit_field \ -_(spi_si) \ -_(c1) \ -_(c2) \ -_(c3) \ -_(c4) - -static int nsh_vxlan_gpe_rewrite (nsh_vxlan_gpe_tunnel_t * t) -{ - u8 *rw = 0; - ip4_header_t * ip0; - nsh_header_t * nsh0; - ip4_vxlan_gpe_and_nsh_header_t * h0; - int len; - - len = sizeof (*h0) + vec_len(t->nsh_hdr.tlvs)*4; - - vec_validate_aligned (rw, len-1, CLIB_CACHE_LINE_BYTES); - - h0 = (ip4_vxlan_gpe_and_nsh_header_t *) rw; - - /* Fixed portion of the (outer) ip4 header */ - ip0 = &h0->ip4; - ip0->ip_version_and_header_length = 0x45; - ip0->ttl = 254; - ip0->protocol = IP_PROTOCOL_UDP; - - /* we fix up the ip4 header length and checksum after-the-fact */ - ip0->src_address.as_u32 = t->src.as_u32; - ip0->dst_address.as_u32 = t->dst.as_u32; - ip0->checksum = ip4_header_checksum (ip0); - - /* UDP header, randomize src port on something, maybe? */ - h0->udp.src_port = clib_host_to_net_u16 (4790); - h0->udp.dst_port = clib_host_to_net_u16 (UDP_DST_PORT_vxlan_gpe); - - /* VXLAN header. Are we having fun yet? 
*/ - h0->vxlan.flags = VXLAN_GPE_FLAGS_I | VXLAN_GPE_FLAGS_P; - h0->vxlan.ver_res = VXLAN_GPE_VERSION; - h0->vxlan.next_protocol = VXLAN_NEXT_PROTOCOL_NSH; - h0->vxlan.vni_res = clib_host_to_net_u32 (t->vni<<8); - - /* NSH header */ - nsh0 = &h0->nsh; - nsh0->ver_o_c = t->nsh_hdr.ver_o_c; - nsh0->md_type = t->nsh_hdr.md_type; - nsh0->next_protocol = t->nsh_hdr.next_protocol; - nsh0->spi_si = t->nsh_hdr.spi_si; - nsh0->c1 = t->nsh_hdr.c1; - nsh0->c2 = t->nsh_hdr.c2; - nsh0->c3 = t->nsh_hdr.c3; - nsh0->c4 = t->nsh_hdr.c4; - - /* Endian swap 32-bit fields */ -#define _(x) nsh0->x = clib_host_to_net_u32(nsh0->x); - foreach_32bit_field; -#undef _ - - /* fix nsh header length */ - t->nsh_hdr.length = 6 + vec_len(t->nsh_hdr.tlvs); - nsh0->length = t->nsh_hdr.length; - - /* Copy any TLVs */ - if (vec_len(t->nsh_hdr.tlvs)) - clib_memcpy (nsh0->tlvs, t->nsh_hdr.tlvs, 4*vec_len(t->nsh_hdr.tlvs)); - - t->rewrite = rw; - return (0); -} - -int vnet_nsh_vxlan_gpe_add_del_tunnel -(vnet_nsh_vxlan_gpe_add_del_tunnel_args_t *a, u32 * sw_if_indexp) -{ - nsh_vxlan_gpe_main_t * ngm = &nsh_vxlan_gpe_main; - nsh_vxlan_gpe_tunnel_t *t = 0; - vnet_main_t * vnm = ngm->vnet_main; - vnet_hw_interface_t * hi; - uword * p; - u32 hw_if_index = ~0; - u32 sw_if_index = ~0; - int rv; - nsh_vxlan_gpe_tunnel_key_t key, *key_copy; - hash_pair_t *hp; - - key.src = a->dst.as_u32; /* decap src in key is encap dst in config */ - key.vni = clib_host_to_net_u32 (a->vni << 8); - key.spi_si = clib_host_to_net_u32(a->nsh_hdr.spi_si); - key.pad = 0; - - p = hash_get_mem (ngm->nsh_vxlan_gpe_tunnel_by_key, &key); - - if (a->is_add) - { - /* adding a tunnel: tunnel must not already exist */ - if (p) - return VNET_API_ERROR_INVALID_VALUE; - - if (a->decap_next_index >= NSH_VXLAN_GPE_INPUT_N_NEXT) - return VNET_API_ERROR_INVALID_DECAP_NEXT; - - pool_get_aligned (ngm->tunnels, t, CLIB_CACHE_LINE_BYTES); - memset (t, 0, sizeof (*t)); - - /* copy from arg structure */ -#define _(x) t->x = a->x; - foreach_copy_field; -#undef _ - - /* copy from arg structure */ -#define _(x) t->nsh_hdr.x = a->nsh_hdr.x; - foreach_copy_nshhdr_field; -#undef _ - - rv = nsh_vxlan_gpe_rewrite (t); - - if (rv) - { - pool_put (ngm->tunnels, t); - return rv; - } - - key_copy = clib_mem_alloc (sizeof (*key_copy)); - clib_memcpy (key_copy, &key, sizeof (*key_copy)); - - hash_set_mem (ngm->nsh_vxlan_gpe_tunnel_by_key, key_copy, - t - ngm->tunnels); - - if (vec_len (ngm->free_nsh_vxlan_gpe_tunnel_hw_if_indices) > 0) - { - hw_if_index = ngm->free_nsh_vxlan_gpe_tunnel_hw_if_indices - [vec_len (ngm->free_nsh_vxlan_gpe_tunnel_hw_if_indices)-1]; - _vec_len (ngm->free_nsh_vxlan_gpe_tunnel_hw_if_indices) -= 1; - - hi = vnet_get_hw_interface (vnm, hw_if_index); - hi->dev_instance = t - ngm->tunnels; - hi->hw_instance = hi->dev_instance; - } - else - { - hw_if_index = vnet_register_interface - (vnm, nsh_vxlan_gpe_device_class.index, t - ngm->tunnels, - nsh_vxlan_gpe_hw_class.index, t - ngm->tunnels); - hi = vnet_get_hw_interface (vnm, hw_if_index); - hi->output_node_index = nsh_vxlan_gpe_encap_node.index; - } - - t->hw_if_index = hw_if_index; - t->sw_if_index = sw_if_index = hi->sw_if_index; - - vnet_sw_interface_set_flags (vnm, hi->sw_if_index, - VNET_SW_INTERFACE_FLAG_ADMIN_UP); - } - else - { - /* deleting a tunnel: tunnel must exist */ - if (!p) - return VNET_API_ERROR_NO_SUCH_ENTRY; - - t = pool_elt_at_index (ngm->tunnels, p[0]); - - vnet_sw_interface_set_flags (vnm, t->sw_if_index, 0 /* down */); - vec_add1 (ngm->free_nsh_vxlan_gpe_tunnel_hw_if_indices, t->hw_if_index); - - hp 
= hash_get_pair (ngm->nsh_vxlan_gpe_tunnel_by_key, &key); - key_copy = (void *)(hp->key); - hash_unset_mem (ngm->nsh_vxlan_gpe_tunnel_by_key, &key); - clib_mem_free (key_copy); - - vec_free (t->rewrite); - pool_put (ngm->tunnels, t); - } - - if (sw_if_indexp) - *sw_if_indexp = sw_if_index; - - return 0; -} - -static u32 fib_index_from_fib_id (u32 fib_id) -{ - ip4_main_t * im = &ip4_main; - uword * p; - - p = hash_get (im->fib_index_by_table_id, fib_id); - if (!p) - return ~0; - - return p[0]; -} - -static uword unformat_decap_next (unformat_input_t * input, va_list * args) -{ - u32 * result = va_arg (*args, u32 *); - u32 tmp; - - if (unformat (input, "drop")) - *result = NSH_VXLAN_GPE_INPUT_NEXT_DROP; - else if (unformat (input, "ip4")) - *result = NSH_VXLAN_GPE_INPUT_NEXT_IP4_INPUT; - else if (unformat (input, "ip6")) - *result = NSH_VXLAN_GPE_INPUT_NEXT_IP6_INPUT; - else if (unformat (input, "ethernet")) - *result = NSH_VXLAN_GPE_INPUT_NEXT_ETHERNET_INPUT; - else if (unformat (input, "nsh-vxlan-gpe")) - *result = NSH_VXLAN_GPE_INPUT_NEXT_NSH_VXLAN_GPE_ENCAP; - else if (unformat (input, "%d", &tmp)) - *result = tmp; - else - return 0; - return 1; -} - -static clib_error_t * -nsh_vxlan_gpe_add_del_tunnel_command_fn (vlib_main_t * vm, - unformat_input_t * input, - vlib_cli_command_t * cmd) -{ - unformat_input_t _line_input, * line_input = &_line_input; - ip4_address_t src, dst; - u8 is_add = 1; - u8 src_set = 0; - u8 dst_set = 0; - u32 encap_fib_index = 0; - u32 decap_fib_index = 0; - u8 ver_o_c = 0; - u8 length = 0; - u8 md_type = 0; - u8 next_protocol = 1; /* default: ip4 */ - u32 decap_next_index = NSH_VXLAN_GPE_INPUT_NEXT_IP4_INPUT; - u32 spi; - u8 spi_set = 0; - u32 si; - u32 vni; - u8 vni_set = 0; - u8 si_set = 0; - u32 spi_si; - u32 c1 = 0; - u32 c2 = 0; - u32 c3 = 0; - u32 c4 = 0; - u32 *tlvs = 0; - u32 tmp; - int rv; - vnet_nsh_vxlan_gpe_add_del_tunnel_args_t _a, * a = &_a; - - /* Get a line of input. */ - if (! 
unformat_user (input, unformat_line_input, line_input)) - return 0; - - while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) { - if (unformat (line_input, "del")) - is_add = 0; - else if (unformat (line_input, "src %U", - unformat_ip4_address, &src)) - src_set = 1; - else if (unformat (line_input, "dst %U", - unformat_ip4_address, &dst)) - dst_set = 1; - else if (unformat (line_input, "encap-vrf-id %d", &tmp)) - { - encap_fib_index = fib_index_from_fib_id (tmp); - if (encap_fib_index == ~0) - return clib_error_return (0, "nonexistent encap fib id %d", tmp); - } - else if (unformat (line_input, "decap-vrf-id %d", &tmp)) - { - decap_fib_index = fib_index_from_fib_id (tmp); - if (decap_fib_index == ~0) - return clib_error_return (0, "nonexistent decap fib id %d", tmp); - } - else if (unformat (line_input, "decap-next %U", unformat_decap_next, - &decap_next_index)) - ; - else if (unformat (line_input, "vni %d", &vni)) - vni_set = 1; - else if (unformat (line_input, "version %d", &tmp)) - ver_o_c |= (tmp & 3) << 6; - else if (unformat (line_input, "o-bit %d", &tmp)) - ver_o_c |= (tmp & 1) << 5; - else if (unformat (line_input, "c-bit %d", &tmp)) - ver_o_c |= (tmp & 1) << 4; - else if (unformat (line_input, "md-type %d", &tmp)) - md_type = tmp; - else if (unformat(line_input, "next-ip4")) - next_protocol = 1; - else if (unformat(line_input, "next-ip6")) - next_protocol = 2; - else if (unformat(line_input, "next-ethernet")) - next_protocol = 3; - else if (unformat(line_input, "next-nsh")) - next_protocol = 4; - else if (unformat (line_input, "c1 %d", &c1)) - ; - else if (unformat (line_input, "c2 %d", &c2)) - ; - else if (unformat (line_input, "c3 %d", &c3)) - ; - else if (unformat (line_input, "c4 %d", &c4)) - ; - else if (unformat (line_input, "spi %d", &spi)) - spi_set = 1; - else if (unformat (line_input, "si %d", &si)) - si_set = 1; - else if (unformat (line_input, "tlv %x")) - vec_add1 (tlvs, tmp); - else - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); - } - - unformat_free (line_input); - - if (src_set == 0) - return clib_error_return (0, "tunnel src address not specified"); - - if (dst_set == 0) - return clib_error_return (0, "tunnel dst address not specified"); - - if (vni_set == 0) - return clib_error_return (0, "vni not specified"); - - if (spi_set == 0) - return clib_error_return (0, "spi not specified"); - - if (si_set == 0) - return clib_error_return (0, "si not specified"); - - spi_si = (spi<<8) | si; - - memset (a, 0, sizeof (*a)); - - a->is_add = is_add; - -#define _(x) a->x = x; - foreach_copy_field; -#undef _ - -#define _(x) a->nsh_hdr.x = x; - foreach_copy_nshhdr_field; -#undef _ - - rv = vnet_nsh_vxlan_gpe_add_del_tunnel (a, 0 /* hw_if_indexp */); - - switch(rv) - { - case 0: - break; - case VNET_API_ERROR_INVALID_DECAP_NEXT: - return clib_error_return (0, "invalid decap-next..."); - - case VNET_API_ERROR_TUNNEL_EXIST: - return clib_error_return (0, "tunnel already exists..."); - - case VNET_API_ERROR_NO_SUCH_ENTRY: - return clib_error_return (0, "tunnel does not exist..."); - - default: - return clib_error_return - (0, "vnet_nsh_vxlan_gpe_add_del_tunnel returned %d", rv); - } - - return 0; -} - -VLIB_CLI_COMMAND (create_nsh_vxlan_gpe_tunnel_command, static) = { - .path = "nsh vxlan tunnel", - .short_help = - "nsh vxlan tunnel src dst " - " c1 c2 c3 c4 spi si vni \n" - " [encap-vrf-id ] [decap-vrf-id ] [o-bit <1|0>] [c-bit <1|0>]\n" - " [md-type ][next-ip4][next-ip6][next-ethernet][next-nsh]\n" - " [tlv ][decap-next 
[ip4|ip6|ethernet|nsh-vxlan-gpe]][del]\n", - .function = nsh_vxlan_gpe_add_del_tunnel_command_fn, -}; - -static clib_error_t * -show_nsh_vxlan_gpe_tunnel_command_fn (vlib_main_t * vm, - unformat_input_t * input, - vlib_cli_command_t * cmd) -{ - nsh_vxlan_gpe_main_t * ngm = &nsh_vxlan_gpe_main; - nsh_vxlan_gpe_tunnel_t * t; - - if (pool_elts (ngm->tunnels) == 0) - vlib_cli_output (vm, "No nsh-vxlan-gpe tunnels configured..."); - - pool_foreach (t, ngm->tunnels, - ({ - vlib_cli_output (vm, "%U", format_nsh_vxlan_gpe_tunnel, t); - })); - - return 0; -} - -VLIB_CLI_COMMAND (show_nsh_vxlan_gpe_tunnel_command, static) = { - .path = "show nsh vxlan tunnel", - .function = show_nsh_vxlan_gpe_tunnel_command_fn, -}; - -clib_error_t *nsh_vxlan_gpe_init (vlib_main_t *vm) -{ - nsh_vxlan_gpe_main_t *ngm = &nsh_vxlan_gpe_main; - - ngm->vnet_main = vnet_get_main(); - ngm->vlib_main = vm; - - ngm->nsh_vxlan_gpe_tunnel_by_key - = hash_create_mem (0, sizeof(nsh_vxlan_gpe_tunnel_key_t), sizeof (uword)); - - udp_register_dst_port (vm, UDP_DST_PORT_vxlan_gpe, - nsh_vxlan_gpe_input_node.index, 1 /* is_ip4 */); - return 0; -} - -VLIB_INIT_FUNCTION(nsh_vxlan_gpe_init); - diff --git a/vnet/vnet/nsh-vxlan-gpe/nsh_vxlan_gpe.h b/vnet/vnet/nsh-vxlan-gpe/nsh_vxlan_gpe.h deleted file mode 100644 index 3effd3318cb..00000000000 --- a/vnet/vnet/nsh-vxlan-gpe/nsh_vxlan_gpe.h +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Copyright (c) 2015 Cisco and/or its affiliates. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef included_vnet_nsh_vxlan_gpe_h -#define included_vnet_nsh_vxlan_gpe_h - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -typedef CLIB_PACKED (struct { - ip4_header_t ip4; /* 20 bytes */ - udp_header_t udp; /* 8 bytes */ - vxlan_gpe_header_t vxlan; /* 8 bytes */ - nsh_header_t nsh; /* 28 bytes */ -}) ip4_vxlan_gpe_and_nsh_header_t; - -typedef CLIB_PACKED(struct { - /* - * Key fields: ip src, vxlan vni, nsh spi_si - * all fields in NET byte order - */ - union { - struct { - u32 src; - u32 vni; /* shifted 8 bits */ - u32 spi_si; - u32 pad; - }; - u64 as_u64[2]; - }; -}) nsh_vxlan_gpe_tunnel_key_t; - -typedef struct { - /* Rewrite string. 
$$$$ embed vnet_rewrite header */ - u8 * rewrite; - - /* decap next index */ - u32 decap_next_index; - - /* tunnel src and dst addresses */ - ip4_address_t src; - ip4_address_t dst; - - /* FIB indices */ - u32 encap_fib_index; /* tunnel partner lookup here */ - u32 decap_fib_index; /* inner IP lookup here */ - - /* vxlan VNI in HOST byte order, shifted left 8 bits */ - u32 vni; - - /* vnet intfc hw/sw_if_index */ - u32 hw_if_index; - u32 sw_if_index; - - /* NSH header fields in HOST byte order */ - nsh_header_t nsh_hdr; -} nsh_vxlan_gpe_tunnel_t; - -#define foreach_nsh_vxlan_gpe_input_next \ -_(DROP, "error-drop") \ -_(IP4_INPUT, "ip4-input") \ -_(IP6_INPUT, "ip6-input") \ -_(ETHERNET_INPUT, "ethernet-input") \ -_(NSH_VXLAN_GPE_ENCAP, "nsh-vxlan-gpe-encap") - -typedef enum { -#define _(s,n) NSH_VXLAN_GPE_INPUT_NEXT_##s, - foreach_nsh_vxlan_gpe_input_next -#undef _ - NSH_VXLAN_GPE_INPUT_N_NEXT, -} nsh_vxlan_gpe_input_next_t; - -typedef enum { -#define nsh_vxlan_gpe_error(n,s) NSH_VXLAN_GPE_ERROR_##n, -#include -#undef nsh_vxlan_gpe_error - NSH_VXLAN_GPE_N_ERROR, -} nsh_vxlan_gpe_input_error_t; - -typedef struct { - /* vector of encap tunnel instances */ - nsh_vxlan_gpe_tunnel_t *tunnels; - - /* lookup tunnel by key */ - uword * nsh_vxlan_gpe_tunnel_by_key; - - /* Free vlib hw_if_indices */ - u32 * free_nsh_vxlan_gpe_tunnel_hw_if_indices; - - /* show device instance by real device instance */ - u32 * dev_inst_by_real; - - /* convenience */ - vlib_main_t * vlib_main; - vnet_main_t * vnet_main; -} nsh_vxlan_gpe_main_t; - -nsh_vxlan_gpe_main_t nsh_vxlan_gpe_main; - -extern vlib_node_registration_t nsh_vxlan_gpe_input_node; -extern vlib_node_registration_t nsh_vxlan_gpe_encap_node; - -u8 * format_nsh_vxlan_gpe_encap_trace (u8 * s, va_list * args); - -typedef struct { - u8 is_add; - ip4_address_t src, dst; - u32 encap_fib_index; - u32 decap_fib_index; - u32 decap_next_index; - u32 vni; - nsh_header_t nsh_hdr; -} vnet_nsh_vxlan_gpe_add_del_tunnel_args_t; - -int vnet_nsh_vxlan_gpe_add_del_tunnel -(vnet_nsh_vxlan_gpe_add_del_tunnel_args_t *a, u32 * sw_if_indexp); - -#endif /* included_vnet_nsh_vxlan_gpe_h */ diff --git a/vnet/vnet/nsh-vxlan-gpe/nsh_vxlan_gpe_error.def b/vnet/vnet/nsh-vxlan-gpe/nsh_vxlan_gpe_error.def deleted file mode 100644 index 4ba64fe4dc5..00000000000 --- a/vnet/vnet/nsh-vxlan-gpe/nsh_vxlan_gpe_error.def +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Copyright (c) 2015 Cisco and/or its affiliates. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -nsh_vxlan_gpe_error (DECAPSULATED, "good packets decapsulated") -nsh_vxlan_gpe_error (NO_SUCH_TUNNEL, "no such tunnel packets") diff --git a/vnet/vnet/nsh-vxlan-gpe/vxlan-gpe-rfc.txt b/vnet/vnet/nsh-vxlan-gpe/vxlan-gpe-rfc.txt deleted file mode 100644 index 35cee50f573..00000000000 --- a/vnet/vnet/nsh-vxlan-gpe/vxlan-gpe-rfc.txt +++ /dev/null @@ -1,868 +0,0 @@ -Network Working Group P. Quinn -Internet-Draft Cisco Systems, Inc. -Intended status: Experimental P. Agarwal -Expires: January 4, 2015 Broadcom - R. Fernando - L. Kreeger - D. 
Lewis - F. Maino - M. Smith - N. Yadav - Cisco Systems, Inc. - L. Yong - Huawei USA - X. Xu - Huawei Technologies - U. Elzur - Intel - P. Garg - Microsoft - July 3, 2014 - - - Generic Protocol Extension for VXLAN - draft-quinn-vxlan-gpe-03.txt - -Abstract - - This draft describes extending Virtual eXtensible Local Area Network - (VXLAN), via changes to the VXLAN header, with three new - capabilities: support for multi-protocol encapsulation, operations, - administration and management (OAM) signaling and explicit - versioning. - -Status of this Memo - - This Internet-Draft is submitted in full conformance with the - provisions of BCP 78 and BCP 79. - - Internet-Drafts are working documents of the Internet Engineering - Task Force (IETF). Note that other groups may also distribute - working documents as Internet-Drafts. The list of current Internet- - Drafts is at http://datatracker.ietf.org/drafts/current/. - - Internet-Drafts are draft documents valid for a maximum of six months - and may be updated, replaced, or obsoleted by other documents at any - time. It is inappropriate to use Internet-Drafts as reference - material or to cite them other than as "work in progress." - - - - -Quinn, et al. Expires January 4, 2015 [Page 1] - -Internet-Draft Generic Protocol Extension for VXLAN July 2014 - - - This Internet-Draft will expire on January 4, 2015. - -Copyright Notice - - Copyright (c) 2014 IETF Trust and the persons identified as the - document authors. All rights reserved. - - This document is subject to BCP 78 and the IETF Trust's Legal - Provisions Relating to IETF Documents - (http://trustee.ietf.org/license-info) in effect on the date of - publication of this document. Please review these documents - carefully, as they describe your rights and restrictions with respect - to this document. Code Components extracted from this document must - include Simplified BSD License text as described in Section 4.e of - the Trust Legal Provisions and are provided without warranty as - described in the Simplified BSD License. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Quinn, et al. Expires January 4, 2015 [Page 2] - -Internet-Draft Generic Protocol Extension for VXLAN July 2014 - - -Table of Contents - - 1. Introduction . . . . . . . . . . . . . . . . . . . . . . . . . 4 - 2. VXLAN Without Protocol Extension . . . . . . . . . . . . . . . 5 - 3. Generic Protocol Extension VXLAN (VXLAN-gpe) . . . . . . . . . 6 - 3.1. Multi Protocol Support . . . . . . . . . . . . . . . . . . 6 - 3.2. OAM Support . . . . . . . . . . . . . . . . . . . . . . . 7 - 3.3. Version Bits . . . . . . . . . . . . . . . . . . . . . . . 7 - 4. Backward Compatibility . . . . . . . . . . . . . . . . . . . . 8 - 4.1. VXLAN VTEP to VXLAN-gpe VTEP . . . . . . . . . . . . . . . 8 - 4.2. VXLAN-gpe VTEP to VXLAN VTEP . . . . . . . . . . . . . . . 8 - 4.3. VXLAN-gpe UDP Ports . . . . . . . . . . . . . . . . . . . 8 - 4.4. VXLAN-gpe and Encapsulated IP Header Fields . . . . . . . 8 - 5. VXLAN-gpe Examples . . . . . . . . . . . . . . . . . . . . . . 9 - 6. Security Considerations . . . . . . . . . . . . . . . . . . . 11 - 7. Acknowledgments . . . . . . . . . . . . . . . . . . . . . . . 12 - 8. IANA Considerations . . . . . . . . . . . . . . . . . . . . . 13 - 8.1. UDP Port . . . . . . . . . . . . . . . . . . . . . . . . . 13 - 8.2. VXLAN-gpe Next Protocol . . . . . . . . . . . . . . . . . 13 - 8.3. VXLAN-gpe Reserved Bits . . . . . . . . . . . . . . . . . 13 - 9. References . . . . . . . . . . . . . . . . . . . . 
. . . . . . 14 - 9.1. Normative References . . . . . . . . . . . . . . . . . . . 14 - 9.2. Informative References . . . . . . . . . . . . . . . . . . 14 - Authors' Addresses . . . . . . . . . . . . . . . . . . . . . . . . 15 - - - - - - - - - - - - - - - - - - - - - - - - - - - -Quinn, et al. Expires January 4, 2015 [Page 3] - -Internet-Draft Generic Protocol Extension for VXLAN July 2014 - - -1. Introduction - - Virtual eXtensible Local Area Network [VXLAN] defines an - encapsulation format that encapsulates Ethernet frames in an outer - UDP/IP transport. As data centers evolve, the need to carry other - protocols encapsulated in an IP packet is required, as well as the - need to provide increased visibility and diagnostic capabilities - within the overlay. The VXLAN header does not specify the protocol - being encapsulated and therefore is currently limited to - encapsulating only Ethernet frame payload, nor does it provide the - ability to define OAM protocols. Rather than defining yet another - encapsulation, VXLAN is extended to provide protocol typing and OAM - capabilities. - - This document describes extending VXLAN via the following changes: - - Next Protocol Bit (P bit): A reserved flag bit is allocated, and set - in the VXLAN-gpe header to indicate that a next protocol field is - present. - - OAM Flag Bit (O bit): A reserved flag bit is allocated, and set in - the VXLAN-gpe header, to indicate that the packet is an OAM - packet. - - Version: Two reserved bits are allocated, and set in the VXLAN-gpe - header, to indicate VXLAN-gpe protocol version. - - Next Protocol: A 8 bit next protocol field is present in the VXLAN- - gpe header. - - - - - - - - - - - - - - - - - - - - - - -Quinn, et al. Expires January 4, 2015 [Page 4] - -Internet-Draft Generic Protocol Extension for VXLAN July 2014 - - -2. VXLAN Without Protocol Extension - - As described in the introduction, the VXLAN header has no protocol - identifier that indicates the type of payload being carried by VXLAN. - Because of this, VXLAN is limited to an Ethernet payload. - Furthermore, the VXLAN header has no mechanism to signal OAM packets. - - The VXLAN header defines bits 0-7 as flags (some defined, some - reserved), the VXLAN network identifier (VNI) field and several - reserved bits. The flags provide flexibility to define how the - reserved bits can be used to change the definition of the VXLAN - header. - - - - 0 1 2 3 - 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - |R|R|R|R|I|R|R|R| Reserved | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | VXLAN Network Identifier (VNI) | Reserved | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - - - Figure 1: VXLAN Header - - - - - - - - - - - - - - - - - - - - - - - - - - -Quinn, et al. Expires January 4, 2015 [Page 5] - -Internet-Draft Generic Protocol Extension for VXLAN July 2014 - - -3. Generic Protocol Extension VXLAN (VXLAN-gpe) - -3.1. Multi Protocol Support - - This draft defines the following two changes to the VXLAN header in - order to support multi-protocol encapsulation: - - P Bit: Flag bit 5 is defined as the Next Protocol bit. The P bit - MUST be set to 1 to indicate the presence of the 8 bit next - protocol field. - - P = 0 indicates that the payload MUST conform to VXLAN as defined - in [VXLAN]. - - Flag bit 5 was chosen as the P bit because this flag bit is - currently reserved in VXLAN. 
- - Next Protocol Field: The lower 8 bits of the first word are used to - carry a next protocol. This next protocol field contains the - protocol of the encapsulated payload packet. A new protocol - registry will be requested from IANA. - - This draft defines the following Next Protocol values: - - 0x1 : IPv4 - 0x2 : IPv6 - 0x3 : Ethernet - 0x4 : Network Service Header [NSH] - - - - - 0 1 2 3 - 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - |R|R|R|R|I|P|R|R| Reserved |Next Protocol | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | VXLAN Network Identifier (VNI) | Reserved | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - - - - Figure 2: VXLAN-gpe Next Protocol - - - - - - - - -Quinn, et al. Expires January 4, 2015 [Page 6] - -Internet-Draft Generic Protocol Extension for VXLAN July 2014 - - -3.2. OAM Support - - Flag bit 7 is defined as the O bit. When the O bit is set to 1, the - packet is an OAM packet and OAM processing MUST occur. The OAM - protocol details are out of scope for this document. As with the - P-bit, bit 7 is currently a reserved flag in VXLAN. - - - - 0 1 2 3 - 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - |R|R|R|R|I|P|R|O| Reserved |Next Protocol | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | VXLAN Network Identifier (VNI) | Reserved | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - - - - Figure 3: VXLAN-gpe OAM Bit - -3.3. Version Bits - - VXLAN-gpe bits 8 and 9 are defined as version bits. These bits are - reserved in VXLAN. The version field is used to ensure backward - compatibility going forward with future VXLAN-gpe updates. - - The initial version for VXLAN-gpe is 0. - - - - 0 1 2 3 - 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - |R|R|R|R|I|P|R|O|Ver| Reserved |Next Protocol | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | VXLAN Network Identifier (VNI) | Reserved | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - - - - - - Figure 4: VXLAN-gpe Version Bits - - - - - - - -Quinn, et al. Expires January 4, 2015 [Page 7] - -Internet-Draft Generic Protocol Extension for VXLAN July 2014 - - -4. Backward Compatibility - -4.1. VXLAN VTEP to VXLAN-gpe VTEP - - As per VXLAN, reserved bits 5 and 7, VXLAN-gpe P and O-bits - respectively must be set to zero. The remaining reserved bits must - be zero, including the VXLAN-gpe version field, bits 8 and 9. The - encapsulated payload MUST be Ethernet. - -4.2. VXLAN-gpe VTEP to VXLAN VTEP - - A VXLAN-gpe VTEP MUST NOT encapsulate non-Ethernet frames to a VXLAN - VTEP. When encapsulating Ethernet frames to a VXLAN VTEP, the VXLAN- - gpe VTEP will set the P bit to 0, the Next Protocol to 0 and use UDP - destination port 4789. A VXLAN-gpe VTEP MUST also set O = 0 and Ver - = 0 when encapsulating Ethernet frames to VXLAN VTEP. The receiving - VXLAN VTEP will threat this packet as a VXLAN packet. - - A method for determining the capabilities of a VXLAN VTEP (gpe or - non-gpe) is out of the scope of this draft. - -4.3. VXLAN-gpe UDP Ports - - VXLAN-gpe uses a new UDP destination port (to be assigned by IANA) - when sending traffic to VXLAN-gpe VTEPs. - -4.4. 
VXLAN-gpe and Encapsulated IP Header Fields - - When encapsulating and decapsulating IPv4 and IPv6 packets, certain - fields, such as IPv4 Time to Live (TTL) from the inner IP header need - to be considered. VXLAN-gpe IP encapsulation and decapsulation - utilizes the techniques described in [RFC6830], section 5.3. - - - - - - - - - - - - - - - - - - - -Quinn, et al. Expires January 4, 2015 [Page 8] - -Internet-Draft Generic Protocol Extension for VXLAN July 2014 - - -5. VXLAN-gpe Examples - - This section provides three examples of protocols encapsulated using - the Generic Protocol Extension for VXLAN described in this document. - - - - 0 1 2 3 - 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - |R|R|R|R|I|1|R|0|0|0| Reserved | NP = IPv4 | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | VXLAN Network Identifier (VNI) | Reserved | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Original IPv4 Packet | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - - - - Figure 5: IPv4 and VXLAN-gpe - - - - - 0 1 2 3 - 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - |R|R|R|R|I|1|R|0|0|0| Reserved | NP = IPv6 | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | VXLAN Network Identifier (VNI) | Reserved | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Original IPv6 Packet | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - - - - Figure 6: IPv6 and VXLAN-gpe - - - - - - - - - - - - - - -Quinn, et al. Expires January 4, 2015 [Page 9] - -Internet-Draft Generic Protocol Extension for VXLAN July 2014 - - - 0 1 2 3 - 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - |R|R|R|R|I|1|R|0|0|0| Reserved |NP = Ethernet | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | VXLAN Network Identifier (VNI) | Reserved | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Original Ethernet Frame | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - - - - Figure 7: Ethernet and VXLAN-gpe - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Quinn, et al. Expires January 4, 2015 [Page 10] - -Internet-Draft Generic Protocol Extension for VXLAN July 2014 - - -6. Security Considerations - - VXLAN's security is focused on issues around L2 encapsulation into - L3. With VXLAN-gpe, issues such as spoofing, flooding, and traffic - redirection are dependent on the particular protocol payload - encapsulated. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Quinn, et al. Expires January 4, 2015 [Page 11] - -Internet-Draft Generic Protocol Extension for VXLAN July 2014 - - -7. Acknowledgments - - A special thank you goes to Dino Farinacci for his guidance and - detailed review. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Quinn, et al. Expires January 4, 2015 [Page 12] - -Internet-Draft Generic Protocol Extension for VXLAN July 2014 - - -8. IANA Considerations - -8.1. UDP Port - - A new UDP port will be requested from IANA. - -8.2. VXLAN-gpe Next Protocol - - IANA is requested to set up a registry of "Next Protocol". These are - 8-bit values. 
Next Protocol values 0, 1, 2, 3 and 4 are defined in - this draft. New values are assigned via Standards Action [RFC5226]. - - +---------------+-------------+---------------+ - | Next Protocol | Description | Reference | - +---------------+-------------+---------------+ - | 0 | Reserved | This document | - | | | | - | 1 | IPv4 | This document | - | | | | - | 2 | IPv6 | This document | - | | | | - | 3 | Ethernet | This document | - | | | | - | 4 | NSH | This document | - | | | | - | 5..253 | Unassigned | | - +---------------+-------------+---------------+ - - Table 1 - -8.3. VXLAN-gpe Reserved Bits - - There are ten bits at the beginning of the VXLAN-gpe header. New - bits are assigned via Standards Action [RFC5226]. - - Bits 0-3 - Reserved - Bit 4 - Instance ID (I bit) - Bit 5 - Next Protocol (P bit) - Bit 6 - Reserved - Bit 7 - OAM (O bit) - Bits 8-9 - Version - - - - - - - - - - -Quinn, et al. Expires January 4, 2015 [Page 13] - -Internet-Draft Generic Protocol Extension for VXLAN July 2014 - - -9. References - -9.1. Normative References - - [RFC0768] Postel, J., "User Datagram Protocol", STD 6, RFC 768, - August 1980. - - [RFC0791] Postel, J., "Internet Protocol", STD 5, RFC 791, - September 1981. - - [RFC2119] Bradner, S., "Key words for use in RFCs to Indicate - Requirement Levels", BCP 14, RFC 2119, March 1997. - - [RFC5226] Narten, T. and H. Alvestrand, "Guidelines for Writing an - IANA Considerations Section in RFCs", BCP 26, RFC 5226, - May 2008. - -9.2. Informative References - - [NSH] Quinn, P. and et al. , "Network Service Header", 2014. - - [RFC1700] Reynolds, J. and J. Postel, "Assigned Numbers", RFC 1700, - October 1994. - - [RFC6830] Farinacci, D., Fuller, V., Meyer, D., and D. Lewis, "The - Locator/ID Separation Protocol (LISP)", RFC 6830, - January 2013. - - [VXLAN] Dutt, D., Mahalingam, M., Duda, K., Agarwal, P., Kreeger, - L., Sridhar, T., Bursell, M., and C. Wright, "VXLAN: A - Framework for Overlaying Virtualized Layer 2 Networks over - Layer 3 Networks", 2013. - - - - - - - - - - - - - - - - - - - -Quinn, et al. Expires January 4, 2015 [Page 14] - -Internet-Draft Generic Protocol Extension for VXLAN July 2014 - - -Authors' Addresses - - Paul Quinn - Cisco Systems, Inc. - - Email: paulq@cisco.com - - - Puneet Agarwal - Broadcom - - Email: pagarwal@broadcom.com - - - Rex Fernando - Cisco Systems, Inc. - - Email: rex@cisco.com - - - Larry Kreeger - Cisco Systems, Inc. - - Email: kreeger@cisco.com - - - Darrel Lewis - Cisco Systems, Inc. - - Email: darlewis@cisco.com - - - Fabio Maino - Cisco Systems, Inc. - - Email: kreeger@cisco.com - - - Michael Smith - Cisco Systems, Inc. - - Email: michsmit@cisco.com - - - - - - - - - -Quinn, et al. Expires January 4, 2015 [Page 15] - -Internet-Draft Generic Protocol Extension for VXLAN July 2014 - - - Navindra Yadav - Cisco Systems, Inc. - - Email: nyadav@cisco.com - - - Lucy Yong - Huawei USA - - Email: lucy.yong@huawei.com - - - Xiaohu Xu - Huawei Technologies - - Email: xuxiaohu@huawei.com - - - Uri Elzur - Intel - - Email: uri.elzur@intel.com - - - Pankaj Garg - Microsoft - - Email: Garg.Pankaj@microsoft.com diff --git a/vnet/vnet/nsh-vxlan-gpe/vxlan_gpe_packet.h b/vnet/vnet/nsh-vxlan-gpe/vxlan_gpe_packet.h deleted file mode 100644 index efc85c4bb54..00000000000 --- a/vnet/vnet/nsh-vxlan-gpe/vxlan_gpe_packet.h +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (c) 2015 Cisco and/or its affiliates. 
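To make the header layout described in the draft text above concrete: the first word carries the I, P and O flag bits, a two-bit version field and an 8-bit Next Protocol (1 IPv4, 2 IPv6, 3 Ethernet, 4 NSH), and the second word carries the VNI. The short standalone sketch below mirrors the vxlan_gpe_header_t and flag masks kept in vxlan_gpe_packet.h (the copy removed in the next hunk and re-added under vnet/vnet/vxlan-gpe/); the demo_* names and the classify helper are illustrative only.

#include <stdint.h>
#include <stdio.h>

typedef struct {
  uint8_t  flags;         /* |R|R|R|R|I|P|R|O|                    */
  uint8_t  ver_res;       /* |Ver|       reserved|                */
  uint8_t  res;
  uint8_t  next_protocol; /* 1 ip4, 2 ip6, 3 ethernet, 4 nsh      */
  uint32_t vni_res;       /* VNI << 8, network order              */
} demo_vxlan_gpe_header_t;

#define DEMO_FLAGS_I 0x08   /* VNI is valid          */
#define DEMO_FLAGS_P 0x04   /* next protocol present */
#define DEMO_FLAGS_O 0x01   /* OAM packet            */

/* Returns the next-protocol value, or -1 when the packet must be handled
 * some other way (P bit clear, or an OAM packet). */
static int
demo_classify (const demo_vxlan_gpe_header_t *h)
{
  if (!(h->flags & DEMO_FLAGS_P))
    return -1;                      /* classic VXLAN, Ethernet payload    */
  if (h->flags & DEMO_FLAGS_O)
    return -1;                      /* hand off to OAM processing instead */
  return h->next_protocol;
}

int
main (void)
{
  demo_vxlan_gpe_header_t h = { DEMO_FLAGS_I | DEMO_FLAGS_P, 0, 0, 4, 0 };
  printf ("next protocol %d\n", demo_classify (&h)); /* 4 = NSH */
  return 0;
}

A VTEP that sees P = 0 must treat the frame as classic VXLAN carrying an Ethernet payload, which is the backward-compatibility rule in section 4 of the draft above.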
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef included_vxlan_gpe_packet_h -#define included_vxlan_gpe_packet_h - -/* - * From draft-quinn-vxlan-gpe-03.txt - * - * 0 1 2 3 - * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 - * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * |R|R|R|R|I|P|R|O|Ver| Reserved |Next Protocol | - * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * | VXLAN Network Identifier (VNI) | Reserved | - * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * - * I Bit: Flag bit 4 indicates that the VNI is valid. - * - * P Bit: Flag bit 5 is defined as the Next Protocol bit. The P bit - * MUST be set to 1 to indicate the presence of the 8 bit next - * protocol field. - * - * O Bit: Flag bit 7 is defined as the O bit. When the O bit is set to 1, - * - * the packet is an OAM packet and OAM processing MUST occur. The OAM - * protocol details are out of scope for this document. As with the - * P-bit, bit 7 is currently a reserved flag in VXLAN. - * - * VXLAN-gpe bits 8 and 9 are defined as version bits. These bits are - * reserved in VXLAN. The version field is used to ensure backward - * compatibility going forward with future VXLAN-gpe updates. - * - * The initial version for VXLAN-gpe is 0. - * - * This draft defines the following Next Protocol values: - * - * 0x1 : IPv4 - * 0x2 : IPv6 - * 0x3 : Ethernet - * 0x4 : Network Service Header [NSH] - */ - -typedef struct { - u8 flags; - u8 ver_res; - u8 res; - u8 next_protocol; - u32 vni_res; -} vxlan_gpe_header_t; - -#define VXLAN_GPE_FLAGS_I 0x08 -#define VXLAN_GPE_FLAGS_P 0x04 -#define VXLAN_GPE_FLAGS_O 0x01 - -#define VXLAN_GPE_VERSION 0x0 - -#define VXLAN_NEXT_PROTOCOL_IP4 0x1 -#define VXLAN_NEXT_PROTOCOL_IP6 0x2 -#define VXLAN_NEXT_PROTOCOL_ETHERNET 0x3 -#define VXLAN_NEXT_PROTOCOL_NSH 0x4 - -#endif /* included_vxlan_gpe_packet_h */ diff --git a/vnet/vnet/nsh/nsh.c b/vnet/vnet/nsh/nsh.c new file mode 100644 index 00000000000..49edf711038 --- /dev/null +++ b/vnet/vnet/nsh/nsh.c @@ -0,0 +1,770 @@ +/* + * nsh.c - nsh mapping + * + * Copyright (c) 2013 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
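The new nsh.c code below keys its behaviour on the 32-bit nsp_nsi word, the same value the removed CLI packed as (spi << 8) | si. A tiny standalone sketch of that pack/unpack follows; the DEMO_* shift and mask values assume the usual 24-bit service path / 8-bit service index split implied by that packing and stand in for the NSH_NSP_SHIFT / NSH_NSP_MASK / NSH_NSI_MASK definitions this patch keeps in the nsh headers.

#include <stdint.h>
#include <stdio.h>

#define DEMO_NSP_SHIFT 8            /* assumed: matches (spi << 8) | si   */
#define DEMO_NSP_MASK  0x00ffffff   /* assumed: 24-bit service path id    */
#define DEMO_NSI_MASK  0x000000ff   /* assumed: 8-bit service index       */

static uint32_t
demo_pack_nsp_nsi (uint32_t nsp, uint32_t nsi)
{
  return (nsp << DEMO_NSP_SHIFT) | (nsi & DEMO_NSI_MASK);
}

int
main (void)
{
  uint32_t nsp_nsi = demo_pack_nsp_nsi (300, 254);

  /* Mirrors the extraction done by format_nsh_header() / format_nsh_map()
   * in the hunk below. */
  printf ("nsp %u nsi %u\n",
          (unsigned) ((nsp_nsi >> DEMO_NSP_SHIFT) & DEMO_NSP_MASK),
          (unsigned) (nsp_nsi & DEMO_NSI_MASK));
  return 0;
}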
+ */ + +#include +#include +#include + + +typedef struct { + nsh_header_t nsh_header; +} nsh_input_trace_t; + +u8 * format_nsh_header (u8 * s, va_list * args) +{ + nsh_header_t * nsh = va_arg (*args, nsh_header_t *); + + s = format (s, "nsh ver %d ", (nsh->ver_o_c>>6)); + if (nsh->ver_o_c & NSH_O_BIT) + s = format (s, "O-set "); + + if (nsh->ver_o_c & NSH_C_BIT) + s = format (s, "C-set "); + + s = format (s, "len %d (%d bytes) md_type %d next_protocol %d\n", + nsh->length, nsh->length * 4, nsh->md_type, nsh->next_protocol); + + s = format (s, " service path %d service index %d\n", + (nsh->nsp_nsi>>NSH_NSP_SHIFT) & NSH_NSP_MASK, + nsh->nsp_nsi & NSH_NSI_MASK); + + s = format (s, " c1 %d c2 %d c3 %d c4 %d\n", + nsh->c1, nsh->c2, nsh->c3, nsh->c4); + + return s; +} + +u8 * format_nsh_map (u8 * s, va_list * args) +{ + nsh_map_t * map = va_arg (*args, nsh_map_t *); + + s = format (s, "nsh entry nsp: %d nsi: %d ", + (map->nsp_nsi>>NSH_NSP_SHIFT) & NSH_NSP_MASK, + map->nsp_nsi & NSH_NSI_MASK); + s = format (s, "maps to nsp: %d nsi: %d ", + (map->mapped_nsp_nsi>>NSH_NSP_SHIFT) & NSH_NSP_MASK, + map->mapped_nsp_nsi & NSH_NSI_MASK); + + switch (map->next_node) + { + case NSH_INPUT_NEXT_ENCAP_GRE: + { + s = format (s, "encapped by GRE intf: %d", map->sw_if_index); + break; + } + case NSH_INPUT_NEXT_ENCAP_VXLANGPE: + { + s = format (s, "encapped by VXLAN GPE intf: %d", map->sw_if_index); + break; + } + default: + s = format (s, "only GRE and VXLANGPE support in this rev"); + } + + return s; +} + + +#define foreach_copy_nshhdr_field \ +_(ver_o_c) \ +_(length) \ +_(md_type) \ +_(next_protocol) \ +_(nsp_nsi) \ +_(c1) \ +_(c2) \ +_(c3) \ +_(c4) +/* Temp killing tlvs as its causing pain - fix in NSH_SFC */ + + +#define foreach_32bit_field \ +_(nsp_nsi) \ +_(c1) \ +_(c2) \ +_(c3) \ +_(c4) + + +u8 * format_nsh_header_with_length (u8 * s, va_list * args) +{ + nsh_header_t * h = va_arg (*args, nsh_header_t *); + u32 max_header_bytes = va_arg (*args, u32); + u32 tmp, header_bytes; + + header_bytes = sizeof (h[0]); + if (max_header_bytes != 0 && header_bytes > max_header_bytes) + return format (s, "nsh header truncated"); + + tmp = clib_net_to_host_u32 (h->nsp_nsi); + s = format (s, " nsp %d nsi %d ", + (tmp>>NSH_NSP_SHIFT) & NSH_NSP_MASK, + tmp & NSH_NSI_MASK); + + s = format (s, "c1 %u c2 %u c3 %u c4 %u", + clib_net_to_host_u32 (h->c1), + clib_net_to_host_u32 (h->c2), + clib_net_to_host_u32 (h->c3), + clib_net_to_host_u32 (h->c4)); + + s = format (s, "ver %d ", h->ver_o_c>>6); + + if (h->ver_o_c & NSH_O_BIT) + s = format (s, "O-set "); + + if (h->ver_o_c & NSH_C_BIT) + s = format (s, "C-set "); + + s = format (s, "len %d (%d bytes) md_type %d next_protocol %d\n", + h->length, h->length * 4, h->md_type, h->next_protocol); + return s; +} + +u8 * format_nsh_input_map_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + nsh_input_trace_t * t + = va_arg (*args, nsh_input_trace_t *); + + s = format (s, "\n %U", format_nsh_header, &t->nsh_header, + (u32) sizeof (t->nsh_header) ); + + return s; +} + +static uword +nsh_input_map (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + u32 n_left_from, next_index, * from, * to_next; + nsh_main_t * nm = &nsh_main; + + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + + next_index = node->cached_next_index; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, 
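/* The service path identifier (NSP, 24 bits) and service index (NSI, 8 bits)
 * travel packed in the single 32-bit nsp_nsi word that the formatters above
 * unpick. As an illustrative sketch (the helper names are not part of this
 * patch), the pack/unpack arithmetic using the NSH_NSP_SHIFT / NSH_NSP_MASK /
 * NSH_NSI_MASK macros from nsh_packet.h, in host byte order, is:
 */
static inline u32 nsh_pack_nsp_nsi (u32 nsp, u8 nsi)
{
  /* 24-bit service path in the upper bits, 8-bit service index in the low byte */
  return ((nsp & NSH_NSP_MASK) << NSH_NSP_SHIFT) | nsi;
}

static inline void nsh_unpack_nsp_nsi (u32 nsp_nsi, u32 * nsp, u8 * nsi)
{
  *nsp = (nsp_nsi >> NSH_NSP_SHIFT) & NSH_NSP_MASK;
  *nsi = nsp_nsi & NSH_NSI_MASK;
}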
next_index, + to_next, n_left_to_next); + + while (n_left_from >= 4 && n_left_to_next >= 2) + { + u32 bi0, bi1; + vlib_buffer_t * b0, * b1; + u32 next0 = NSH_INPUT_NEXT_DROP, next1 = NSH_INPUT_NEXT_DROP; + uword * entry0, * entry1; + nsh_header_t * hdr0 = 0, * hdr1 = 0; + u32 nsp_nsi0, nsp_nsi1; + u32 error0, error1; + nsh_map_t * map0 = 0, * map1 = 0; + + /* Prefetch next iteration. */ + { + vlib_buffer_t * p2, * p3; + + p2 = vlib_get_buffer (vm, from[2]); + p3 = vlib_get_buffer (vm, from[3]); + + vlib_prefetch_buffer_header (p2, LOAD); + vlib_prefetch_buffer_header (p3, LOAD); + + CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD); + CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD); + } + + bi0 = from[0]; + bi1 = from[1]; + to_next[0] = bi0; + to_next[1] = bi1; + from += 2; + to_next += 2; + n_left_from -= 2; + n_left_to_next -= 2; + + error0 = 0; + error1 = 0; + + b0 = vlib_get_buffer (vm, bi0); + hdr0 = vlib_buffer_get_current (b0); + nsp_nsi0 = clib_net_to_host_u32(hdr0->nsp_nsi); + entry0 = hash_get_mem (nm->nsh_mapping_by_key, &nsp_nsi0); + + b1 = vlib_get_buffer (vm, bi1); + hdr1 = vlib_buffer_get_current (b1); + nsp_nsi1 = clib_net_to_host_u32(hdr1->nsp_nsi); + entry1 = hash_get_mem (nm->nsh_mapping_by_key, &nsp_nsi1); + + if (PREDICT_FALSE(entry0 == 0)) + { + error0 = NSH_INPUT_ERROR_NO_MAPPING; + goto trace0; + } + + if (PREDICT_FALSE(entry1 == 0)) + { + error1 = NSH_INPUT_ERROR_NO_MAPPING; + goto trace1; + } + + /* Entry should point to a mapping ...*/ + map0 = pool_elt_at_index (nm->nsh_mappings, entry0[0]); + map1 = pool_elt_at_index (nm->nsh_mappings, entry1[0]); + + if (PREDICT_FALSE(map0 == 0)) + { + error0 = NSH_INPUT_ERROR_NO_MAPPING; + goto trace0; + } + + if (PREDICT_FALSE(map1 == 0)) + { + error1 = NSH_INPUT_ERROR_NO_MAPPING; + goto trace1; + } + + entry0 = hash_get_mem (nm->nsh_entry_by_key, &map0->mapped_nsp_nsi); + entry1 = hash_get_mem (nm->nsh_entry_by_key, &map1->mapped_nsp_nsi); + + if (PREDICT_FALSE(entry0 == 0)) + { + error0 = NSH_INPUT_ERROR_NO_MAPPING; + goto trace0; + } + if (PREDICT_FALSE(entry1 == 0)) + { + error1 = NSH_INPUT_ERROR_NO_MAPPING; + goto trace1; + } + + hdr0 = pool_elt_at_index (nm->nsh_entries, entry0[0]); + hdr1 = pool_elt_at_index (nm->nsh_entries, entry1[0]); + + /* set up things for next node to transmit ie which node to handle it and where */ + next0 = map0->next_node; + next1 = map1->next_node; + vnet_buffer(b0)->sw_if_index[VLIB_TX] = map0->sw_if_index; + vnet_buffer(b1)->sw_if_index[VLIB_TX] = map1->sw_if_index; + + trace0: + b0->error = error0 ? node->errors[error0] : 0; + + if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) + { + nsh_input_trace_t *tr = + vlib_add_trace (vm, node, b0, sizeof (*tr)); + tr->nsh_header = *hdr0; + } + + trace1: + b1->error = error1 ? 
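/* The input node resolves a received packet in two hash lookups: the incoming
 * NSP/NSI selects an nsh_map_t from nsh_mapping_by_key, and the map's
 * mapped_nsp_nsi then selects the nsh_header_t to impose from nsh_entry_by_key.
 * A minimal sketch of that resolution path (the helper name is illustrative,
 * not part of this patch):
 */
static inline nsh_header_t *
nsh_resolve_mapping (nsh_main_t * nm, u32 nsp_nsi, nsh_map_t ** map_out)
{
  uword *p;
  nsh_map_t *map;

  p = hash_get_mem (nm->nsh_mapping_by_key, &nsp_nsi);
  if (p == 0)
    return 0;                   /* NSH_INPUT_ERROR_NO_MAPPING */

  map = pool_elt_at_index (nm->nsh_mappings, p[0]);

  p = hash_get_mem (nm->nsh_entry_by_key, &map->mapped_nsp_nsi);
  if (p == 0)
    return 0;                   /* mapped entry not programmed */

  *map_out = map;
  return pool_elt_at_index (nm->nsh_entries, p[0]);
}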
node->errors[error1] : 0; + + if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED)) + { + nsh_input_trace_t *tr = + vlib_add_trace (vm, node, b1, sizeof (*tr)); + tr->nsh_header = *hdr1; + } + + + vlib_validate_buffer_enqueue_x2 (vm, node, next_index, + to_next, n_left_to_next, + bi0, bi1, next0, next1); + + } + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0; + vlib_buffer_t * b0; + u32 next0 = NSH_INPUT_NEXT_DROP; + uword * entry0; + nsh_header_t * hdr0 = 0; + u32 nsp_nsi0; + u32 error0; + nsh_map_t * map0 = 0; + + next_index = next0; + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + error0 = 0; + + b0 = vlib_get_buffer (vm, bi0); + hdr0 = vlib_buffer_get_current (b0); + nsp_nsi0 = clib_net_to_host_u32(hdr0->nsp_nsi); + entry0 = hash_get_mem (nm->nsh_mapping_by_key, &nsp_nsi0); + + if (PREDICT_FALSE(entry0 == 0)) + { + error0 = NSH_INPUT_ERROR_NO_MAPPING; + goto trace00; + } + + /* Entry should point to a mapping ...*/ + map0 = pool_elt_at_index (nm->nsh_mappings, entry0[0]); + + if (PREDICT_FALSE(map0 == 0)) + { + error0 = NSH_INPUT_ERROR_NO_MAPPING; + goto trace00; + } + + entry0 = hash_get_mem (nm->nsh_entry_by_key, &map0->mapped_nsp_nsi); + + if (PREDICT_FALSE(entry0 == 0)) + { + error0 = NSH_INPUT_ERROR_NO_MAPPING; + goto trace00; + } + + hdr0 = pool_elt_at_index (nm->nsh_entries, entry0[0]); + + /* set up things for next node to transmit ie which node to handle it and where */ + next0 = map0->next_node; + vnet_buffer(b0)->sw_if_index[VLIB_TX] = map0->sw_if_index; + + trace00: + b0->error = error0 ? node->errors[error0] : 0; + + if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) + { + nsh_input_trace_t *tr = + vlib_add_trace (vm, node, b0, sizeof (*tr)); + tr->nsh_header = *hdr0; + } + + + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, + bi0, next0); + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + + } + + + return from_frame->n_vectors; +} + + +int vnet_nsh_add_del_map (vnet_nsh_add_del_map_args_t *a) +{ + nsh_main_t * nm = &nsh_main; + nsh_map_t *map = 0; + u32 key, *key_copy; + uword * entry; + hash_pair_t *hp; + + key = a->map.nsp_nsi; + + entry = hash_get_mem (nm->nsh_mapping_by_key, &key); + + if (a->is_add) + { + /* adding an entry, must not already exist */ + if (entry) + return VNET_API_ERROR_INVALID_VALUE; + + pool_get_aligned (nm->nsh_mappings, map, CLIB_CACHE_LINE_BYTES); + memset (map, 0, sizeof (*map)); + + /* copy from arg structure */ + map->nsp_nsi = a->map.nsp_nsi; + map->mapped_nsp_nsi = a->map.mapped_nsp_nsi; + map->sw_if_index = a->map.sw_if_index; + map->next_node = a->map.next_node; + + + key_copy = clib_mem_alloc (sizeof (*key_copy)); + clib_memcpy (key_copy, &key, sizeof (*key_copy)); + + hash_set_mem (nm->nsh_mapping_by_key, key_copy, + map - nm->nsh_mappings); + } + else + { + if (!entry) + return VNET_API_ERROR_NO_SUCH_ENTRY; + + map = pool_elt_at_index (nm->nsh_mappings, entry[0]); + hp = hash_get_pair (nm->nsh_mapping_by_key, &key); + key_copy = (void *)(hp->key); + hash_unset_mem (nm->nsh_mapping_by_key, &key); + clib_mem_free (key_copy); + + pool_put (nm->nsh_mappings, map); + } + + return 0; +} + +static clib_error_t * +nsh_add_del_map_command_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + unformat_input_t _line_input, * line_input = &_line_input; + u8 is_add = 1; + u32 nsp, nsi, mapped_nsp, mapped_nsi; + int nsp_set = 0, nsi_set = 0, mapped_nsp_set = 0, mapped_nsi_set = 0; + u32 
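/* vnet_nsh_add_del_map() is the programmatic entry point that the CLI below
 * wraps: it pool-allocates an nsh_map_t, copies the arguments and indexes the
 * map by its packed nsp_nsi key. A hedged usage sketch (the function name, the
 * sw_if_index value 5 and the NSP/NSI numbers are placeholders, not part of
 * this patch):
 */
static int
nsh_example_add_map (void)
{
  vnet_nsh_add_del_map_args_t a;

  memset (&a, 0, sizeof (a));
  a.is_add = 1;
  a.map.nsp_nsi = (23 << NSH_NSP_SHIFT) | 45;         /* lookup key: NSP 23, NSI 45 */
  a.map.mapped_nsp_nsi = (23 << NSH_NSP_SHIFT) | 44;  /* rewrite to NSP 23, NSI 44 */
  a.map.sw_if_index = 5;                              /* placeholder encap tunnel sw_if_index */
  a.map.next_node = NSH_INPUT_NEXT_ENCAP_VXLANGPE;

  /* returns 0, VNET_API_ERROR_INVALID_VALUE on a duplicate key,
     or VNET_API_ERROR_NO_SUCH_ENTRY when deleting a missing key */
  return vnet_nsh_add_del_map (&a);
}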
next_node = ~0; + u32 sw_if_index = ~0; // temporary requirement to get this moved over to NSHSFC + vnet_nsh_add_del_map_args_t _a, * a = &_a; + int rv; + + /* Get a line of input. */ + if (! unformat_user (input, unformat_line_input, line_input)) + return 0; + + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) { + if (unformat (line_input, "del")) + is_add = 0; + else if (unformat (line_input, "nsp %d", &nsp)) + nsp_set = 1; + else if (unformat (line_input, "nsi %d", &nsi)) + nsi_set = 1; + else if (unformat (line_input, "mapped-nsp %d", &mapped_nsp)) + mapped_nsp_set = 1; + else if (unformat (line_input, "mapped-nsi %d", &mapped_nsi)) + mapped_nsi_set = 1; + else if (unformat (line_input, "encap-gre-intf %d", &sw_if_index)) + next_node = NSH_INPUT_NEXT_ENCAP_GRE; + else if (unformat (line_input, "encap-vxlan-gpe-intf %d", &sw_if_index)) + next_node = NSH_INPUT_NEXT_ENCAP_VXLANGPE; + else if (unformat (line_input, "encap-none")) + next_node = NSH_INPUT_NEXT_DROP; // Once moved to NSHSFC see nsh.h:foreach_nsh_input_next to handle this case + else + return clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + } + + unformat_free (line_input); + + if (nsp_set == 0 || nsi_set == 0) + return clib_error_return (0, "nsp nsi pair required. Key: for NSH entry"); + + if (mapped_nsp_set == 0 || mapped_nsi_set == 0) + return clib_error_return (0, "mapped-nsp mapped-nsi pair required. Key: for NSH entry"); + + if (next_node == ~0) + return clib_error_return (0, "must specific action: [encap-gre-intf | encap-vxlan-gpe-intf | encap-none]"); + + memset (a, 0, sizeof (*a)); + + /* set args structure */ + a->is_add = is_add; + a->map.nsp_nsi = (nsp<< NSH_NSP_SHIFT) | nsi; + a->map.mapped_nsp_nsi = (mapped_nsp<< NSH_NSP_SHIFT) | mapped_nsi; + a->map.sw_if_index = sw_if_index; + a->map.next_node = next_node; + + + rv = vnet_nsh_add_del_map (a); + + switch(rv) + { + case 0: + break; + case VNET_API_ERROR_INVALID_VALUE: + return clib_error_return (0, "mapping already exists. 
Remove it first."); + + case VNET_API_ERROR_NO_SUCH_ENTRY: + return clib_error_return (0, "mapping does not exist."); + + default: + return clib_error_return + (0, "vnet_nsh_add_del_map returned %d", rv); + } + return 0; +} + + +VLIB_CLI_COMMAND (create_nsh_map_command, static) = { + .path = "create nsh map", + .short_help = + "create nsh map nsp nsi [del] map-nsp map-nsi [encap-gre-intf | encap-vxlan-gpe-intf | encap-none]\n", + .function = nsh_add_del_map_command_fn, +}; + +static clib_error_t * +show_nsh_map_command_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + nsh_main_t * nm = &nsh_main; + nsh_map_t * map; + + if (pool_elts (nm->nsh_mappings) == 0) + vlib_cli_output (vm, "No nsh maps configured."); + + pool_foreach (map, nm->nsh_mappings, + ({ + vlib_cli_output (vm, "%U", format_nsh_map, map); + })); + + return 0; +} + +VLIB_CLI_COMMAND (show_nsh_map_command, static) = { + .path = "show nsh map", + .function = show_nsh_map_command_fn, +}; + + +int vnet_nsh_add_del_entry (vnet_nsh_add_del_entry_args_t *a) +{ + nsh_main_t * nm = &nsh_main; + nsh_header_t *hdr = 0; + u32 key, *key_copy; + uword * entry; + hash_pair_t *hp; + + key = a->nsh.nsp_nsi; + + entry = hash_get_mem (nm->nsh_entry_by_key, &key); + + if (a->is_add) + { + /* adding an entry, must not already exist */ + if (entry) + return VNET_API_ERROR_INVALID_VALUE; + + pool_get_aligned (nm->nsh_entries, hdr, CLIB_CACHE_LINE_BYTES); + memset (hdr, 0, sizeof (*hdr)); + + /* copy from arg structure */ +#define _(x) hdr->x = a->nsh.x; + foreach_copy_nshhdr_field; +#undef _ + + key_copy = clib_mem_alloc (sizeof (*key_copy)); + clib_memcpy (key_copy, &key, sizeof (*key_copy)); + + hash_set_mem (nm->nsh_entry_by_key, key_copy, + hdr - nm->nsh_entries); + } + else + { + if (!entry) + return VNET_API_ERROR_NO_SUCH_ENTRY; + + hdr = pool_elt_at_index (nm->nsh_entries, entry[0]); + hp = hash_get_pair (nm->nsh_entry_by_key, &key); + key_copy = (void *)(hp->key); + hash_unset_mem (nm->nsh_entry_by_key, &key); + clib_mem_free (key_copy); + + pool_put (nm->nsh_entries, hdr); + } + + return 0; +} + + +static clib_error_t * +nsh_add_del_entry_command_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + unformat_input_t _line_input, * line_input = &_line_input; + u8 is_add = 1; + u8 ver_o_c = 0; + u8 length = 0; + u8 md_type = 0; + u8 next_protocol = 1; /* default: ip4 */ + u32 nsp; + u8 nsp_set = 0; + u32 nsi; + u8 nsi_set = 0; + u32 nsp_nsi; + u32 c1 = 0; + u32 c2 = 0; + u32 c3 = 0; + u32 c4 = 0; + u32 *tlvs = 0; + u32 tmp; + int rv; + vnet_nsh_add_del_entry_args_t _a, * a = &_a; + + /* Get a line of input. */ + if (! 
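/* Taken together with the "create nsh entry" CLI registered further below, a
 * plausible debug-CLI sequence for wiring one NSH service path segment would
 * look like the following (all NSP/NSI values and the interface index are
 * illustrative only):
 *
 *   create nsh entry nsp 23 nsi 45 md-type 1 c1 1 c2 2 c3 3 c4 4 next-ip4
 *   create nsh map nsp 23 nsi 45 mapped-nsp 23 mapped-nsi 44 encap-vxlan-gpe-intf 5
 *   show nsh entry
 *   show nsh map
 */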
unformat_user (input, unformat_line_input, line_input)) + return 0; + + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) { + if (unformat (line_input, "del")) + is_add = 0; + else if (unformat (line_input, "version %d", &tmp)) + ver_o_c |= (tmp & 3) << 6; + else if (unformat (line_input, "o-bit %d", &tmp)) + ver_o_c |= (tmp & 1) << 5; + else if (unformat (line_input, "c-bit %d", &tmp)) + ver_o_c |= (tmp & 1) << 4; + else if (unformat (line_input, "md-type %d", &tmp)) + md_type = tmp; + else if (unformat(line_input, "next-ip4")) + next_protocol = 1; + else if (unformat(line_input, "next-ip6")) + next_protocol = 2; + else if (unformat(line_input, "next-ethernet")) + next_protocol = 3; + else if (unformat (line_input, "c1 %d", &c1)) + ; + else if (unformat (line_input, "c2 %d", &c2)) + ; + else if (unformat (line_input, "c3 %d", &c3)) + ; + else if (unformat (line_input, "c4 %d", &c4)) + ; + else if (unformat (line_input, "nsp %d", &nsp)) + nsp_set = 1; + else if (unformat (line_input, "nsi %d", &nsi)) + nsi_set = 1; + else if (unformat (line_input, "tlv %x")) + vec_add1 (tlvs, tmp); + else + return clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + } + + unformat_free (line_input); + + if (nsp_set == 0) + return clib_error_return (0, "nsp not specified"); + + if (nsi_set == 0) + return clib_error_return (0, "nsi not specified"); + + if (md_type != 1) + return clib_error_return (0, "md-type 1 only supported at this time"); + + md_type = 1; + length = 6; + + nsp_nsi = (nsp<<8) | nsi; + + memset (a, 0, sizeof (*a)); + + a->is_add = is_add; + +#define _(x) a->nsh.x = x; + foreach_copy_nshhdr_field; +#undef _ + + a->nsh.tlvs[0] = 0 ; // TODO FIXME this shouldn't be set 0 - in NSH_SFC project + + rv = vnet_nsh_add_del_entry (a); + + switch(rv) + { + case 0: + break; + default: + return clib_error_return + (0, "vnet_nsh_add_del_entry returned %d", rv); + } + + return 0; +} + +VLIB_CLI_COMMAND (create_nsh_entry_command, static) = { + .path = "create nsh entry", + .short_help = + "create nsh entry {nsp nsi } c1 c2 c3 c4 " + " [md-type ] [tlv ] [del]\n", + .function = nsh_add_del_entry_command_fn, +}; + +static clib_error_t * +show_nsh_entry_command_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + nsh_main_t * nm = &nsh_main; + nsh_header_t * hdr; + + if (pool_elts (nm->nsh_entries) == 0) + vlib_cli_output (vm, "No nsh entries configured."); + + pool_foreach (hdr, nm->nsh_entries, + ({ + vlib_cli_output (vm, "%U", format_nsh_header, hdr); + })); + + return 0; +} + +VLIB_CLI_COMMAND (show_nsh_entry_command, static) = { + .path = "show nsh entry", + .function = show_nsh_entry_command_fn, +}; + +static char * nsh_input_error_strings[] = { +#define _(sym,string) string, + foreach_nsh_input_error +#undef _ +}; + +VLIB_REGISTER_NODE (nsh_input_node) = { + .function = nsh_input_map, + .name = "nsh-input", + .vector_size = sizeof (u32), + .format_trace = format_nsh_input_map_trace, + .format_buffer = format_nsh_header_with_length, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = ARRAY_LEN(nsh_input_error_strings), + .error_strings = nsh_input_error_strings, + + .n_next_nodes = NSH_INPUT_N_NEXT, + + .next_nodes = { +#define _(s,n) [NSH_INPUT_NEXT_##s] = n, + foreach_nsh_input_next +#undef _ + }, +}; + +clib_error_t *nsh_init (vlib_main_t *vm) +{ + nsh_main_t *nm = &nsh_main; + + nm->vnet_main = vnet_get_main(); + nm->vlib_main = vm; + + nm->nsh_mapping_by_key + = hash_create_mem (0, sizeof(u32), sizeof (uword)); + + 
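/* For reference, the next_nodes initializer in the nsh-input node registration
 * above is generated from foreach_nsh_input_next in nsh.h (below); expanded by
 * hand it reads:
 *
 *   .next_nodes = {
 *     [NSH_INPUT_NEXT_DROP]           = "error-drop",
 *     [NSH_INPUT_NEXT_ENCAP_GRE]      = "gre-input",
 *     [NSH_INPUT_NEXT_ENCAP_VXLANGPE] = "vxlan-gpe-encap",
 *   },
 *
 * so a map's next_node field is simply one of these indices.
 */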
nm->nsh_mapping_by_mapped_key + = hash_create_mem (0, sizeof(u32), sizeof (uword)); + + nm->nsh_entry_by_key + = hash_create_mem (0, sizeof(u32), sizeof (uword)); + + return 0; +} + +VLIB_INIT_FUNCTION(nsh_init); diff --git a/vnet/vnet/nsh/nsh.h b/vnet/vnet/nsh/nsh.h new file mode 100644 index 00000000000..d1c46121d25 --- /dev/null +++ b/vnet/vnet/nsh/nsh.h @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2015 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef included_vnet_nsh_h +#define included_vnet_nsh_h + +#include +#include +#include + +typedef struct { + + /** Key for nsh_header_t entry: 24bit NSP 8bit NSI */ + u32 nsp_nsi; + + /** Key for nsh_header_t entry to map to. : 24bit NSP 8bit NSI + * This may be ~0 if next action is to decap to NSH next protocol + * Note the following heuristic: + * if nsp_nsi == mapped_nsp_nsi then use-case is like SFC SFF + * if nsp_nsi != mapped_nsp_nsi then use-case is like SFC SF + * Note: these are heuristics. Rules about NSI decrement are out of scope + */ + u32 mapped_nsp_nsi; + + /* vnet intfc sw_if_index */ + u32 sw_if_index; + + u32 next_node; + +} nsh_map_t; + +typedef struct { + nsh_map_t map; + u32 is_add; +} vnet_nsh_add_del_map_args_t; + +typedef struct { + u8 is_add; + nsh_header_t nsh; +} vnet_nsh_add_del_entry_args_t; + +typedef struct { + /* vector of nsh_header entry instances */ + nsh_header_t *nsh_entries; + + /* hash lookup nsh header by key: {u32: nsp_nsi} */ + uword * nsh_entry_by_key; + + /* vector of nsh_mappings */ + nsh_map_t *nsh_mappings; + + /* hash lookup nsh mapping by key: {u32: nsp_nsi} */ + uword * nsh_mapping_by_key; + uword * nsh_mapping_by_mapped_key; // for use in NSHSFC + + /* convenience */ + vlib_main_t * vlib_main; + vnet_main_t * vnet_main; +} nsh_main_t; + +nsh_main_t nsh_main; + +u8 * format_nsh_input_map_trace (u8 * s, va_list * args); +u8 * format_nsh_header_with_length (u8 * s, va_list * args); + +/* Statistics (not really errors) */ +#define foreach_nsh_input_error \ +_(MAPPED, "NSH header found and mapped") \ +_(NO_MAPPING, "no mapping for nsh key") \ +_(INVALID_NEXT_PROTOCOL, "invalid next protocol") \ + +typedef enum { +#define _(sym,str) NSH_INPUT_ERROR_##sym, + foreach_nsh_input_error +#undef _ + NSH_INPUT_N_ERROR, + +} nsh_input_error_t; + +#define foreach_nsh_input_next \ + _(DROP, "error-drop") \ + _(ENCAP_GRE, "gre-input" ) \ + _(ENCAP_VXLANGPE, "vxlan-gpe-encap" ) \ +/* /\* TODO once moved to Project:NSH_SFC *\/ */ + /* _(ENCAP_ETHERNET, "*** TX TO ETHERNET ***") \ */ +/* _(DECAP_ETHERNET_LOOKUP, "ethernet-input" ) \ */ +/* _(DECAP_IP4_INPUT, "ip4-input") \ */ +/* _(DECAP_IP6_INPUT, "ip6-input" ) \ */ + +typedef enum { +#define _(s,n) NSH_INPUT_NEXT_##s, + foreach_nsh_input_next +#undef _ + NSH_INPUT_N_NEXT, +} nsh_input_next_t; + +#endif /* included_vnet_nsh_h */ diff --git a/vnet/vnet/nsh/nsh_error.def b/vnet/vnet/nsh/nsh_error.def index 532b02a6e89..c54e3b895c8 100644 --- a/vnet/vnet/nsh/nsh_error.def +++ b/vnet/vnet/nsh/nsh_error.def @@ -12,6 +12,6 @@ * 
See the License for the specific language governing permissions and * limitations under the License. */ -nsh_gre_error (DECAPSULATED, "good packets decapsulated") -nsh_gre_error (NO_SUCH_TUNNEL, "no such tunnel packets") -nsh_gre_error (INVALID_NEXT_PROTOCOL, "invalid next protocol") +nsh_input_error (DECAPSULATED, "good packets decapsulated") +nsh_input_error (NO_MAPPING, "no mapping for nsh key") +nsh_input_error (INVALID_NEXT_PROTOCOL, "invalid next protocol") \ No newline at end of file diff --git a/vnet/vnet/nsh/nsh_gre_error.def b/vnet/vnet/nsh/nsh_gre_error.def new file mode 100644 index 00000000000..45d8ef424b5 --- /dev/null +++ b/vnet/vnet/nsh/nsh_gre_error.def @@ -0,0 +1,17 @@ +/* + * Copyright (c) 2015 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +nsh_gre_error (DECAPSULATED, "good packets decapsulated") +nsh_gre_error (NO_SUCH_TUNNEL, "no such tunnel packets") +nsh_gre_error (INVALID_NEXT_PROTOCOL, "invalid next protocol") \ No newline at end of file diff --git a/vnet/vnet/nsh/nsh_packet.h b/vnet/vnet/nsh/nsh_packet.h index 87d46a93b6d..cbe4f1e4193 100644 --- a/vnet/vnet/nsh/nsh_packet.h +++ b/vnet/vnet/nsh/nsh_packet.h @@ -73,20 +73,21 @@ typedef CLIB_PACKED(struct { u8 length; u8 md_type; u8 next_protocol; - u32 spi_si; + u32 nsp_nsi; // nsp 24 bits, nsi 8 bits /* Context headers, always present */ u32 c1; u32 c2; u32 c3; u32 c4; /* Optional variable length metadata */ - u32 * tlvs; + u32 tlvs[0]; }) nsh_header_t; +#define NSH_VERSION (0<<6) #define NSH_O_BIT (1<<5) #define NSH_C_BIT (1<<4) /* Network byte order shift / mask */ -#define NSH_SINDEX_MASK 0xFF -#define NSH_SPI_MASK (0x00FFFFFF) -#define NSH_SPI_SHIFT 8 +#define NSH_NSI_MASK 0xFF +#define NSH_NSP_MASK (0x00FFFFFF) +#define NSH_NSP_SHIFT 8 #endif /* included_vnet_nsh_packet_h */ diff --git a/vnet/vnet/vxlan-gpe/decap.c b/vnet/vnet/vxlan-gpe/decap.c new file mode 100644 index 00000000000..aed5857d0b1 --- /dev/null +++ b/vnet/vnet/vxlan-gpe/decap.c @@ -0,0 +1,450 @@ +/* + * decap.c - decapsulate VXLAN GPE + * + * Copyright (c) 2013 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
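/* With the flexible tlvs[0] member, sizeof(nsh_header_t) now covers only the
 * fixed base plus the four context words (24 bytes); the true on-wire size is
 * carried in the 'length' field in 4-byte words (md-type 1 sets length = 6,
 * i.e. 24 bytes). A hedged helper for skipping a received header (the helper
 * name is illustrative, not part of this patch):
 */
static inline u32 nsh_header_bytes (nsh_header_t * h)
{
  /* 'length' counts 32-bit words, base header and metadata included */
  return (u32) h->length * 4;
}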
+ */ + +#include +#include +#include + +typedef struct { + u32 next_index; + u32 tunnel_index; + u32 error; +} vxlan_gpe_rx_trace_t; + +static u8 * format_vxlan_gpe_rx_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + vxlan_gpe_rx_trace_t * t = va_arg (*args, vxlan_gpe_rx_trace_t *); + + if (t->tunnel_index != ~0) + { + s = format (s, "VXLAN-GPE: tunnel %d next %d error %d", t->tunnel_index, + t->next_index, t->error); + } + else + { + s = format (s, "VXLAN-GPE: no tunnel next %d error %d\n", t->next_index, + t->error); + } + return s; +} + + +static u8 * format_vxlan_gpe_with_length (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + + + return s; +} + +static uword +vxlan_gpe_input (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + u32 n_left_from, next_index, * from, * to_next; + vxlan_gpe_main_t * ngm = &vxlan_gpe_main; + vnet_main_t * vnm = ngm->vnet_main; + vnet_interface_main_t * im = &vnm->interface_main; + u32 last_tunnel_index = ~0; + vxlan_gpe_tunnel_key_t last_key; + u32 pkts_decapsulated = 0; + u32 cpu_index = os_get_cpu_number(); + u32 stats_sw_if_index, stats_n_packets, stats_n_bytes; + + memset (&last_key, 0xff, sizeof (last_key)); + + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + + next_index = node->cached_next_index; + stats_sw_if_index = node->runtime_data[0]; + stats_n_packets = stats_n_bytes = 0; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, + to_next, n_left_to_next); + + while (n_left_from >= 4 && n_left_to_next >= 2) + { + u32 bi0, bi1; + vlib_buffer_t * b0, * b1; + u32 next0, next1; + ip4_vxlan_gpe_header_t * iuvn0, * iuvn1; + uword * p0, * p1; + u32 tunnel_index0, tunnel_index1; + vxlan_gpe_tunnel_t * t0, * t1; + vxlan_gpe_tunnel_key_t key0, key1; + u32 error0, error1; + u32 sw_if_index0, sw_if_index1, len0, len1; + + /* Prefetch next iteration. */ + { + vlib_buffer_t * p2, * p3; + + p2 = vlib_get_buffer (vm, from[2]); + p3 = vlib_get_buffer (vm, from[3]); + + vlib_prefetch_buffer_header (p2, LOAD); + vlib_prefetch_buffer_header (p3, LOAD); + + CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD); + CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD); + } + + bi0 = from[0]; + bi1 = from[1]; + to_next[0] = bi0; + to_next[1] = bi1; + from += 2; + to_next += 2; + n_left_to_next -= 2; + n_left_from -= 2; + + b0 = vlib_get_buffer (vm, bi0); + b1 = vlib_get_buffer (vm, bi1); + + /* udp leaves current_data pointing at the vxlan header */ + vlib_buffer_advance + (b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t))); + vlib_buffer_advance + (b1, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t))); + + iuvn0 = vlib_buffer_get_current (b0); + iuvn1 = vlib_buffer_get_current (b1); + + /* pop (ip, udp, vxlan) */ + vlib_buffer_advance (b0, sizeof (*iuvn0)); + vlib_buffer_advance (b1, sizeof (*iuvn1)); + + tunnel_index0 = ~0; + tunnel_index1 = ~0; + error0 = 0; + error1 = 0; + + next0 = (iuvn0->vxlan.protocol < VXLAN_GPE_INPUT_N_NEXT) ? iuvn0->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP; + next1 = (iuvn1->vxlan.protocol < VXLAN_GPE_INPUT_N_NEXT) ? 
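/* Both decap loops keep a one-entry cache (last_key / last_tunnel_index) in
 * front of the tunnel hash lookup, which pays off when consecutive packets in
 * a frame belong to the same tunnel. Reduced to a sketch (the helper name is
 * illustrative, not part of this patch):
 */
static inline u32
vxlan_gpe_tunnel_lookup (vxlan_gpe_main_t * ngm,
                         vxlan_gpe_tunnel_key_t * key,
                         vxlan_gpe_tunnel_key_t * last_key,
                         u32 * last_tunnel_index)
{
  uword *p;

  if (key->as_u64[0] == last_key->as_u64[0]
      && key->as_u64[1] == last_key->as_u64[1])
    return *last_tunnel_index;          /* cache hit: skip the hash lookup */

  p = hash_get_mem (ngm->vxlan_gpe_tunnel_by_key, key);
  if (p == 0)
    return ~0;                          /* VXLAN_GPE_ERROR_NO_SUCH_TUNNEL */

  last_key->as_u64[0] = key->as_u64[0];
  last_key->as_u64[1] = key->as_u64[1];
  *last_tunnel_index = p[0];
  return p[0];
}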
iuvn1->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP; + + + + + key0.local = iuvn0->ip4.dst_address.as_u32; + key1.local = iuvn1->ip4.dst_address.as_u32; + + key0.remote = iuvn0->ip4.src_address.as_u32; + key1.remote = iuvn1->ip4.src_address.as_u32; + + key0.vni = iuvn0->vxlan.vni_res; + key1.vni = iuvn1->vxlan.vni_res; + + key0.pad = 0; + key1.pad = 0; + + /* Processing for key0 */ + if (PREDICT_FALSE ((key0.as_u64[0] != last_key.as_u64[0]) + || (key0.as_u64[1] != last_key.as_u64[1]))) + { + p0 = hash_get_mem (ngm->vxlan_gpe_tunnel_by_key, &key0); + + if (p0 == 0) + { + error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL; + goto trace0; + } + + last_key.as_u64[0] = key0.as_u64[0]; + last_key.as_u64[1] = key0.as_u64[1]; + tunnel_index0 = last_tunnel_index = p0[0]; + } + else + tunnel_index0 = last_tunnel_index; + + t0 = pool_elt_at_index (ngm->tunnels, tunnel_index0); + + next0 = t0->protocol; + + sw_if_index0 = t0->sw_if_index; + len0 = vlib_buffer_length_in_chain(vm, b0); + + /* Required to make the l2 tag push / pop code work on l2 subifs */ + vnet_update_l2_len (b0); + + /* + * ip[46] lookup in the configured FIB + */ + vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index; + + pkts_decapsulated++; + stats_n_packets += 1; + stats_n_bytes += len0; + + if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index)) + { + stats_n_packets -= 1; + stats_n_bytes -= len0; + if (stats_n_packets) + vlib_increment_combined_counter( + im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, + cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); + stats_n_packets = 1; + stats_n_bytes = len0; + stats_sw_if_index = sw_if_index0; + } + + trace0: + b0->error = error0 ? node->errors[error0] : 0; + + if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) + { + vxlan_gpe_rx_trace_t *tr + = vlib_add_trace (vm, node, b0, sizeof (*tr)); + tr->next_index = next0; + tr->error = error0; + tr->tunnel_index = tunnel_index0; + } + + + /* Processing for key1 */ + if (PREDICT_FALSE ((key1.as_u64[0] != last_key.as_u64[0]) + || (key1.as_u64[1] != last_key.as_u64[1]))) + { + p1 = hash_get_mem (ngm->vxlan_gpe_tunnel_by_key, &key1); + + if (p1 == 0) + { + error1 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL; + goto trace1; + } + + last_key.as_u64[0] = key1.as_u64[0]; + last_key.as_u64[1] = key1.as_u64[1]; + tunnel_index1 = last_tunnel_index = p1[0]; + } + else + tunnel_index1 = last_tunnel_index; + + t1 = pool_elt_at_index (ngm->tunnels, tunnel_index1); + + next1 = t1->protocol; + sw_if_index1 = t1->sw_if_index; + len1 = vlib_buffer_length_in_chain(vm, b1); + + /* Required to make the l2 tag push / pop code work on l2 subifs */ + vnet_update_l2_len (b1); + + /* + * ip[46] lookup in the configured FIB + */ + vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index; + + + pkts_decapsulated++; + stats_n_packets += 1; + stats_n_bytes += len1; + + /* Batch stats increment on the same vxlan tunnel so counter + is not incremented per packet */ + if (PREDICT_FALSE(sw_if_index1 != stats_sw_if_index)) + { + stats_n_packets -= 1; + stats_n_bytes -= len1; + if (stats_n_packets) + vlib_increment_combined_counter( + im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, + cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); + stats_n_packets = 1; + stats_n_bytes = len1; + stats_sw_if_index = sw_if_index1; + } + vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index; + + trace1: + b1->error = error1 ? 
node->errors[error1] : 0; + + if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED)) + { + vxlan_gpe_rx_trace_t *tr + = vlib_add_trace (vm, node, b1, sizeof (*tr)); + tr->next_index = next1; + tr->error = error1; + tr->tunnel_index = tunnel_index1; + } + + vlib_validate_buffer_enqueue_x2 (vm, node, next_index, + to_next, n_left_to_next, + bi0, bi1, next0, next1); + } + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0; + vlib_buffer_t * b0; + u32 next0; + ip4_vxlan_gpe_header_t * iuvn0; + uword * p0; + u32 tunnel_index0; + vxlan_gpe_tunnel_t * t0; + vxlan_gpe_tunnel_key_t key0; + u32 error0; + u32 sw_if_index0, len0; + + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + + /* udp leaves current_data pointing at the vxlan header */ + vlib_buffer_advance + (b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t))); + + iuvn0 = vlib_buffer_get_current (b0); + + /* pop (ip, udp, vxlan) */ + vlib_buffer_advance (b0, sizeof (*iuvn0)); + + tunnel_index0 = ~0; + error0 = 0; + next0 = (iuvn0->vxlan.protocol < VXLAN_GPE_INPUT_N_NEXT) ? iuvn0->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP; + + key0.local = iuvn0->ip4.dst_address.as_u32; + key0.remote = iuvn0->ip4.src_address.as_u32; + key0.vni = iuvn0->vxlan.vni_res; + key0.pad = 0; + + if (PREDICT_FALSE ((key0.as_u64[0] != last_key.as_u64[0]) + || (key0.as_u64[1] != last_key.as_u64[1]))) + { + p0 = hash_get_mem (ngm->vxlan_gpe_tunnel_by_key, &key0); + + if (p0 == 0) + { + error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL; + goto trace00; + } + + last_key.as_u64[0] = key0.as_u64[0]; + last_key.as_u64[1] = key0.as_u64[1]; + tunnel_index0 = last_tunnel_index = p0[0]; + } + else + tunnel_index0 = last_tunnel_index; + + t0 = pool_elt_at_index (ngm->tunnels, tunnel_index0); + + next0 = t0->protocol; + + sw_if_index0 = t0->sw_if_index; + len0 = vlib_buffer_length_in_chain(vm, b0); + + /* Required to make the l2 tag push / pop code work on l2 subifs */ + vnet_update_l2_len (b0); + + /* + * ip[46] lookup in the configured FIB + */ + vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index; + + pkts_decapsulated ++; + stats_n_packets += 1; + stats_n_bytes += len0; + + /* Batch stats increment on the same vxlan-gpe tunnel so counter + is not incremented per packet */ + if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index)) + { + stats_n_packets -= 1; + stats_n_bytes -= len0; + if (stats_n_packets) + vlib_increment_combined_counter( + im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, + cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); + stats_n_packets = 1; + stats_n_bytes = len0; + stats_sw_if_index = sw_if_index0; + } + + trace00: + b0->error = error0 ? 
node->errors[error0] : 0; + + if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) + { + vxlan_gpe_rx_trace_t *tr + = vlib_add_trace (vm, node, b0, sizeof (*tr)); + tr->next_index = next0; + tr->error = error0; + tr->tunnel_index = tunnel_index0; + } + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, + bi0, next0); + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + vlib_node_increment_counter (vm, vxlan_gpe_input_node.index, + VXLAN_GPE_ERROR_DECAPSULATED, + pkts_decapsulated); + /* Increment any remaining batch stats */ + if (stats_n_packets) + { + vlib_increment_combined_counter( + im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, cpu_index, + stats_sw_if_index, stats_n_packets, stats_n_bytes); + node->runtime_data[0] = stats_sw_if_index; + } + return from_frame->n_vectors; +} + +static char * vxlan_gpe_error_strings[] = { +#define vxlan_gpe_error(n,s) s, +#include +#undef vxlan_gpe_error +#undef _ +}; + +VLIB_REGISTER_NODE (vxlan_gpe_input_node) = { + .function = vxlan_gpe_input, + .name = "vxlan-gpe-input", + /* Takes a vector of packets. */ + .vector_size = sizeof (u32), + .type = VLIB_NODE_TYPE_INTERNAL, + .n_errors = ARRAY_LEN(vxlan_gpe_error_strings), + .error_strings = vxlan_gpe_error_strings, + + .n_next_nodes = VXLAN_GPE_INPUT_N_NEXT, + .next_nodes = { +#define _(s,n) [VXLAN_GPE_INPUT_NEXT_##s] = n, + foreach_vxlan_gpe_input_next +#undef _ + }, + + .format_buffer = format_vxlan_gpe_with_length, + .format_trace = format_vxlan_gpe_rx_trace, + // $$$$ .unformat_buffer = unformat_vxlan_gpe_header, +}; + + diff --git a/vnet/vnet/vxlan-gpe/encap.c b/vnet/vnet/vxlan-gpe/encap.c new file mode 100644 index 00000000000..3ffe2a62db2 --- /dev/null +++ b/vnet/vnet/vxlan-gpe/encap.c @@ -0,0 +1,425 @@ +/* + * Copyright (c) 2015 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include +#include +#include +#include + +/* Statistics (not really errors) */ +#define foreach_vxlan_gpe_encap_error \ +_(ENCAPSULATED, "good packets encapsulated") + +static char * vxlan_gpe_encap_error_strings[] = { +#define _(sym,string) string, + foreach_vxlan_gpe_encap_error +#undef _ +}; + +typedef enum { +#define _(sym,str) VXLAN_GPE_ENCAP_ERROR_##sym, + foreach_vxlan_gpe_encap_error +#undef _ + VXLAN_GPE_ENCAP_N_ERROR, +} vxlan_gpe_encap_error_t; + +typedef enum { + VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP, + VXLAN_GPE_ENCAP_NEXT_DROP, + VXLAN_GPE_ENCAP_N_NEXT +} vxlan_gpe_encap_next_t; + +typedef struct { + u32 tunnel_index; +} vxlan_gpe_encap_trace_t; + + +u8 * format_vxlan_gpe_encap_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + vxlan_gpe_encap_trace_t * t + = va_arg (*args, vxlan_gpe_encap_trace_t *); + + s = format (s, "VXLAN-GPE-ENCAP: tunnel %d", t->tunnel_index); + return s; +} + +#define foreach_fixed_header_offset \ +_(0) _(1) _(2) _(3) _(4) _(5) _(6) + +static uword +vxlan_gpe_encap (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + u32 n_left_from, next_index, * from, * to_next; + vxlan_gpe_main_t * ngm = &vxlan_gpe_main; + vnet_main_t * vnm = ngm->vnet_main; + vnet_interface_main_t * im = &vnm->interface_main; + u32 pkts_encapsulated = 0; + u16 old_l0 = 0, old_l1 = 0; + u32 cpu_index = os_get_cpu_number(); + u32 stats_sw_if_index, stats_n_packets, stats_n_bytes; + + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + + next_index = node->cached_next_index; + stats_sw_if_index = node->runtime_data[0]; + stats_n_packets = stats_n_bytes = 0; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, + to_next, n_left_to_next); + + while (n_left_from >= 4 && n_left_to_next >= 2) + { + u32 bi0, bi1; + vlib_buffer_t * b0, * b1; + u32 next0 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP; + u32 next1 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP; + u32 sw_if_index0, sw_if_index1, len0, len1; + vnet_hw_interface_t * hi0, * hi1; + ip4_header_t * ip0, * ip1; + udp_header_t * udp0, * udp1; + u64 * copy_src0, * copy_dst0; + u64 * copy_src1, * copy_dst1; + u32 * copy_src_last0, * copy_dst_last0; + u32 * copy_src_last1, * copy_dst_last1; + vxlan_gpe_tunnel_t * t0, * t1; + u16 new_l0, new_l1; + ip_csum_t sum0, sum1; + + /* Prefetch next iteration. */ + { + vlib_buffer_t * p2, * p3; + + p2 = vlib_get_buffer (vm, from[2]); + p3 = vlib_get_buffer (vm, from[3]); + + vlib_prefetch_buffer_header (p2, LOAD); + vlib_prefetch_buffer_header (p3, LOAD); + + CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD); + CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD); + } + + bi0 = from[0]; + bi1 = from[1]; + to_next[0] = bi0; + to_next[1] = bi1; + from += 2; + to_next += 2; + n_left_to_next -= 2; + n_left_from -= 2; + + b0 = vlib_get_buffer (vm, bi0); + b1 = vlib_get_buffer (vm, bi1); + + /* 1-wide cache? 
*/ + sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX]; + sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_TX]; + hi0 = vnet_get_sup_hw_interface + (vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]); + hi1 = vnet_get_sup_hw_interface + (vnm, vnet_buffer(b1)->sw_if_index[VLIB_TX]); + + t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance); + t1 = pool_elt_at_index (ngm->tunnels, hi1->dev_instance); + + ASSERT(vec_len(t0->rewrite) >= 24); + ASSERT(vec_len(t1->rewrite) >= 24); + + /* Apply the rewrite string. $$$$ vnet_rewrite? */ + vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite)); + vlib_buffer_advance (b1, -(word)_vec_len(t1->rewrite)); + + ip0 = vlib_buffer_get_current(b0); + ip1 = vlib_buffer_get_current(b1); + /* Copy the fixed header */ + copy_dst0 = (u64 *) ip0; + copy_src0 = (u64 *) t0->rewrite; + copy_dst1 = (u64 *) ip1; + copy_src1 = (u64 *) t1->rewrite; + + ASSERT (sizeof (ip4_vxlan_gpe_header_t) == 36); + + /* Copy first 36 octets 8-bytes at a time */ +#define _(offs) copy_dst0[offs] = copy_src0[offs]; + foreach_fixed_header_offset; +#undef _ +#define _(offs) copy_dst1[offs] = copy_src1[offs]; + foreach_fixed_header_offset; +#undef _ + + /* Last 4 octets. Hopefully gcc will be our friend */ + copy_dst_last0 = (u32 *)(©_dst0[7]); + copy_src_last0 = (u32 *)(©_src0[7]); + copy_dst_last1 = (u32 *)(©_dst1[7]); + copy_src_last1 = (u32 *)(©_src1[7]); + + copy_dst_last0[0] = copy_src_last0[0]; + copy_dst_last1[0] = copy_src_last1[0]; + + /* If there are TLVs to copy, do so */ + if (PREDICT_FALSE (_vec_len(t0->rewrite) > 64)) + clib_memcpy (©_dst0[3], t0->rewrite + 64 , + _vec_len (t0->rewrite)-64); + + if (PREDICT_FALSE (_vec_len(t1->rewrite) > 64)) + clib_memcpy (©_dst0[3], t1->rewrite + 64 , + _vec_len (t1->rewrite)-64); + + /* fix the ing outer-IP checksum */ + sum0 = ip0->checksum; + /* old_l0 always 0, see the rewrite setup */ + new_l0 = + clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)); + + sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t, + length /* changed member */); + ip0->checksum = ip_csum_fold (sum0); + ip0->length = new_l0; + + sum1 = ip1->checksum; + /* old_l1 always 0, see the rewrite setup */ + new_l1 = + clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)); + + sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t, + length /* changed member */); + ip1->checksum = ip_csum_fold (sum1); + ip1->length = new_l1; + + /* Fix UDP length */ + udp0 = (udp_header_t *)(ip0+1); + new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) + - sizeof (*ip0)); + udp1 = (udp_header_t *)(ip1+1); + new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1) + - sizeof (*ip1)); + + udp0->length = new_l0; + udp1->length = new_l1; + + /* Reset to look up tunnel partner in the configured FIB */ + vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index; + vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->encap_fib_index; + vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0; + vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1; + pkts_encapsulated += 2; + + len0 = vlib_buffer_length_in_chain(vm, b0); + len1 = vlib_buffer_length_in_chain(vm, b0); + stats_n_packets += 2; + stats_n_bytes += len0 + len1; + + /* Batch stats increment on the same vxlan tunnel so counter is not + incremented per packet. Note stats are still incremented for deleted + and admin-down tunnel where packets are dropped. It is not worthwhile + to check for this rare case and affect normal path performance. 
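/* The precomputed rewrite deliberately leaves the outer IPv4 total-length (and
 * the corresponding part of the checksum) as zero, so the per-packet fix-up
 * above is an incremental checksum update rather than a full recompute. In
 * isolation (helper name illustrative, not part of this patch; new_len_net is
 * the new length already in network byte order):
 */
static inline void
outer_ip4_fix_len (ip4_header_t * ip4, u16 new_len_net)
{
  ip_csum_t sum = ip4->checksum;

  /* old length is known to be 0 from the rewrite template */
  sum = ip_csum_update (sum, 0 /* old */, new_len_net /* new */,
                        ip4_header_t, length /* changed member */);
  ip4->checksum = ip_csum_fold (sum);
  ip4->length = new_len_net;
}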
*/ + if (PREDICT_FALSE( + (sw_if_index0 != stats_sw_if_index) + || (sw_if_index1 != stats_sw_if_index))) { + stats_n_packets -= 2; + stats_n_bytes -= len0 + len1; + if (sw_if_index0 == sw_if_index1) { + if (stats_n_packets) + vlib_increment_combined_counter( + im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, + cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); + stats_sw_if_index = sw_if_index0; + stats_n_packets = 2; + stats_n_bytes = len0 + len1; + } else { + vlib_increment_combined_counter( + im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, + cpu_index, sw_if_index0, 1, len0); + vlib_increment_combined_counter( + im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, + cpu_index, sw_if_index1, 1, len1); + } + } + + if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) + { + vxlan_gpe_encap_trace_t *tr = + vlib_add_trace (vm, node, b0, sizeof (*tr)); + tr->tunnel_index = t0 - ngm->tunnels; + } + + if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED)) + { + vxlan_gpe_encap_trace_t *tr = + vlib_add_trace (vm, node, b1, sizeof (*tr)); + tr->tunnel_index = t1 - ngm->tunnels; + } + + vlib_validate_buffer_enqueue_x2 (vm, node, next_index, + to_next, n_left_to_next, + bi0, bi1, next0, next1); + } + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0; + vlib_buffer_t * b0; + u32 next0 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP; + u32 sw_if_index0, len0; + vnet_hw_interface_t * hi0; + ip4_header_t * ip0; + udp_header_t * udp0; + u64 * copy_src0, * copy_dst0; + u32 * copy_src_last0, * copy_dst_last0; + vxlan_gpe_tunnel_t * t0; + u16 new_l0; + ip_csum_t sum0; + + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + + /* 1-wide cache? */ + sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX]; + hi0 = vnet_get_sup_hw_interface + (vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]); + + t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance); + + ASSERT(vec_len(t0->rewrite) >= 24); + + /* Apply the rewrite string. $$$$ vnet_rewrite? */ + vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite)); + + ip0 = vlib_buffer_get_current(b0); + /* Copy the fixed header */ + copy_dst0 = (u64 *) ip0; + copy_src0 = (u64 *) t0->rewrite; + + ASSERT (sizeof (ip4_vxlan_gpe_header_t) == 36); + + /* Copy first 36 octets 8-bytes at a time */ +#define _(offs) copy_dst0[offs] = copy_src0[offs]; + foreach_fixed_header_offset; +#undef _ + /* Last 4 octets. 
Hopefully gcc will be our friend */ + copy_dst_last0 = (u32 *)(©_dst0[7]); + copy_src_last0 = (u32 *)(©_src0[7]); + + copy_dst_last0[0] = copy_src_last0[0]; + + /* If there are TLVs to copy, do so */ + if (PREDICT_FALSE (_vec_len(t0->rewrite) > 64)) + clib_memcpy (©_dst0[3], t0->rewrite + 64 , + _vec_len (t0->rewrite)-64); + + /* fix the ing outer-IP checksum */ + sum0 = ip0->checksum; + /* old_l0 always 0, see the rewrite setup */ + new_l0 = + clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)); + + sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t, + length /* changed member */); + ip0->checksum = ip_csum_fold (sum0); + ip0->length = new_l0; + + /* Fix UDP length */ + udp0 = (udp_header_t *)(ip0+1); + new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) + - sizeof (*ip0)); + + udp0->length = new_l0; + + /* Reset to look up tunnel partner in the configured FIB */ + vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index; + vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0; + pkts_encapsulated ++; + + len0 = vlib_buffer_length_in_chain(vm, b0); + stats_n_packets += 1; + stats_n_bytes += len0; + + /* Batch stats increment on the same vxlan tunnel so counter is not + * incremented per packet. Note stats are still incremented for deleted + * and admin-down tunnel where packets are dropped. It is not worthwhile + * to check for this rare case and affect normal path performance. */ + if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index)) + { + stats_n_packets -= 1; + stats_n_bytes -= len0; + if (stats_n_packets) + vlib_increment_combined_counter( + im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, + cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); + stats_n_packets = 1; + stats_n_bytes = len0; + stats_sw_if_index = sw_if_index0; + } + if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) + { + vxlan_gpe_encap_trace_t *tr = + vlib_add_trace (vm, node, b0, sizeof (*tr)); + tr->tunnel_index = t0 - ngm->tunnels; + } + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, + bi0, next0); + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + vlib_node_increment_counter (vm, node->node_index, + VXLAN_GPE_ENCAP_ERROR_ENCAPSULATED, + pkts_encapsulated); + /* Increment any remaining batch stats */ + if (stats_n_packets) { + vlib_increment_combined_counter( + im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, cpu_index, + stats_sw_if_index, stats_n_packets, stats_n_bytes); + node->runtime_data[0] = stats_sw_if_index; + } + + return from_frame->n_vectors; +} + +VLIB_REGISTER_NODE (vxlan_gpe_encap_node) = { + .function = vxlan_gpe_encap, + .name = "vxlan-gpe-encap", + .vector_size = sizeof (u32), + .format_trace = format_vxlan_gpe_encap_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = ARRAY_LEN(vxlan_gpe_encap_error_strings), + .error_strings = vxlan_gpe_encap_error_strings, + + .n_next_nodes = VXLAN_GPE_ENCAP_N_NEXT, + + .next_nodes = { + [VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP] = "ip4-lookup", + [VXLAN_GPE_ENCAP_NEXT_DROP] = "error-drop", + }, +}; + diff --git a/vnet/vnet/vxlan-gpe/vxlan-gpe-rfc.txt b/vnet/vnet/vxlan-gpe/vxlan-gpe-rfc.txt new file mode 100644 index 00000000000..35cee50f573 --- /dev/null +++ b/vnet/vnet/vxlan-gpe/vxlan-gpe-rfc.txt @@ -0,0 +1,868 @@ +Network Working Group P. Quinn +Internet-Draft Cisco Systems, Inc. +Intended status: Experimental P. Agarwal +Expires: January 4, 2015 Broadcom + R. Fernando + L. Kreeger + D. Lewis + F. Maino + M. Smith + N. 
Yadav + Cisco Systems, Inc. + L. Yong + Huawei USA + X. Xu + Huawei Technologies + U. Elzur + Intel + P. Garg + Microsoft + July 3, 2014 + + + Generic Protocol Extension for VXLAN + draft-quinn-vxlan-gpe-03.txt + +Abstract + + This draft describes extending Virtual eXtensible Local Area Network + (VXLAN), via changes to the VXLAN header, with three new + capabilities: support for multi-protocol encapsulation, operations, + administration and management (OAM) signaling and explicit + versioning. + +Status of this Memo + + This Internet-Draft is submitted in full conformance with the + provisions of BCP 78 and BCP 79. + + Internet-Drafts are working documents of the Internet Engineering + Task Force (IETF). Note that other groups may also distribute + working documents as Internet-Drafts. The list of current Internet- + Drafts is at http://datatracker.ietf.org/drafts/current/. + + Internet-Drafts are draft documents valid for a maximum of six months + and may be updated, replaced, or obsoleted by other documents at any + time. It is inappropriate to use Internet-Drafts as reference + material or to cite them other than as "work in progress." + + + + +Quinn, et al. Expires January 4, 2015 [Page 1] + +Internet-Draft Generic Protocol Extension for VXLAN July 2014 + + + This Internet-Draft will expire on January 4, 2015. + +Copyright Notice + + Copyright (c) 2014 IETF Trust and the persons identified as the + document authors. All rights reserved. + + This document is subject to BCP 78 and the IETF Trust's Legal + Provisions Relating to IETF Documents + (http://trustee.ietf.org/license-info) in effect on the date of + publication of this document. Please review these documents + carefully, as they describe your rights and restrictions with respect + to this document. Code Components extracted from this document must + include Simplified BSD License text as described in Section 4.e of + the Trust Legal Provisions and are provided without warranty as + described in the Simplified BSD License. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Quinn, et al. Expires January 4, 2015 [Page 2] + +Internet-Draft Generic Protocol Extension for VXLAN July 2014 + + +Table of Contents + + 1. Introduction . . . . . . . . . . . . . . . . . . . . . . . . . 4 + 2. VXLAN Without Protocol Extension . . . . . . . . . . . . . . . 5 + 3. Generic Protocol Extension VXLAN (VXLAN-gpe) . . . . . . . . . 6 + 3.1. Multi Protocol Support . . . . . . . . . . . . . . . . . . 6 + 3.2. OAM Support . . . . . . . . . . . . . . . . . . . . . . . 7 + 3.3. Version Bits . . . . . . . . . . . . . . . . . . . . . . . 7 + 4. Backward Compatibility . . . . . . . . . . . . . . . . . . . . 8 + 4.1. VXLAN VTEP to VXLAN-gpe VTEP . . . . . . . . . . . . . . . 8 + 4.2. VXLAN-gpe VTEP to VXLAN VTEP . . . . . . . . . . . . . . . 8 + 4.3. VXLAN-gpe UDP Ports . . . . . . . . . . . . . . . . . . . 8 + 4.4. VXLAN-gpe and Encapsulated IP Header Fields . . . . . . . 8 + 5. VXLAN-gpe Examples . . . . . . . . . . . . . . . . . . . . . . 9 + 6. Security Considerations . . . . . . . . . . . . . . . . . . . 11 + 7. Acknowledgments . . . . . . . . . . . . . . . . . . . . . . . 12 + 8. IANA Considerations . . . . . . . . . . . . . . . . . . . . . 13 + 8.1. UDP Port . . . . . . . . . . . . . . . . . . . . . . . . . 13 + 8.2. VXLAN-gpe Next Protocol . . . . . . . . . . . . . . . . . 13 + 8.3. VXLAN-gpe Reserved Bits . . . . . . . . . . . . . . . . . 13 + 9. References . . . . . . . . . . . . . . . . . . . . . . . . . . 14 + 9.1. 
Normative References . . . . . . . . . . . . . . . . . . . 14 + 9.2. Informative References . . . . . . . . . . . . . . . . . . 14 + Authors' Addresses . . . . . . . . . . . . . . . . . . . . . . . . 15 + + + + + + + + + + + + + + + + + + + + + + + + + + + +Quinn, et al. Expires January 4, 2015 [Page 3] + +Internet-Draft Generic Protocol Extension for VXLAN July 2014 + + +1. Introduction + + Virtual eXtensible Local Area Network [VXLAN] defines an + encapsulation format that encapsulates Ethernet frames in an outer + UDP/IP transport. As data centers evolve, the need to carry other + protocols encapsulated in an IP packet is required, as well as the + need to provide increased visibility and diagnostic capabilities + within the overlay. The VXLAN header does not specify the protocol + being encapsulated and therefore is currently limited to + encapsulating only Ethernet frame payload, nor does it provide the + ability to define OAM protocols. Rather than defining yet another + encapsulation, VXLAN is extended to provide protocol typing and OAM + capabilities. + + This document describes extending VXLAN via the following changes: + + Next Protocol Bit (P bit): A reserved flag bit is allocated, and set + in the VXLAN-gpe header to indicate that a next protocol field is + present. + + OAM Flag Bit (O bit): A reserved flag bit is allocated, and set in + the VXLAN-gpe header, to indicate that the packet is an OAM + packet. + + Version: Two reserved bits are allocated, and set in the VXLAN-gpe + header, to indicate VXLAN-gpe protocol version. + + Next Protocol: A 8 bit next protocol field is present in the VXLAN- + gpe header. + + + + + + + + + + + + + + + + + + + + + + +Quinn, et al. Expires January 4, 2015 [Page 4] + +Internet-Draft Generic Protocol Extension for VXLAN July 2014 + + +2. VXLAN Without Protocol Extension + + As described in the introduction, the VXLAN header has no protocol + identifier that indicates the type of payload being carried by VXLAN. + Because of this, VXLAN is limited to an Ethernet payload. + Furthermore, the VXLAN header has no mechanism to signal OAM packets. + + The VXLAN header defines bits 0-7 as flags (some defined, some + reserved), the VXLAN network identifier (VNI) field and several + reserved bits. The flags provide flexibility to define how the + reserved bits can be used to change the definition of the VXLAN + header. + + + + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |R|R|R|R|I|R|R|R| Reserved | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | VXLAN Network Identifier (VNI) | Reserved | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + + Figure 1: VXLAN Header + + + + + + + + + + + + + + + + + + + + + + + + + + +Quinn, et al. Expires January 4, 2015 [Page 5] + +Internet-Draft Generic Protocol Extension for VXLAN July 2014 + + +3. Generic Protocol Extension VXLAN (VXLAN-gpe) + +3.1. Multi Protocol Support + + This draft defines the following two changes to the VXLAN header in + order to support multi-protocol encapsulation: + + P Bit: Flag bit 5 is defined as the Next Protocol bit. The P bit + MUST be set to 1 to indicate the presence of the 8 bit next + protocol field. + + P = 0 indicates that the payload MUST conform to VXLAN as defined + in [VXLAN]. + + Flag bit 5 was chosen as the P bit because this flag bit is + currently reserved in VXLAN. 
+ + Next Protocol Field: The lower 8 bits of the first word are used to + carry a next protocol. This next protocol field contains the + protocol of the encapsulated payload packet. A new protocol + registry will be requested from IANA. + + This draft defines the following Next Protocol values: + + 0x1 : IPv4 + 0x2 : IPv6 + 0x3 : Ethernet + 0x4 : Network Service Header [NSH] + + + + + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |R|R|R|R|I|P|R|R| Reserved |Next Protocol | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | VXLAN Network Identifier (VNI) | Reserved | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + + + Figure 2: VXLAN-gpe Next Protocol + + + + + + + + +Quinn, et al. Expires January 4, 2015 [Page 6] + +Internet-Draft Generic Protocol Extension for VXLAN July 2014 + + +3.2. OAM Support + + Flag bit 7 is defined as the O bit. When the O bit is set to 1, the + packet is an OAM packet and OAM processing MUST occur. The OAM + protocol details are out of scope for this document. As with the + P-bit, bit 7 is currently a reserved flag in VXLAN. + + + + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |R|R|R|R|I|P|R|O| Reserved |Next Protocol | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | VXLAN Network Identifier (VNI) | Reserved | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + + + Figure 3: VXLAN-gpe OAM Bit + +3.3. Version Bits + + VXLAN-gpe bits 8 and 9 are defined as version bits. These bits are + reserved in VXLAN. The version field is used to ensure backward + compatibility going forward with future VXLAN-gpe updates. + + The initial version for VXLAN-gpe is 0. + + + + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |R|R|R|R|I|P|R|O|Ver| Reserved |Next Protocol | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | VXLAN Network Identifier (VNI) | Reserved | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + + + + + Figure 4: VXLAN-gpe Version Bits + + + + + + + +Quinn, et al. Expires January 4, 2015 [Page 7] + +Internet-Draft Generic Protocol Extension for VXLAN July 2014 + + +4. Backward Compatibility + +4.1. VXLAN VTEP to VXLAN-gpe VTEP + + As per VXLAN, reserved bits 5 and 7, VXLAN-gpe P and O-bits + respectively must be set to zero. The remaining reserved bits must + be zero, including the VXLAN-gpe version field, bits 8 and 9. The + encapsulated payload MUST be Ethernet. + +4.2. VXLAN-gpe VTEP to VXLAN VTEP + + A VXLAN-gpe VTEP MUST NOT encapsulate non-Ethernet frames to a VXLAN + VTEP. When encapsulating Ethernet frames to a VXLAN VTEP, the VXLAN- + gpe VTEP will set the P bit to 0, the Next Protocol to 0 and use UDP + destination port 4789. A VXLAN-gpe VTEP MUST also set O = 0 and Ver + = 0 when encapsulating Ethernet frames to VXLAN VTEP. The receiving + VXLAN VTEP will threat this packet as a VXLAN packet. + + A method for determining the capabilities of a VXLAN VTEP (gpe or + non-gpe) is out of the scope of this draft. + +4.3. VXLAN-gpe UDP Ports + + VXLAN-gpe uses a new UDP destination port (to be assigned by IANA) + when sending traffic to VXLAN-gpe VTEPs. + +4.4. 
VXLAN-gpe and Encapsulated IP Header Fields + + When encapsulating and decapsulating IPv4 and IPv6 packets, certain + fields, such as IPv4 Time to Live (TTL) from the inner IP header need + to be considered. VXLAN-gpe IP encapsulation and decapsulation + utilizes the techniques described in [RFC6830], section 5.3. + + + + + + + + + + + + + + + + + + + +Quinn, et al. Expires January 4, 2015 [Page 8] + +Internet-Draft Generic Protocol Extension for VXLAN July 2014 + + +5. VXLAN-gpe Examples + + This section provides three examples of protocols encapsulated using + the Generic Protocol Extension for VXLAN described in this document. + + + + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |R|R|R|R|I|1|R|0|0|0| Reserved | NP = IPv4 | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | VXLAN Network Identifier (VNI) | Reserved | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Original IPv4 Packet | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + + + Figure 5: IPv4 and VXLAN-gpe + + + + + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |R|R|R|R|I|1|R|0|0|0| Reserved | NP = IPv6 | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | VXLAN Network Identifier (VNI) | Reserved | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Original IPv6 Packet | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + + + Figure 6: IPv6 and VXLAN-gpe + + + + + + + + + + + + + + +Quinn, et al. Expires January 4, 2015 [Page 9] + +Internet-Draft Generic Protocol Extension for VXLAN July 2014 + + + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |R|R|R|R|I|1|R|0|0|0| Reserved |NP = Ethernet | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | VXLAN Network Identifier (VNI) | Reserved | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Original Ethernet Frame | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + + + Figure 7: Ethernet and VXLAN-gpe + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Quinn, et al. Expires January 4, 2015 [Page 10] + +Internet-Draft Generic Protocol Extension for VXLAN July 2014 + + +6. Security Considerations + + VXLAN's security is focused on issues around L2 encapsulation into + L3. With VXLAN-gpe, issues such as spoofing, flooding, and traffic + redirection are dependent on the particular protocol payload + encapsulated. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Quinn, et al. Expires January 4, 2015 [Page 11] + +Internet-Draft Generic Protocol Extension for VXLAN July 2014 + + +7. Acknowledgments + + A special thank you goes to Dino Farinacci for his guidance and + detailed review. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Quinn, et al. Expires January 4, 2015 [Page 12] + +Internet-Draft Generic Protocol Extension for VXLAN July 2014 + + +8. IANA Considerations + +8.1. UDP Port + + A new UDP port will be requested from IANA. + +8.2. VXLAN-gpe Next Protocol + + IANA is requested to set up a registry of "Next Protocol". These are + 8-bit values. 
Next Protocol values 0, 1, 2, 3 and 4 are defined in + this draft. New values are assigned via Standards Action [RFC5226]. + + +---------------+-------------+---------------+ + | Next Protocol | Description | Reference | + +---------------+-------------+---------------+ + | 0 | Reserved | This document | + | | | | + | 1 | IPv4 | This document | + | | | | + | 2 | IPv6 | This document | + | | | | + | 3 | Ethernet | This document | + | | | | + | 4 | NSH | This document | + | | | | + | 5..253 | Unassigned | | + +---------------+-------------+---------------+ + + Table 1 + +8.3. VXLAN-gpe Reserved Bits + + There are ten bits at the beginning of the VXLAN-gpe header. New + bits are assigned via Standards Action [RFC5226]. + + Bits 0-3 - Reserved + Bit 4 - Instance ID (I bit) + Bit 5 - Next Protocol (P bit) + Bit 6 - Reserved + Bit 7 - OAM (O bit) + Bits 8-9 - Version + + + + + + + + + + +Quinn, et al. Expires January 4, 2015 [Page 13] + +Internet-Draft Generic Protocol Extension for VXLAN July 2014 + + +9. References + +9.1. Normative References + + [RFC0768] Postel, J., "User Datagram Protocol", STD 6, RFC 768, + August 1980. + + [RFC0791] Postel, J., "Internet Protocol", STD 5, RFC 791, + September 1981. + + [RFC2119] Bradner, S., "Key words for use in RFCs to Indicate + Requirement Levels", BCP 14, RFC 2119, March 1997. + + [RFC5226] Narten, T. and H. Alvestrand, "Guidelines for Writing an + IANA Considerations Section in RFCs", BCP 26, RFC 5226, + May 2008. + +9.2. Informative References + + [NSH] Quinn, P. and et al. , "Network Service Header", 2014. + + [RFC1700] Reynolds, J. and J. Postel, "Assigned Numbers", RFC 1700, + October 1994. + + [RFC6830] Farinacci, D., Fuller, V., Meyer, D., and D. Lewis, "The + Locator/ID Separation Protocol (LISP)", RFC 6830, + January 2013. + + [VXLAN] Dutt, D., Mahalingam, M., Duda, K., Agarwal, P., Kreeger, + L., Sridhar, T., Bursell, M., and C. Wright, "VXLAN: A + Framework for Overlaying Virtualized Layer 2 Networks over + Layer 3 Networks", 2013. + + + + + + + + + + + + + + + + + + + +Quinn, et al. Expires January 4, 2015 [Page 14] + +Internet-Draft Generic Protocol Extension for VXLAN July 2014 + + +Authors' Addresses + + Paul Quinn + Cisco Systems, Inc. + + Email: paulq@cisco.com + + + Puneet Agarwal + Broadcom + + Email: pagarwal@broadcom.com + + + Rex Fernando + Cisco Systems, Inc. + + Email: rex@cisco.com + + + Larry Kreeger + Cisco Systems, Inc. + + Email: kreeger@cisco.com + + + Darrel Lewis + Cisco Systems, Inc. + + Email: darlewis@cisco.com + + + Fabio Maino + Cisco Systems, Inc. + + Email: kreeger@cisco.com + + + Michael Smith + Cisco Systems, Inc. + + Email: michsmit@cisco.com + + + + + + + + + +Quinn, et al. Expires January 4, 2015 [Page 15] + +Internet-Draft Generic Protocol Extension for VXLAN July 2014 + + + Navindra Yadav + Cisco Systems, Inc. + + Email: nyadav@cisco.com + + + Lucy Yong + Huawei USA + + Email: lucy.yong@huawei.com + + + Xiaohu Xu + Huawei Technologies + + Email: xuxiaohu@huawei.com + + + Uri Elzur + Intel + + Email: uri.elzur@intel.com + + + Pankaj Garg + Microsoft + + Email: Garg.Pankaj@microsoft.com diff --git a/vnet/vnet/vxlan-gpe/vxlan_gpe.c b/vnet/vnet/vxlan-gpe/vxlan_gpe.c new file mode 100644 index 00000000000..ef242d0bb8f --- /dev/null +++ b/vnet/vnet/vxlan-gpe/vxlan_gpe.c @@ -0,0 +1,467 @@ +/* + * Copyright (c) 2015 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include + +vxlan_gpe_main_t vxlan_gpe_main; + +u8 * format_vxlan_gpe_tunnel (u8 * s, va_list * args) +{ + vxlan_gpe_tunnel_t * t = va_arg (*args, vxlan_gpe_tunnel_t *); + vxlan_gpe_main_t * gm = &vxlan_gpe_main; + + s = format (s, "[%d] local: %U remote: %U ", + t - gm->tunnels, + format_ip4_address, &t->local, + format_ip4_address, &t->remote); + + switch (t->protocol) + { + case VXLAN_GPE_PROTOCOL_IP4: + s = format (s, "next-protocol ip4"); + case VXLAN_GPE_PROTOCOL_IP6: + s = format (s, "next-protocol ip6"); + case VXLAN_GPE_PROTOCOL_ETHERNET: + s = format (s, "next-protocol ethernet"); + case VXLAN_GPE_PROTOCOL_NSH: + s = format (s, "next-protocol nsh"); + default: + s = format (s, "next-protocol unknown %d", t->protocol); + } + + s = format (s, " fibs: (encap %d, decap %d)", + t->encap_fib_index, + t->decap_fib_index); + + s = format (s, " vxlan VNI %d ", t->vni); + + return s; +} + +static u8 * format_vxlan_gpe_name (u8 * s, va_list * args) +{ + vxlan_gpe_main_t * gm = &vxlan_gpe_main; + u32 i = va_arg (*args, u32); + u32 show_dev_instance = ~0; + + if (i < vec_len (gm->dev_inst_by_real)) + show_dev_instance = gm->dev_inst_by_real[i]; + + if (show_dev_instance != ~0) + i = show_dev_instance; + + return format (s, "vxlan_gpe_tunnel%d", i); +} + +static int vxlan_gpe_name_renumber (vnet_hw_interface_t * hi, + u32 new_dev_instance) +{ + vxlan_gpe_main_t * gm = &vxlan_gpe_main; + + vec_validate_init_empty (gm->dev_inst_by_real, hi->dev_instance, ~0); + + gm->dev_inst_by_real [hi->dev_instance] = new_dev_instance; + + return 0; +} + +static uword dummy_interface_tx (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + clib_warning ("you shouldn't be here, leaking buffers..."); + return frame->n_vectors; +} + +static uword dummy_set_rewrite (vnet_main_t * vnm, + u32 sw_if_index, + u32 l3_type, + void * dst_address, + void * rewrite, + uword max_rewrite_bytes) +{ + return 0; +} + +VNET_DEVICE_CLASS (vxlan_gpe_device_class,static) = { + .name = "VXLAN_GPE", + .format_device_name = format_vxlan_gpe_name, + .format_tx_trace = format_vxlan_gpe_encap_trace, + .tx_function = dummy_interface_tx, + .name_renumber = vxlan_gpe_name_renumber, +}; + +static u8 * format_vxlan_gpe_header_with_length (u8 * s, va_list * args) +{ + u32 dev_instance = va_arg (*args, u32); + s = format (s, "unimplemented dev %u", dev_instance); + return s; +} + +VNET_HW_INTERFACE_CLASS (vxlan_gpe_hw_class) = { + .name = "VXLAN_GPE", + .format_header = format_vxlan_gpe_header_with_length, + .set_rewrite = dummy_set_rewrite, +}; + + +#define foreach_gpe_copy_field \ +_(local.as_u32) \ +_(remote.as_u32) \ +_(vni) \ +_(protocol) \ +_(encap_fib_index) \ +_(decap_fib_index) + +#define foreach_copy_field \ +_(src.as_u32) \ +_(dst.as_u32) \ +_(vni) \ +_(encap_fib_index) \ +_(decap_fib_index) \ +_(decap_next_index) + + + +static int vxlan_gpe_rewrite (vxlan_gpe_tunnel_t * t) +{ + u8 *rw = 0; + ip4_header_t * ip0; + ip4_vxlan_gpe_header_t * h0; + int len; + + len = sizeof (*h0); + + vec_validate_aligned (rw, len-1, CLIB_CACHE_LINE_BYTES); + + h0 = (ip4_vxlan_gpe_header_t *) rw; + + /* 
Fixed portion of the (outer) ip4 header */ + ip0 = &h0->ip4; + ip0->ip_version_and_header_length = 0x45; + ip0->ttl = 254; + ip0->protocol = IP_PROTOCOL_UDP; + + /* we fix up the ip4 header length and checksum after-the-fact */ + ip0->src_address.as_u32 = t->local.as_u32; + ip0->dst_address.as_u32 = t->remote.as_u32; + ip0->checksum = ip4_header_checksum (ip0); + + /* UDP header, randomize src port on something, maybe? */ + h0->udp.src_port = clib_host_to_net_u16 (4790); + h0->udp.dst_port = clib_host_to_net_u16 (UDP_DST_PORT_vxlan_gpe); + + /* VXLAN header. Are we having fun yet? */ + h0->vxlan.flags = VXLAN_GPE_FLAGS_I | VXLAN_GPE_FLAGS_P; + h0->vxlan.ver_res = VXLAN_GPE_VERSION; + h0->vxlan.protocol = VXLAN_GPE_PROTOCOL_IP4; + h0->vxlan.vni_res = clib_host_to_net_u32 (t->vni<<8); + + t->rewrite = rw; + return (0); +} + +int vnet_vxlan_gpe_add_del_tunnel +(vnet_vxlan_gpe_add_del_tunnel_args_t *a, u32 * sw_if_indexp) +{ + vxlan_gpe_main_t * gm = &vxlan_gpe_main; + vxlan_gpe_tunnel_t *t = 0; + vnet_main_t * vnm = gm->vnet_main; + vnet_hw_interface_t * hi; + uword * p; + u32 hw_if_index = ~0; + u32 sw_if_index = ~0; + int rv; + vxlan_gpe_tunnel_key_t key, *key_copy; + hash_pair_t *hp; + + key.local = a->local.as_u32; + key.remote = a->remote.as_u32; + key.vni = clib_host_to_net_u32 (a->vni << 8); + key.pad = 0; + + p = hash_get_mem (gm->vxlan_gpe_tunnel_by_key, &key); + + if (a->is_add) + { + /* adding a tunnel: tunnel must not already exist */ + if (p) + return VNET_API_ERROR_INVALID_VALUE; + + if (a->decap_next_index >= VXLAN_GPE_INPUT_N_NEXT) + return VNET_API_ERROR_INVALID_DECAP_NEXT; + + pool_get_aligned (gm->tunnels, t, CLIB_CACHE_LINE_BYTES); + memset (t, 0, sizeof (*t)); + + /* copy from arg structure */ +#define _(x) t->x = a->x; + foreach_gpe_copy_field; +#undef _ + + rv = vxlan_gpe_rewrite (t); + + if (rv) + { + pool_put (gm->tunnels, t); + return rv; + } + + key_copy = clib_mem_alloc (sizeof (*key_copy)); + clib_memcpy (key_copy, &key, sizeof (*key_copy)); + + hash_set_mem (gm->vxlan_gpe_tunnel_by_key, key_copy, + t - gm->tunnels); + + if (vec_len (gm->free_vxlan_gpe_tunnel_hw_if_indices) > 0) + { + hw_if_index = gm->free_vxlan_gpe_tunnel_hw_if_indices + [vec_len (gm->free_vxlan_gpe_tunnel_hw_if_indices)-1]; + _vec_len (gm->free_vxlan_gpe_tunnel_hw_if_indices) -= 1; + + hi = vnet_get_hw_interface (vnm, hw_if_index); + hi->dev_instance = t - gm->tunnels; + hi->hw_instance = hi->dev_instance; + } + else + { + hw_if_index = vnet_register_interface + (vnm, vxlan_gpe_device_class.index, t - gm->tunnels, + vxlan_gpe_hw_class.index, t - gm->tunnels); + hi = vnet_get_hw_interface (vnm, hw_if_index); + hi->output_node_index = vxlan_gpe_encap_node.index; + } + + t->hw_if_index = hw_if_index; + t->sw_if_index = sw_if_index = hi->sw_if_index; + + vnet_sw_interface_set_flags (vnm, hi->sw_if_index, + VNET_SW_INTERFACE_FLAG_ADMIN_UP); + } + else + { + /* deleting a tunnel: tunnel must exist */ + if (!p) + return VNET_API_ERROR_NO_SUCH_ENTRY; + + t = pool_elt_at_index (gm->tunnels, p[0]); + + vnet_sw_interface_set_flags (vnm, t->sw_if_index, 0 /* down */); + vec_add1 (gm->free_vxlan_gpe_tunnel_hw_if_indices, t->hw_if_index); + + hp = hash_get_pair (gm->vxlan_gpe_tunnel_by_key, &key); + key_copy = (void *)(hp->key); + hash_unset_mem (gm->vxlan_gpe_tunnel_by_key, &key); + clib_mem_free (key_copy); + + vec_free (t->rewrite); + pool_put (gm->tunnels, t); + } + + if (sw_if_indexp) + *sw_if_indexp = sw_if_index; + + return 0; +} + +static u32 fib_index_from_fib_id (u32 fib_id) +{ + ip4_main_t * im = 
&ip4_main; + uword * p; + + p = hash_get (im->fib_index_by_table_id, fib_id); + if (!p) + return ~0; + + return p[0]; +} + +static uword unformat_gpe_decap_next (unformat_input_t * input, va_list * args) +{ + u32 * result = va_arg (*args, u32 *); + u32 tmp; + + if (unformat (input, "drop")) + *result = VXLAN_GPE_INPUT_NEXT_DROP; + else if (unformat (input, "ip4")) + *result = VXLAN_GPE_INPUT_NEXT_IP4_INPUT; + else if (unformat (input, "ip6")) + *result = VXLAN_GPE_INPUT_NEXT_IP6_INPUT; + else if (unformat (input, "ethernet")) + *result = VXLAN_GPE_INPUT_NEXT_ETHERNET_INPUT; + else if (unformat (input, "nsh")) + *result = VXLAN_GPE_INPUT_NEXT_NSH_INPUT; + else if (unformat (input, "%d", &tmp)) + *result = tmp; + else + return 0; + return 1; +} + +static clib_error_t * +vxlan_gpe_add_del_tunnel_command_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + unformat_input_t _line_input, * line_input = &_line_input; + u8 is_add = 1; + ip4_address_t local, remote; + u8 local_set = 0; + u8 remote_set = 0; + u32 encap_fib_index = 0; + u32 decap_fib_index = 0; + u8 protocol = VXLAN_GPE_PROTOCOL_IP4; + u32 decap_next_index = VXLAN_GPE_INPUT_NEXT_IP4_INPUT; + u32 vni; + u8 vni_set = 0; + int rv; + u32 tmp; + vnet_vxlan_gpe_add_del_tunnel_args_t _a, * a = &_a; + + /* Get a line of input. */ + if (! unformat_user (input, unformat_line_input, line_input)) + return 0; + + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) { + if (unformat (line_input, "del")) + is_add = 0; + else if (unformat (line_input, "local %U", + unformat_ip4_address, &local)) + local_set = 1; + else if (unformat (line_input, "remote %U", + unformat_ip4_address, &remote)) + remote_set = 1; + else if (unformat (line_input, "encap-vrf-id %d", &tmp)) + { + encap_fib_index = fib_index_from_fib_id (tmp); + if (encap_fib_index == ~0) + return clib_error_return (0, "nonexistent encap fib id %d", tmp); + } + else if (unformat (line_input, "decap-vrf-id %d", &tmp)) + { + decap_fib_index = fib_index_from_fib_id (tmp); + if (decap_fib_index == ~0) + return clib_error_return (0, "nonexistent decap fib id %d", tmp); + } + else if (unformat (line_input, "decap-next %U", unformat_gpe_decap_next, + &decap_next_index)) + ; + else if (unformat (line_input, "vni %d", &vni)) + vni_set = 1; + else if (unformat(line_input, "next-ip4")) + protocol = VXLAN_GPE_PROTOCOL_IP4; + else if (unformat(line_input, "next-ip6")) + protocol = VXLAN_GPE_PROTOCOL_IP6; + else if (unformat(line_input, "next-ethernet")) + protocol = VXLAN_GPE_PROTOCOL_ETHERNET; + else if (unformat(line_input, "next-nsh")) + protocol = VXLAN_GPE_PROTOCOL_NSH; + else + return clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + } + + unformat_free (line_input); + + if (local_set == 0) + return clib_error_return (0, "tunnel local address not specified"); + + if (remote_set == 0) + return clib_error_return (0, "tunnel remote address not specified"); + + if (vni_set == 0) + return clib_error_return (0, "vni not specified"); + + memset (a, 0, sizeof (*a)); + + a->is_add = is_add; + +#define _(x) a->x = x; + foreach_gpe_copy_field; +#undef _ + + rv = vnet_vxlan_gpe_add_del_tunnel (a, 0 /* hw_if_indexp */); + + switch(rv) + { + case 0: + break; + case VNET_API_ERROR_INVALID_DECAP_NEXT: + return clib_error_return (0, "invalid decap-next..."); + + case VNET_API_ERROR_TUNNEL_EXIST: + return clib_error_return (0, "tunnel already exists..."); + + case VNET_API_ERROR_NO_SUCH_ENTRY: + return clib_error_return (0, "tunnel does not 
exist..."); + + default: + return clib_error_return + (0, "vnet_vxlan_gpe_add_del_tunnel returned %d", rv); + } + + return 0; +} + +VLIB_CLI_COMMAND (create_vxlan_gpe_tunnel_command, static) = { + .path = "create vxlan-gpe tunnel", + .short_help = + "create vxlan-gpe tunnel local remote " + " vni [next-ip4][next-ip6][next-ethernet][next-nsh]" + " [encap-vrf-id ] [decap-vrf-id ]" + " [del]\n", + .function = vxlan_gpe_add_del_tunnel_command_fn, +}; + +static clib_error_t * +show_vxlan_gpe_tunnel_command_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + vxlan_gpe_main_t * gm = &vxlan_gpe_main; + vxlan_gpe_tunnel_t * t; + + if (pool_elts (gm->tunnels) == 0) + vlib_cli_output (vm, "No vxlan-gpe tunnels configured."); + + pool_foreach (t, gm->tunnels, + ({ + vlib_cli_output (vm, "%U", format_vxlan_gpe_tunnel, t); + })); + + return 0; +} + +VLIB_CLI_COMMAND (show_vxlan_gpe_tunnel_command, static) = { + .path = "show vxlan-gpe", + .function = show_vxlan_gpe_tunnel_command_fn, +}; + +clib_error_t *vxlan_gpe_init (vlib_main_t *vm) +{ + vxlan_gpe_main_t *gm = &vxlan_gpe_main; + + gm->vnet_main = vnet_get_main(); + gm->vlib_main = vm; + + gm->vxlan_gpe_tunnel_by_key + = hash_create_mem (0, sizeof(vxlan_gpe_tunnel_key_t), sizeof (uword)); + + udp_register_dst_port (vm, UDP_DST_PORT_vxlan_gpe, + vxlan_gpe_input_node.index, 1 /* is_ip4 */); + return 0; +} + +VLIB_INIT_FUNCTION(vxlan_gpe_init); + diff --git a/vnet/vnet/vxlan-gpe/vxlan_gpe.h b/vnet/vnet/vxlan-gpe/vxlan_gpe.h new file mode 100644 index 00000000000..4c2ac444e34 --- /dev/null +++ b/vnet/vnet/vxlan-gpe/vxlan_gpe.h @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2015 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef included_vnet_vxlan_gpe_h +#define included_vnet_vxlan_gpe_h + +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +typedef CLIB_PACKED (struct { + ip4_header_t ip4; /* 20 bytes */ + udp_header_t udp; /* 8 bytes */ + vxlan_gpe_header_t vxlan; /* 8 bytes */ +}) ip4_vxlan_gpe_header_t; + +typedef CLIB_PACKED(struct { + /* + * Key fields: local remote, vni + * all fields in NET byte order + */ + union { + struct { + u32 local; + u32 remote; + u32 vni; /* shifted 8 bits */ + u32 pad; + }; + u64 as_u64[2]; + }; +}) vxlan_gpe_tunnel_key_t; + +typedef struct { + /* Rewrite string. 
$$$$ embed vnet_rewrite header */ + u8 * rewrite; + + /* encapsulated protocol */ + u8 protocol; + + /* tunnel src and dst addresses */ + ip4_address_t local; + ip4_address_t remote; + + /* FIB indices */ + u32 encap_fib_index; /* tunnel partner lookup here */ + u32 decap_fib_index; /* inner IP lookup here */ + + /* vxlan VNI in HOST byte order, shifted left 8 bits */ + u32 vni; + + /* vnet intfc hw/sw_if_index */ + u32 hw_if_index; + u32 sw_if_index; + +} vxlan_gpe_tunnel_t; + +#define foreach_vxlan_gpe_input_next \ +_(DROP, "error-drop") \ +_(IP4_INPUT, "ip4-input") \ +_(IP6_INPUT, "ip6-input") \ +_(ETHERNET_INPUT, "ethernet-input") \ +_(NSH_INPUT, "nsh-input") + +typedef enum { +#define _(s,n) VXLAN_GPE_INPUT_NEXT_##s, + foreach_vxlan_gpe_input_next +#undef _ + VXLAN_GPE_INPUT_N_NEXT, +} vxlan_gpe_input_next_t; + +typedef enum { +#define vxlan_gpe_error(n,s) VXLAN_GPE_ERROR_##n, +#include +#undef vxlan_gpe_error + VXLAN_GPE_N_ERROR, +} vxlan_gpe_input_error_t; + +typedef struct { + /* vector of encap tunnel instances */ + vxlan_gpe_tunnel_t *tunnels; + + /* lookup tunnel by key */ + uword * vxlan_gpe_tunnel_by_key; + + /* Free vlib hw_if_indices */ + u32 * free_vxlan_gpe_tunnel_hw_if_indices; + + /* show device instance by real device instance */ + u32 * dev_inst_by_real; + + /* convenience */ + vlib_main_t * vlib_main; + vnet_main_t * vnet_main; +} vxlan_gpe_main_t; + +vxlan_gpe_main_t vxlan_gpe_main; + +extern vlib_node_registration_t vxlan_gpe_encap_node; +extern vlib_node_registration_t vxlan_gpe_input_node; + +u8 * format_vxlan_gpe_encap_trace (u8 * s, va_list * args); + +typedef struct { + u8 is_add; + ip4_address_t local, remote; + u8 protocol; + u32 encap_fib_index; + u32 decap_fib_index; + u32 decap_next_index; + u32 vni; +} vnet_vxlan_gpe_add_del_tunnel_args_t; + + +int vnet_vxlan_gpe_add_del_tunnel +(vnet_vxlan_gpe_add_del_tunnel_args_t *a, u32 * sw_if_indexp); + + + + + +#endif /* included_vnet_vxlan_gpe_h */ diff --git a/vnet/vnet/vxlan-gpe/vxlan_gpe_error.def b/vnet/vnet/vxlan-gpe/vxlan_gpe_error.def new file mode 100644 index 00000000000..9cf1b1cb656 --- /dev/null +++ b/vnet/vnet/vxlan-gpe/vxlan_gpe_error.def @@ -0,0 +1,16 @@ +/* + * Copyright (c) 2015 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +vxlan_gpe_error (DECAPSULATED, "good packets decapsulated") +vxlan_gpe_error (NO_SUCH_TUNNEL, "no such tunnel packets") diff --git a/vnet/vnet/vxlan-gpe/vxlan_gpe_packet.h b/vnet/vnet/vxlan-gpe/vxlan_gpe_packet.h new file mode 100644 index 00000000000..3403cc9ebad --- /dev/null +++ b/vnet/vnet/vxlan-gpe/vxlan_gpe_packet.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2015 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef included_vxlan_gpe_packet_h +#define included_vxlan_gpe_packet_h + +/* + * From draft-quinn-vxlan-gpe-03.txt + * + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * |R|R|R|R|I|P|R|O|Ver| Reserved |Next Protocol | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | VXLAN Network Identifier (VNI) | Reserved | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * I Bit: Flag bit 4 indicates that the VNI is valid. + * + * P Bit: Flag bit 5 is defined as the Next Protocol bit. The P bit + * MUST be set to 1 to indicate the presence of the 8 bit next + * protocol field. + * + * O Bit: Flag bit 7 is defined as the O bit. When the O bit is set to 1, + * + * the packet is an OAM packet and OAM processing MUST occur. The OAM + * protocol details are out of scope for this document. As with the + * P-bit, bit 7 is currently a reserved flag in VXLAN. + * + * VXLAN-gpe bits 8 and 9 are defined as version bits. These bits are + * reserved in VXLAN. The version field is used to ensure backward + * compatibility going forward with future VXLAN-gpe updates. + * + * The initial version for VXLAN-gpe is 0. + * + * This draft defines the following Next Protocol values: + * + * 0x1 : IPv4 + * 0x2 : IPv6 + * 0x3 : Ethernet + * 0x4 : Network Service Header [NSH] + */ + +#define foreach_vxlan_gpe_protocol \ +_ (0x01, IP4) \ +_ (0x02, IP6) \ +_ (0x03, ETHERNET) \ +_ (0x04, NSH) + + +typedef enum { +#define _(n,f) VXLAN_GPE_PROTOCOL_##f = n, + foreach_vxlan_gpe_protocol +#undef _ +} vxlan_gpe_protocol_t; + +typedef struct { + u8 flags; + u8 ver_res; + u8 res; + /* see vxlan_gpe_protocol_t */ + u8 protocol; + u32 vni_res; +} vxlan_gpe_header_t; + +#define VXLAN_GPE_FLAGS_I 0x08 +#define VXLAN_GPE_FLAGS_P 0x04 +#define VXLAN_GPE_FLAGS_O 0x01 +#define VXLAN_GPE_VERSION 0x0 + +#endif /* included_vxlan_gpe_packet_h */ diff --git a/vpp-api-test/vat/api_format.c b/vpp-api-test/vat/api_format.c index 554ee1b1217..496b2d60d76 100644 --- a/vpp-api-test/vat/api_format.c +++ b/vpp-api-test/vat/api_format.c @@ -27,8 +27,7 @@ #include #include #include -#include -#include +#include #include #include @@ -847,67 +846,6 @@ static void vl_api_mpls_gre_add_del_tunnel_reply_t_handler_json vam->result_ready = 1; } -static void vl_api_nsh_gre_add_del_tunnel_reply_t_handler -(vl_api_nsh_gre_add_del_tunnel_reply_t * mp) -{ - vat_main_t * vam = &vat_main; - i32 retval = ntohl(mp->retval); - u32 sw_if_index = ntohl(mp->sw_if_index); - - if (retval >= 0 && sw_if_index != (u32)~0) { - errmsg ("sw_if_index %d\n", ntohl(mp->sw_if_index)); - } - vam->retval = retval; - vam->result_ready = 1; -} - -static void vl_api_nsh_gre_add_del_tunnel_reply_t_handler_json -(vl_api_nsh_gre_add_del_tunnel_reply_t * mp) -{ - vat_main_t * vam = &vat_main; - vat_json_node_t node; - - vat_json_init_object(&node); - vat_json_object_add_int(&node, "retval", ntohl(mp->retval)); - vat_json_object_add_uint(&node, "sw_if_index", ntohl(mp->sw_if_index)); - - vat_json_print(vam->ofp, &node); - 
vat_json_free(&node); - - vam->retval = ntohl(mp->retval); - vam->result_ready = 1; -} - -static void vl_api_nsh_vxlan_gpe_add_del_tunnel_reply_t_handler -(vl_api_nsh_vxlan_gpe_add_del_tunnel_reply_t * mp) -{ - vat_main_t * vam = &vat_main; - i32 retval = ntohl(mp->retval); - u32 sw_if_index = ntohl(mp->sw_if_index); - - if (retval >= 0 && sw_if_index != (u32)~0) { - errmsg ("sw_if_index %d\n", ntohl(mp->sw_if_index)); - } - vam->retval = retval; - vam->result_ready = 1; -} - -static void vl_api_nsh_vxlan_gpe_add_del_tunnel_reply_t_handler_json -(vl_api_nsh_vxlan_gpe_add_del_tunnel_reply_t * mp) -{ - vat_main_t * vam = &vat_main; - vat_json_node_t node; - - vat_json_init_object(&node); - vat_json_object_add_int(&node, "retval", ntohl(mp->retval)); - vat_json_object_add_uint(&node, "sw_if_index", ntohl(mp->sw_if_index)); - - vat_json_print(vam->ofp, &node); - vat_json_free(&node); - - vam->retval = ntohl(mp->retval); - vam->result_ready = 1; -} static void vl_api_show_version_reply_t_handler (vl_api_show_version_reply_t * mp) @@ -2195,7 +2133,7 @@ _(set_arp_neighbor_limit_reply) \ _(l2_patch_add_del_reply) \ _(sr_tunnel_add_del_reply) \ _(sr_policy_add_del_reply) \ -_(sr_multicast_map_add_del_reply) \ +_(sr_multicast_map_add_del_reply) \ _(classify_add_del_session_reply) \ _(classify_set_interface_ip_table_reply) \ _(classify_set_interface_l2_tables_reply) \ @@ -2239,6 +2177,7 @@ _(lisp_add_del_map_resolver_reply) \ _(lisp_gpe_enable_disable_reply) \ _(lisp_gpe_add_del_iface_reply) \ _(lisp_enable_disable_reply) \ +_(vxlan_gpe_add_del_tunnel_reply) \ _(af_packet_create_reply) \ _(af_packet_delete_reply) @@ -2366,9 +2305,8 @@ _(CREATE_VHOST_USER_IF_REPLY, create_vhost_user_if_reply) \ _(MODIFY_VHOST_USER_IF_REPLY, modify_vhost_user_if_reply) \ _(DELETE_VHOST_USER_IF_REPLY, delete_vhost_user_if_reply) \ _(SHOW_VERSION_REPLY, show_version_reply) \ -_(NSH_GRE_ADD_DEL_TUNNEL_REPLY, nsh_gre_add_del_tunnel_reply) \ _(L2_FIB_TABLE_ENTRY, l2_fib_table_entry) \ -_(NSH_VXLAN_GPE_ADD_DEL_TUNNEL_REPLY, nsh_vxlan_gpe_add_del_tunnel_reply) \ +_(VXLAN_GPE_ADD_DEL_TUNNEL_REPLY, vxlan_gpe_add_del_tunnel_reply) \ _(INTERFACE_NAME_RENUMBER_REPLY, interface_name_renumber_reply) \ _(WANT_IP4_ARP_EVENTS_REPLY, want_ip4_arp_events_reply) \ _(IP4_ARP_EVENT, ip4_arp_event) \ @@ -7935,312 +7873,75 @@ static int api_show_version (vat_main_t * vam) return 0; } -static uword unformat_nsh_gre_decap_next -(unformat_input_t * input, va_list * args) -{ - u32 * result = va_arg (*args, u32 *); - u32 tmp; - - if (unformat (input, "drop")) - *result = NSH_GRE_INPUT_NEXT_DROP; - else if (unformat (input, "ip4")) - *result = NSH_GRE_INPUT_NEXT_IP4_INPUT; - else if (unformat (input, "ip6")) - *result = NSH_GRE_INPUT_NEXT_IP6_INPUT; - else if (unformat (input, "ethernet")) - *result = NSH_GRE_INPUT_NEXT_ETHERNET_INPUT; - else if (unformat (input, "%d", &tmp)) - *result = tmp; - else - return 0; - return 1; -} - -static int api_nsh_gre_add_del_tunnel (vat_main_t * vam) -{ - unformat_input_t * line_input = vam->input; - vl_api_nsh_gre_add_del_tunnel_t *mp; - f64 timeout; - ip4_address_t src, dst; - u8 is_add = 1; - u8 src_set = 0; - u8 dst_set = 0; - u32 encap_vrf_id = 0; - u32 decap_vrf_id = 0; - u8 ver_o_c = 0; - u8 md_type = 0; - u8 next_protocol = 1; /* ip4 */ - u32 spi; - u8 spi_set = 0; - u32 si; - u8 si_set = 0; - u32 spi_si; - u32 c1 = 0; - u32 c2 = 0; - u32 c3 = 0; - u32 c4 = 0; - u32 *tlvs = 0; - u32 decap_next_index = NSH_GRE_INPUT_NEXT_IP4_INPUT; - u32 tmp; - int i; - - while (unformat_check_input (line_input) != 
UNFORMAT_END_OF_INPUT) { - if (unformat (line_input, "del")) - is_add = 0; - else if (unformat (line_input, "src %U", - unformat_ip4_address, &src)) - src_set = 1; - else if (unformat (line_input, "dst %U", - unformat_ip4_address, &dst)) - dst_set = 1; - else if (unformat (line_input, "encap-vrf-id %d", &encap_vrf_id)) - ; - else if (unformat (line_input, "decap-vrf-id %d", &decap_vrf_id)) - ; - else if (unformat (line_input, "decap-next %U", - unformat_nsh_gre_decap_next, &decap_next_index)) - ; - else if (unformat (line_input, "version %d", &tmp)) - ver_o_c |= (tmp & 3) << 6; - else if (unformat (line_input, "o-bit %d", &tmp)) - ver_o_c |= (tmp & 1) << 5; - else if (unformat (line_input, "c-bit %d", &tmp)) - ver_o_c |= (tmp & 1) << 4; - else if (unformat (line_input, "md-type %d", &tmp)) - md_type = tmp; - else if (unformat(line_input, "next-ip4")) - next_protocol = 1; - else if (unformat(line_input, "next-ip6")) - next_protocol = 2; - else if (unformat(line_input, "next-ethernet")) - next_protocol = 3; - else if (unformat (line_input, "c1 %d", &c1)) - ; - else if (unformat (line_input, "c2 %d", &c2)) - ; - else if (unformat (line_input, "c3 %d", &c3)) - ; - else if (unformat (line_input, "c4 %d", &c4)) - ; - else if (unformat (line_input, "spi %d", &spi)) - spi_set = 1; - else if (unformat (line_input, "si %d", &si)) - si_set = 1; - else if (unformat (line_input, "tlv %x")) - vec_add1 (tlvs, tmp); - else { - errmsg ("parse error '%U'\n", format_unformat_error, line_input); - return -99; - } - } - - if (src_set == 0) { - errmsg ("tunnel src address not specified\n"); - return -99; - } - if (dst_set == 0) { - errmsg ("tunnel dst address not specified\n"); - return -99; - } - - if (spi_set == 0) { - errmsg ("spi not specified\n"); - return -99; - } - - if (si_set == 0) { - errmsg ("si not specified\n"); - return -99; - } - - M2 (NSH_GRE_ADD_DEL_TUNNEL, nsh_gre_add_del_tunnel, - sizeof(u32) * vec_len (tlvs)); - - spi_si = (spi<<8) | si; - - mp->src = src.as_u32; - mp->dst = dst.as_u32; - mp->encap_vrf_id = ntohl(encap_vrf_id); - mp->decap_vrf_id = ntohl(decap_vrf_id); - mp->decap_next_index = ntohl(decap_next_index); - mp->tlv_len_in_words = vec_len (tlvs); - mp->is_add = is_add; - mp->ver_o_c = ver_o_c; - mp->length = 6 + vec_len(tlvs); - mp->md_type = md_type; - mp->next_protocol = next_protocol; - mp->spi_si = ntohl(spi_si); - mp->c1 = ntohl(c1); - mp->c2 = ntohl(c2); - mp->c3 = ntohl(c3); - mp->c4 = ntohl(c4); - - for (i = 0; i < vec_len(tlvs); i++) - mp->tlvs[i] = ntohl(tlvs[i]); - - vec_free (tlvs); - - S; W; - /* NOTREACHED */ - return 0; -} - -static uword unformat_nsh_vxlan_gpe_decap_next -(unformat_input_t * input, va_list * args) -{ - u32 * result = va_arg (*args, u32 *); - u32 tmp; - - if (unformat (input, "drop")) - *result = NSH_VXLAN_GPE_INPUT_NEXT_DROP; - else if (unformat (input, "ip4")) - *result = NSH_VXLAN_GPE_INPUT_NEXT_IP4_INPUT; - else if (unformat (input, "ip6")) - *result = NSH_VXLAN_GPE_INPUT_NEXT_IP6_INPUT; - else if (unformat (input, "ethernet")) - *result = NSH_VXLAN_GPE_INPUT_NEXT_ETHERNET_INPUT; - else if (unformat (input, "nsh-vxlan-gpe")) - *result = NSH_VXLAN_GPE_INPUT_NEXT_ETHERNET_INPUT; - else if (unformat (input, "%d", &tmp)) - *result = tmp; - else - return 0; - return 1; -} -static int api_nsh_vxlan_gpe_add_del_tunnel (vat_main_t * vam) +static int api_vxlan_gpe_add_del_tunnel (vat_main_t * vam) { unformat_input_t * line_input = vam->input; - vl_api_nsh_vxlan_gpe_add_del_tunnel_t *mp; + vl_api_vxlan_gpe_add_del_tunnel_t *mp; f64 timeout; - 
ip4_address_t src, dst; + ip4_address_t local, remote; u8 is_add = 1; - u8 src_set = 0; - u8 dst_set = 0; + u8 local_set = 0; + u8 remote_set = 0; u32 encap_vrf_id = 0; u32 decap_vrf_id = 0; - u8 ver_o_c = 0; - u8 md_type = 0; - u8 next_protocol = 1; /* ip4 */ - u32 spi; - u8 spi_set = 0; - u32 si; - u8 si_set = 0; - u32 spi_si; - u32 c1 = 0; - u32 c2 = 0; - u32 c3 = 0; - u32 c4 = 0; - u32 *tlvs = 0; - u32 decap_next_index = NSH_GRE_INPUT_NEXT_IP4_INPUT; + u8 protocol = ~0; u32 vni; u8 vni_set = 0; - u32 tmp; - int i; while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) { if (unformat (line_input, "del")) is_add = 0; - else if (unformat (line_input, "src %U", - unformat_ip4_address, &src)) - src_set = 1; - else if (unformat (line_input, "dst %U", - unformat_ip4_address, &dst)) - dst_set = 1; + else if (unformat (line_input, "local %U", + unformat_ip4_address, &local)) + local_set = 1; + else if (unformat (line_input, "remote %U", + unformat_ip4_address, &remote)) + remote_set = 1; else if (unformat (line_input, "encap-vrf-id %d", &encap_vrf_id)) ; else if (unformat (line_input, "decap-vrf-id %d", &decap_vrf_id)) ; - else if (unformat (line_input, "decap-next %U", - unformat_nsh_vxlan_gpe_decap_next, - &decap_next_index)) - ; else if (unformat (line_input, "vni %d", &vni)) vni_set = 1; - else if (unformat (line_input, "version %d", &tmp)) - ver_o_c |= (tmp & 3) << 6; - else if (unformat (line_input, "o-bit %d", &tmp)) - ver_o_c |= (tmp & 1) << 5; - else if (unformat (line_input, "c-bit %d", &tmp)) - ver_o_c |= (tmp & 1) << 4; - else if (unformat (line_input, "md-type %d", &tmp)) - md_type = tmp; else if (unformat(line_input, "next-ip4")) - next_protocol = 1; + protocol = 1; else if (unformat(line_input, "next-ip6")) - next_protocol = 2; + protocol = 2; else if (unformat(line_input, "next-ethernet")) - next_protocol = 3; - else if (unformat (line_input, "c1 %d", &c1)) - ; - else if (unformat (line_input, "c2 %d", &c2)) - ; - else if (unformat (line_input, "c3 %d", &c3)) - ; - else if (unformat (line_input, "c4 %d", &c4)) - ; - else if (unformat (line_input, "spi %d", &spi)) - spi_set = 1; - else if (unformat (line_input, "si %d", &si)) - si_set = 1; - else if (unformat (line_input, "tlv %x")) - vec_add1 (tlvs, tmp); + protocol = 3; + else if (unformat(line_input, "next-nsh")) + protocol = 4; else { errmsg ("parse error '%U'\n", format_unformat_error, line_input); return -99; } } - if (src_set == 0) { - errmsg ("tunnel src address not specified\n"); + if (local_set == 0) { + errmsg ("tunnel local address not specified\n"); return -99; } - if (dst_set == 0) { - errmsg ("tunnel dst address not specified\n"); - return -99; - } - - if (spi_set == 0) { - errmsg ("spi not specified\n"); + if (remote_set == 0) { + errmsg ("tunnel remote address not specified\n"); return -99; } - if (si_set == 0) { - errmsg ("si not specified\n"); - return -99; - } if (vni_set == 0) { errmsg ("vni not specified\n"); return -99; } - M2 (NSH_VXLAN_GPE_ADD_DEL_TUNNEL, nsh_vxlan_gpe_add_del_tunnel, - sizeof(u32) * vec_len (tlvs)); + M(VXLAN_GPE_ADD_DEL_TUNNEL, vxlan_gpe_add_del_tunnel); - spi_si = (spi<<8) | si; - - mp->src = src.as_u32; - mp->dst = dst.as_u32; + mp->local = local.as_u32; + mp->remote = remote.as_u32; mp->encap_vrf_id = ntohl(encap_vrf_id); mp->decap_vrf_id = ntohl(decap_vrf_id); - mp->decap_next_index = ntohl(decap_next_index); - mp->tlv_len_in_words = vec_len (tlvs); + mp->protocol = ntohl(protocol); mp->vni = ntohl(vni); mp->is_add = is_add; - mp->ver_o_c = ver_o_c; - mp->length = 6 + 
vec_len(tlvs); - mp->md_type = md_type; - mp->next_protocol = next_protocol; - mp->spi_si = ntohl(spi_si); - mp->c1 = ntohl(c1); - mp->c2 = ntohl(c2); - mp->c3 = ntohl(c3); - mp->c4 = ntohl(c4); - - for (i = 0; i < vec_len(tlvs); i++) - mp->tlvs[i] = ntohl(tlvs[i]); - vec_free (tlvs); S; W; /* NOTREACHED */ @@ -10785,18 +10486,10 @@ _(modify_vhost_user_if, \ _(delete_vhost_user_if, " | sw_if_index ") \ _(sw_interface_vhost_user_dump, "") \ _(show_version, "") \ -_(nsh_gre_add_del_tunnel, \ - "src dst " \ - "c1 c2 c3 c4 spi si \n" \ - "[encap-fib-id ] [decap-fib-id ] [o-bit <1|0>]\n" \ - "[c-bit <1|0>] [md-type ][next-ip4][next-ip6][next-ethernet]\n" \ - "[tlv ][del]") \ -_(nsh_vxlan_gpe_add_del_tunnel, \ - "src dst vni \n" \ - "c1 c2 c3 c4 spi si \n" \ - "[encap-vrf-id ] [decap-vrf-id ] [o-bit <1|0>]\n" \ - "[c-bit <1|0>] [md-type ][next-ip4][next-ip6][next-ethernet]\n" \ - "[tlv ][del]") \ +_(vxlan_gpe_add_del_tunnel, \ + "local remote vni \n" \ + "[encap-vrf-id ] [decap-vrf-id ] [next-ip4][next-ip6]" \ + "[next-ethernet] [next-nsh]\n") \ _(l2_fib_table_dump, "bd_id ") \ _(interface_name_renumber, \ " | sw_if_index new_show_dev_instance ") \ diff --git a/vpp/api/api.c b/vpp/api/api.c index 9bf4d3c957a..24f7d9c0e19 100644 --- a/vpp/api/api.c +++ b/vpp/api/api.c @@ -67,8 +67,7 @@ #include #include #include -#include -#include +#include #include #include #include @@ -288,10 +287,9 @@ _(IP_ADDRESS_DUMP, ip_address_dump) \ _(IP_DUMP, ip_dump) \ _(SW_INTERFACE_VHOST_USER_DETAILS, sw_interface_vhost_user_details) \ _(SHOW_VERSION, show_version) \ -_(NSH_GRE_ADD_DEL_TUNNEL, nsh_gre_add_del_tunnel) \ _(L2_FIB_TABLE_DUMP, l2_fib_table_dump) \ _(L2_FIB_TABLE_ENTRY, l2_fib_table_entry) \ -_(NSH_VXLAN_GPE_ADD_DEL_TUNNEL, nsh_vxlan_gpe_add_del_tunnel) \ +_(VXLAN_GPE_ADD_DEL_TUNNEL, vxlan_gpe_add_del_tunnel) \ _(INTERFACE_NAME_RENUMBER, interface_name_renumber) \ _(WANT_IP4_ARP_EVENTS, want_ip4_arp_events) \ _(INPUT_ACL_SET_INTERFACE, input_acl_set_interface) \ @@ -4528,88 +4526,18 @@ vl_api_l2_patch_add_del_t_handler (vl_api_l2_patch_add_del_t *mp) } static void -vl_api_nsh_gre_add_del_tunnel_t_handler -(vl_api_nsh_gre_add_del_tunnel_t * mp) +vl_api_vxlan_gpe_add_del_tunnel_t_handler +(vl_api_vxlan_gpe_add_del_tunnel_t * mp) { - vl_api_nsh_gre_add_del_tunnel_reply_t * rmp; + vl_api_vxlan_gpe_add_del_tunnel_reply_t * rmp; int rv = 0; - vnet_nsh_gre_add_del_tunnel_args_t _a, *a = &_a; + vnet_vxlan_gpe_add_del_tunnel_args_t _a, *a = &_a; u32 encap_fib_index, decap_fib_index; - u32 decap_next_index; + u8 protocol; uword * p; ip4_main_t * im = &ip4_main; - u32 * tlvs = 0; u32 sw_if_index = ~0; - int i; - - p = hash_get (im->fib_index_by_table_id, ntohl(mp->encap_vrf_id)); - if (! p) { - rv = VNET_API_ERROR_NO_SUCH_FIB; - goto out; - } - encap_fib_index = p[0]; - - decap_next_index = ntohl(mp->decap_next_index); - - /* Interpret decap_vrf_id as an opaque if sending to other-than-ip4-input */ - if (decap_next_index == NSH_GRE_INPUT_NEXT_IP4_INPUT) { - p = hash_get (im->fib_index_by_table_id, ntohl(mp->decap_vrf_id)); - if (! 
p) { - rv = VNET_API_ERROR_NO_SUCH_INNER_FIB; - goto out; - } - decap_fib_index = p[0]; - } else { - decap_fib_index = ntohl(mp->decap_vrf_id); - } - - memset (a, 0, sizeof (*a)); - a->is_add = mp->is_add; - /* ip addresses sent in network byte order */ - a->src.as_u32 = ntohl(mp->src); - a->dst.as_u32 = ntohl(mp->dst); - a->encap_fib_index = encap_fib_index; - a->decap_fib_index = decap_fib_index; - a->decap_next_index = decap_next_index; - a->nsh_hdr.ver_o_c = mp->ver_o_c; - a->nsh_hdr.length = mp->length; - a->nsh_hdr.md_type = mp->md_type; - a->nsh_hdr.next_protocol = mp->next_protocol; - a->nsh_hdr.spi_si = ntohl(mp->spi_si); - a->nsh_hdr.c1 = ntohl(mp->c1); - a->nsh_hdr.c2 = ntohl(mp->c2); - a->nsh_hdr.c3 = ntohl(mp->c3); - a->nsh_hdr.c4 = ntohl(mp->c4); - - for (i = 0; i < mp->tlv_len_in_words; i++) - vec_add1 (tlvs, ntohl(mp->tlvs[i])); - - a->nsh_hdr.tlvs = tlvs; - - rv = vnet_nsh_gre_add_del_tunnel (a, &sw_if_index); - -out: - REPLY_MACRO2(VL_API_NSH_GRE_ADD_DEL_TUNNEL_REPLY, - ({ - rmp->sw_if_index = ntohl (sw_if_index); - })); -} - -static void -vl_api_nsh_vxlan_gpe_add_del_tunnel_t_handler -(vl_api_nsh_vxlan_gpe_add_del_tunnel_t * mp) -{ - vl_api_nsh_vxlan_gpe_add_del_tunnel_reply_t * rmp; - int rv = 0; - vnet_nsh_vxlan_gpe_add_del_tunnel_args_t _a, *a = &_a; - u32 encap_fib_index, decap_fib_index; - u32 decap_next_index; - uword * p; - ip4_main_t * im = &ip4_main; - u32 * tlvs = 0; - u32 sw_if_index = ~0; - int i; p = hash_get (im->fib_index_by_table_id, ntohl(mp->encap_vrf_id)); if (! p) { @@ -4618,10 +4546,10 @@ vl_api_nsh_vxlan_gpe_add_del_tunnel_t_handler } encap_fib_index = p[0]; - decap_next_index = ntohl(mp->decap_next_index); + protocol = ntohl(mp->protocol); /* Interpret decap_vrf_id as an opaque if sending to other-than-ip4-input */ - if (decap_next_index == NSH_GRE_INPUT_NEXT_IP4_INPUT) { + if (protocol == VXLAN_GPE_INPUT_NEXT_IP4_INPUT) { p = hash_get (im->fib_index_by_table_id, ntohl(mp->decap_vrf_id)); if (! 
p) { rv = VNET_API_ERROR_NO_SUCH_INNER_FIB; @@ -4636,31 +4564,16 @@ vl_api_nsh_vxlan_gpe_add_del_tunnel_t_handler a->is_add = mp->is_add; /* ip addresses sent in network byte order */ - a->src.as_u32 = ntohl(mp->src); - a->dst.as_u32 = ntohl(mp->dst); + a->local.as_u32 = ntohl(mp->local); + a->remote.as_u32 = ntohl(mp->remote); a->encap_fib_index = encap_fib_index; a->decap_fib_index = decap_fib_index; - a->decap_next_index = decap_next_index; + a->protocol = protocol; a->vni = ntohl(mp->vni); - a->nsh_hdr.ver_o_c = mp->ver_o_c; - a->nsh_hdr.length = mp->length; - a->nsh_hdr.md_type = mp->md_type; - a->nsh_hdr.next_protocol = mp->next_protocol; - a->nsh_hdr.spi_si = ntohl(mp->spi_si); - a->nsh_hdr.c1 = ntohl(mp->c1); - a->nsh_hdr.c2 = ntohl(mp->c2); - a->nsh_hdr.c3 = ntohl(mp->c3); - a->nsh_hdr.c4 = ntohl(mp->c4); - - for (i = 0; i < mp->tlv_len_in_words; i++) - vec_add1 (tlvs, ntohl(mp->tlvs[i])); - - a->nsh_hdr.tlvs = tlvs; - - rv = vnet_nsh_vxlan_gpe_add_del_tunnel (a, &sw_if_index); + rv = vnet_vxlan_gpe_add_del_tunnel (a, &sw_if_index); out: - REPLY_MACRO2(VL_API_NSH_VXLAN_GPE_ADD_DEL_TUNNEL_REPLY, + REPLY_MACRO2(VL_API_VXLAN_GPE_ADD_DEL_TUNNEL_REPLY, ({ rmp->sw_if_index = ntohl (sw_if_index); })); @@ -6044,12 +5957,6 @@ vpe_api_hookup (vlib_main_t *vm) am->api_trace_cfg [VL_API_VXLAN_ADD_DEL_TUNNEL].size += 16 * sizeof (u32); - /* - * trace space for 4 nsh-gre variable TLV words - */ - am->api_trace_cfg [VL_API_NSH_GRE_ADD_DEL_TUNNEL].size - += 4 * sizeof (u32); - /* * Thread-safe API messages */ diff --git a/vpp/api/custom_dump.c b/vpp/api/custom_dump.c index f77a41cc93f..ba41b1fb603 100644 --- a/vpp/api/custom_dump.c +++ b/vpp/api/custom_dump.c @@ -26,8 +26,9 @@ #include #include #include -#include -#include +//#include //alagalah testing nsh-gre removal +#include //alagalah testing nsh-gre removal +#include #include #include #include @@ -1574,77 +1575,17 @@ static void *vl_api_show_version_t_print FINISH; } -static void *vl_api_nsh_gre_add_del_tunnel_t_print -(vl_api_nsh_gre_add_del_tunnel_t * mp, void *handle) +static void *vl_api_vxlan_gpe_add_del_tunnel_t_print +(vl_api_vxlan_gpe_add_del_tunnel_t * mp, void *handle) { u8 * s; - int i; - u32 spi_si; - - s = format (0, "SCRIPT: nsh_gre_add_del_tunnel "); - - s = format (s, "src %U dst %U ", format_ip4_address, &mp->src, - format_ip4_address, &mp->dst); - - spi_si = ntohl(mp->spi_si); - - s = format (s, "spi %d si %d ", (spi_si>>8), (spi_si & 0xff)); - - s = format (s, "decap-next %d ", ntohl(mp->decap_next_index)); - - if (mp->is_add == 0) - s = format (s, "del "); - - if (mp->encap_vrf_id) - s = format (s, "encap-vrf-id %d ", ntohl(mp->encap_vrf_id)); - - if (mp->decap_vrf_id) - s = format (s, "decap-vrf-id %d ", ntohl(mp->decap_vrf_id)); - - if (mp->ver_o_c & 0xc) - s = format (s, "version %d ", mp->ver_o_c>>6); - if (mp->ver_o_c & NSH_O_BIT) - s = format (s, "o-bit 1 "); - if (mp->ver_o_c & NSH_C_BIT) - s = format (s, "c-bit 1 "); - if (mp->md_type) - s = format (s, "md-type %d ", mp->md_type); - if (mp->next_protocol == 1) - s = format (s, "next-ip4 "); - else if (mp->next_protocol == 2) - s = format (s, "next-ip6 "); - else if (mp->next_protocol == 3) - s = format (s, "next-ethernet"); - - s = format (s, "c1 %d ", ntohl(mp->c1)); - s = format (s, "c2 %d ", ntohl(mp->c2)); - s = format (s, "c3 %d ", ntohl(mp->c3)); - s = format (s, "c4 %d ", ntohl(mp->c4)); - - for (i = 0; i < mp->tlv_len_in_words; i++) { - s = format (s, "tlv %x ", ntohl(mp->tlvs[i])); - } - - FINISH; -} -static void 
*vl_api_nsh_vxlan_gpe_add_del_tunnel_t_print -(vl_api_nsh_vxlan_gpe_add_del_tunnel_t * mp, void *handle) -{ - u8 * s; - int i; - u32 spi_si; + s = format (0, "SCRIPT: vxlan_gpe_add_del_tunnel "); - s = format (0, "SCRIPT: nsh_vxlan_gpe_add_del_tunnel "); + s = format (s, "local %U remote %U ", format_ip4_address, &mp->local, + format_ip4_address, &mp->remote); - s = format (s, "src %U dst %U ", format_ip4_address, &mp->src, - format_ip4_address, &mp->dst); - - spi_si = ntohl(mp->spi_si); - - s = format (s, "spi %d si %d ", (spi_si>>8), (spi_si & 0xff)); - - s = format (s, "decap-next %d ", ntohl(mp->decap_next_index)); + s = format (s, "protocol %d ", ntohl(mp->protocol)); s = format (s, "vni %d ", ntohl(mp->vni)); @@ -1657,30 +1598,6 @@ static void *vl_api_nsh_vxlan_gpe_add_del_tunnel_t_print if (mp->decap_vrf_id) s = format (s, "decap-vrf-id %d ", ntohl(mp->decap_vrf_id)); - if (mp->ver_o_c & 0xc) - s = format (s, "version %d ", mp->ver_o_c>>6); - if (mp->ver_o_c & NSH_O_BIT) - s = format (s, "o-bit 1 "); - if (mp->ver_o_c & NSH_C_BIT) - s = format (s, "c-bit 1 "); - if (mp->md_type) - s = format (s, "md-type %d ", mp->md_type); - if (mp->next_protocol == 1) - s = format (s, "next-ip4 "); - else if (mp->next_protocol == 2) - s = format (s, "next-ip6 "); - else if (mp->next_protocol == 3) - s = format (s, "next-ethernet"); - - s = format (s, "c1 %d ", ntohl(mp->c1)); - s = format (s, "c2 %d ", ntohl(mp->c2)); - s = format (s, "c3 %d ", ntohl(mp->c3)); - s = format (s, "c4 %d ", ntohl(mp->c4)); - - for (i = 0; i < mp->tlv_len_in_words; i++) { - s = format (s, "tlv %x ", ntohl(mp->tlvs[i])); - } - FINISH; } @@ -1872,9 +1789,8 @@ _(CLI_REQUEST, cli_request) \ _(MEMCLNT_CREATE, memclnt_create) \ _(SW_INTERFACE_VHOST_USER_DUMP, sw_interface_vhost_user_dump) \ _(SHOW_VERSION, show_version) \ -_(NSH_GRE_ADD_DEL_TUNNEL, nsh_gre_add_del_tunnel) \ _(L2_FIB_TABLE_DUMP, l2_fib_table_dump) \ -_(NSH_VXLAN_GPE_ADD_DEL_TUNNEL, nsh_vxlan_gpe_add_del_tunnel) \ +_(VXLAN_GPE_ADD_DEL_TUNNEL, vxlan_gpe_add_del_tunnel) \ _(INTERFACE_NAME_RENUMBER, interface_name_renumber) \ _(WANT_IP4_ARP_EVENTS, want_ip4_arp_events) \ _(INPUT_ACL_SET_INTERFACE, input_acl_set_interface) \ diff --git a/vpp/api/vpe.api b/vpp/api/vpe.api index 8b5b21ca02c..fadecfb1966 100644 --- a/vpp/api/vpe.api +++ b/vpp/api/vpe.api @@ -2139,15 +2139,27 @@ manual_java define l2_fib_table_dump { u32 bd_id; }; -define nsh_vxlan_gpe_add_del_tunnel { +define vxlan_gpe_add_del_tunnel { u32 client_index; u32 context; - u32 src; - u32 dst; + u32 local; + u32 remote; u32 encap_vrf_id; u32 decap_vrf_id; - u32 decap_next_index; + u8 protocol; u32 vni; + u8 is_add; +}; + +define vxlan_gpe_add_del_tunnel_reply { + u32 context; + i32 retval; + u32 sw_if_index; +}; + +define nsh_add_del_entry { + u32 client_index; + u32 context; u8 tlv_len_in_words; u8 is_add; u8 ver_o_c; @@ -2155,7 +2167,7 @@ define nsh_vxlan_gpe_add_del_tunnel { u8 md_type; u8 next_protocol; /* in network byte order */ - u32 spi_si; + u32 nsp_nsi; u32 c1; u32 c2; u32 c3; @@ -2163,7 +2175,7 @@ define nsh_vxlan_gpe_add_del_tunnel { u32 tlvs[0]; }; -define nsh_vxlan_gpe_add_del_tunnel_reply { +define nsh_add_del_entry_reply { u32 context; i32 retval; u32 sw_if_index; -- cgit 1.2.3-korg
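
For orientation after applying the patch, here is a hedged sketch of driving
the new tunnel-creation path from C.  Only vnet_vxlan_gpe_add_del_tunnel(),
vnet_vxlan_gpe_add_del_tunnel_args_t and the VXLAN_GPE_* enums come from the
patch; the helper name, addresses and VNI below are illustrative, and the
network-byte-order handling of the addresses is an assumption based on how the
CLI handler fills the same structure.

    #include <vnet/vxlan-gpe/vxlan_gpe.h>  /* args struct, protocol and next-node enums */

    /* Illustrative helper: add a VXLAN-GPE tunnel 10.0.0.1 -> 10.0.0.2,
     * VNI 13, carrying NSH.  Addresses are written in network byte order,
     * as unformat_ip4_address would produce them (assumption). */
    static int
    example_add_vxlan_gpe_nsh_tunnel (u32 * sw_if_indexp)
    {
      vnet_vxlan_gpe_add_del_tunnel_args_t a;

      memset (&a, 0, sizeof (a));
      a.is_add = 1;
      a.local.as_u32 = clib_host_to_net_u32 (0x0a000001);   /* 10.0.0.1 */
      a.remote.as_u32 = clib_host_to_net_u32 (0x0a000002);  /* 10.0.0.2 */
      a.vni = 13;                           /* host byte order; shifted internally */
      a.protocol = VXLAN_GPE_PROTOCOL_NSH;
      a.encap_fib_index = 0;                /* default FIB */
      a.decap_fib_index = 0;
      a.decap_next_index = VXLAN_GPE_INPUT_NEXT_NSH_INPUT;

      return vnet_vxlan_gpe_add_del_tunnel (&a, sw_if_indexp);
    }

The equivalent debug CLI added by this patch would be along the lines of
"create vxlan-gpe tunnel local 10.0.0.1 remote 10.0.0.2 vni 13 next-nsh
encap-vrf-id 0 decap-vrf-id 0", and vpp_api_test exercises the new binary API
with "vxlan_gpe_add_del_tunnel local 10.0.0.1 remote 10.0.0.2 vni 13 next-nsh";
both invocations are illustrative, with addresses and VNI chosen arbitrarily.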