summaryrefslogtreecommitdiffstats
path: root/vnet
diff options
context:
space:
mode:
authorKeith Burns (alagalah) <alagalah@gmail.com>2016-05-05 18:16:50 -0700
committerDave Barach <openvpp@barachs.net>2016-05-13 16:07:41 +0000
commit94b1442a6731bb879797c016a8febaec2f2ae7c9 (patch)
tree7c874e6fc3c102cae06133430c4b9799f073df3a /vnet
parent3e53fc56645f4b99d014031995bd00d16c051a9b (diff)
VPP43 - NSH refactoring: Added nsh-map nodes
- removed vnet/vnet/nsh-gre - removed all nsh from vnet/vnet/nsh_vxlan_gpe to vnet/vnet/nsh - moved vnet/vnet/nsh_vxlan_gpe to vnet/vnet/vxlan_gpe - added cli and binary api for VXLAN GPE tunnels - plan to move vnet/vnet/nsh to new repo (sfc_nsh) and make plugin - added cli for NSH (binary API will be done in sfc_nsh) - vnet/vnet/gre will be extended in VPP-54 Change-Id: I1d27def916532321577ccd68cb982ae0d0a07e6f Signed-off-by: Keith Burns (alagalah) <alagalah@gmail.com>
Diffstat (limited to 'vnet')
-rw-r--r--vnet/Makefile.am35
-rw-r--r--vnet/vnet/gre/node.c3
-rw-r--r--vnet/vnet/nsh-gre/encap.c376
-rw-r--r--vnet/vnet/nsh-gre/nsh_gre.c552
-rw-r--r--vnet/vnet/nsh-gre/nsh_gre.h110
-rw-r--r--vnet/vnet/nsh-vxlan-gpe/decap.c537
-rw-r--r--vnet/vnet/nsh-vxlan-gpe/nsh_vxlan_gpe.c577
-rw-r--r--vnet/vnet/nsh/nsh.c770
-rw-r--r--vnet/vnet/nsh/nsh.h108
-rw-r--r--vnet/vnet/nsh/nsh_error.def6
-rw-r--r--vnet/vnet/nsh/nsh_gre_error.def17
-rw-r--r--vnet/vnet/nsh/nsh_packet.h11
-rw-r--r--vnet/vnet/vxlan-gpe/decap.c (renamed from vnet/vnet/nsh-gre/decap.c)283
-rw-r--r--vnet/vnet/vxlan-gpe/encap.c (renamed from vnet/vnet/nsh-vxlan-gpe/encap.c)147
-rw-r--r--vnet/vnet/vxlan-gpe/vxlan-gpe-rfc.txt (renamed from vnet/vnet/nsh-vxlan-gpe/vxlan-gpe-rfc.txt)0
-rw-r--r--vnet/vnet/vxlan-gpe/vxlan_gpe.c467
-rw-r--r--vnet/vnet/vxlan-gpe/vxlan_gpe.h (renamed from vnet/vnet/nsh-vxlan-gpe/nsh_vxlan_gpe.h)87
-rw-r--r--vnet/vnet/vxlan-gpe/vxlan_gpe_error.def (renamed from vnet/vnet/nsh-vxlan-gpe/nsh_vxlan_gpe_error.def)4
-rw-r--r--vnet/vnet/vxlan-gpe/vxlan_gpe_packet.h (renamed from vnet/vnet/nsh-vxlan-gpe/vxlan_gpe_packet.h)22
19 files changed, 1672 insertions, 2440 deletions
diff --git a/vnet/Makefile.am b/vnet/Makefile.am
index 1239a8b8df4..8a9f214ca70 100644
--- a/vnet/Makefile.am
+++ b/vnet/Makefile.am
@@ -399,37 +399,40 @@ nobase_include_HEADERS += \
########################################
# NSH Map: nsh
########################################
+ libvnet_la_SOURCES += \
+ vnet/nsh/nsh.c
-nobase_include_HEADERS += \
- vnet/nsh/nsh_packet.h \
- vnet/nsh/nsh_error.def
+ nobase_include_HEADERS += \
+ vnet/nsh/nsh_packet.h \
+ vnet/nsh/nsh.h \
+ vnet/nsh/nsh_error.def
########################################
# Tunnel protocol: nsh-gre
########################################
-libvnet_la_SOURCES += \
- vnet/nsh-gre/nsh_gre.c \
- vnet/nsh-gre/encap.c \
- vnet/nsh-gre/decap.c
+# libvnet_la_SOURCES += \
+# vnet/nsh-gre/nsh_gre.c \
+# vnet/nsh-gre/encap.c \
+# vnet/nsh-gre/decap.c
-nobase_include_HEADERS += \
- vnet/nsh-gre/nsh_gre.h
+# nobase_include_HEADERS += \
+# vnet/nsh-gre/nsh_gre.h
########################################
-# Tunnel protocol: nsh-vxlan-gpe
+# Tunnel protocol: vxlan-gpe
########################################
libvnet_la_SOURCES += \
- vnet/nsh-vxlan-gpe/nsh_vxlan_gpe.c \
- vnet/nsh-vxlan-gpe/encap.c \
- vnet/nsh-vxlan-gpe/decap.c
+ vnet/vxlan-gpe/vxlan_gpe.c \
+ vnet/vxlan-gpe/encap.c \
+ vnet/vxlan-gpe/decap.c
nobase_include_HEADERS += \
- vnet/nsh-vxlan-gpe/nsh_vxlan_gpe.h \
- vnet/nsh-vxlan-gpe/vxlan_gpe_packet.h \
- vnet/nsh-vxlan-gpe/nsh_vxlan_gpe_error.def
+ vnet/vxlan-gpe/vxlan_gpe.h \
+ vnet/vxlan-gpe/vxlan_gpe_packet.h \
+ vnet/vxlan-gpe/vxlan_gpe_error.def
########################################
# LISP control plane: lisp-cp
diff --git a/vnet/vnet/gre/node.c b/vnet/vnet/gre/node.c
index 7d07223fc71..5809c5d3e6f 100644
--- a/vnet/vnet/gre/node.c
+++ b/vnet/vnet/gre/node.c
@@ -24,7 +24,8 @@
_(PUNT, "error-punt") \
_(DROP, "error-drop") \
_(IP4_INPUT, "ip4-input") \
-_(IP6_INPUT, "ip6-input")
+_(IP6_INPUT, "ip6-input") \
+_(NSH_INPUT, "nsh-input")
typedef enum {
#define _(s,n) GRE_INPUT_NEXT_##s,
diff --git a/vnet/vnet/nsh-gre/encap.c b/vnet/vnet/nsh-gre/encap.c
deleted file mode 100644
index 78b02178263..00000000000
--- a/vnet/vnet/nsh-gre/encap.c
+++ /dev/null
@@ -1,376 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include <vppinfra/error.h>
-#include <vppinfra/hash.h>
-#include <vnet/vnet.h>
-#include <vnet/ip/ip.h>
-#include <vnet/ethernet/ethernet.h>
-#include <vnet/nsh-gre/nsh_gre.h>
-
-/* Statistics (not really errors) */
-#define foreach_nsh_gre_encap_error \
-_(ENCAPSULATED, "good packets encapsulated")
-
-static char * nsh_gre_encap_error_strings[] = {
-#define _(sym,string) string,
- foreach_nsh_gre_encap_error
-#undef _
-};
-
-typedef enum {
-#define _(sym,str) NSH_GRE_ENCAP_ERROR_##sym,
- foreach_nsh_gre_encap_error
-#undef _
- NSH_GRE_ENCAP_N_ERROR,
-} nsh_gre_encap_error_t;
-
-typedef enum {
- NSH_GRE_ENCAP_NEXT_IP4_LOOKUP,
- NSH_GRE_ENCAP_NEXT_DROP,
- NSH_GRE_ENCAP_N_NEXT,
-} nsh_gre_encap_next_t;
-
-typedef struct {
- u32 tunnel_index;
-} nsh_gre_encap_trace_t;
-
-u8 * format_nsh_gre_encap_trace (u8 * s, va_list * args)
-{
- CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
- CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- nsh_gre_encap_trace_t * t = va_arg (*args, nsh_gre_encap_trace_t *);
-
- s = format (s, "NSH-GRE-ENCAP: tunnel %d", t->tunnel_index);
- return s;
-}
-
-static uword
-nsh_gre_encap (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * from_frame)
-{
- u32 n_left_from, next_index, * from, * to_next;
- nsh_gre_main_t * ngm = &nsh_gre_main;
- vnet_main_t * vnm = ngm->vnet_main;
- vnet_interface_main_t * im = &vnm->interface_main;
- u32 pkts_encapsulated = 0;
- u16 old_l0 = 0, old_l1 = 0;
- u32 cpu_index = os_get_cpu_number();
- u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
-
- from = vlib_frame_vector_args (from_frame);
- n_left_from = from_frame->n_vectors;
-
- next_index = node->cached_next_index;
- stats_sw_if_index = node->runtime_data[0];
- stats_n_packets = stats_n_bytes = 0;
-
- while (n_left_from > 0)
- {
- u32 n_left_to_next;
-
- vlib_get_next_frame (vm, node, next_index,
- to_next, n_left_to_next);
-
- while (n_left_from >= 4 && n_left_to_next >= 2)
- {
- u32 bi0, bi1;
- vlib_buffer_t * b0, * b1;
- u32 next0 = NSH_GRE_ENCAP_NEXT_IP4_LOOKUP;
- u32 next1 = NSH_GRE_ENCAP_NEXT_IP4_LOOKUP;
- u32 sw_if_index0, sw_if_index1, len0, len1;
- vnet_hw_interface_t * hi0, * hi1;
- ip4_header_t * ip0, * ip1;
- u64 * copy_src0, * copy_dst0;
- u64 * copy_src1, * copy_dst1;
- nsh_gre_tunnel_t * t0, * t1;
- u16 new_l0, new_l1;
- ip_csum_t sum0, sum1;
-
- /* Prefetch next iteration. */
- {
- vlib_buffer_t * p2, * p3;
-
- p2 = vlib_get_buffer (vm, from[2]);
- p3 = vlib_get_buffer (vm, from[3]);
-
- vlib_prefetch_buffer_header (p2, LOAD);
- vlib_prefetch_buffer_header (p3, LOAD);
-
- CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
- }
-
- bi0 = from[0];
- bi1 = from[1];
- to_next[0] = bi0;
- to_next[1] = bi1;
- from += 2;
- to_next += 2;
- n_left_to_next -= 2;
- n_left_from -= 2;
-
- b0 = vlib_get_buffer (vm, bi0);
- b1 = vlib_get_buffer (vm, bi1);
-
- /* 1-wide cache? */
- sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
- sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_TX];
- hi0 = vnet_get_sup_hw_interface
- (vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]);
- hi1 = vnet_get_sup_hw_interface
- (vnm, vnet_buffer(b1)->sw_if_index[VLIB_TX]);
-
- t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);
- t1 = pool_elt_at_index (ngm->tunnels, hi1->dev_instance);
-
- ASSERT(vec_len(t0->rewrite) >= 24);
- ASSERT(vec_len(t1->rewrite) >= 24);
-
- /* Apply the rewrite string. $$$$ vnet_rewrite? */
- vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));
- vlib_buffer_advance (b1, -(word)_vec_len(t1->rewrite));
-
- ip0 = vlib_buffer_get_current(b0);
- ip1 = vlib_buffer_get_current(b1);
- /* Copy the fixed header */
- copy_dst0 = (u64 *) ip0;
- copy_src0 = (u64 *) t0->rewrite;
- copy_dst1 = (u64 *) ip1;
- copy_src1 = (u64 *) t1->rewrite;
-
- copy_dst0[0] = copy_src0[0];
- copy_dst0[1] = copy_src0[1];
- copy_dst0[2] = copy_src0[2];
-
- copy_dst1[0] = copy_src1[0];
- copy_dst1[1] = copy_src1[1];
- copy_dst1[2] = copy_src1[2];
-
- /* If there are TLVs to copy, do so */
- if (PREDICT_FALSE (_vec_len(t0->rewrite) > 24))
- clib_memcpy (&copy_dst0[3], t0->rewrite + 24 ,
- _vec_len (t0->rewrite)-24);
-
- if (PREDICT_FALSE (_vec_len(t1->rewrite) > 24))
- clib_memcpy (&copy_dst1[3], t1->rewrite + 24 ,
- _vec_len (t1->rewrite)-24);
-
- /* fix the <bleep>ing outer-IP checksums */
- sum0 = ip0->checksum;
- /* old_l0 always 0, see the rewrite setup */
- new_l0 =
- clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
-
- sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
- length /* changed member */);
- ip0->checksum = ip_csum_fold (sum0);
- ip0->length = new_l0;
-
- sum1 = ip1->checksum;
- /* old_l1 always 0, see the rewrite setup */
- new_l1 =
- clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1));
-
- sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t,
- length /* changed member */);
- ip1->checksum = ip_csum_fold (sum1);
- ip1->length = new_l1;
-
- /* Reset to look up tunnel partner in the configured FIB */
- vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
- vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->encap_fib_index;
- vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
- vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
- pkts_encapsulated += 2;
-
- len0 = vlib_buffer_length_in_chain(vm, b0);
- len1 = vlib_buffer_length_in_chain(vm, b0);
- stats_n_packets += 2;
- stats_n_bytes += len0 + len1;
-
- /* Batch stats increment on the same nsh-gre tunnel so counter is not
- incremented per packet. Note stats are still incremented for deleted
- and admin-down tunnel where packets are dropped. It is not worthwhile
- to check for this rare case and affect normal path performance. */
- if (PREDICT_FALSE(
- (sw_if_index0 != stats_sw_if_index)
- || (sw_if_index1 != stats_sw_if_index))) {
- stats_n_packets -= 2;
- stats_n_bytes -= len0 + len1;
- if (sw_if_index0 == sw_if_index1) {
- if (stats_n_packets)
- vlib_increment_combined_counter(
- im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
- cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
- stats_sw_if_index = sw_if_index0;
- stats_n_packets = 2;
- stats_n_bytes = len0 + len1;
- } else {
- vlib_increment_combined_counter(
- im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
- cpu_index, sw_if_index0, 1, len0);
- vlib_increment_combined_counter(
- im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
- cpu_index, sw_if_index1, 1, len1);
- }
- }
-
- if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
- {
- nsh_gre_encap_trace_t *tr =
- vlib_add_trace (vm, node, b0, sizeof (*tr));
- tr->tunnel_index = t0 - ngm->tunnels;
- }
-
- if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
- {
- nsh_gre_encap_trace_t *tr =
- vlib_add_trace (vm, node, b1, sizeof (*tr));
- tr->tunnel_index = t1 - ngm->tunnels;
- }
-
- vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, bi1, next0, next1);
- }
-
- while (n_left_from > 0 && n_left_to_next > 0)
- {
- u32 bi0;
- vlib_buffer_t * b0;
- u32 next0 = NSH_GRE_ENCAP_NEXT_IP4_LOOKUP;
- u32 sw_if_index0, len0;
- vnet_hw_interface_t * hi0;
- ip4_header_t * ip0;
- u64 * copy_src0, * copy_dst0;
- nsh_gre_tunnel_t * t0;
- u16 new_l0;
- ip_csum_t sum0;
-
- bi0 = from[0];
- to_next[0] = bi0;
- from += 1;
- to_next += 1;
- n_left_from -= 1;
- n_left_to_next -= 1;
-
- b0 = vlib_get_buffer (vm, bi0);
-
- /* 1-wide cache? */
- sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
- hi0 = vnet_get_sup_hw_interface
- (vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]);
-
- t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);
-
- ASSERT(vec_len(t0->rewrite) >= 24);
-
- /* Apply the rewrite string. $$$$ vnet_rewrite? */
- vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));
-
- ip0 = vlib_buffer_get_current(b0);
- /* Copy the fixed header */
- copy_dst0 = (u64 *) ip0;
- copy_src0 = (u64 *) t0->rewrite;
- copy_dst0[0] = copy_src0[0];
- copy_dst0[1] = copy_src0[1];
- copy_dst0[2] = copy_src0[2];
-
- /* If there are TLVs to copy, do so */
- if (PREDICT_FALSE (_vec_len(t0->rewrite) > 24))
- clib_memcpy (&copy_dst0[3], t0->rewrite + 24 ,
- _vec_len (t0->rewrite)-24);
-
- /* fix the <bleep>ing outer-IP checksum */
- sum0 = ip0->checksum;
- /* old_l0 always 0, see the rewrite setup */
- new_l0 =
- clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
-
- sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
- length /* changed member */);
- ip0->checksum = ip_csum_fold (sum0);
- ip0->length = new_l0;
-
- /* Reset to look up tunnel partner in the configured FIB */
- vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
- vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
- pkts_encapsulated ++;
-
- len0 = vlib_buffer_length_in_chain(vm, b0);
- stats_n_packets += 1;
- stats_n_bytes += len0;
-
- /* Batch stats increment on the same nsh-gre tunnel so counter is not
- incremented per packet. Note stats are still incremented for deleted
- and admin-down tunnel where packets are dropped. It is not worthwhile
- to check for this rare case and affect normal path performance. */
- if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index)) {
- stats_n_packets -= 1;
- stats_n_bytes -= len0;
- if (stats_n_packets)
- vlib_increment_combined_counter(
- im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
- cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
- stats_n_packets = 1;
- stats_n_bytes = len0;
- stats_sw_if_index = sw_if_index0;
- }
- if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
- {
- nsh_gre_encap_trace_t *tr =
- vlib_add_trace (vm, node, b0, sizeof (*tr));
- tr->tunnel_index = t0 - ngm->tunnels;
- }
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, next0);
- }
-
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
- }
- vlib_node_increment_counter (vm, node->node_index,
- NSH_GRE_ENCAP_ERROR_ENCAPSULATED,
- pkts_encapsulated);
- /* Increment any remaining batch stats */
- if (stats_n_packets) {
- vlib_increment_combined_counter(
- im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, cpu_index,
- stats_sw_if_index, stats_n_packets, stats_n_bytes);
- node->runtime_data[0] = stats_sw_if_index;
- }
-
- return from_frame->n_vectors;
-}
-
-VLIB_REGISTER_NODE (nsh_gre_encap_node) = {
- .function = nsh_gre_encap,
- .name = "nsh-gre-encap",
- .vector_size = sizeof (u32),
- .format_trace = format_nsh_gre_encap_trace,
- .type = VLIB_NODE_TYPE_INTERNAL,
-
- .n_errors = ARRAY_LEN(nsh_gre_encap_error_strings),
- .error_strings = nsh_gre_encap_error_strings,
-
- .n_next_nodes = NSH_GRE_ENCAP_N_NEXT,
-
- // add dispositions here
- .next_nodes = {
- [NSH_GRE_ENCAP_NEXT_IP4_LOOKUP] = "ip4-lookup",
- [NSH_GRE_ENCAP_NEXT_DROP] = "error-drop",
- },
-};
diff --git a/vnet/vnet/nsh-gre/nsh_gre.c b/vnet/vnet/nsh-gre/nsh_gre.c
deleted file mode 100644
index e75ed9dd862..00000000000
--- a/vnet/vnet/nsh-gre/nsh_gre.c
+++ /dev/null
@@ -1,552 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include <vppinfra/error.h>
-#include <vppinfra/hash.h>
-#include <vnet/vnet.h>
-#include <vnet/ip/ip.h>
-#include <vnet/l2/l2_input.h>
-#include <vnet/ethernet/ethernet.h>
-#include <vnet/nsh-gre/nsh_gre.h>
-
-nsh_gre_main_t nsh_gre_main;
-
-static u8 * format_decap_next (u8 * s, va_list * args)
-{
- u32 next_index = va_arg (*args, u32);
-
- switch (next_index)
- {
- case NSH_GRE_INPUT_NEXT_DROP:
- return format (s, "drop");
- case NSH_GRE_INPUT_NEXT_IP4_INPUT:
- return format (s, "ip4");
- case NSH_GRE_INPUT_NEXT_IP6_INPUT:
- return format (s, "ip6");
- case NSH_GRE_INPUT_NEXT_ETHERNET_INPUT:
- return format (s, "ethernet");
- default:
- return format (s, "index %d", next_index);
- }
- return s;
-}
-
-
-u8 * format_nsh_gre_tunnel (u8 * s, va_list * args)
-{
- nsh_gre_tunnel_t * t = va_arg (*args, nsh_gre_tunnel_t *);
- nsh_gre_main_t * ngm = &nsh_gre_main;
-
- s = format (s, "[%d] %U (src) %U (dst) fibs: (encap %d, decap %d)",
- t - ngm->tunnels,
- format_ip4_address, &t->src,
- format_ip4_address, &t->dst,
- t->encap_fib_index,
- t->decap_fib_index);
-
- s = format (s, " decap-next %U\n", format_decap_next, t->decap_next_index);
-
- s = format (s, " ver %d ", (t->nsh_hdr.ver_o_c>>6));
- if (t->nsh_hdr.ver_o_c & NSH_O_BIT)
- s = format (s, "O-set ");
-
- if (t->nsh_hdr.ver_o_c & NSH_C_BIT)
- s = format (s, "C-set ");
-
- s = format (s, "len %d (%d bytes) md_type %d next_protocol %d\n",
- t->nsh_hdr.length, t->nsh_hdr.length * 4, t->nsh_hdr.md_type, t->nsh_hdr.next_protocol);
-
- s = format (s, " service path %d service index %d\n",
- (t->nsh_hdr.spi_si>>NSH_SPI_SHIFT) & NSH_SPI_MASK,
- t->nsh_hdr.spi_si & NSH_SINDEX_MASK);
-
- s = format (s, " c1 %d c2 %d c3 %d c4 %d\n",
- t->nsh_hdr.c1, t->nsh_hdr.c2, t->nsh_hdr.c3, t->nsh_hdr.c4);
-
- return s;
-}
-
-static u8 * format_nsh_gre_name (u8 * s, va_list * args)
-{
- nsh_gre_main_t * ngm = &nsh_gre_main;
- u32 i = va_arg (*args, u32);
- u32 show_dev_instance = ~0;
-
- if (i < vec_len (ngm->dev_inst_by_real))
- show_dev_instance = ngm->dev_inst_by_real[i];
-
- if (show_dev_instance != ~0)
- i = show_dev_instance;
-
- return format (s, "nsh_gre_tunnel%d", i);
-}
-
-static int nsh_gre_name_renumber (vnet_hw_interface_t * hi,
- u32 new_dev_instance)
-{
- nsh_gre_main_t * ngm = &nsh_gre_main;
-
- vec_validate_init_empty (ngm->dev_inst_by_real, hi->dev_instance, ~0);
-
- ngm->dev_inst_by_real [hi->dev_instance] = new_dev_instance;
-
- return 0;
-}
-
-static uword dummy_interface_tx (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
-{
- clib_warning ("you shouldn't be here, leaking buffers...");
- return frame->n_vectors;
-}
-
-VNET_DEVICE_CLASS (nsh_gre_device_class,static) = {
- .name = "NSH_GRE",
- .format_device_name = format_nsh_gre_name,
- .format_tx_trace = format_nsh_gre_encap_trace,
- .tx_function = dummy_interface_tx,
- .name_renumber = nsh_gre_name_renumber,
-};
-
-static uword dummy_set_rewrite (vnet_main_t * vnm,
- u32 sw_if_index,
- u32 l3_type,
- void * dst_address,
- void * rewrite,
- uword max_rewrite_bytes)
-{
- return 0;
-}
-
-static u8 * format_nsh_gre_header_with_length (u8 * s, va_list * args)
-{
- u32 dev_instance = va_arg (*args, u32);
- s = format (s, "unimplemented dev %u", dev_instance);
- return s;
-}
-
-VNET_HW_INTERFACE_CLASS (nsh_gre_hw_class) = {
- .name = "NSH_GRE",
- .format_header = format_nsh_gre_header_with_length,
- .set_rewrite = dummy_set_rewrite,
-};
-
-#define foreach_copy_field \
-_(src.as_u32) \
-_(dst.as_u32) \
-_(encap_fib_index) \
-_(decap_fib_index) \
-_(decap_next_index)
-
-
-#define foreach_copy_nshhdr_field \
-_(ver_o_c) \
-_(length) \
-_(md_type) \
-_(next_protocol) \
-_(spi_si) \
-_(c1) \
-_(c2) \
-_(c3) \
-_(c4) \
-_(tlvs)
-
-#define foreach_32bit_field \
-_(spi_si) \
-_(c1) \
-_(c2) \
-_(c3) \
-_(c4)
-
-static int nsh_gre_rewrite (nsh_gre_tunnel_t * t)
-{
- u8 *rw = 0;
- ip4_header_t * ip0;
- nsh_header_t * nsh0;
- ip4_gre_and_nsh_header_t * h0;
- int len;
-
- len = sizeof (*h0) + vec_len(t->nsh_hdr.tlvs)*4;
-
- vec_validate_aligned (rw, len-1, CLIB_CACHE_LINE_BYTES);
-
- h0 = (ip4_gre_and_nsh_header_t *) rw;
-
- /* Fixed portion of the (outer) ip4 header */
- ip0 = &h0->ip4;
- ip0->ip_version_and_header_length = 0x45;
- ip0->ttl = 254;
- ip0->protocol = IP_PROTOCOL_GRE;
- /* we fix up the ip4 header length and checksum after-the-fact */
- ip0->src_address.as_u32 = t->src.as_u32;
- ip0->dst_address.as_u32 = t->dst.as_u32;
- ip0->checksum = ip4_header_checksum (ip0);
-
- /* GRE header, zero except for the NSH ethertype */
- h0->gre.protocol = clib_host_to_net_u16(GRE_PROTOCOL_nsh);
-
- /* NSH header */
- nsh0 = &h0->nsh;
- nsh0->ver_o_c = t->nsh_hdr.ver_o_c;
- nsh0->md_type = t->nsh_hdr.md_type;
- nsh0->next_protocol = t->nsh_hdr.next_protocol;
- nsh0->spi_si = t->nsh_hdr.spi_si;
- nsh0->c1 = t->nsh_hdr.c1;
- nsh0->c2 = t->nsh_hdr.c2;
- nsh0->c3 = t->nsh_hdr.c3;
- nsh0->c4 = t->nsh_hdr.c4;
-
- /* Endian swap 32-bit fields */
-#define _(x) nsh0->x = clib_host_to_net_u32(nsh0->x);
- foreach_32bit_field;
-#undef _
-
- /* fix nsh header length */
- t->nsh_hdr.length = 6 + vec_len(t->nsh_hdr.tlvs);
- nsh0->length = t->nsh_hdr.length;
-
- /* Copy any TLVs */
- if (vec_len(t->nsh_hdr.tlvs))
- clib_memcpy (nsh0->tlvs, t->nsh_hdr.tlvs, 4*vec_len(t->nsh_hdr.tlvs));
-
- t->rewrite = rw;
- return (0);
-}
-
-int vnet_nsh_gre_add_del_tunnel (vnet_nsh_gre_add_del_tunnel_args_t *a,
- u32 * sw_if_indexp)
-{
- nsh_gre_main_t * ngm = &nsh_gre_main;
- nsh_gre_tunnel_t *t = 0;
- vnet_main_t * vnm = ngm->vnet_main;
- vnet_hw_interface_t * hi;
- uword * p;
- u32 hw_if_index = ~0;
- u32 sw_if_index = ~0;
- int rv;
- u64 key;
- u32 spi_si_net_byte_order;
-
- spi_si_net_byte_order = clib_host_to_net_u32(a->nsh_hdr.spi_si);
-
- key = (((u64)(a->src.as_u32))<<32) | spi_si_net_byte_order;
-
- p = hash_get (ngm->nsh_gre_tunnel_by_src_address, key);
-
- if (a->is_add)
- {
- /* adding a tunnel: tunnel must not already exist */
- if (p)
- return VNET_API_ERROR_INVALID_VALUE;
-
- if (a->decap_next_index >= NSH_GRE_INPUT_N_NEXT)
- return VNET_API_ERROR_INVALID_DECAP_NEXT;
-
- pool_get_aligned (ngm->tunnels, t, CLIB_CACHE_LINE_BYTES);
- memset (t, 0, sizeof (*t));
-
- /* copy from arg structure */
-#define _(x) t->x = a->x;
- foreach_copy_field;
-#undef _
-
- /* copy from arg structure */
-#define _(x) t->nsh_hdr.x = a->nsh_hdr.x;
- foreach_copy_nshhdr_field;
-#undef _
-
- rv = nsh_gre_rewrite (t);
-
- if (rv)
- {
- pool_put (ngm->tunnels, t);
- return rv;
- }
-
- hash_set (ngm->nsh_gre_tunnel_by_src_address, key, t - ngm->tunnels);
-
- if (vec_len (ngm->free_nsh_gre_tunnel_hw_if_indices) > 0)
- {
- hw_if_index = ngm->free_nsh_gre_tunnel_hw_if_indices
- [vec_len (ngm->free_nsh_gre_tunnel_hw_if_indices)-1];
- _vec_len (ngm->free_nsh_gre_tunnel_hw_if_indices) -= 1;
-
- hi = vnet_get_hw_interface (vnm, hw_if_index);
- hi->dev_instance = t - ngm->tunnels;
- hi->hw_instance = hi->dev_instance;
- }
- else
- {
- hw_if_index = vnet_register_interface
- (vnm, nsh_gre_device_class.index, t - ngm->tunnels,
- nsh_gre_hw_class.index, t - ngm->tunnels);
- hi = vnet_get_hw_interface (vnm, hw_if_index);
- hi->output_node_index = nsh_gre_encap_node.index;
- }
-
- t->hw_if_index = hw_if_index;
- t->sw_if_index = sw_if_index = hi->sw_if_index;
-
- vnet_sw_interface_set_flags (vnm, hi->sw_if_index,
- VNET_SW_INTERFACE_FLAG_ADMIN_UP);
- }
- else
- {
- /* deleting a tunnel: tunnel must exist */
- if (!p)
- return VNET_API_ERROR_NO_SUCH_ENTRY;
-
- t = pool_elt_at_index (ngm->tunnels, p[0]);
-
- vnet_sw_interface_set_flags (vnm, t->sw_if_index, 0 /* down */);
- vec_add1 (ngm->free_nsh_gre_tunnel_hw_if_indices, t->hw_if_index);
-
- hash_unset (ngm->nsh_gre_tunnel_by_src_address, key);
- vec_free (t->rewrite);
- pool_put (ngm->tunnels, t);
- }
-
- if (sw_if_indexp)
- *sw_if_indexp = sw_if_index;
-
- return 0;
-}
-
-static u32 fib_index_from_fib_id (u32 fib_id)
-{
- ip4_main_t * im = &ip4_main;
- uword * p;
-
- p = hash_get (im->fib_index_by_table_id, fib_id);
- if (!p)
- return ~0;
-
- return p[0];
-}
-
-static uword unformat_decap_next (unformat_input_t * input, va_list * args)
-{
- u32 * result = va_arg (*args, u32 *);
- u32 tmp;
-
- if (unformat (input, "drop"))
- *result = NSH_GRE_INPUT_NEXT_DROP;
- else if (unformat (input, "ip4"))
- *result = NSH_GRE_INPUT_NEXT_IP4_INPUT;
- else if (unformat (input, "ip6"))
- *result = NSH_GRE_INPUT_NEXT_IP6_INPUT;
- else if (unformat (input, "ethernet"))
- *result = NSH_GRE_INPUT_NEXT_ETHERNET_INPUT;
- else if (unformat (input, "%d", &tmp))
- *result = tmp;
- else
- return 0;
- return 1;
-}
-
-static clib_error_t *
-nsh_gre_add_del_tunnel_command_fn (vlib_main_t * vm,
- unformat_input_t * input,
- vlib_cli_command_t * cmd)
-{
- unformat_input_t _line_input, * line_input = &_line_input;
- ip4_address_t src, dst;
- u8 is_add = 1;
- u8 src_set = 0;
- u8 dst_set = 0;
- u32 encap_fib_index = 0;
- u32 decap_fib_index = 0;
- u8 ver_o_c = 0;
- u8 length = 0;
- u8 md_type = 0;
- u8 next_protocol = 1; /* ip4 */
- u32 spi;
- u8 spi_set = 0;
- u32 si;
- u8 si_set = 0;
- u32 spi_si;
- u32 c1 = 0;
- u32 c2 = 0;
- u32 c3 = 0;
- u32 c4 = 0;
- u32 decap_next_index = 1; /* ip4_input */
- u32 *tlvs = 0;
- u32 tmp;
- int rv;
- vnet_nsh_gre_add_del_tunnel_args_t _a, * a = &_a;
-
- /* Get a line of input. */
- if (! unformat_user (input, unformat_line_input, line_input))
- return 0;
-
- while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) {
- if (unformat (line_input, "del"))
- is_add = 0;
- else if (unformat (line_input, "src %U",
- unformat_ip4_address, &src))
- src_set = 1;
- else if (unformat (line_input, "dst %U",
- unformat_ip4_address, &dst))
- dst_set = 1;
- else if (unformat (line_input, "encap-vrf-id %d", &tmp))
- {
- encap_fib_index = fib_index_from_fib_id (tmp);
- if (encap_fib_index == ~0)
- return clib_error_return (0, "nonexistent encap fib id %d", tmp);
- }
- else if (unformat (line_input, "decap-vrf-id %d", &tmp))
- {
- decap_fib_index = fib_index_from_fib_id (tmp);
- if (decap_fib_index == ~0)
- return clib_error_return (0, "nonexistent decap fib id %d", tmp);
- }
- else if (unformat (line_input, "decap-next %U", unformat_decap_next,
- &decap_next_index))
- ;
- else if (unformat (line_input, "version %d", &tmp))
- ver_o_c |= (tmp & 3) << 6;
- else if (unformat (line_input, "o-bit %d", &tmp))
- ver_o_c |= (tmp & 1) << 5;
- else if (unformat (line_input, "c-bit %d", &tmp))
- ver_o_c |= (tmp & 1) << 4;
- else if (unformat (line_input, "md-type %d", &tmp))
- md_type = tmp;
- else if (unformat(line_input, "next-ip4"))
- next_protocol = 1;
- else if (unformat(line_input, "next-ip6"))
- next_protocol = 2;
- else if (unformat(line_input, "next-ethernet"))
- next_protocol = 3;
- else if (unformat (line_input, "c1 %d", &c1))
- ;
- else if (unformat (line_input, "c2 %d", &c2))
- ;
- else if (unformat (line_input, "c3 %d", &c3))
- ;
- else if (unformat (line_input, "c4 %d", &c4))
- ;
- else if (unformat (line_input, "spi %d", &spi))
- spi_set = 1;
- else if (unformat (line_input, "si %d", &si))
- si_set = 1;
- else if (unformat (line_input, "tlv %x"))
- vec_add1 (tlvs, tmp);
- else
- return clib_error_return (0, "parse error: '%U'",
- format_unformat_error, line_input);
- }
-
- unformat_free (line_input);
-
- if (src_set == 0)
- return clib_error_return (0, "tunnel src address not specified");
-
- if (dst_set == 0)
- return clib_error_return (0, "tunnel dst address not specified");
-
- if (spi_set == 0)
- return clib_error_return (0, "spi not specified");
-
- if (si_set == 0)
- return clib_error_return (0, "si not specified");
-
- spi_si = (spi<<8) | si;
-
- memset (a, 0, sizeof (*a));
-
- a->is_add = is_add;
-
-#define _(x) a->x = x;
- foreach_copy_field;
-#undef _
-
- /* copy from arg structure */
-#define _(x) a->nsh_hdr.x = x;
- foreach_copy_nshhdr_field;
-#undef _
-
- rv = vnet_nsh_gre_add_del_tunnel (a, 0 /* hw_if_indexp */);
-
- switch(rv)
- {
- case 0:
- break;
- case VNET_API_ERROR_INVALID_DECAP_NEXT:
- return clib_error_return (0, "invalid decap-next...");
-
- case VNET_API_ERROR_TUNNEL_EXIST:
- return clib_error_return (0, "tunnel already exists...");
-
- case VNET_API_ERROR_NO_SUCH_ENTRY:
- return clib_error_return (0, "session does not exist...");
-
- default:
- return clib_error_return
- (0, "vnet_nsh_gre_add_del_tunnel returned %d", rv);
- }
-
- return 0;
-}
-
-VLIB_CLI_COMMAND (create_nsh_gre_tunnel_command, static) = {
- .path = "nsh gre tunnel",
- .short_help =
- "nsh gre tunnel src <ip4-addr> dst <ip4-addr>"
- " c1 <nn> c2 <nn> c3 <nn> c4 <nn> spi <nn> si <nn>\n"
- " [encap-vrf-id <nn>] [decap-vrf-id <nn>] [o-bit <1|0>] [c-bit <1|0>]\n"
- " [md-type <nn>][next-ip4][next-ip6][next-ethernet]\n"
- " [tlv <xx>][decap-next [ip4|ip6|ethernet]][del]\n",
- .function = nsh_gre_add_del_tunnel_command_fn,
-};
-
-static clib_error_t *
-show_nsh_gre_tunnel_command_fn (vlib_main_t * vm,
- unformat_input_t * input,
- vlib_cli_command_t * cmd)
-{
- nsh_gre_main_t * ngm = &nsh_gre_main;
- nsh_gre_tunnel_t * t;
-
- if (pool_elts (ngm->tunnels) == 0)
- vlib_cli_output (vm, "No nsh-gre tunnels configured...");
-
- pool_foreach (t, ngm->tunnels,
- ({
- vlib_cli_output (vm, "%U", format_nsh_gre_tunnel, t);
- }));
-
- return 0;
-}
-
-VLIB_CLI_COMMAND (show_nsh_gre_tunnel_command, static) = {
- .path = "show nsh gre tunnel",
- .function = show_nsh_gre_tunnel_command_fn,
-};
-
-clib_error_t *nsh_gre_init (vlib_main_t *vm)
-{
- nsh_gre_main_t *ngm = &nsh_gre_main;
-
- ngm->vnet_main = vnet_get_main();
- ngm->vlib_main = vm;
-
- ngm->nsh_gre_tunnel_by_src_address = hash_create (0, sizeof (uword));
- gre_register_input_protocol (vm, GRE_PROTOCOL_nsh,
- nsh_gre_input_node.index);
- return 0;
-}
-
-VLIB_INIT_FUNCTION(nsh_gre_init);
-
diff --git a/vnet/vnet/nsh-gre/nsh_gre.h b/vnet/vnet/nsh-gre/nsh_gre.h
deleted file mode 100644
index abe115580cb..00000000000
--- a/vnet/vnet/nsh-gre/nsh_gre.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef included_vnet_nsh_gre_h
-#define included_vnet_nsh_gre_h
-
-#include <vnet/vnet.h>
-#include <vnet/gre/gre.h>
-#include <vnet/nsh/nsh_packet.h>
-#include <vnet/ip/ip4_packet.h>
-
-typedef CLIB_PACKED (struct {
- ip4_header_t ip4; /* 20 bytes */
- gre_header_t gre; /* 4 bytes */
- nsh_header_t nsh; /* 28 bytes */
-}) ip4_gre_and_nsh_header_t;
-
-typedef struct {
- /* Rewrite string. $$$$ embed vnet_rewrite header */
- u8 * rewrite;
-
- /* tunnel src and dst addresses */
- ip4_address_t src;
- ip4_address_t dst;
-
- /* FIB indices */
- u32 encap_fib_index; /* tunnel partner lookup here */
- u32 decap_fib_index; /* inner IP lookup here */
-
- /* when decapsulating, send pkts here */
- u32 decap_next_index;
-
- /* vnet intfc hw/sw_if_index */
- u32 hw_if_index;
- u32 sw_if_index;
-
- /* NSH header fields in HOST byte order */
- nsh_header_t nsh_hdr;
-
-} nsh_gre_tunnel_t;
-
-#define foreach_nsh_gre_input_next \
- _ (DROP, "error-drop") \
- _ (IP4_INPUT, "ip4-input") \
- _ (IP6_INPUT, "ip6-input") \
- _ (ETHERNET_INPUT, "ethernet-input")
-
-typedef enum {
-#define _(s,n) NSH_GRE_INPUT_NEXT_##s,
- foreach_nsh_gre_input_next
-#undef _
- NSH_GRE_INPUT_N_NEXT,
-} nsh_gre_input_next_t;
-
-typedef enum {
-#define nsh_gre_error(n,s) NSH_GRE_ERROR_##n,
-#include <vnet/nsh/nsh_error.def>
-#undef nsh_gre_error
- NSH_GRE_N_ERROR,
-} nsh_gre_input_error_t;
-
-typedef struct {
- /* vector of encap tunnel instances */
- nsh_gre_tunnel_t *tunnels;
-
- /* lookup tunnel by tunnel partner src address */
- uword * nsh_gre_tunnel_by_src_address;
-
- /* Free vlib hw_if_indices */
- u32 * free_nsh_gre_tunnel_hw_if_indices;
-
- /* show device instance by real device instance */
- u32 * dev_inst_by_real;
-
- /* convenience */
- vlib_main_t * vlib_main;
- vnet_main_t * vnet_main;
-} nsh_gre_main_t;
-
-nsh_gre_main_t nsh_gre_main;
-
-extern vlib_node_registration_t nsh_gre_input_node;
-extern vlib_node_registration_t nsh_gre_encap_node;
-
-u8 * format_nsh_gre_encap_trace (u8 * s, va_list * args);
-
-typedef struct {
- u8 is_add;
- ip4_address_t src, dst;
- u32 encap_fib_index;
- u32 decap_fib_index;
- u32 decap_next_index;
- nsh_header_t nsh_hdr;
-} vnet_nsh_gre_add_del_tunnel_args_t;
-
-int vnet_nsh_gre_add_del_tunnel (vnet_nsh_gre_add_del_tunnel_args_t *a,
- u32 * sw_if_indexp);
-
-#endif /* included_vnet_nsh_gre_h */
diff --git a/vnet/vnet/nsh-vxlan-gpe/decap.c b/vnet/vnet/nsh-vxlan-gpe/decap.c
deleted file mode 100644
index 76003e6003f..00000000000
--- a/vnet/vnet/nsh-vxlan-gpe/decap.c
+++ /dev/null
@@ -1,537 +0,0 @@
-/*
- * nsh.c: nsh packet processing
- *
- * Copyright (c) 2013 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <vlib/vlib.h>
-#include <vnet/pg/pg.h>
-#include <vnet/nsh-vxlan-gpe/nsh_vxlan_gpe.h>
-
-vlib_node_registration_t nsh_vxlan_gpe_input_node;
-
-/* From nsh-gre */
-u8 * format_nsh_header_with_length (u8 * s, va_list * args);
-
-typedef struct {
- u32 next_index;
- u32 tunnel_index;
- u32 error;
- nsh_header_t h;
-} nsh_vxlan_gpe_rx_trace_t;
-
-static u8 * format_nsh_vxlan_gpe_rx_trace (u8 * s, va_list * args)
-{
- CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
- CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- nsh_vxlan_gpe_rx_trace_t * t = va_arg (*args, nsh_vxlan_gpe_rx_trace_t *);
-
- if (t->tunnel_index != ~0)
- {
- s = format (s, "NSH-VXLAN: tunnel %d next %d error %d", t->tunnel_index,
- t->next_index, t->error);
- }
- else
- {
- s = format (s, "NSH-VXLAN: no tunnel next %d error %d\n", t->next_index,
- t->error);
- }
- s = format (s, "\n %U", format_nsh_header_with_length, &t->h,
- (u32) sizeof (t->h) /* max size */);
- return s;
-}
-
-static uword
-nsh_vxlan_gpe_input (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * from_frame)
-{
- u32 n_left_from, next_index, * from, * to_next;
- nsh_vxlan_gpe_main_t * ngm = &nsh_vxlan_gpe_main;
- vnet_main_t * vnm = ngm->vnet_main;
- vnet_interface_main_t * im = &vnm->interface_main;
- u32 last_tunnel_index = ~0;
- nsh_vxlan_gpe_tunnel_key_t last_key;
- u32 pkts_decapsulated = 0;
- u32 cpu_index = os_get_cpu_number();
- u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
-
- memset (&last_key, 0xff, sizeof (last_key));
-
- from = vlib_frame_vector_args (from_frame);
- n_left_from = from_frame->n_vectors;
-
- next_index = node->cached_next_index;
- stats_sw_if_index = node->runtime_data[0];
- stats_n_packets = stats_n_bytes = 0;
-
- while (n_left_from > 0)
- {
- u32 n_left_to_next;
-
- vlib_get_next_frame (vm, node, next_index,
- to_next, n_left_to_next);
-
- while (n_left_from >= 4 && n_left_to_next >= 2)
- {
- u32 bi0, bi1;
- vlib_buffer_t * b0, * b1;
- u32 next0, next1;
- ip4_vxlan_gpe_and_nsh_header_t * iuvn0, * iuvn1;
- uword * p0, * p1;
- u32 tunnel_index0, tunnel_index1;
- nsh_vxlan_gpe_tunnel_t * t0, * t1;
- nsh_vxlan_gpe_tunnel_key_t key0, key1;
- u32 error0, error1;
- u32 sw_if_index0, sw_if_index1, len0, len1;
-
- /* Prefetch next iteration. */
- {
- vlib_buffer_t * p2, * p3;
-
- p2 = vlib_get_buffer (vm, from[2]);
- p3 = vlib_get_buffer (vm, from[3]);
-
- vlib_prefetch_buffer_header (p2, LOAD);
- vlib_prefetch_buffer_header (p3, LOAD);
-
- CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
- }
-
- bi0 = from[0];
- bi1 = from[1];
- to_next[0] = bi0;
- to_next[1] = bi1;
- from += 2;
- to_next += 2;
- n_left_to_next -= 2;
- n_left_from -= 2;
-
- b0 = vlib_get_buffer (vm, bi0);
- b1 = vlib_get_buffer (vm, bi1);
-
- /* udp leaves current_data pointing at the vxlan header */
- vlib_buffer_advance
- (b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
- vlib_buffer_advance
- (b1, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
-
- iuvn0 = vlib_buffer_get_current (b0);
- iuvn1 = vlib_buffer_get_current (b1);
-
- /* pop (ip, udp, vxlan, nsh) */
- vlib_buffer_advance (b0, sizeof (*iuvn0));
- vlib_buffer_advance (b1, sizeof (*iuvn1));
-
- tunnel_index0 = ~0;
- error0 = 0;
- next0 = NSH_VXLAN_GPE_INPUT_NEXT_DROP;
-
- tunnel_index1 = ~0;
- error1 = 0;
- next1 = NSH_VXLAN_GPE_INPUT_NEXT_DROP;
-
- key0.src = iuvn0->ip4.src_address.as_u32;
- key0.vni = iuvn0->vxlan.vni_res;
- key0.spi_si = iuvn0->nsh.spi_si;
- key0.pad = 0;
-
- if (PREDICT_FALSE ((key0.as_u64[0] != last_key.as_u64[0])
- || (key0.as_u64[1] != last_key.as_u64[1])))
- {
- p0 = hash_get_mem (ngm->nsh_vxlan_gpe_tunnel_by_key, &key0);
-
- if (p0 == 0)
- {
- error0 = NSH_VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
- goto trace0;
- }
-
- last_key.as_u64[0] = key0.as_u64[0];
- last_key.as_u64[1] = key0.as_u64[1];
- tunnel_index0 = last_tunnel_index = p0[0];
- }
- else
- tunnel_index0 = last_tunnel_index;
-
- t0 = pool_elt_at_index (ngm->tunnels, tunnel_index0);
-
- next0 = t0->decap_next_index;
- sw_if_index0 = t0->sw_if_index;
- len0 = vlib_buffer_length_in_chain(vm, b0);
-
- /* Required to make the l2 tag push / pop code work on l2 subifs */
- vnet_update_l2_len (b0);
-
- if (next0 == NSH_VXLAN_GPE_INPUT_NEXT_NSH_VXLAN_GPE_ENCAP)
- {
- /*
- * Functioning as SFF (ie "half NSH tunnel mode")
- * If ingress (we are in decap.c) with NSH header, and 'decap next nsh-vxlan-gpe' then "NSH switch"
- * 1. Take DST, remap to SRC, remap other keys in place
- * 2. Look up new t0 as per above
- * 3. Set sw_if_index[VLIB_TX] to be t0->sw_if_index
- */
- uword * next_p0;
- nsh_vxlan_gpe_tunnel_t * next_t0;
- nsh_vxlan_gpe_tunnel_key_t next_key0;
-
- next_key0.src = iuvn0->ip4.dst_address.as_u32;
- next_key0.vni = iuvn0->vxlan.vni_res;
- next_key0.spi_si = iuvn0->nsh.spi_si;
- next_key0.pad = 0;
-
- next_p0 = hash_get_mem (ngm->nsh_vxlan_gpe_tunnel_by_key, &next_key0);
-
- if (next_p0 == 0)
- {
- error0 = NSH_VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
- goto trace0;
- }
- next_t0 = pool_elt_at_index (ngm->tunnels, next_p0[0]);
- vnet_buffer(b0)->sw_if_index[VLIB_TX] = next_t0->sw_if_index;
-
- }
- else
- {
- /*
- * ip[46] lookup in the configured FIB
- * nsh-vxlan-gpe-encap, here's the encap tunnel sw_if_index
- */
- vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
- }
-
- pkts_decapsulated++;
- stats_n_packets += 1;
- stats_n_bytes += len0;
-
- if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index))
- {
- stats_n_packets -= 1;
- stats_n_bytes -= len0;
- if (stats_n_packets)
- vlib_increment_combined_counter(
- im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
- cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
- stats_n_packets = 1;
- stats_n_bytes = len0;
- stats_sw_if_index = sw_if_index0;
- }
-
- trace0:
- b0->error = error0 ? node->errors[error0] : 0;
-
- if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
- {
- nsh_vxlan_gpe_rx_trace_t *tr
- = vlib_add_trace (vm, node, b0, sizeof (*tr));
- tr->next_index = next0;
- tr->error = error0;
- tr->tunnel_index = tunnel_index0;
- tr->h = iuvn0->nsh;
- }
-
- key1.src = iuvn1->ip4.src_address.as_u32;
- key1.vni = iuvn1->vxlan.vni_res;
- key1.spi_si = iuvn1->nsh.spi_si;
- key1.pad = 0;
-
- if (PREDICT_FALSE ((key1.as_u64[0] != last_key.as_u64[0])
- || (key1.as_u64[1] != last_key.as_u64[1])))
- {
- p1 = hash_get_mem (ngm->nsh_vxlan_gpe_tunnel_by_key, &key1);
-
- if (p1 == 0)
- {
- error1 = NSH_VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
- goto trace1;
- }
-
- last_key.as_u64[0] = key1.as_u64[0];
- last_key.as_u64[1] = key1.as_u64[1];
- tunnel_index1 = last_tunnel_index = p1[0];
- }
- else
- tunnel_index1 = last_tunnel_index;
-
- t1 = pool_elt_at_index (ngm->tunnels, tunnel_index1);
-
- next1 = t1->decap_next_index;
- sw_if_index1 = t1->sw_if_index;
- len1 = vlib_buffer_length_in_chain(vm, b1);
-
- /* Required to make the l2 tag push / pop code work on l2 subifs */
- vnet_update_l2_len (b1);
-
- if (next1 == NSH_VXLAN_GPE_INPUT_NEXT_NSH_VXLAN_GPE_ENCAP)
- {
- /*
- * Functioning as SFF (ie "half NSH tunnel mode")
- * If ingress (we are in decap.c) with NSH header, and 'decap next nsh-vxlan-gpe' then "NSH switch"
- * 1. Take DST, remap to SRC, remap other keys in place
- * 2. Look up new t0 as per above
- * 3. Set sw_if_index[VLIB_TX] to be t0->sw_if_index
- */
- uword * next_p1;
- nsh_vxlan_gpe_tunnel_t * next_t1;
- nsh_vxlan_gpe_tunnel_key_t next_key1;
-
- next_key1.src = iuvn1->ip4.dst_address.as_u32;
- next_key1.vni = iuvn1->vxlan.vni_res;
- next_key1.spi_si = iuvn1->nsh.spi_si;
- next_key1.pad = 0;
-
- next_p1 = hash_get_mem (ngm->nsh_vxlan_gpe_tunnel_by_key, &next_key1);
-
- if (next_p1 == 0)
- {
- error1 = NSH_VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
- goto trace1;
- }
- next_t1 = pool_elt_at_index (ngm->tunnels, next_p1[0]);
- vnet_buffer(b1)->sw_if_index[VLIB_TX] = next_t1->sw_if_index;
-
- }
- else
- {
- /*
- * ip[46] lookup in the configured FIB
- * nsh-vxlan-gpe-encap, here's the encap tunnel sw_if_index
- */
- vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;
- }
-
- pkts_decapsulated++;
- stats_n_packets += 1;
- stats_n_bytes += len1;
- /* Batch stats increment on the same vxlan tunnel so counter
- is not incremented per packet */
- if (PREDICT_FALSE(sw_if_index1 != stats_sw_if_index))
- {
- stats_n_packets -= 1;
- stats_n_bytes -= len1;
- if (stats_n_packets)
- vlib_increment_combined_counter(
- im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
- cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
- stats_n_packets = 1;
- stats_n_bytes = len1;
- stats_sw_if_index = sw_if_index1;
- }
- vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;
-
- trace1:
- b1->error = error1 ? node->errors[error1] : 0;
-
- if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
- {
- nsh_vxlan_gpe_rx_trace_t *tr
- = vlib_add_trace (vm, node, b1, sizeof (*tr));
- tr->next_index = next1;
- tr->error = error1;
- tr->tunnel_index = tunnel_index1;
- tr->h = iuvn1->nsh;
- }
-
- vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, bi1, next0, next1);
- }
-
- while (n_left_from > 0 && n_left_to_next > 0)
- {
- u32 bi0;
- vlib_buffer_t * b0;
- u32 next0;
- ip4_vxlan_gpe_and_nsh_header_t * iuvn0;
- uword * p0;
- u32 tunnel_index0;
- nsh_vxlan_gpe_tunnel_t * t0;
- nsh_vxlan_gpe_tunnel_key_t key0;
- u32 error0;
- u32 sw_if_index0, len0;
-
- bi0 = from[0];
- to_next[0] = bi0;
- from += 1;
- to_next += 1;
- n_left_from -= 1;
- n_left_to_next -= 1;
-
- b0 = vlib_get_buffer (vm, bi0);
-
- /* udp leaves current_data pointing at the vxlan header */
- vlib_buffer_advance
- (b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
-
- iuvn0 = vlib_buffer_get_current (b0);
-
- /* pop (ip, udp, vxlan, nsh) */
- vlib_buffer_advance (b0, sizeof (*iuvn0));
-
- tunnel_index0 = ~0;
- error0 = 0;
- next0 = NSH_VXLAN_GPE_INPUT_NEXT_DROP;
-
- key0.src = iuvn0->ip4.src_address.as_u32;
- key0.vni = iuvn0->vxlan.vni_res;
- key0.spi_si = iuvn0->nsh.spi_si;
- key0.pad = 0;
-
- if (PREDICT_FALSE ((key0.as_u64[0] != last_key.as_u64[0])
- || (key0.as_u64[1] != last_key.as_u64[1])))
- {
- p0 = hash_get_mem (ngm->nsh_vxlan_gpe_tunnel_by_key, &key0);
-
- if (p0 == 0)
- {
- error0 = NSH_VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
- goto trace00;
- }
-
- last_key.as_u64[0] = key0.as_u64[0];
- last_key.as_u64[1] = key0.as_u64[1];
- tunnel_index0 = last_tunnel_index = p0[0];
- }
- else
- tunnel_index0 = last_tunnel_index;
-
- t0 = pool_elt_at_index (ngm->tunnels, tunnel_index0);
-
- next0 = t0->decap_next_index;
- sw_if_index0 = t0->sw_if_index;
- len0 = vlib_buffer_length_in_chain(vm, b0);
-
- /* Required to make the l2 tag push / pop code work on l2 subifs */
- vnet_update_l2_len (b0);
-
- if (next0 == NSH_VXLAN_GPE_INPUT_NEXT_NSH_VXLAN_GPE_ENCAP)
- {
- /*
- * Functioning as SFF (ie "half NSH tunnel mode")
- * If ingress (we are in decap.c) with NSH header, and 'decap next nsh-vxlan-gpe' then "NSH switch"
- * 1. Take DST, remap to SRC, remap other keys in place
- * 2. Look up new t0 as per above
- * 3. Set sw_if_index[VLIB_TX] to be t0->sw_if_index
- */
- uword * next_p0;
- nsh_vxlan_gpe_tunnel_t * next_t0;
- nsh_vxlan_gpe_tunnel_key_t next_key0;
-
- next_key0.src = iuvn0->ip4.dst_address.as_u32;
- next_key0.vni = iuvn0->vxlan.vni_res;
- next_key0.spi_si = iuvn0->nsh.spi_si;
- next_key0.pad = 0;
-
- next_p0 = hash_get_mem (ngm->nsh_vxlan_gpe_tunnel_by_key, &next_key0);
-
- if (next_p0 == 0)
- {
- error0 = NSH_VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
- goto trace00;
- }
- next_t0 = pool_elt_at_index (ngm->tunnels, next_p0[0]);
- vnet_buffer(b0)->sw_if_index[VLIB_TX] = next_t0->sw_if_index;
-
- }
- else
- {
- /*
- * ip[46] lookup in the configured FIB
- * nsh-vxlan-gpe-encap, here's the encap tunnel sw_if_index
- */
- vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
- }
-
- pkts_decapsulated ++;
-
- stats_n_packets += 1;
- stats_n_bytes += len0;
-
- /* Batch stats increment on the same nsh-vxlan-gpe tunnel so counter
- is not incremented per packet */
- if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index))
- {
- stats_n_packets -= 1;
- stats_n_bytes -= len0;
- if (stats_n_packets)
- vlib_increment_combined_counter(
- im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
- cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
- stats_n_packets = 1;
- stats_n_bytes = len0;
- stats_sw_if_index = sw_if_index0;
- }
-
- trace00:
- b0->error = error0 ? node->errors[error0] : 0;
-
- if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
- {
- nsh_vxlan_gpe_rx_trace_t *tr
- = vlib_add_trace (vm, node, b0, sizeof (*tr));
- tr->next_index = next0;
- tr->error = error0;
- tr->tunnel_index = tunnel_index0;
- tr->h = iuvn0->nsh;
- }
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, next0);
- }
-
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
- }
- vlib_node_increment_counter (vm, nsh_vxlan_gpe_input_node.index,
- NSH_VXLAN_GPE_ERROR_DECAPSULATED,
- pkts_decapsulated);
- /* Increment any remaining batch stats */
- if (stats_n_packets)
- {
- vlib_increment_combined_counter(
- im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, cpu_index,
- stats_sw_if_index, stats_n_packets, stats_n_bytes);
- node->runtime_data[0] = stats_sw_if_index;
- }
- return from_frame->n_vectors;
-}
-
-static char * nsh_vxlan_gpe_error_strings[] = {
-#define nsh_vxlan_gpe_error(n,s) s,
-#include <vnet/nsh-vxlan-gpe/nsh_vxlan_gpe_error.def>
-#undef nsh_vxlan_gpe_error
-#undef _
-};
-
-VLIB_REGISTER_NODE (nsh_vxlan_gpe_input_node) = {
- .function = nsh_vxlan_gpe_input,
- .name = "nsh-vxlan-gpe-input",
- /* Takes a vector of packets. */
- .vector_size = sizeof (u32),
-
- .n_errors = NSH_VXLAN_GPE_N_ERROR,
- .error_strings = nsh_vxlan_gpe_error_strings,
-
- .n_next_nodes = NSH_VXLAN_GPE_INPUT_N_NEXT,
- .next_nodes = {
-#define _(s,n) [NSH_VXLAN_GPE_INPUT_NEXT_##s] = n,
- foreach_nsh_vxlan_gpe_input_next
-#undef _
- },
-
- .format_buffer = format_nsh_header_with_length,
- .format_trace = format_nsh_vxlan_gpe_rx_trace,
- // $$$$ .unformat_buffer = unformat_nsh_vxlan_gpe_header,
-};
diff --git a/vnet/vnet/nsh-vxlan-gpe/nsh_vxlan_gpe.c b/vnet/vnet/nsh-vxlan-gpe/nsh_vxlan_gpe.c
deleted file mode 100644
index 88945cd8762..00000000000
--- a/vnet/vnet/nsh-vxlan-gpe/nsh_vxlan_gpe.c
+++ /dev/null
@@ -1,577 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include <vnet/nsh-vxlan-gpe/nsh_vxlan_gpe.h>
-
-nsh_vxlan_gpe_main_t nsh_vxlan_gpe_main;
-
-static u8 * format_decap_next (u8 * s, va_list * args)
-{
- u32 next_index = va_arg (*args, u32);
-
- switch (next_index)
- {
- case NSH_VXLAN_GPE_INPUT_NEXT_DROP:
- return format (s, "drop");
- case NSH_VXLAN_GPE_INPUT_NEXT_IP4_INPUT:
- return format (s, "ip4");
- case NSH_VXLAN_GPE_INPUT_NEXT_IP6_INPUT:
- return format (s, "ip6");
- case NSH_VXLAN_GPE_INPUT_NEXT_ETHERNET_INPUT:
- return format (s, "ethernet");
- case NSH_VXLAN_GPE_INPUT_NEXT_NSH_VXLAN_GPE_ENCAP:
- return format (s, "nsh-vxlan-gpe");
- default:
- return format (s, "unknown %d", next_index);
- }
- return s;
-}
-
-u8 * format_nsh_vxlan_gpe_tunnel (u8 * s, va_list * args)
-{
- nsh_vxlan_gpe_tunnel_t * t = va_arg (*args, nsh_vxlan_gpe_tunnel_t *);
- nsh_vxlan_gpe_main_t * ngm = &nsh_vxlan_gpe_main;
-
- s = format (s,
- "[%d] %U (src) %U (dst) fibs: encap %d, decap %d",
- t - ngm->tunnels,
- format_ip4_address, &t->src,
- format_ip4_address, &t->dst,
- t->encap_fib_index,
- t->decap_fib_index);
- s = format (s, " decap next %U\n", format_decap_next, t->decap_next_index);
- s = format (s, " vxlan VNI %d ", t->vni);
- s = format (s, "nsh ver %d ", (t->nsh_hdr.ver_o_c>>6));
- if (t->nsh_hdr.ver_o_c & NSH_O_BIT)
- s = format (s, "O-set ");
-
- if (t->nsh_hdr.ver_o_c & NSH_C_BIT)
- s = format (s, "C-set ");
-
- s = format (s, "len %d (%d bytes) md_type %d next_protocol %d\n",
- t->nsh_hdr.length, t->nsh_hdr.length * 4, t->nsh_hdr.md_type, t->nsh_hdr.next_protocol);
-
- s = format (s, " service path %d service index %d\n",
- (t->nsh_hdr.spi_si>>NSH_SPI_SHIFT) & NSH_SPI_MASK,
- t->nsh_hdr.spi_si & NSH_SINDEX_MASK);
-
- s = format (s, " c1 %d c2 %d c3 %d c4 %d\n",
- t->nsh_hdr.c1, t->nsh_hdr.c2, t->nsh_hdr.c3, t->nsh_hdr.c4);
-
- return s;
-}
-
-static u8 * format_nsh_vxlan_gpe_name (u8 * s, va_list * args)
-{
- nsh_vxlan_gpe_main_t * ngm = &nsh_vxlan_gpe_main;
- u32 i = va_arg (*args, u32);
- u32 show_dev_instance = ~0;
-
- if (i < vec_len (ngm->dev_inst_by_real))
- show_dev_instance = ngm->dev_inst_by_real[i];
-
- if (show_dev_instance != ~0)
- i = show_dev_instance;
-
- return format (s, "nsh_vxlan_gpe_tunnel%d", i);
-}
-
-static int nsh_vxlan_gpe_name_renumber (vnet_hw_interface_t * hi,
- u32 new_dev_instance)
-{
- nsh_vxlan_gpe_main_t * ngm = &nsh_vxlan_gpe_main;
-
- vec_validate_init_empty (ngm->dev_inst_by_real, hi->dev_instance, ~0);
-
- ngm->dev_inst_by_real [hi->dev_instance] = new_dev_instance;
-
- return 0;
-}
-
-static uword dummy_interface_tx (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
-{
- clib_warning ("you shouldn't be here, leaking buffers...");
- return frame->n_vectors;
-}
-
-VNET_DEVICE_CLASS (nsh_vxlan_gpe_device_class,static) = {
- .name = "NSH_VXLAN_GPE",
- .format_device_name = format_nsh_vxlan_gpe_name,
- .format_tx_trace = format_nsh_vxlan_gpe_encap_trace,
- .tx_function = dummy_interface_tx,
- .name_renumber = nsh_vxlan_gpe_name_renumber,
-};
-
-static uword dummy_set_rewrite (vnet_main_t * vnm,
- u32 sw_if_index,
- u32 l3_type,
- void * dst_address,
- void * rewrite,
- uword max_rewrite_bytes)
-{
- return 0;
-}
-
-static u8 * format_nsh_vxlan_gpe_header_with_length (u8 * s, va_list * args)
-{
- u32 dev_instance = va_arg (*args, u32);
- s = format (s, "unimplemented dev %u", dev_instance);
- return s;
-}
-
-VNET_HW_INTERFACE_CLASS (nsh_vxlan_gpe_hw_class) = {
- .name = "NSH_VXLAN_GPE",
- .format_header = format_nsh_vxlan_gpe_header_with_length,
- .set_rewrite = dummy_set_rewrite,
-};
-
-#define foreach_copy_field \
-_(src.as_u32) \
-_(dst.as_u32) \
-_(vni) \
-_(encap_fib_index) \
-_(decap_fib_index) \
-_(decap_next_index)
-
-
-#define foreach_copy_nshhdr_field \
-_(ver_o_c) \
-_(length) \
-_(md_type) \
-_(next_protocol) \
-_(spi_si) \
-_(c1) \
-_(c2) \
-_(c3) \
-_(c4) \
-_(tlvs)
-
-#define foreach_32bit_field \
-_(spi_si) \
-_(c1) \
-_(c2) \
-_(c3) \
-_(c4)
-
-static int nsh_vxlan_gpe_rewrite (nsh_vxlan_gpe_tunnel_t * t)
-{
- u8 *rw = 0;
- ip4_header_t * ip0;
- nsh_header_t * nsh0;
- ip4_vxlan_gpe_and_nsh_header_t * h0;
- int len;
-
- len = sizeof (*h0) + vec_len(t->nsh_hdr.tlvs)*4;
-
- vec_validate_aligned (rw, len-1, CLIB_CACHE_LINE_BYTES);
-
- h0 = (ip4_vxlan_gpe_and_nsh_header_t *) rw;
-
- /* Fixed portion of the (outer) ip4 header */
- ip0 = &h0->ip4;
- ip0->ip_version_and_header_length = 0x45;
- ip0->ttl = 254;
- ip0->protocol = IP_PROTOCOL_UDP;
-
- /* we fix up the ip4 header length and checksum after-the-fact */
- ip0->src_address.as_u32 = t->src.as_u32;
- ip0->dst_address.as_u32 = t->dst.as_u32;
- ip0->checksum = ip4_header_checksum (ip0);
-
- /* UDP header, randomize src port on something, maybe? */
- h0->udp.src_port = clib_host_to_net_u16 (4790);
- h0->udp.dst_port = clib_host_to_net_u16 (UDP_DST_PORT_vxlan_gpe);
-
- /* VXLAN header. Are we having fun yet? */
- h0->vxlan.flags = VXLAN_GPE_FLAGS_I | VXLAN_GPE_FLAGS_P;
- h0->vxlan.ver_res = VXLAN_GPE_VERSION;
- h0->vxlan.next_protocol = VXLAN_NEXT_PROTOCOL_NSH;
- h0->vxlan.vni_res = clib_host_to_net_u32 (t->vni<<8);
-
- /* NSH header */
- nsh0 = &h0->nsh;
- nsh0->ver_o_c = t->nsh_hdr.ver_o_c;
- nsh0->md_type = t->nsh_hdr.md_type;
- nsh0->next_protocol = t->nsh_hdr.next_protocol;
- nsh0->spi_si = t->nsh_hdr.spi_si;
- nsh0->c1 = t->nsh_hdr.c1;
- nsh0->c2 = t->nsh_hdr.c2;
- nsh0->c3 = t->nsh_hdr.c3;
- nsh0->c4 = t->nsh_hdr.c4;
-
- /* Endian swap 32-bit fields */
-#define _(x) nsh0->x = clib_host_to_net_u32(nsh0->x);
- foreach_32bit_field;
-#undef _
-
- /* fix nsh header length */
- t->nsh_hdr.length = 6 + vec_len(t->nsh_hdr.tlvs);
- nsh0->length = t->nsh_hdr.length;
-
- /* Copy any TLVs */
- if (vec_len(t->nsh_hdr.tlvs))
- clib_memcpy (nsh0->tlvs, t->nsh_hdr.tlvs, 4*vec_len(t->nsh_hdr.tlvs));
-
- t->rewrite = rw;
- return (0);
-}
-
-int vnet_nsh_vxlan_gpe_add_del_tunnel
-(vnet_nsh_vxlan_gpe_add_del_tunnel_args_t *a, u32 * sw_if_indexp)
-{
- nsh_vxlan_gpe_main_t * ngm = &nsh_vxlan_gpe_main;
- nsh_vxlan_gpe_tunnel_t *t = 0;
- vnet_main_t * vnm = ngm->vnet_main;
- vnet_hw_interface_t * hi;
- uword * p;
- u32 hw_if_index = ~0;
- u32 sw_if_index = ~0;
- int rv;
- nsh_vxlan_gpe_tunnel_key_t key, *key_copy;
- hash_pair_t *hp;
-
- key.src = a->dst.as_u32; /* decap src in key is encap dst in config */
- key.vni = clib_host_to_net_u32 (a->vni << 8);
- key.spi_si = clib_host_to_net_u32(a->nsh_hdr.spi_si);
- key.pad = 0;
-
- p = hash_get_mem (ngm->nsh_vxlan_gpe_tunnel_by_key, &key);
-
- if (a->is_add)
- {
- /* adding a tunnel: tunnel must not already exist */
- if (p)
- return VNET_API_ERROR_INVALID_VALUE;
-
- if (a->decap_next_index >= NSH_VXLAN_GPE_INPUT_N_NEXT)
- return VNET_API_ERROR_INVALID_DECAP_NEXT;
-
- pool_get_aligned (ngm->tunnels, t, CLIB_CACHE_LINE_BYTES);
- memset (t, 0, sizeof (*t));
-
- /* copy from arg structure */
-#define _(x) t->x = a->x;
- foreach_copy_field;
-#undef _
-
- /* copy from arg structure */
-#define _(x) t->nsh_hdr.x = a->nsh_hdr.x;
- foreach_copy_nshhdr_field;
-#undef _
-
- rv = nsh_vxlan_gpe_rewrite (t);
-
- if (rv)
- {
- pool_put (ngm->tunnels, t);
- return rv;
- }
-
- key_copy = clib_mem_alloc (sizeof (*key_copy));
- clib_memcpy (key_copy, &key, sizeof (*key_copy));
-
- hash_set_mem (ngm->nsh_vxlan_gpe_tunnel_by_key, key_copy,
- t - ngm->tunnels);
-
- if (vec_len (ngm->free_nsh_vxlan_gpe_tunnel_hw_if_indices) > 0)
- {
- hw_if_index = ngm->free_nsh_vxlan_gpe_tunnel_hw_if_indices
- [vec_len (ngm->free_nsh_vxlan_gpe_tunnel_hw_if_indices)-1];
- _vec_len (ngm->free_nsh_vxlan_gpe_tunnel_hw_if_indices) -= 1;
-
- hi = vnet_get_hw_interface (vnm, hw_if_index);
- hi->dev_instance = t - ngm->tunnels;
- hi->hw_instance = hi->dev_instance;
- }
- else
- {
- hw_if_index = vnet_register_interface
- (vnm, nsh_vxlan_gpe_device_class.index, t - ngm->tunnels,
- nsh_vxlan_gpe_hw_class.index, t - ngm->tunnels);
- hi = vnet_get_hw_interface (vnm, hw_if_index);
- hi->output_node_index = nsh_vxlan_gpe_encap_node.index;
- }
-
- t->hw_if_index = hw_if_index;
- t->sw_if_index = sw_if_index = hi->sw_if_index;
-
- vnet_sw_interface_set_flags (vnm, hi->sw_if_index,
- VNET_SW_INTERFACE_FLAG_ADMIN_UP);
- }
- else
- {
- /* deleting a tunnel: tunnel must exist */
- if (!p)
- return VNET_API_ERROR_NO_SUCH_ENTRY;
-
- t = pool_elt_at_index (ngm->tunnels, p[0]);
-
- vnet_sw_interface_set_flags (vnm, t->sw_if_index, 0 /* down */);
- vec_add1 (ngm->free_nsh_vxlan_gpe_tunnel_hw_if_indices, t->hw_if_index);
-
- hp = hash_get_pair (ngm->nsh_vxlan_gpe_tunnel_by_key, &key);
- key_copy = (void *)(hp->key);
- hash_unset_mem (ngm->nsh_vxlan_gpe_tunnel_by_key, &key);
- clib_mem_free (key_copy);
-
- vec_free (t->rewrite);
- pool_put (ngm->tunnels, t);
- }
-
- if (sw_if_indexp)
- *sw_if_indexp = sw_if_index;
-
- return 0;
-}
-
-static u32 fib_index_from_fib_id (u32 fib_id)
-{
- ip4_main_t * im = &ip4_main;
- uword * p;
-
- p = hash_get (im->fib_index_by_table_id, fib_id);
- if (!p)
- return ~0;
-
- return p[0];
-}
-
-static uword unformat_decap_next (unformat_input_t * input, va_list * args)
-{
- u32 * result = va_arg (*args, u32 *);
- u32 tmp;
-
- if (unformat (input, "drop"))
- *result = NSH_VXLAN_GPE_INPUT_NEXT_DROP;
- else if (unformat (input, "ip4"))
- *result = NSH_VXLAN_GPE_INPUT_NEXT_IP4_INPUT;
- else if (unformat (input, "ip6"))
- *result = NSH_VXLAN_GPE_INPUT_NEXT_IP6_INPUT;
- else if (unformat (input, "ethernet"))
- *result = NSH_VXLAN_GPE_INPUT_NEXT_ETHERNET_INPUT;
- else if (unformat (input, "nsh-vxlan-gpe"))
- *result = NSH_VXLAN_GPE_INPUT_NEXT_NSH_VXLAN_GPE_ENCAP;
- else if (unformat (input, "%d", &tmp))
- *result = tmp;
- else
- return 0;
- return 1;
-}
-
-static clib_error_t *
-nsh_vxlan_gpe_add_del_tunnel_command_fn (vlib_main_t * vm,
- unformat_input_t * input,
- vlib_cli_command_t * cmd)
-{
- unformat_input_t _line_input, * line_input = &_line_input;
- ip4_address_t src, dst;
- u8 is_add = 1;
- u8 src_set = 0;
- u8 dst_set = 0;
- u32 encap_fib_index = 0;
- u32 decap_fib_index = 0;
- u8 ver_o_c = 0;
- u8 length = 0;
- u8 md_type = 0;
- u8 next_protocol = 1; /* default: ip4 */
- u32 decap_next_index = NSH_VXLAN_GPE_INPUT_NEXT_IP4_INPUT;
- u32 spi;
- u8 spi_set = 0;
- u32 si;
- u32 vni;
- u8 vni_set = 0;
- u8 si_set = 0;
- u32 spi_si;
- u32 c1 = 0;
- u32 c2 = 0;
- u32 c3 = 0;
- u32 c4 = 0;
- u32 *tlvs = 0;
- u32 tmp;
- int rv;
- vnet_nsh_vxlan_gpe_add_del_tunnel_args_t _a, * a = &_a;
-
- /* Get a line of input. */
- if (! unformat_user (input, unformat_line_input, line_input))
- return 0;
-
- while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) {
- if (unformat (line_input, "del"))
- is_add = 0;
- else if (unformat (line_input, "src %U",
- unformat_ip4_address, &src))
- src_set = 1;
- else if (unformat (line_input, "dst %U",
- unformat_ip4_address, &dst))
- dst_set = 1;
- else if (unformat (line_input, "encap-vrf-id %d", &tmp))
- {
- encap_fib_index = fib_index_from_fib_id (tmp);
- if (encap_fib_index == ~0)
- return clib_error_return (0, "nonexistent encap fib id %d", tmp);
- }
- else if (unformat (line_input, "decap-vrf-id %d", &tmp))
- {
- decap_fib_index = fib_index_from_fib_id (tmp);
- if (decap_fib_index == ~0)
- return clib_error_return (0, "nonexistent decap fib id %d", tmp);
- }
- else if (unformat (line_input, "decap-next %U", unformat_decap_next,
- &decap_next_index))
- ;
- else if (unformat (line_input, "vni %d", &vni))
- vni_set = 1;
- else if (unformat (line_input, "version %d", &tmp))
- ver_o_c |= (tmp & 3) << 6;
- else if (unformat (line_input, "o-bit %d", &tmp))
- ver_o_c |= (tmp & 1) << 5;
- else if (unformat (line_input, "c-bit %d", &tmp))
- ver_o_c |= (tmp & 1) << 4;
- else if (unformat (line_input, "md-type %d", &tmp))
- md_type = tmp;
- else if (unformat(line_input, "next-ip4"))
- next_protocol = 1;
- else if (unformat(line_input, "next-ip6"))
- next_protocol = 2;
- else if (unformat(line_input, "next-ethernet"))
- next_protocol = 3;
- else if (unformat(line_input, "next-nsh"))
- next_protocol = 4;
- else if (unformat (line_input, "c1 %d", &c1))
- ;
- else if (unformat (line_input, "c2 %d", &c2))
- ;
- else if (unformat (line_input, "c3 %d", &c3))
- ;
- else if (unformat (line_input, "c4 %d", &c4))
- ;
- else if (unformat (line_input, "spi %d", &spi))
- spi_set = 1;
- else if (unformat (line_input, "si %d", &si))
- si_set = 1;
- else if (unformat (line_input, "tlv %x"))
- vec_add1 (tlvs, tmp);
- else
- return clib_error_return (0, "parse error: '%U'",
- format_unformat_error, line_input);
- }
-
- unformat_free (line_input);
-
- if (src_set == 0)
- return clib_error_return (0, "tunnel src address not specified");
-
- if (dst_set == 0)
- return clib_error_return (0, "tunnel dst address not specified");
-
- if (vni_set == 0)
- return clib_error_return (0, "vni not specified");
-
- if (spi_set == 0)
- return clib_error_return (0, "spi not specified");
-
- if (si_set == 0)
- return clib_error_return (0, "si not specified");
-
- spi_si = (spi<<8) | si;
-
- memset (a, 0, sizeof (*a));
-
- a->is_add = is_add;
-
-#define _(x) a->x = x;
- foreach_copy_field;
-#undef _
-
-#define _(x) a->nsh_hdr.x = x;
- foreach_copy_nshhdr_field;
-#undef _
-
- rv = vnet_nsh_vxlan_gpe_add_del_tunnel (a, 0 /* hw_if_indexp */);
-
- switch(rv)
- {
- case 0:
- break;
- case VNET_API_ERROR_INVALID_DECAP_NEXT:
- return clib_error_return (0, "invalid decap-next...");
-
- case VNET_API_ERROR_TUNNEL_EXIST:
- return clib_error_return (0, "tunnel already exists...");
-
- case VNET_API_ERROR_NO_SUCH_ENTRY:
- return clib_error_return (0, "tunnel does not exist...");
-
- default:
- return clib_error_return
- (0, "vnet_nsh_vxlan_gpe_add_del_tunnel returned %d", rv);
- }
-
- return 0;
-}
-
-VLIB_CLI_COMMAND (create_nsh_vxlan_gpe_tunnel_command, static) = {
- .path = "nsh vxlan tunnel",
- .short_help =
- "nsh vxlan tunnel src <ip4-addr> dst <ip4-addr>"
- " c1 <nn> c2 <nn> c3 <nn> c4 <nn> spi <nn> si <nn> vni <nn>\n"
- " [encap-vrf-id <nn>] [decap-vrf-id <nn>] [o-bit <1|0>] [c-bit <1|0>]\n"
- " [md-type <nn>][next-ip4][next-ip6][next-ethernet][next-nsh]\n"
- " [tlv <xx>][decap-next [ip4|ip6|ethernet|nsh-vxlan-gpe]][del]\n",
- .function = nsh_vxlan_gpe_add_del_tunnel_command_fn,
-};
-
-static clib_error_t *
-show_nsh_vxlan_gpe_tunnel_command_fn (vlib_main_t * vm,
- unformat_input_t * input,
- vlib_cli_command_t * cmd)
-{
- nsh_vxlan_gpe_main_t * ngm = &nsh_vxlan_gpe_main;
- nsh_vxlan_gpe_tunnel_t * t;
-
- if (pool_elts (ngm->tunnels) == 0)
- vlib_cli_output (vm, "No nsh-vxlan-gpe tunnels configured...");
-
- pool_foreach (t, ngm->tunnels,
- ({
- vlib_cli_output (vm, "%U", format_nsh_vxlan_gpe_tunnel, t);
- }));
-
- return 0;
-}
-
-VLIB_CLI_COMMAND (show_nsh_vxlan_gpe_tunnel_command, static) = {
- .path = "show nsh vxlan tunnel",
- .function = show_nsh_vxlan_gpe_tunnel_command_fn,
-};
-
-clib_error_t *nsh_vxlan_gpe_init (vlib_main_t *vm)
-{
- nsh_vxlan_gpe_main_t *ngm = &nsh_vxlan_gpe_main;
-
- ngm->vnet_main = vnet_get_main();
- ngm->vlib_main = vm;
-
- ngm->nsh_vxlan_gpe_tunnel_by_key
- = hash_create_mem (0, sizeof(nsh_vxlan_gpe_tunnel_key_t), sizeof (uword));
-
- udp_register_dst_port (vm, UDP_DST_PORT_vxlan_gpe,
- nsh_vxlan_gpe_input_node.index, 1 /* is_ip4 */);
- return 0;
-}
-
-VLIB_INIT_FUNCTION(nsh_vxlan_gpe_init);
-
diff --git a/vnet/vnet/nsh/nsh.c b/vnet/vnet/nsh/nsh.c
new file mode 100644
index 00000000000..49edf711038
--- /dev/null
+++ b/vnet/vnet/nsh/nsh.c
@@ -0,0 +1,770 @@
+/*
+ * nsh.c - nsh mapping
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/nsh/nsh.h>
+
+
+typedef struct {
+ nsh_header_t nsh_header;
+} nsh_input_trace_t;
+
+u8 * format_nsh_header (u8 * s, va_list * args)
+{
+ nsh_header_t * nsh = va_arg (*args, nsh_header_t *);
+
+ s = format (s, "nsh ver %d ", (nsh->ver_o_c>>6));
+ if (nsh->ver_o_c & NSH_O_BIT)
+ s = format (s, "O-set ");
+
+ if (nsh->ver_o_c & NSH_C_BIT)
+ s = format (s, "C-set ");
+
+ s = format (s, "len %d (%d bytes) md_type %d next_protocol %d\n",
+ nsh->length, nsh->length * 4, nsh->md_type, nsh->next_protocol);
+
+ s = format (s, " service path %d service index %d\n",
+ (nsh->nsp_nsi>>NSH_NSP_SHIFT) & NSH_NSP_MASK,
+ nsh->nsp_nsi & NSH_NSI_MASK);
+
+ s = format (s, " c1 %d c2 %d c3 %d c4 %d\n",
+ nsh->c1, nsh->c2, nsh->c3, nsh->c4);
+
+ return s;
+}
+
+u8 * format_nsh_map (u8 * s, va_list * args)
+{
+ nsh_map_t * map = va_arg (*args, nsh_map_t *);
+
+ s = format (s, "nsh entry nsp: %d nsi: %d ",
+ (map->nsp_nsi>>NSH_NSP_SHIFT) & NSH_NSP_MASK,
+ map->nsp_nsi & NSH_NSI_MASK);
+ s = format (s, "maps to nsp: %d nsi: %d ",
+ (map->mapped_nsp_nsi>>NSH_NSP_SHIFT) & NSH_NSP_MASK,
+ map->mapped_nsp_nsi & NSH_NSI_MASK);
+
+ switch (map->next_node)
+ {
+ case NSH_INPUT_NEXT_ENCAP_GRE:
+ {
+ s = format (s, "encapped by GRE intf: %d", map->sw_if_index);
+ break;
+ }
+ case NSH_INPUT_NEXT_ENCAP_VXLANGPE:
+ {
+ s = format (s, "encapped by VXLAN GPE intf: %d", map->sw_if_index);
+ break;
+ }
+ default:
+ s = format (s, "only GRE and VXLANGPE support in this rev");
+ }
+
+ return s;
+}
+
+
+#define foreach_copy_nshhdr_field \
+_(ver_o_c) \
+_(length) \
+_(md_type) \
+_(next_protocol) \
+_(nsp_nsi) \
+_(c1) \
+_(c2) \
+_(c3) \
+_(c4)
+/* Temp killing tlvs as its causing pain - fix in NSH_SFC */
+
+
+#define foreach_32bit_field \
+_(nsp_nsi) \
+_(c1) \
+_(c2) \
+_(c3) \
+_(c4)
+
+
+u8 * format_nsh_header_with_length (u8 * s, va_list * args)
+{
+ nsh_header_t * h = va_arg (*args, nsh_header_t *);
+ u32 max_header_bytes = va_arg (*args, u32);
+ u32 tmp, header_bytes;
+
+ header_bytes = sizeof (h[0]);
+ if (max_header_bytes != 0 && header_bytes > max_header_bytes)
+ return format (s, "nsh header truncated");
+
+ tmp = clib_net_to_host_u32 (h->nsp_nsi);
+ s = format (s, " nsp %d nsi %d ",
+ (tmp>>NSH_NSP_SHIFT) & NSH_NSP_MASK,
+ tmp & NSH_NSI_MASK);
+
+ s = format (s, "c1 %u c2 %u c3 %u c4 %u",
+ clib_net_to_host_u32 (h->c1),
+ clib_net_to_host_u32 (h->c2),
+ clib_net_to_host_u32 (h->c3),
+ clib_net_to_host_u32 (h->c4));
+
+ s = format (s, "ver %d ", h->ver_o_c>>6);
+
+ if (h->ver_o_c & NSH_O_BIT)
+ s = format (s, "O-set ");
+
+ if (h->ver_o_c & NSH_C_BIT)
+ s = format (s, "C-set ");
+
+ s = format (s, "len %d (%d bytes) md_type %d next_protocol %d\n",
+ h->length, h->length * 4, h->md_type, h->next_protocol);
+ return s;
+}
+
+u8 * format_nsh_input_map_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ nsh_input_trace_t * t
+ = va_arg (*args, nsh_input_trace_t *);
+
+ s = format (s, "\n %U", format_nsh_header, &t->nsh_header,
+ (u32) sizeof (t->nsh_header) );
+
+ return s;
+}
+
+static uword
+nsh_input_map (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ u32 n_left_from, next_index, * from, * to_next;
+ nsh_main_t * nm = &nsh_main;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t * b0, * b1;
+ u32 next0 = NSH_INPUT_NEXT_DROP, next1 = NSH_INPUT_NEXT_DROP;
+ uword * entry0, * entry1;
+ nsh_header_t * hdr0 = 0, * hdr1 = 0;
+ u32 nsp_nsi0, nsp_nsi1;
+ u32 error0, error1;
+ nsh_map_t * map0 = 0, * map1 = 0;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, * p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
+ bi0 = from[0];
+ bi1 = from[1];
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ error0 = 0;
+ error1 = 0;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ hdr0 = vlib_buffer_get_current (b0);
+ nsp_nsi0 = clib_net_to_host_u32(hdr0->nsp_nsi);
+ entry0 = hash_get_mem (nm->nsh_mapping_by_key, &nsp_nsi0);
+
+ b1 = vlib_get_buffer (vm, bi1);
+ hdr1 = vlib_buffer_get_current (b1);
+ nsp_nsi1 = clib_net_to_host_u32(hdr1->nsp_nsi);
+ entry1 = hash_get_mem (nm->nsh_mapping_by_key, &nsp_nsi1);
+
+ if (PREDICT_FALSE(entry0 == 0))
+ {
+ error0 = NSH_INPUT_ERROR_NO_MAPPING;
+ goto trace0;
+ }
+
+ if (PREDICT_FALSE(entry1 == 0))
+ {
+ error1 = NSH_INPUT_ERROR_NO_MAPPING;
+ goto trace1;
+ }
+
+ /* Entry should point to a mapping ...*/
+ map0 = pool_elt_at_index (nm->nsh_mappings, entry0[0]);
+ map1 = pool_elt_at_index (nm->nsh_mappings, entry1[0]);
+
+ if (PREDICT_FALSE(map0 == 0))
+ {
+ error0 = NSH_INPUT_ERROR_NO_MAPPING;
+ goto trace0;
+ }
+
+ if (PREDICT_FALSE(map1 == 0))
+ {
+ error1 = NSH_INPUT_ERROR_NO_MAPPING;
+ goto trace1;
+ }
+
+ entry0 = hash_get_mem (nm->nsh_entry_by_key, &map0->mapped_nsp_nsi);
+ entry1 = hash_get_mem (nm->nsh_entry_by_key, &map1->mapped_nsp_nsi);
+
+ if (PREDICT_FALSE(entry0 == 0))
+ {
+ error0 = NSH_INPUT_ERROR_NO_MAPPING;
+ goto trace0;
+ }
+ if (PREDICT_FALSE(entry1 == 0))
+ {
+ error1 = NSH_INPUT_ERROR_NO_MAPPING;
+ goto trace1;
+ }
+
+ hdr0 = pool_elt_at_index (nm->nsh_entries, entry0[0]);
+ hdr1 = pool_elt_at_index (nm->nsh_entries, entry1[0]);
+
+ /* set up things for next node to transmit ie which node to handle it and where */
+ next0 = map0->next_node;
+ next1 = map1->next_node;
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = map0->sw_if_index;
+ vnet_buffer(b1)->sw_if_index[VLIB_TX] = map1->sw_if_index;
+
+ trace0:
+ b0->error = error0 ? node->errors[error0] : 0;
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ nsh_input_trace_t *tr =
+ vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->nsh_header = *hdr0;
+ }
+
+ trace1:
+ b1->error = error1 ? node->errors[error1] : 0;
+
+ if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ nsh_input_trace_t *tr =
+ vlib_add_trace (vm, node, b1, sizeof (*tr));
+ tr->nsh_header = *hdr1;
+ }
+
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t * b0;
+ u32 next0 = NSH_INPUT_NEXT_DROP;
+ uword * entry0;
+ nsh_header_t * hdr0 = 0;
+ u32 nsp_nsi0;
+ u32 error0;
+ nsh_map_t * map0 = 0;
+
+ next_index = next0;
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+ error0 = 0;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ hdr0 = vlib_buffer_get_current (b0);
+ nsp_nsi0 = clib_net_to_host_u32(hdr0->nsp_nsi);
+ entry0 = hash_get_mem (nm->nsh_mapping_by_key, &nsp_nsi0);
+
+ if (PREDICT_FALSE(entry0 == 0))
+ {
+ error0 = NSH_INPUT_ERROR_NO_MAPPING;
+ goto trace00;
+ }
+
+ /* Entry should point to a mapping ...*/
+ map0 = pool_elt_at_index (nm->nsh_mappings, entry0[0]);
+
+ if (PREDICT_FALSE(map0 == 0))
+ {
+ error0 = NSH_INPUT_ERROR_NO_MAPPING;
+ goto trace00;
+ }
+
+ entry0 = hash_get_mem (nm->nsh_entry_by_key, &map0->mapped_nsp_nsi);
+
+ if (PREDICT_FALSE(entry0 == 0))
+ {
+ error0 = NSH_INPUT_ERROR_NO_MAPPING;
+ goto trace00;
+ }
+
+ hdr0 = pool_elt_at_index (nm->nsh_entries, entry0[0]);
+
+ /* set up things for next node to transmit ie which node to handle it and where */
+ next0 = map0->next_node;
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = map0->sw_if_index;
+
+ trace00:
+ b0->error = error0 ? node->errors[error0] : 0;
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ nsh_input_trace_t *tr =
+ vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->nsh_header = *hdr0;
+ }
+
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+
+ }
+
+
+ return from_frame->n_vectors;
+}
+
+
+int vnet_nsh_add_del_map (vnet_nsh_add_del_map_args_t *a)
+{
+ nsh_main_t * nm = &nsh_main;
+ nsh_map_t *map = 0;
+ u32 key, *key_copy;
+ uword * entry;
+ hash_pair_t *hp;
+
+ key = a->map.nsp_nsi;
+
+ entry = hash_get_mem (nm->nsh_mapping_by_key, &key);
+
+ if (a->is_add)
+ {
+ /* adding an entry, must not already exist */
+ if (entry)
+ return VNET_API_ERROR_INVALID_VALUE;
+
+ pool_get_aligned (nm->nsh_mappings, map, CLIB_CACHE_LINE_BYTES);
+ memset (map, 0, sizeof (*map));
+
+ /* copy from arg structure */
+ map->nsp_nsi = a->map.nsp_nsi;
+ map->mapped_nsp_nsi = a->map.mapped_nsp_nsi;
+ map->sw_if_index = a->map.sw_if_index;
+ map->next_node = a->map.next_node;
+
+
+ key_copy = clib_mem_alloc (sizeof (*key_copy));
+ clib_memcpy (key_copy, &key, sizeof (*key_copy));
+
+ hash_set_mem (nm->nsh_mapping_by_key, key_copy,
+ map - nm->nsh_mappings);
+ }
+ else
+ {
+ if (!entry)
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+
+ map = pool_elt_at_index (nm->nsh_mappings, entry[0]);
+ hp = hash_get_pair (nm->nsh_mapping_by_key, &key);
+ key_copy = (void *)(hp->key);
+ hash_unset_mem (nm->nsh_mapping_by_key, &key);
+ clib_mem_free (key_copy);
+
+ pool_put (nm->nsh_mappings, map);
+ }
+
+ return 0;
+}
+
+static clib_error_t *
+nsh_add_del_map_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, * line_input = &_line_input;
+ u8 is_add = 1;
+ u32 nsp, nsi, mapped_nsp, mapped_nsi;
+ int nsp_set = 0, nsi_set = 0, mapped_nsp_set = 0, mapped_nsi_set = 0;
+ u32 next_node = ~0;
+ u32 sw_if_index = ~0; // temporary requirement to get this moved over to NSHSFC
+ vnet_nsh_add_del_map_args_t _a, * a = &_a;
+ int rv;
+
+ /* Get a line of input. */
+ if (! unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat (line_input, "del"))
+ is_add = 0;
+ else if (unformat (line_input, "nsp %d", &nsp))
+ nsp_set = 1;
+ else if (unformat (line_input, "nsi %d", &nsi))
+ nsi_set = 1;
+ else if (unformat (line_input, "mapped-nsp %d", &mapped_nsp))
+ mapped_nsp_set = 1;
+ else if (unformat (line_input, "mapped-nsi %d", &mapped_nsi))
+ mapped_nsi_set = 1;
+ else if (unformat (line_input, "encap-gre-intf %d", &sw_if_index))
+ next_node = NSH_INPUT_NEXT_ENCAP_GRE;
+ else if (unformat (line_input, "encap-vxlan-gpe-intf %d", &sw_if_index))
+ next_node = NSH_INPUT_NEXT_ENCAP_VXLANGPE;
+ else if (unformat (line_input, "encap-none"))
+ next_node = NSH_INPUT_NEXT_DROP; // Once moved to NSHSFC see nsh.h:foreach_nsh_input_next to handle this case
+ else
+ return clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+ }
+
+ unformat_free (line_input);
+
+ if (nsp_set == 0 || nsi_set == 0)
+ return clib_error_return (0, "nsp nsi pair required. Key: for NSH entry");
+
+ if (mapped_nsp_set == 0 || mapped_nsi_set == 0)
+ return clib_error_return (0, "mapped-nsp mapped-nsi pair required. Key: for NSH entry");
+
+ if (next_node == ~0)
+ return clib_error_return (0, "must specific action: [encap-gre-intf <nn> | encap-vxlan-gpe-intf <nn> | encap-none]");
+
+ memset (a, 0, sizeof (*a));
+
+ /* set args structure */
+ a->is_add = is_add;
+ a->map.nsp_nsi = (nsp<< NSH_NSP_SHIFT) | nsi;
+ a->map.mapped_nsp_nsi = (mapped_nsp<< NSH_NSP_SHIFT) | mapped_nsi;
+ a->map.sw_if_index = sw_if_index;
+ a->map.next_node = next_node;
+
+
+ rv = vnet_nsh_add_del_map (a);
+
+ switch(rv)
+ {
+ case 0:
+ break;
+ case VNET_API_ERROR_INVALID_VALUE:
+ return clib_error_return (0, "mapping already exists. Remove it first.");
+
+ case VNET_API_ERROR_NO_SUCH_ENTRY:
+ return clib_error_return (0, "mapping does not exist.");
+
+ default:
+ return clib_error_return
+ (0, "vnet_nsh_add_del_map returned %d", rv);
+ }
+ return 0;
+}
+
+
+VLIB_CLI_COMMAND (create_nsh_map_command, static) = {
+ .path = "create nsh map",
+ .short_help =
+ "create nsh map nsp <nn> nsi <nn> [del] map-nsp <nn> map-nsi <nn> [encap-gre-intf <nn> | encap-vxlan-gpe-intf <nn> | encap-none]\n",
+ .function = nsh_add_del_map_command_fn,
+};
+
+static clib_error_t *
+show_nsh_map_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ nsh_main_t * nm = &nsh_main;
+ nsh_map_t * map;
+
+ if (pool_elts (nm->nsh_mappings) == 0)
+ vlib_cli_output (vm, "No nsh maps configured.");
+
+ pool_foreach (map, nm->nsh_mappings,
+ ({
+ vlib_cli_output (vm, "%U", format_nsh_map, map);
+ }));
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (show_nsh_map_command, static) = {
+ .path = "show nsh map",
+ .function = show_nsh_map_command_fn,
+};
+
+
+int vnet_nsh_add_del_entry (vnet_nsh_add_del_entry_args_t *a)
+{
+ nsh_main_t * nm = &nsh_main;
+ nsh_header_t *hdr = 0;
+ u32 key, *key_copy;
+ uword * entry;
+ hash_pair_t *hp;
+
+ key = a->nsh.nsp_nsi;
+
+ entry = hash_get_mem (nm->nsh_entry_by_key, &key);
+
+ if (a->is_add)
+ {
+ /* adding an entry, must not already exist */
+ if (entry)
+ return VNET_API_ERROR_INVALID_VALUE;
+
+ pool_get_aligned (nm->nsh_entries, hdr, CLIB_CACHE_LINE_BYTES);
+ memset (hdr, 0, sizeof (*hdr));
+
+ /* copy from arg structure */
+#define _(x) hdr->x = a->nsh.x;
+ foreach_copy_nshhdr_field;
+#undef _
+
+ key_copy = clib_mem_alloc (sizeof (*key_copy));
+ clib_memcpy (key_copy, &key, sizeof (*key_copy));
+
+ hash_set_mem (nm->nsh_entry_by_key, key_copy,
+ hdr - nm->nsh_entries);
+ }
+ else
+ {
+ if (!entry)
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+
+ hdr = pool_elt_at_index (nm->nsh_entries, entry[0]);
+ hp = hash_get_pair (nm->nsh_entry_by_key, &key);
+ key_copy = (void *)(hp->key);
+ hash_unset_mem (nm->nsh_entry_by_key, &key);
+ clib_mem_free (key_copy);
+
+ pool_put (nm->nsh_entries, hdr);
+ }
+
+ return 0;
+}
+
+
+static clib_error_t *
+nsh_add_del_entry_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, * line_input = &_line_input;
+ u8 is_add = 1;
+ u8 ver_o_c = 0;
+ u8 length = 0;
+ u8 md_type = 0;
+ u8 next_protocol = 1; /* default: ip4 */
+ u32 nsp;
+ u8 nsp_set = 0;
+ u32 nsi;
+ u8 nsi_set = 0;
+ u32 nsp_nsi;
+ u32 c1 = 0;
+ u32 c2 = 0;
+ u32 c3 = 0;
+ u32 c4 = 0;
+ u32 *tlvs = 0;
+ u32 tmp;
+ int rv;
+ vnet_nsh_add_del_entry_args_t _a, * a = &_a;
+
+ /* Get a line of input. */
+ if (! unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat (line_input, "del"))
+ is_add = 0;
+ else if (unformat (line_input, "version %d", &tmp))
+ ver_o_c |= (tmp & 3) << 6;
+ else if (unformat (line_input, "o-bit %d", &tmp))
+ ver_o_c |= (tmp & 1) << 5;
+ else if (unformat (line_input, "c-bit %d", &tmp))
+ ver_o_c |= (tmp & 1) << 4;
+ else if (unformat (line_input, "md-type %d", &tmp))
+ md_type = tmp;
+ else if (unformat(line_input, "next-ip4"))
+ next_protocol = 1;
+ else if (unformat(line_input, "next-ip6"))
+ next_protocol = 2;
+ else if (unformat(line_input, "next-ethernet"))
+ next_protocol = 3;
+ else if (unformat (line_input, "c1 %d", &c1))
+ ;
+ else if (unformat (line_input, "c2 %d", &c2))
+ ;
+ else if (unformat (line_input, "c3 %d", &c3))
+ ;
+ else if (unformat (line_input, "c4 %d", &c4))
+ ;
+ else if (unformat (line_input, "nsp %d", &nsp))
+ nsp_set = 1;
+ else if (unformat (line_input, "nsi %d", &nsi))
+ nsi_set = 1;
+ else if (unformat (line_input, "tlv %x"))
+ vec_add1 (tlvs, tmp);
+ else
+ return clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+ }
+
+ unformat_free (line_input);
+
+ if (nsp_set == 0)
+ return clib_error_return (0, "nsp not specified");
+
+ if (nsi_set == 0)
+ return clib_error_return (0, "nsi not specified");
+
+ if (md_type != 1)
+ return clib_error_return (0, "md-type 1 only supported at this time");
+
+ md_type = 1;
+ length = 6;
+
+ nsp_nsi = (nsp<<8) | nsi;
+
+ memset (a, 0, sizeof (*a));
+
+ a->is_add = is_add;
+
+#define _(x) a->nsh.x = x;
+ foreach_copy_nshhdr_field;
+#undef _
+
+ a->nsh.tlvs[0] = 0 ; // TODO FIXME this shouldn't be set 0 - in NSH_SFC project
+
+ rv = vnet_nsh_add_del_entry (a);
+
+ switch(rv)
+ {
+ case 0:
+ break;
+ default:
+ return clib_error_return
+ (0, "vnet_nsh_add_del_entry returned %d", rv);
+ }
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (create_nsh_entry_command, static) = {
+ .path = "create nsh entry",
+ .short_help =
+ "create nsh entry {nsp <nn> nsi <nn>} c1 <nn> c2 <nn> c3 <nn> c4 <nn>"
+ " [md-type <nn>] [tlv <xx>] [del]\n",
+ .function = nsh_add_del_entry_command_fn,
+};
+
+static clib_error_t *
+show_nsh_entry_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ nsh_main_t * nm = &nsh_main;
+ nsh_header_t * hdr;
+
+ if (pool_elts (nm->nsh_entries) == 0)
+ vlib_cli_output (vm, "No nsh entries configured.");
+
+ pool_foreach (hdr, nm->nsh_entries,
+ ({
+ vlib_cli_output (vm, "%U", format_nsh_header, hdr);
+ }));
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (show_nsh_entry_command, static) = {
+ .path = "show nsh entry",
+ .function = show_nsh_entry_command_fn,
+};
+
+static char * nsh_input_error_strings[] = {
+#define _(sym,string) string,
+ foreach_nsh_input_error
+#undef _
+};
+
+VLIB_REGISTER_NODE (nsh_input_node) = {
+ .function = nsh_input_map,
+ .name = "nsh-input",
+ .vector_size = sizeof (u32),
+ .format_trace = format_nsh_input_map_trace,
+ .format_buffer = format_nsh_header_with_length,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(nsh_input_error_strings),
+ .error_strings = nsh_input_error_strings,
+
+ .n_next_nodes = NSH_INPUT_N_NEXT,
+
+ .next_nodes = {
+#define _(s,n) [NSH_INPUT_NEXT_##s] = n,
+ foreach_nsh_input_next
+#undef _
+ },
+};
+
+clib_error_t *nsh_init (vlib_main_t *vm)
+{
+ nsh_main_t *nm = &nsh_main;
+
+ nm->vnet_main = vnet_get_main();
+ nm->vlib_main = vm;
+
+ nm->nsh_mapping_by_key
+ = hash_create_mem (0, sizeof(u32), sizeof (uword));
+
+ nm->nsh_mapping_by_mapped_key
+ = hash_create_mem (0, sizeof(u32), sizeof (uword));
+
+ nm->nsh_entry_by_key
+ = hash_create_mem (0, sizeof(u32), sizeof (uword));
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION(nsh_init);
diff --git a/vnet/vnet/nsh/nsh.h b/vnet/vnet/nsh/nsh.h
new file mode 100644
index 00000000000..d1c46121d25
--- /dev/null
+++ b/vnet/vnet/nsh/nsh.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef included_vnet_nsh_h
+#define included_vnet_nsh_h
+
+#include <vnet/vnet.h>
+#include <vnet/nsh/nsh_packet.h>
+#include <vnet/ip/ip4_packet.h>
+
+typedef struct {
+
+ /** Key for nsh_header_t entry: 24bit NSP 8bit NSI */
+ u32 nsp_nsi;
+
+ /** Key for nsh_header_t entry to map to. : 24bit NSP 8bit NSI
+ * This may be ~0 if next action is to decap to NSH next protocol
+ * Note the following heuristic:
+ * if nsp_nsi == mapped_nsp_nsi then use-case is like SFC SFF
+ * if nsp_nsi != mapped_nsp_nsi then use-case is like SFC SF
+ * Note: these are heuristics. Rules about NSI decrement are out of scope
+ */
+ u32 mapped_nsp_nsi;
+
+ /* vnet intfc sw_if_index */
+ u32 sw_if_index;
+
+ u32 next_node;
+
+} nsh_map_t;
+
+typedef struct {
+ nsh_map_t map;
+ u32 is_add;
+} vnet_nsh_add_del_map_args_t;
+
+typedef struct {
+ u8 is_add;
+ nsh_header_t nsh;
+} vnet_nsh_add_del_entry_args_t;
+
+typedef struct {
+ /* vector of nsh_header entry instances */
+ nsh_header_t *nsh_entries;
+
+ /* hash lookup nsh header by key: {u32: nsp_nsi} */
+ uword * nsh_entry_by_key;
+
+ /* vector of nsh_mappings */
+ nsh_map_t *nsh_mappings;
+
+ /* hash lookup nsh mapping by key: {u32: nsp_nsi} */
+ uword * nsh_mapping_by_key;
+ uword * nsh_mapping_by_mapped_key; // for use in NSHSFC
+
+ /* convenience */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+} nsh_main_t;
+
+nsh_main_t nsh_main;
+
+u8 * format_nsh_input_map_trace (u8 * s, va_list * args);
+u8 * format_nsh_header_with_length (u8 * s, va_list * args);
+
+/* Statistics (not really errors) */
+#define foreach_nsh_input_error \
+_(MAPPED, "NSH header found and mapped") \
+_(NO_MAPPING, "no mapping for nsh key") \
+_(INVALID_NEXT_PROTOCOL, "invalid next protocol") \
+
+typedef enum {
+#define _(sym,str) NSH_INPUT_ERROR_##sym,
+ foreach_nsh_input_error
+#undef _
+ NSH_INPUT_N_ERROR,
+
+} nsh_input_error_t;
+
+#define foreach_nsh_input_next \
+ _(DROP, "error-drop") \
+ _(ENCAP_GRE, "gre-input" ) \
+ _(ENCAP_VXLANGPE, "vxlan-gpe-encap" ) \
+/* /\* TODO once moved to Project:NSH_SFC *\/ */
+ /* _(ENCAP_ETHERNET, "*** TX TO ETHERNET ***") \ */
+/* _(DECAP_ETHERNET_LOOKUP, "ethernet-input" ) \ */
+/* _(DECAP_IP4_INPUT, "ip4-input") \ */
+/* _(DECAP_IP6_INPUT, "ip6-input" ) \ */
+
+typedef enum {
+#define _(s,n) NSH_INPUT_NEXT_##s,
+ foreach_nsh_input_next
+#undef _
+ NSH_INPUT_N_NEXT,
+} nsh_input_next_t;
+
+#endif /* included_vnet_nsh_h */
diff --git a/vnet/vnet/nsh/nsh_error.def b/vnet/vnet/nsh/nsh_error.def
index 532b02a6e89..c54e3b895c8 100644
--- a/vnet/vnet/nsh/nsh_error.def
+++ b/vnet/vnet/nsh/nsh_error.def
@@ -12,6 +12,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-nsh_gre_error (DECAPSULATED, "good packets decapsulated")
-nsh_gre_error (NO_SUCH_TUNNEL, "no such tunnel packets")
-nsh_gre_error (INVALID_NEXT_PROTOCOL, "invalid next protocol")
+nsh_input_error (DECAPSULATED, "good packets decapsulated")
+nsh_input_error (NO_MAPPING, "no mapping for nsh key")
+nsh_input_error (INVALID_NEXT_PROTOCOL, "invalid next protocol") \ No newline at end of file
diff --git a/vnet/vnet/nsh/nsh_gre_error.def b/vnet/vnet/nsh/nsh_gre_error.def
new file mode 100644
index 00000000000..45d8ef424b5
--- /dev/null
+++ b/vnet/vnet/nsh/nsh_gre_error.def
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+nsh_gre_error (DECAPSULATED, "good packets decapsulated")
+nsh_gre_error (NO_SUCH_TUNNEL, "no such tunnel packets")
+nsh_gre_error (INVALID_NEXT_PROTOCOL, "invalid next protocol") \ No newline at end of file
diff --git a/vnet/vnet/nsh/nsh_packet.h b/vnet/vnet/nsh/nsh_packet.h
index 87d46a93b6d..cbe4f1e4193 100644
--- a/vnet/vnet/nsh/nsh_packet.h
+++ b/vnet/vnet/nsh/nsh_packet.h
@@ -73,20 +73,21 @@ typedef CLIB_PACKED(struct {
u8 length;
u8 md_type;
u8 next_protocol;
- u32 spi_si;
+ u32 nsp_nsi; // nsp 24 bits, nsi 8 bits
/* Context headers, always present */
u32 c1; u32 c2; u32 c3; u32 c4;
/* Optional variable length metadata */
- u32 * tlvs;
+ u32 tlvs[0];
}) nsh_header_t;
+#define NSH_VERSION (0<<6)
#define NSH_O_BIT (1<<5)
#define NSH_C_BIT (1<<4)
/* Network byte order shift / mask */
-#define NSH_SINDEX_MASK 0xFF
-#define NSH_SPI_MASK (0x00FFFFFF)
-#define NSH_SPI_SHIFT 8
+#define NSH_NSI_MASK 0xFF
+#define NSH_NSP_MASK (0x00FFFFFF)
+#define NSH_NSP_SHIFT 8
#endif /* included_vnet_nsh_packet_h */
diff --git a/vnet/vnet/nsh-gre/decap.c b/vnet/vnet/vxlan-gpe/decap.c
index c10b11b3070..aed5857d0b1 100644
--- a/vnet/vnet/nsh-gre/decap.c
+++ b/vnet/vnet/vxlan-gpe/decap.c
@@ -1,5 +1,5 @@
/*
- * nsh.c: nsh packet processing
+ * decap.c - decapsulate VXLAN GPE
*
* Copyright (c) 2013 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,92 +17,60 @@
#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
-#include <vnet/nsh-gre/nsh_gre.h>
-#include <vnet/nsh/nsh_packet.h>
-
-vlib_node_registration_t nsh_input_node;
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
typedef struct {
u32 next_index;
u32 tunnel_index;
u32 error;
- nsh_header_t h;
-} nsh_rx_trace_t;
-
+} vxlan_gpe_rx_trace_t;
-u8 * format_nsh_header_with_length (u8 * s, va_list * args)
-{
- nsh_header_t * h = va_arg (*args, nsh_header_t *);
- u32 max_header_bytes = va_arg (*args, u32);
- u32 tmp, header_bytes;
-
- header_bytes = sizeof (h[0]);
- if (max_header_bytes != 0 && header_bytes > max_header_bytes)
- return format (s, "gre-nsh header truncated");
-
- s = format (s, "ver %d ", h->ver_o_c>>6);
-
- if (h->ver_o_c & NSH_O_BIT)
- s = format (s, "O-set ");
-
- if (h->ver_o_c & NSH_C_BIT)
- s = format (s, "C-set ");
-
- s = format (s, "len %d (%d bytes) md_type %d next_protocol %d\n",
- h->length, h->length * 4, h->md_type, h->next_protocol);
-
- tmp = clib_net_to_host_u32 (h->spi_si);
-
- s = format (s, " spi %d si %d ",
- (tmp>>NSH_SPI_SHIFT) & NSH_SPI_MASK,
- tmp & NSH_SINDEX_MASK);
-
- s = format (s, "c1 %u c2 %u c3 %u c4 %u",
- clib_net_to_host_u32 (h->c1),
- clib_net_to_host_u32 (h->c2),
- clib_net_to_host_u32 (h->c3),
- clib_net_to_host_u32 (h->c4));
-
- return s;
-}
-
-
-u8 * format_nsh_rx_trace (u8 * s, va_list * args)
+static u8 * format_vxlan_gpe_rx_trace (u8 * s, va_list * args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- nsh_rx_trace_t * t = va_arg (*args, nsh_rx_trace_t *);
+ vxlan_gpe_rx_trace_t * t = va_arg (*args, vxlan_gpe_rx_trace_t *);
if (t->tunnel_index != ~0)
{
- s = format (s, "NSH: tunnel %d next %d error %d", t->tunnel_index,
+ s = format (s, "VXLAN-GPE: tunnel %d next %d error %d", t->tunnel_index,
t->next_index, t->error);
}
else
{
- s = format (s, "NSH: no tunnel next %d error %d\n", t->next_index,
+ s = format (s, "VXLAN-GPE: no tunnel next %d error %d\n", t->next_index,
t->error);
}
- s = format (s, "\n %U", format_nsh_header_with_length, &t->h,
- (u32) sizeof (t->h) /* max size */);
+ return s;
+}
+
+
+static u8 * format_vxlan_gpe_with_length (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+
+
return s;
}
static uword
-nsh_gre_input (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * from_frame)
+vxlan_gpe_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
{
u32 n_left_from, next_index, * from, * to_next;
- nsh_gre_main_t * ngm = &nsh_gre_main;
+ vxlan_gpe_main_t * ngm = &vxlan_gpe_main;
vnet_main_t * vnm = ngm->vnet_main;
vnet_interface_main_t * im = &vnm->interface_main;
u32 last_tunnel_index = ~0;
- u64 last_key = ~0ULL;
+ vxlan_gpe_tunnel_key_t last_key;
u32 pkts_decapsulated = 0;
u32 cpu_index = os_get_cpu_number();
u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
+ memset (&last_key, 0xff, sizeof (last_key));
+
from = vlib_frame_vector_args (from_frame);
n_left_from = from_frame->n_vectors;
@@ -122,11 +90,11 @@ nsh_gre_input (vlib_main_t * vm,
u32 bi0, bi1;
vlib_buffer_t * b0, * b1;
u32 next0, next1;
- nsh_header_t * h0, * h1;
+ ip4_vxlan_gpe_header_t * iuvn0, * iuvn1;
uword * p0, * p1;
u32 tunnel_index0, tunnel_index1;
- nsh_gre_tunnel_t * t0, * t1;
- u64 key0, key1;
+ vxlan_gpe_tunnel_t * t0, * t1;
+ vxlan_gpe_tunnel_key_t key0, key1;
u32 error0, error1;
u32 sw_if_index0, sw_if_index1, len0, len1;
@@ -156,35 +124,56 @@ nsh_gre_input (vlib_main_t * vm,
b0 = vlib_get_buffer (vm, bi0);
b1 = vlib_get_buffer (vm, bi1);
- h0 = vlib_buffer_get_current (b0);
- h1 = vlib_buffer_get_current (b1);
+ /* udp leaves current_data pointing at the vxlan header */
+ vlib_buffer_advance
+ (b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
+ vlib_buffer_advance
+ (b1, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
- /* gre stashed the src ip4 address for us... */
- key0 = (((u64)(vnet_buffer(b0)->gre.src))<<32) | h0->spi_si;
- key1 = (((u64)(vnet_buffer(b1)->gre.src))<<32) | h1->spi_si;
+ iuvn0 = vlib_buffer_get_current (b0);
+ iuvn1 = vlib_buffer_get_current (b1);
- /* "pop" nsh header */
- vlib_buffer_advance (b0, sizeof (*h0));
- vlib_buffer_advance (b1, sizeof (*h1));
+ /* pop (ip, udp, vxlan) */
+ vlib_buffer_advance (b0, sizeof (*iuvn0));
+ vlib_buffer_advance (b1, sizeof (*iuvn1));
tunnel_index0 = ~0;
tunnel_index1 = ~0;
error0 = 0;
error1 = 0;
- next0 = NSH_GRE_INPUT_NEXT_DROP;
- next1 = NSH_GRE_INPUT_NEXT_DROP;
- if (PREDICT_FALSE(key0 != last_key))
+ next0 = (iuvn0->vxlan.protocol < VXLAN_GPE_INPUT_N_NEXT) ? iuvn0->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
+ next1 = (iuvn1->vxlan.protocol < VXLAN_GPE_INPUT_N_NEXT) ? iuvn1->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
+
+
+
+
+ key0.local = iuvn0->ip4.dst_address.as_u32;
+ key1.local = iuvn1->ip4.dst_address.as_u32;
+
+ key0.remote = iuvn0->ip4.src_address.as_u32;
+ key1.remote = iuvn1->ip4.src_address.as_u32;
+
+ key0.vni = iuvn0->vxlan.vni_res;
+ key1.vni = iuvn1->vxlan.vni_res;
+
+ key0.pad = 0;
+ key1.pad = 0;
+
+ /* Processing for key0 */
+ if (PREDICT_FALSE ((key0.as_u64[0] != last_key.as_u64[0])
+ || (key0.as_u64[1] != last_key.as_u64[1])))
{
- p0 = hash_get (ngm->nsh_gre_tunnel_by_src_address, key0);
+ p0 = hash_get_mem (ngm->vxlan_gpe_tunnel_by_key, &key0);
if (p0 == 0)
{
- error0 = NSH_GRE_ERROR_NO_SUCH_TUNNEL;
+ error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
goto trace0;
}
- last_key = key0;
+ last_key.as_u64[0] = key0.as_u64[0];
+ last_key.as_u64[1] = key0.as_u64[1];
tunnel_index0 = last_tunnel_index = p0[0];
}
else
@@ -192,17 +181,18 @@ nsh_gre_input (vlib_main_t * vm,
t0 = pool_elt_at_index (ngm->tunnels, tunnel_index0);
- next0 = t0->decap_next_index;
+ next0 = t0->protocol;
+
sw_if_index0 = t0->sw_if_index;
len0 = vlib_buffer_length_in_chain(vm, b0);
/* Required to make the l2 tag push / pop code work on l2 subifs */
vnet_update_l2_len (b0);
-
- next0 = t0->decap_next_index;
-
- /* ip[46] lookup in the configured FIB, otherwise an opaque */
- vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
+
+ /*
+ * ip[46] lookup in the configured FIB
+ */
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
pkts_decapsulated++;
stats_n_packets += 1;
@@ -224,27 +214,30 @@ nsh_gre_input (vlib_main_t * vm,
trace0:
b0->error = error0 ? node->errors[error0] : 0;
- if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
{
- nsh_rx_trace_t *tr = vlib_add_trace (vm, node,
- b0, sizeof (*tr));
+ vxlan_gpe_rx_trace_t *tr
+ = vlib_add_trace (vm, node, b0, sizeof (*tr));
tr->next_index = next0;
tr->error = error0;
tr->tunnel_index = tunnel_index0;
- tr->h = h0[0];
}
- if (PREDICT_FALSE(key1 != last_key))
+
+ /* Processing for key1 */
+ if (PREDICT_FALSE ((key1.as_u64[0] != last_key.as_u64[0])
+ || (key1.as_u64[1] != last_key.as_u64[1])))
{
- p1 = hash_get (ngm->nsh_gre_tunnel_by_src_address, key1);
+ p1 = hash_get_mem (ngm->vxlan_gpe_tunnel_by_key, &key1);
if (p1 == 0)
{
- error1 = NSH_GRE_ERROR_NO_SUCH_TUNNEL;
+ error1 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
goto trace1;
}
- last_key = key1;
+ last_key.as_u64[0] = key1.as_u64[0];
+ last_key.as_u64[1] = key1.as_u64[1];
tunnel_index1 = last_tunnel_index = p1[0];
}
else
@@ -252,22 +245,24 @@ nsh_gre_input (vlib_main_t * vm,
t1 = pool_elt_at_index (ngm->tunnels, tunnel_index1);
- next1 = t1->decap_next_index;
+ next1 = t1->protocol;
sw_if_index1 = t1->sw_if_index;
len1 = vlib_buffer_length_in_chain(vm, b1);
/* Required to make the l2 tag push / pop code work on l2 subifs */
vnet_update_l2_len (b1);
- next1 = t1->decap_next_index;
+ /*
+ * ip[46] lookup in the configured FIB
+ */
+ vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;
- /* ip[46] lookup in the configured FIB, otherwise an opaque */
- vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;
pkts_decapsulated++;
stats_n_packets += 1;
stats_n_bytes += len1;
- /* Batch stats increment on the same nsh-gre tunnel so counter
+
+ /* Batch stats increment on the same vxlan tunnel so counter
is not incremented per packet */
if (PREDICT_FALSE(sw_if_index1 != stats_sw_if_index))
{
@@ -286,14 +281,13 @@ nsh_gre_input (vlib_main_t * vm,
trace1:
b1->error = error1 ? node->errors[error1] : 0;
- if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
+ if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
{
- nsh_rx_trace_t *tr = vlib_add_trace (vm, node,
- b1, sizeof (*tr));
+ vxlan_gpe_rx_trace_t *tr
+ = vlib_add_trace (vm, node, b1, sizeof (*tr));
tr->next_index = next1;
tr->error = error1;
tr->tunnel_index = tunnel_index1;
- tr->h = h1[0];
}
vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
@@ -306,11 +300,11 @@ nsh_gre_input (vlib_main_t * vm,
u32 bi0;
vlib_buffer_t * b0;
u32 next0;
- nsh_header_t * h0;
+ ip4_vxlan_gpe_header_t * iuvn0;
uword * p0;
u32 tunnel_index0;
- nsh_gre_tunnel_t * t0;
- u64 key0;
+ vxlan_gpe_tunnel_t * t0;
+ vxlan_gpe_tunnel_key_t key0;
u32 error0;
u32 sw_if_index0, len0;
@@ -322,29 +316,38 @@ nsh_gre_input (vlib_main_t * vm,
n_left_to_next -= 1;
b0 = vlib_get_buffer (vm, bi0);
- h0 = vlib_buffer_get_current (b0);
- /* gre stashed the src ip4 address for us... */
- key0 = (((u64)(vnet_buffer(b0)->gre.src))<<32) | h0->spi_si;
+ /* udp leaves current_data pointing at the vxlan header */
+ vlib_buffer_advance
+ (b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
- /* "pop" nsh header */
- vlib_buffer_advance (b0, sizeof (*h0));
+ iuvn0 = vlib_buffer_get_current (b0);
+
+ /* pop (ip, udp, vxlan) */
+ vlib_buffer_advance (b0, sizeof (*iuvn0));
tunnel_index0 = ~0;
error0 = 0;
- next0 = NSH_GRE_INPUT_NEXT_DROP;
+ next0 = (iuvn0->vxlan.protocol < VXLAN_GPE_INPUT_N_NEXT) ? iuvn0->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
- if (PREDICT_FALSE(key0 != last_key))
- {
- p0 = hash_get (ngm->nsh_gre_tunnel_by_src_address, key0);
+ key0.local = iuvn0->ip4.dst_address.as_u32;
+ key0.remote = iuvn0->ip4.src_address.as_u32;
+ key0.vni = iuvn0->vxlan.vni_res;
+ key0.pad = 0;
+ if (PREDICT_FALSE ((key0.as_u64[0] != last_key.as_u64[0])
+ || (key0.as_u64[1] != last_key.as_u64[1])))
+ {
+ p0 = hash_get_mem (ngm->vxlan_gpe_tunnel_by_key, &key0);
+
if (p0 == 0)
{
- error0 = NSH_GRE_ERROR_NO_SUCH_TUNNEL;
+ error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
goto trace00;
}
- last_key = key0;
+ last_key.as_u64[0] = key0.as_u64[0];
+ last_key.as_u64[1] = key0.as_u64[1];
tunnel_index0 = last_tunnel_index = p0[0];
}
else
@@ -352,23 +355,24 @@ nsh_gre_input (vlib_main_t * vm,
t0 = pool_elt_at_index (ngm->tunnels, tunnel_index0);
- next0 = t0->decap_next_index;
- sw_if_index0 = t0->sw_if_index;
+ next0 = t0->protocol;
+
+ sw_if_index0 = t0->sw_if_index;
len0 = vlib_buffer_length_in_chain(vm, b0);
/* Required to make the l2 tag push / pop code work on l2 subifs */
vnet_update_l2_len (b0);
- next0 = t0->decap_next_index;
+ /*
+ * ip[46] lookup in the configured FIB
+ */
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
- /* ip[46] lookup in the configured FIB, otherwise an opaque */
- vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
pkts_decapsulated ++;
-
stats_n_packets += 1;
stats_n_bytes += len0;
- /* Batch stats increment on the same nsh-gre tunnel so counter
+ /* Batch stats increment on the same vxlan-gpe tunnel so counter
is not incremented per packet */
if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index))
{
@@ -386,14 +390,13 @@ nsh_gre_input (vlib_main_t * vm,
trace00:
b0->error = error0 ? node->errors[error0] : 0;
- if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
{
- nsh_rx_trace_t *tr = vlib_add_trace (vm, node,
- b0, sizeof (*tr));
+ vxlan_gpe_rx_trace_t *tr
+ = vlib_add_trace (vm, node, b0, sizeof (*tr));
tr->next_index = next0;
tr->error = error0;
tr->tunnel_index = tunnel_index0;
- tr->h = h0[0];
}
vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
to_next, n_left_to_next,
@@ -402,8 +405,8 @@ nsh_gre_input (vlib_main_t * vm,
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
- vlib_node_increment_counter (vm, nsh_gre_input_node.index,
- NSH_GRE_ERROR_DECAPSULATED,
+ vlib_node_increment_counter (vm, vxlan_gpe_input_node.index,
+ VXLAN_GPE_ERROR_DECAPSULATED,
pkts_decapsulated);
/* Increment any remaining batch stats */
if (stats_n_packets)
@@ -416,30 +419,32 @@ nsh_gre_input (vlib_main_t * vm,
return from_frame->n_vectors;
}
-static char * nsh_error_strings[] = {
-#define nsh_gre_error(n,s) s,
-#include <vnet/nsh/nsh_error.def>
-#undef nsh_gre_error
+static char * vxlan_gpe_error_strings[] = {
+#define vxlan_gpe_error(n,s) s,
+#include <vnet/vxlan-gpe/vxlan_gpe_error.def>
+#undef vxlan_gpe_error
#undef _
};
-VLIB_REGISTER_NODE (nsh_gre_input_node) = {
- .function = nsh_gre_input,
- .name = "nsh-gre-input",
+VLIB_REGISTER_NODE (vxlan_gpe_input_node) = {
+ .function = vxlan_gpe_input,
+ .name = "vxlan-gpe-input",
/* Takes a vector of packets. */
.vector_size = sizeof (u32),
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN(vxlan_gpe_error_strings),
+ .error_strings = vxlan_gpe_error_strings,
- .n_errors = NSH_GRE_N_ERROR,
- .error_strings = nsh_error_strings,
-
- .n_next_nodes = NSH_GRE_INPUT_N_NEXT,
+ .n_next_nodes = VXLAN_GPE_INPUT_N_NEXT,
.next_nodes = {
-#define _(s,n) [NSH_GRE_INPUT_NEXT_##s] = n,
- foreach_nsh_gre_input_next
+#define _(s,n) [VXLAN_GPE_INPUT_NEXT_##s] = n,
+ foreach_vxlan_gpe_input_next
#undef _
},
- .format_buffer = format_nsh_header_with_length,
- .format_trace = format_nsh_rx_trace,
- // $$$$ .unformat_buffer = unformat_nsh_gre_header,
+ .format_buffer = format_vxlan_gpe_with_length,
+ .format_trace = format_vxlan_gpe_rx_trace,
+ // $$$$ .unformat_buffer = unformat_vxlan_gpe_header,
};
+
+
diff --git a/vnet/vnet/nsh-vxlan-gpe/encap.c b/vnet/vnet/vxlan-gpe/encap.c
index af520b2f8f0..3ffe2a62db2 100644
--- a/vnet/vnet/nsh-vxlan-gpe/encap.c
+++ b/vnet/vnet/vxlan-gpe/encap.c
@@ -17,43 +17,44 @@
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
-#include <vnet/nsh-vxlan-gpe/nsh_vxlan_gpe.h>
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
/* Statistics (not really errors) */
-#define foreach_nsh_vxlan_gpe_encap_error \
+#define foreach_vxlan_gpe_encap_error \
_(ENCAPSULATED, "good packets encapsulated")
-static char * nsh_vxlan_gpe_encap_error_strings[] = {
+static char * vxlan_gpe_encap_error_strings[] = {
#define _(sym,string) string,
- foreach_nsh_vxlan_gpe_encap_error
+ foreach_vxlan_gpe_encap_error
#undef _
};
typedef enum {
-#define _(sym,str) NSH_VXLAN_GPE_ENCAP_ERROR_##sym,
- foreach_nsh_vxlan_gpe_encap_error
+#define _(sym,str) VXLAN_GPE_ENCAP_ERROR_##sym,
+ foreach_vxlan_gpe_encap_error
#undef _
- NSH_VXLAN_GPE_ENCAP_N_ERROR,
-} nsh_vxlan_gpe_encap_error_t;
+ VXLAN_GPE_ENCAP_N_ERROR,
+} vxlan_gpe_encap_error_t;
typedef enum {
- NSH_VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP,
- NSH_VXLAN_GPE_ENCAP_NEXT_DROP,
- NSH_VXLAN_GPE_ENCAP_N_NEXT,
-} nsh_vxlan_gpe_encap_next_t;
+ VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP,
+ VXLAN_GPE_ENCAP_NEXT_DROP,
+ VXLAN_GPE_ENCAP_N_NEXT
+} vxlan_gpe_encap_next_t;
typedef struct {
u32 tunnel_index;
-} nsh_vxlan_gpe_encap_trace_t;
+} vxlan_gpe_encap_trace_t;
-u8 * format_nsh_vxlan_gpe_encap_trace (u8 * s, va_list * args)
+
+u8 * format_vxlan_gpe_encap_trace (u8 * s, va_list * args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- nsh_vxlan_gpe_encap_trace_t * t
- = va_arg (*args, nsh_vxlan_gpe_encap_trace_t *);
+ vxlan_gpe_encap_trace_t * t
+ = va_arg (*args, vxlan_gpe_encap_trace_t *);
- s = format (s, "NSH-VXLAN-ENCAP: tunnel %d", t->tunnel_index);
+ s = format (s, "VXLAN-GPE-ENCAP: tunnel %d", t->tunnel_index);
return s;
}
@@ -61,12 +62,12 @@ u8 * format_nsh_vxlan_gpe_encap_trace (u8 * s, va_list * args)
_(0) _(1) _(2) _(3) _(4) _(5) _(6)
static uword
-nsh_vxlan_gpe_encap (vlib_main_t * vm,
+vxlan_gpe_encap (vlib_main_t * vm,
vlib_node_runtime_t * node,
vlib_frame_t * from_frame)
{
u32 n_left_from, next_index, * from, * to_next;
- nsh_vxlan_gpe_main_t * ngm = &nsh_vxlan_gpe_main;
+ vxlan_gpe_main_t * ngm = &vxlan_gpe_main;
vnet_main_t * vnm = ngm->vnet_main;
vnet_interface_main_t * im = &vnm->interface_main;
u32 pkts_encapsulated = 0;
@@ -92,8 +93,8 @@ nsh_vxlan_gpe_encap (vlib_main_t * vm,
{
u32 bi0, bi1;
vlib_buffer_t * b0, * b1;
- u32 next0 = NSH_VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
- u32 next1 = NSH_VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
+ u32 next0 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
+ u32 next1 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
u32 sw_if_index0, sw_if_index1, len0, len1;
vnet_hw_interface_t * hi0, * hi1;
ip4_header_t * ip0, * ip1;
@@ -102,7 +103,7 @@ nsh_vxlan_gpe_encap (vlib_main_t * vm,
u64 * copy_src1, * copy_dst1;
u32 * copy_src_last0, * copy_dst_last0;
u32 * copy_src_last1, * copy_dst_last1;
- nsh_vxlan_gpe_tunnel_t * t0, * t1;
+ vxlan_gpe_tunnel_t * t0, * t1;
u16 new_l0, new_l1;
ip_csum_t sum0, sum1;
@@ -158,9 +159,9 @@ nsh_vxlan_gpe_encap (vlib_main_t * vm,
copy_dst1 = (u64 *) ip1;
copy_src1 = (u64 *) t1->rewrite;
- ASSERT (sizeof (ip4_vxlan_gpe_and_nsh_header_t) == 60);
+ ASSERT (sizeof (ip4_vxlan_gpe_header_t) == 36);
- /* Copy first 56 octets 8-bytes at a time */
+ /* Copy first 36 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
foreach_fixed_header_offset;
#undef _
@@ -259,14 +260,14 @@ nsh_vxlan_gpe_encap (vlib_main_t * vm,
if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
{
- nsh_vxlan_gpe_encap_trace_t *tr =
+ vxlan_gpe_encap_trace_t *tr =
vlib_add_trace (vm, node, b0, sizeof (*tr));
tr->tunnel_index = t0 - ngm->tunnels;
}
if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
{
- nsh_vxlan_gpe_encap_trace_t *tr =
+ vxlan_gpe_encap_trace_t *tr =
vlib_add_trace (vm, node, b1, sizeof (*tr));
tr->tunnel_index = t1 - ngm->tunnels;
}
@@ -280,14 +281,14 @@ nsh_vxlan_gpe_encap (vlib_main_t * vm,
{
u32 bi0;
vlib_buffer_t * b0;
- u32 next0 = NSH_VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
- u32 sw_if_index0, len0;
+ u32 next0 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
+ u32 sw_if_index0, len0;
vnet_hw_interface_t * hi0;
ip4_header_t * ip0;
udp_header_t * udp0;
u64 * copy_src0, * copy_dst0;
u32 * copy_src_last0, * copy_dst_last0;
- nsh_vxlan_gpe_tunnel_t * t0;
+ vxlan_gpe_tunnel_t * t0;
u16 new_l0;
ip_csum_t sum0;
@@ -301,7 +302,7 @@ nsh_vxlan_gpe_encap (vlib_main_t * vm,
b0 = vlib_get_buffer (vm, bi0);
/* 1-wide cache? */
- sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
+ sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
hi0 = vnet_get_sup_hw_interface
(vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]);
@@ -317,9 +318,9 @@ nsh_vxlan_gpe_encap (vlib_main_t * vm,
copy_dst0 = (u64 *) ip0;
copy_src0 = (u64 *) t0->rewrite;
- ASSERT (sizeof (ip4_vxlan_gpe_and_nsh_header_t) == 60);
+ ASSERT (sizeof (ip4_vxlan_gpe_header_t) == 36);
- /* Copy first 56 octets 8-bytes at a time */
+ /* Copy first 36 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
foreach_fixed_header_offset;
#undef _
@@ -354,32 +355,33 @@ nsh_vxlan_gpe_encap (vlib_main_t * vm,
/* Reset to look up tunnel partner in the configured FIB */
vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
- vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
+ vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
pkts_encapsulated ++;
- len0 = vlib_buffer_length_in_chain(vm, b0);
- stats_n_packets += 1;
- stats_n_bytes += len0;
-
- /* Batch stats increment on the same vxlan tunnel so counter is not
- incremented per packet. Note stats are still incremented for deleted
- and admin-down tunnel where packets are dropped. It is not worthwhile
- to check for this rare case and affect normal path performance. */
- if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index)) {
- stats_n_packets -= 1;
- stats_n_bytes -= len0;
- if (stats_n_packets)
- vlib_increment_combined_counter(
- im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
- cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
- stats_n_packets = 1;
- stats_n_bytes = len0;
- stats_sw_if_index = sw_if_index0;
- }
- if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
- {
- nsh_vxlan_gpe_encap_trace_t *tr =
- vlib_add_trace (vm, node, b0, sizeof (*tr));
+ len0 = vlib_buffer_length_in_chain(vm, b0);
+ stats_n_packets += 1;
+ stats_n_bytes += len0;
+
+ /* Batch stats increment on the same vxlan tunnel so counter is not
+ * incremented per packet. Note stats are still incremented for deleted
+ * and admin-down tunnel where packets are dropped. It is not worthwhile
+ * to check for this rare case and affect normal path performance. */
+ if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index))
+ {
+ stats_n_packets -= 1;
+ stats_n_bytes -= len0;
+ if (stats_n_packets)
+ vlib_increment_combined_counter(
+ im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
+ cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
+ stats_n_packets = 1;
+ stats_n_bytes = len0;
+ stats_sw_if_index = sw_if_index0;
+ }
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_gpe_encap_trace_t *tr =
+ vlib_add_trace (vm, node, b0, sizeof (*tr));
tr->tunnel_index = t0 - ngm->tunnels;
}
vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
@@ -390,33 +392,34 @@ nsh_vxlan_gpe_encap (vlib_main_t * vm,
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
vlib_node_increment_counter (vm, node->node_index,
- NSH_VXLAN_GPE_ENCAP_ERROR_ENCAPSULATED,
+ VXLAN_GPE_ENCAP_ERROR_ENCAPSULATED,
pkts_encapsulated);
/* Increment any remaining batch stats */
- if (stats_n_packets) {
- vlib_increment_combined_counter(
- im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, cpu_index,
- stats_sw_if_index, stats_n_packets, stats_n_bytes);
- node->runtime_data[0] = stats_sw_if_index;
- }
+ if (stats_n_packets) {
+ vlib_increment_combined_counter(
+ im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, cpu_index,
+ stats_sw_if_index, stats_n_packets, stats_n_bytes);
+ node->runtime_data[0] = stats_sw_if_index;
+ }
return from_frame->n_vectors;
}
-VLIB_REGISTER_NODE (nsh_vxlan_gpe_encap_node) = {
- .function = nsh_vxlan_gpe_encap,
- .name = "nsh-vxlan-gpe-encap",
+VLIB_REGISTER_NODE (vxlan_gpe_encap_node) = {
+ .function = vxlan_gpe_encap,
+ .name = "vxlan-gpe-encap",
.vector_size = sizeof (u32),
- .format_trace = format_nsh_vxlan_gpe_encap_trace,
+ .format_trace = format_vxlan_gpe_encap_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = ARRAY_LEN(nsh_vxlan_gpe_encap_error_strings),
- .error_strings = nsh_vxlan_gpe_encap_error_strings,
+ .n_errors = ARRAY_LEN(vxlan_gpe_encap_error_strings),
+ .error_strings = vxlan_gpe_encap_error_strings,
- .n_next_nodes = NSH_VXLAN_GPE_ENCAP_N_NEXT,
+ .n_next_nodes = VXLAN_GPE_ENCAP_N_NEXT,
.next_nodes = {
- [NSH_VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP] = "ip4-lookup",
- [NSH_VXLAN_GPE_ENCAP_NEXT_DROP] = "error-drop",
+ [VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP] = "ip4-lookup",
+ [VXLAN_GPE_ENCAP_NEXT_DROP] = "error-drop",
},
};
+
diff --git a/vnet/vnet/nsh-vxlan-gpe/vxlan-gpe-rfc.txt b/vnet/vnet/vxlan-gpe/vxlan-gpe-rfc.txt
index 35cee50f573..35cee50f573 100644
--- a/vnet/vnet/nsh-vxlan-gpe/vxlan-gpe-rfc.txt
+++ b/vnet/vnet/vxlan-gpe/vxlan-gpe-rfc.txt
diff --git a/vnet/vnet/vxlan-gpe/vxlan_gpe.c b/vnet/vnet/vxlan-gpe/vxlan_gpe.c
new file mode 100644
index 00000000000..ef242d0bb8f
--- /dev/null
+++ b/vnet/vnet/vxlan-gpe/vxlan_gpe.c
@@ -0,0 +1,467 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
+
+vxlan_gpe_main_t vxlan_gpe_main;
+
+u8 * format_vxlan_gpe_tunnel (u8 * s, va_list * args)
+{
+ vxlan_gpe_tunnel_t * t = va_arg (*args, vxlan_gpe_tunnel_t *);
+ vxlan_gpe_main_t * gm = &vxlan_gpe_main;
+
+ s = format (s, "[%d] local: %U remote: %U ",
+ t - gm->tunnels,
+ format_ip4_address, &t->local,
+ format_ip4_address, &t->remote);
+
+ switch (t->protocol)
+ {
+ case VXLAN_GPE_PROTOCOL_IP4:
+ s = format (s, "next-protocol ip4");
+ case VXLAN_GPE_PROTOCOL_IP6:
+ s = format (s, "next-protocol ip6");
+ case VXLAN_GPE_PROTOCOL_ETHERNET:
+ s = format (s, "next-protocol ethernet");
+ case VXLAN_GPE_PROTOCOL_NSH:
+ s = format (s, "next-protocol nsh");
+ default:
+ s = format (s, "next-protocol unknown %d", t->protocol);
+ }
+
+ s = format (s, " fibs: (encap %d, decap %d)",
+ t->encap_fib_index,
+ t->decap_fib_index);
+
+ s = format (s, " vxlan VNI %d ", t->vni);
+
+ return s;
+}
+
+static u8 * format_vxlan_gpe_name (u8 * s, va_list * args)
+{
+ vxlan_gpe_main_t * gm = &vxlan_gpe_main;
+ u32 i = va_arg (*args, u32);
+ u32 show_dev_instance = ~0;
+
+ if (i < vec_len (gm->dev_inst_by_real))
+ show_dev_instance = gm->dev_inst_by_real[i];
+
+ if (show_dev_instance != ~0)
+ i = show_dev_instance;
+
+ return format (s, "vxlan_gpe_tunnel%d", i);
+}
+
+static int vxlan_gpe_name_renumber (vnet_hw_interface_t * hi,
+ u32 new_dev_instance)
+{
+ vxlan_gpe_main_t * gm = &vxlan_gpe_main;
+
+ vec_validate_init_empty (gm->dev_inst_by_real, hi->dev_instance, ~0);
+
+ gm->dev_inst_by_real [hi->dev_instance] = new_dev_instance;
+
+ return 0;
+}
+
+static uword dummy_interface_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ clib_warning ("you shouldn't be here, leaking buffers...");
+ return frame->n_vectors;
+}
+
+static uword dummy_set_rewrite (vnet_main_t * vnm,
+ u32 sw_if_index,
+ u32 l3_type,
+ void * dst_address,
+ void * rewrite,
+ uword max_rewrite_bytes)
+{
+ return 0;
+}
+
/* Device class for vxlan-gpe tunnel interfaces.  TX is a dummy
   (encap happens in the vxlan-gpe-encap node); name formatting and
   renumbering are backed by the helpers above. */
VNET_DEVICE_CLASS (vxlan_gpe_device_class,static) = {
  .name = "VXLAN_GPE",
  .format_device_name = format_vxlan_gpe_name,
  .format_tx_trace = format_vxlan_gpe_encap_trace,
  .tx_function = dummy_interface_tx,
  .name_renumber = vxlan_gpe_name_renumber,
};
+
+static u8 * format_vxlan_gpe_header_with_length (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ s = format (s, "unimplemented dev %u", dev_instance);
+ return s;
+}
+
/* Hardware interface class for vxlan-gpe tunnels; header formatting is
   a stub and rewrite is a no-op (tunnels carry no link-layer header). */
VNET_HW_INTERFACE_CLASS (vxlan_gpe_hw_class) = {
  .name = "VXLAN_GPE",
  .format_header = format_vxlan_gpe_header_with_length,
  .set_rewrite = dummy_set_rewrite,
};
+
+
/* Fields copied verbatim from the add-del args struct into a new
   vxlan_gpe_tunnel_t (and from CLI locals into the args struct).
   NOTE: decap_next_index is deliberately absent here — see the CLI
   handler for how it is (or is not) propagated. */
#define foreach_gpe_copy_field \
_(local.as_u32) \
_(remote.as_u32) \
_(vni) \
_(protocol) \
_(encap_fib_index) \
_(decap_fib_index)

/* NOTE(review): unused in this file and refers to src/dst fields that
   were renamed local/remote in this refactor — looks like an NSH-era
   leftover; candidate for removal once confirmed nothing includes it. */
#define foreach_copy_field \
_(src.as_u32) \
_(dst.as_u32) \
_(vni) \
_(encap_fib_index) \
_(decap_fib_index) \
_(decap_next_index)
+
+
+
+static int vxlan_gpe_rewrite (vxlan_gpe_tunnel_t * t)
+{
+ u8 *rw = 0;
+ ip4_header_t * ip0;
+ ip4_vxlan_gpe_header_t * h0;
+ int len;
+
+ len = sizeof (*h0);
+
+ vec_validate_aligned (rw, len-1, CLIB_CACHE_LINE_BYTES);
+
+ h0 = (ip4_vxlan_gpe_header_t *) rw;
+
+ /* Fixed portion of the (outer) ip4 header */
+ ip0 = &h0->ip4;
+ ip0->ip_version_and_header_length = 0x45;
+ ip0->ttl = 254;
+ ip0->protocol = IP_PROTOCOL_UDP;
+
+ /* we fix up the ip4 header length and checksum after-the-fact */
+ ip0->src_address.as_u32 = t->local.as_u32;
+ ip0->dst_address.as_u32 = t->remote.as_u32;
+ ip0->checksum = ip4_header_checksum (ip0);
+
+ /* UDP header, randomize src port on something, maybe? */
+ h0->udp.src_port = clib_host_to_net_u16 (4790);
+ h0->udp.dst_port = clib_host_to_net_u16 (UDP_DST_PORT_vxlan_gpe);
+
+ /* VXLAN header. Are we having fun yet? */
+ h0->vxlan.flags = VXLAN_GPE_FLAGS_I | VXLAN_GPE_FLAGS_P;
+ h0->vxlan.ver_res = VXLAN_GPE_VERSION;
+ h0->vxlan.protocol = VXLAN_GPE_PROTOCOL_IP4;
+ h0->vxlan.vni_res = clib_host_to_net_u32 (t->vni<<8);
+
+ t->rewrite = rw;
+ return (0);
+}
+
+int vnet_vxlan_gpe_add_del_tunnel
+(vnet_vxlan_gpe_add_del_tunnel_args_t *a, u32 * sw_if_indexp)
+{
+ vxlan_gpe_main_t * gm = &vxlan_gpe_main;
+ vxlan_gpe_tunnel_t *t = 0;
+ vnet_main_t * vnm = gm->vnet_main;
+ vnet_hw_interface_t * hi;
+ uword * p;
+ u32 hw_if_index = ~0;
+ u32 sw_if_index = ~0;
+ int rv;
+ vxlan_gpe_tunnel_key_t key, *key_copy;
+ hash_pair_t *hp;
+
+ key.local = a->local.as_u32;
+ key.remote = a->remote.as_u32;
+ key.vni = clib_host_to_net_u32 (a->vni << 8);
+ key.pad = 0;
+
+ p = hash_get_mem (gm->vxlan_gpe_tunnel_by_key, &key);
+
+ if (a->is_add)
+ {
+ /* adding a tunnel: tunnel must not already exist */
+ if (p)
+ return VNET_API_ERROR_INVALID_VALUE;
+
+ if (a->decap_next_index >= VXLAN_GPE_INPUT_N_NEXT)
+ return VNET_API_ERROR_INVALID_DECAP_NEXT;
+
+ pool_get_aligned (gm->tunnels, t, CLIB_CACHE_LINE_BYTES);
+ memset (t, 0, sizeof (*t));
+
+ /* copy from arg structure */
+#define _(x) t->x = a->x;
+ foreach_gpe_copy_field;
+#undef _
+
+ rv = vxlan_gpe_rewrite (t);
+
+ if (rv)
+ {
+ pool_put (gm->tunnels, t);
+ return rv;
+ }
+
+ key_copy = clib_mem_alloc (sizeof (*key_copy));
+ clib_memcpy (key_copy, &key, sizeof (*key_copy));
+
+ hash_set_mem (gm->vxlan_gpe_tunnel_by_key, key_copy,
+ t - gm->tunnels);
+
+ if (vec_len (gm->free_vxlan_gpe_tunnel_hw_if_indices) > 0)
+ {
+ hw_if_index = gm->free_vxlan_gpe_tunnel_hw_if_indices
+ [vec_len (gm->free_vxlan_gpe_tunnel_hw_if_indices)-1];
+ _vec_len (gm->free_vxlan_gpe_tunnel_hw_if_indices) -= 1;
+
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+ hi->dev_instance = t - gm->tunnels;
+ hi->hw_instance = hi->dev_instance;
+ }
+ else
+ {
+ hw_if_index = vnet_register_interface
+ (vnm, vxlan_gpe_device_class.index, t - gm->tunnels,
+ vxlan_gpe_hw_class.index, t - gm->tunnels);
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+ hi->output_node_index = vxlan_gpe_encap_node.index;
+ }
+
+ t->hw_if_index = hw_if_index;
+ t->sw_if_index = sw_if_index = hi->sw_if_index;
+
+ vnet_sw_interface_set_flags (vnm, hi->sw_if_index,
+ VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+ }
+ else
+ {
+ /* deleting a tunnel: tunnel must exist */
+ if (!p)
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+
+ t = pool_elt_at_index (gm->tunnels, p[0]);
+
+ vnet_sw_interface_set_flags (vnm, t->sw_if_index, 0 /* down */);
+ vec_add1 (gm->free_vxlan_gpe_tunnel_hw_if_indices, t->hw_if_index);
+
+ hp = hash_get_pair (gm->vxlan_gpe_tunnel_by_key, &key);
+ key_copy = (void *)(hp->key);
+ hash_unset_mem (gm->vxlan_gpe_tunnel_by_key, &key);
+ clib_mem_free (key_copy);
+
+ vec_free (t->rewrite);
+ pool_put (gm->tunnels, t);
+ }
+
+ if (sw_if_indexp)
+ *sw_if_indexp = sw_if_index;
+
+ return 0;
+}
+
+static u32 fib_index_from_fib_id (u32 fib_id)
+{
+ ip4_main_t * im = &ip4_main;
+ uword * p;
+
+ p = hash_get (im->fib_index_by_table_id, fib_id);
+ if (!p)
+ return ~0;
+
+ return p[0];
+}
+
+static uword unformat_gpe_decap_next (unformat_input_t * input, va_list * args)
+{
+ u32 * result = va_arg (*args, u32 *);
+ u32 tmp;
+
+ if (unformat (input, "drop"))
+ *result = VXLAN_GPE_INPUT_NEXT_DROP;
+ else if (unformat (input, "ip4"))
+ *result = VXLAN_GPE_INPUT_NEXT_IP4_INPUT;
+ else if (unformat (input, "ip6"))
+ *result = VXLAN_GPE_INPUT_NEXT_IP6_INPUT;
+ else if (unformat (input, "ethernet"))
+ *result = VXLAN_GPE_INPUT_NEXT_ETHERNET_INPUT;
+ else if (unformat (input, "nsh"))
+ *result = VXLAN_GPE_INPUT_NEXT_NSH_INPUT;
+ else if (unformat (input, "%d", &tmp))
+ *result = tmp;
+ else
+ return 0;
+ return 1;
+}
+
+static clib_error_t *
+vxlan_gpe_add_del_tunnel_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, * line_input = &_line_input;
+ u8 is_add = 1;
+ ip4_address_t local, remote;
+ u8 local_set = 0;
+ u8 remote_set = 0;
+ u32 encap_fib_index = 0;
+ u32 decap_fib_index = 0;
+ u8 protocol = VXLAN_GPE_PROTOCOL_IP4;
+ u32 decap_next_index = VXLAN_GPE_INPUT_NEXT_IP4_INPUT;
+ u32 vni;
+ u8 vni_set = 0;
+ int rv;
+ u32 tmp;
+ vnet_vxlan_gpe_add_del_tunnel_args_t _a, * a = &_a;
+
+ /* Get a line of input. */
+ if (! unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat (line_input, "del"))
+ is_add = 0;
+ else if (unformat (line_input, "local %U",
+ unformat_ip4_address, &local))
+ local_set = 1;
+ else if (unformat (line_input, "remote %U",
+ unformat_ip4_address, &remote))
+ remote_set = 1;
+ else if (unformat (line_input, "encap-vrf-id %d", &tmp))
+ {
+ encap_fib_index = fib_index_from_fib_id (tmp);
+ if (encap_fib_index == ~0)
+ return clib_error_return (0, "nonexistent encap fib id %d", tmp);
+ }
+ else if (unformat (line_input, "decap-vrf-id %d", &tmp))
+ {
+ decap_fib_index = fib_index_from_fib_id (tmp);
+ if (decap_fib_index == ~0)
+ return clib_error_return (0, "nonexistent decap fib id %d", tmp);
+ }
+ else if (unformat (line_input, "decap-next %U", unformat_gpe_decap_next,
+ &decap_next_index))
+ ;
+ else if (unformat (line_input, "vni %d", &vni))
+ vni_set = 1;
+ else if (unformat(line_input, "next-ip4"))
+ protocol = VXLAN_GPE_PROTOCOL_IP4;
+ else if (unformat(line_input, "next-ip6"))
+ protocol = VXLAN_GPE_PROTOCOL_IP6;
+ else if (unformat(line_input, "next-ethernet"))
+ protocol = VXLAN_GPE_PROTOCOL_ETHERNET;
+ else if (unformat(line_input, "next-nsh"))
+ protocol = VXLAN_GPE_PROTOCOL_NSH;
+ else
+ return clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+ }
+
+ unformat_free (line_input);
+
+ if (local_set == 0)
+ return clib_error_return (0, "tunnel local address not specified");
+
+ if (remote_set == 0)
+ return clib_error_return (0, "tunnel remote address not specified");
+
+ if (vni_set == 0)
+ return clib_error_return (0, "vni not specified");
+
+ memset (a, 0, sizeof (*a));
+
+ a->is_add = is_add;
+
+#define _(x) a->x = x;
+ foreach_gpe_copy_field;
+#undef _
+
+ rv = vnet_vxlan_gpe_add_del_tunnel (a, 0 /* hw_if_indexp */);
+
+ switch(rv)
+ {
+ case 0:
+ break;
+ case VNET_API_ERROR_INVALID_DECAP_NEXT:
+ return clib_error_return (0, "invalid decap-next...");
+
+ case VNET_API_ERROR_TUNNEL_EXIST:
+ return clib_error_return (0, "tunnel already exists...");
+
+ case VNET_API_ERROR_NO_SUCH_ENTRY:
+ return clib_error_return (0, "tunnel does not exist...");
+
+ default:
+ return clib_error_return
+ (0, "vnet_vxlan_gpe_add_del_tunnel returned %d", rv);
+ }
+
+ return 0;
+}
+
/* CLI registration for tunnel create/delete.
   NOTE(review): the handler also parses a "decap-next <...>" option not
   listed in short_help — confirm whether it should be documented here. */
VLIB_CLI_COMMAND (create_vxlan_gpe_tunnel_command, static) = {
  .path = "create vxlan-gpe tunnel",
  .short_help =
  "create vxlan-gpe tunnel local <ip4-addr> remote <ip4-addr>"
  " vni <nn> [next-ip4][next-ip6][next-ethernet][next-nsh]"
  " [encap-vrf-id <nn>] [decap-vrf-id <nn>]"
  " [del]\n",
  .function = vxlan_gpe_add_del_tunnel_command_fn,
};
+
+static clib_error_t *
+show_vxlan_gpe_tunnel_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vxlan_gpe_main_t * gm = &vxlan_gpe_main;
+ vxlan_gpe_tunnel_t * t;
+
+ if (pool_elts (gm->tunnels) == 0)
+ vlib_cli_output (vm, "No vxlan-gpe tunnels configured.");
+
+ pool_foreach (t, gm->tunnels,
+ ({
+ vlib_cli_output (vm, "%U", format_vxlan_gpe_tunnel, t);
+ }));
+
+ return 0;
+}
+
/* CLI registration: "show vxlan-gpe" lists all configured tunnels. */
VLIB_CLI_COMMAND (show_vxlan_gpe_tunnel_command, static) = {
  .path = "show vxlan-gpe",
  .function = show_vxlan_gpe_tunnel_command_fn,
};
+
+clib_error_t *vxlan_gpe_init (vlib_main_t *vm)
+{
+ vxlan_gpe_main_t *gm = &vxlan_gpe_main;
+
+ gm->vnet_main = vnet_get_main();
+ gm->vlib_main = vm;
+
+ gm->vxlan_gpe_tunnel_by_key
+ = hash_create_mem (0, sizeof(vxlan_gpe_tunnel_key_t), sizeof (uword));
+
+ udp_register_dst_port (vm, UDP_DST_PORT_vxlan_gpe,
+ vxlan_gpe_input_node.index, 1 /* is_ip4 */);
+ return 0;
+}
+
+VLIB_INIT_FUNCTION(vxlan_gpe_init);
+
diff --git a/vnet/vnet/nsh-vxlan-gpe/nsh_vxlan_gpe.h b/vnet/vnet/vxlan-gpe/vxlan_gpe.h
index 3effd3318cb..4c2ac444e34 100644
--- a/vnet/vnet/nsh-vxlan-gpe/nsh_vxlan_gpe.h
+++ b/vnet/vnet/vxlan-gpe/vxlan_gpe.h
@@ -12,8 +12,8 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#ifndef included_vnet_nsh_vxlan_gpe_h
-#define included_vnet_nsh_vxlan_gpe_h
+#ifndef included_vnet_vxlan_gpe_h
+#define included_vnet_vxlan_gpe_h
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
@@ -21,45 +21,43 @@
#include <vnet/ip/ip.h>
#include <vnet/l2/l2_input.h>
#include <vnet/ethernet/ethernet.h>
-#include <vnet/gre/gre.h>
-#include <vnet/nsh/nsh_packet.h>
-#include <vnet/nsh-vxlan-gpe/vxlan_gpe_packet.h>
+#include <vnet/vxlan-gpe/vxlan_gpe_packet.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/udp.h>
+
typedef CLIB_PACKED (struct {
ip4_header_t ip4; /* 20 bytes */
udp_header_t udp; /* 8 bytes */
vxlan_gpe_header_t vxlan; /* 8 bytes */
- nsh_header_t nsh; /* 28 bytes */
-}) ip4_vxlan_gpe_and_nsh_header_t;
+}) ip4_vxlan_gpe_header_t;
typedef CLIB_PACKED(struct {
/*
- * Key fields: ip src, vxlan vni, nsh spi_si
+ * Key fields: local remote, vni
* all fields in NET byte order
*/
union {
struct {
- u32 src;
+ u32 local;
+ u32 remote;
u32 vni; /* shifted 8 bits */
- u32 spi_si;
u32 pad;
};
u64 as_u64[2];
};
-}) nsh_vxlan_gpe_tunnel_key_t;
+}) vxlan_gpe_tunnel_key_t;
typedef struct {
/* Rewrite string. $$$$ embed vnet_rewrite header */
u8 * rewrite;
- /* decap next index */
- u32 decap_next_index;
+ /* encapsulated protocol */
+ u8 protocol;
/* tunnel src and dst addresses */
- ip4_address_t src;
- ip4_address_t dst;
+ ip4_address_t local;
+ ip4_address_t remote;
/* FIB indices */
u32 encap_fib_index; /* tunnel partner lookup here */
@@ -72,40 +70,38 @@ typedef struct {
u32 hw_if_index;
u32 sw_if_index;
- /* NSH header fields in HOST byte order */
- nsh_header_t nsh_hdr;
-} nsh_vxlan_gpe_tunnel_t;
+} vxlan_gpe_tunnel_t;
-#define foreach_nsh_vxlan_gpe_input_next \
+#define foreach_vxlan_gpe_input_next \
_(DROP, "error-drop") \
_(IP4_INPUT, "ip4-input") \
_(IP6_INPUT, "ip6-input") \
_(ETHERNET_INPUT, "ethernet-input") \
-_(NSH_VXLAN_GPE_ENCAP, "nsh-vxlan-gpe-encap")
+_(NSH_INPUT, "nsh-input")
typedef enum {
-#define _(s,n) NSH_VXLAN_GPE_INPUT_NEXT_##s,
- foreach_nsh_vxlan_gpe_input_next
+#define _(s,n) VXLAN_GPE_INPUT_NEXT_##s,
+ foreach_vxlan_gpe_input_next
#undef _
- NSH_VXLAN_GPE_INPUT_N_NEXT,
-} nsh_vxlan_gpe_input_next_t;
+ VXLAN_GPE_INPUT_N_NEXT,
+} vxlan_gpe_input_next_t;
typedef enum {
-#define nsh_vxlan_gpe_error(n,s) NSH_VXLAN_GPE_ERROR_##n,
-#include <vnet/nsh-vxlan-gpe/nsh_vxlan_gpe_error.def>
-#undef nsh_vxlan_gpe_error
- NSH_VXLAN_GPE_N_ERROR,
-} nsh_vxlan_gpe_input_error_t;
+#define vxlan_gpe_error(n,s) VXLAN_GPE_ERROR_##n,
+#include <vnet/vxlan-gpe/vxlan_gpe_error.def>
+#undef vxlan_gpe_error
+ VXLAN_GPE_N_ERROR,
+} vxlan_gpe_input_error_t;
typedef struct {
/* vector of encap tunnel instances */
- nsh_vxlan_gpe_tunnel_t *tunnels;
+ vxlan_gpe_tunnel_t *tunnels;
/* lookup tunnel by key */
- uword * nsh_vxlan_gpe_tunnel_by_key;
+ uword * vxlan_gpe_tunnel_by_key;
/* Free vlib hw_if_indices */
- u32 * free_nsh_vxlan_gpe_tunnel_hw_if_indices;
+ u32 * free_vxlan_gpe_tunnel_hw_if_indices;
/* show device instance by real device instance */
u32 * dev_inst_by_real;
@@ -113,26 +109,31 @@ typedef struct {
/* convenience */
vlib_main_t * vlib_main;
vnet_main_t * vnet_main;
-} nsh_vxlan_gpe_main_t;
+} vxlan_gpe_main_t;
-nsh_vxlan_gpe_main_t nsh_vxlan_gpe_main;
+vxlan_gpe_main_t vxlan_gpe_main;
-extern vlib_node_registration_t nsh_vxlan_gpe_input_node;
-extern vlib_node_registration_t nsh_vxlan_gpe_encap_node;
+extern vlib_node_registration_t vxlan_gpe_encap_node;
+extern vlib_node_registration_t vxlan_gpe_input_node;
-u8 * format_nsh_vxlan_gpe_encap_trace (u8 * s, va_list * args);
+u8 * format_vxlan_gpe_encap_trace (u8 * s, va_list * args);
typedef struct {
u8 is_add;
- ip4_address_t src, dst;
+ ip4_address_t local, remote;
+ u8 protocol;
u32 encap_fib_index;
u32 decap_fib_index;
u32 decap_next_index;
u32 vni;
- nsh_header_t nsh_hdr;
-} vnet_nsh_vxlan_gpe_add_del_tunnel_args_t;
+} vnet_vxlan_gpe_add_del_tunnel_args_t;
+
+
+int vnet_vxlan_gpe_add_del_tunnel
+(vnet_vxlan_gpe_add_del_tunnel_args_t *a, u32 * sw_if_indexp);
+
+
+
-int vnet_nsh_vxlan_gpe_add_del_tunnel
-(vnet_nsh_vxlan_gpe_add_del_tunnel_args_t *a, u32 * sw_if_indexp);
-#endif /* included_vnet_nsh_vxlan_gpe_h */
+#endif /* included_vnet_vxlan_gpe_h */
diff --git a/vnet/vnet/nsh-vxlan-gpe/nsh_vxlan_gpe_error.def b/vnet/vnet/vxlan-gpe/vxlan_gpe_error.def
index 4ba64fe4dc5..9cf1b1cb656 100644
--- a/vnet/vnet/nsh-vxlan-gpe/nsh_vxlan_gpe_error.def
+++ b/vnet/vnet/vxlan-gpe/vxlan_gpe_error.def
@@ -12,5 +12,5 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-nsh_vxlan_gpe_error (DECAPSULATED, "good packets decapsulated")
-nsh_vxlan_gpe_error (NO_SUCH_TUNNEL, "no such tunnel packets")
+vxlan_gpe_error (DECAPSULATED, "good packets decapsulated")
+vxlan_gpe_error (NO_SUCH_TUNNEL, "no such tunnel packets")
diff --git a/vnet/vnet/nsh-vxlan-gpe/vxlan_gpe_packet.h b/vnet/vnet/vxlan-gpe/vxlan_gpe_packet.h
index efc85c4bb54..3403cc9ebad 100644
--- a/vnet/vnet/nsh-vxlan-gpe/vxlan_gpe_packet.h
+++ b/vnet/vnet/vxlan-gpe/vxlan_gpe_packet.h
@@ -52,23 +52,31 @@
* 0x4 : Network Service Header [NSH]
*/
+#define foreach_vxlan_gpe_protocol \
+_ (0x01, IP4) \
+_ (0x02, IP6) \
+_ (0x03, ETHERNET) \
+_ (0x04, NSH)
+
+
+typedef enum {
+#define _(n,f) VXLAN_GPE_PROTOCOL_##f = n,
+ foreach_vxlan_gpe_protocol
+#undef _
+} vxlan_gpe_protocol_t;
+
typedef struct {
u8 flags;
u8 ver_res;
u8 res;
- u8 next_protocol;
+ /* see vxlan_gpe_protocol_t */
+ u8 protocol;
u32 vni_res;
} vxlan_gpe_header_t;
#define VXLAN_GPE_FLAGS_I 0x08
#define VXLAN_GPE_FLAGS_P 0x04
#define VXLAN_GPE_FLAGS_O 0x01
-
#define VXLAN_GPE_VERSION 0x0
-#define VXLAN_NEXT_PROTOCOL_IP4 0x1
-#define VXLAN_NEXT_PROTOCOL_IP6 0x2
-#define VXLAN_NEXT_PROTOCOL_ETHERNET 0x3
-#define VXLAN_NEXT_PROTOCOL_NSH 0x4
-
#endif /* included_vxlan_gpe_packet_h */