summaryrefslogtreecommitdiffstats
path: root/src/plugins/nsh
diff options
context:
space:
mode:
authorHongjun Ni <hongjun.ni@intel.com>2018-08-27 20:27:43 +0800
committerDamjan Marion <dmarion@me.com>2018-08-28 06:04:57 +0000
commitd313f9e6f7c6d50aac189668a67bf13b86dd791c (patch)
tree0f92babad6891604e79f521de136b0a2bb946fbc /src/plugins/nsh
parenta5679e86af3d06df46edbf2654d48103e05c4b48 (diff)
Port NSH plugin to VPP
Please refer to https://wiki.fd.io/view/NSH_SFC Change-Id: Iba7e33e4dbb064c1527aaddbe8dce4b6b63a627a Signed-off-by: Hongjun Ni <hongjun.ni@intel.com> Signed-off-by: Keith Burns (alagalah) <alagalah@gmail.com>
Diffstat (limited to 'src/plugins/nsh')
-rw-r--r--src/plugins/nsh/CMakeLists.txt33
-rw-r--r--src/plugins/nsh/nsh-md2-ioam/export-nsh-md2-ioam/nsh_md2_ioam_export.c177
-rw-r--r--src/plugins/nsh/nsh-md2-ioam/export-nsh-md2-ioam/nsh_md2_ioam_export_thread.c50
-rw-r--r--src/plugins/nsh/nsh-md2-ioam/export-nsh-md2-ioam/nsh_md2_ioam_node.c155
-rw-r--r--src/plugins/nsh/nsh-md2-ioam/md2_ioam_transit.c195
-rw-r--r--src/plugins/nsh/nsh-md2-ioam/nsh_md2_ioam.c508
-rw-r--r--src/plugins/nsh/nsh-md2-ioam/nsh_md2_ioam.h119
-rw-r--r--src/plugins/nsh/nsh-md2-ioam/nsh_md2_ioam_api.c77
-rw-r--r--src/plugins/nsh/nsh-md2-ioam/nsh_md2_ioam_trace.c464
-rw-r--r--src/plugins/nsh/nsh-md2-ioam/nsh_md2_ioam_util.h122
-rw-r--r--src/plugins/nsh/nsh.api123
-rw-r--r--src/plugins/nsh/nsh.c2545
-rw-r--r--src/plugins/nsh/nsh.h271
-rw-r--r--src/plugins/nsh/nsh_error.def17
-rw-r--r--src/plugins/nsh/nsh_output.c520
-rw-r--r--src/plugins/nsh/nsh_packet.h116
-rw-r--r--src/plugins/nsh/nsh_pop.c365
-rw-r--r--src/plugins/nsh/nsh_test.c411
18 files changed, 6268 insertions, 0 deletions
diff --git a/src/plugins/nsh/CMakeLists.txt b/src/plugins/nsh/CMakeLists.txt
new file mode 100644
index 00000000000..866b3704c0d
--- /dev/null
+++ b/src/plugins/nsh/CMakeLists.txt
@@ -0,0 +1,33 @@
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+add_vpp_plugin(nsh
+ SOURCES
+ nsh.c
+ nsh_output.c
+ nsh_pop.c
+ nsh-md2-ioam/nsh_md2_ioam.c
+ nsh-md2-ioam/nsh_md2_ioam_api.c
+ nsh-md2-ioam/md2_ioam_transit.c
+ nsh-md2-ioam/nsh_md2_ioam_trace.c
+ nsh-md2-ioam/export-nsh-md2-ioam/nsh_md2_ioam_export.c
+ nsh-md2-ioam/export-nsh-md2-ioam/nsh_md2_ioam_export_thread.c
+ nsh-md2-ioam/export-nsh-md2-ioam/nsh_md2_ioam_node.c
+
+ API_FILES
+ nsh.api
+
+ API_TEST_SOURCES
+ nsh_test.c
+)
+
diff --git a/src/plugins/nsh/nsh-md2-ioam/export-nsh-md2-ioam/nsh_md2_ioam_export.c b/src/plugins/nsh/nsh-md2-ioam/export-nsh-md2-ioam/nsh_md2_ioam_export.c
new file mode 100644
index 00000000000..cb2bb2107c9
--- /dev/null
+++ b/src/plugins/nsh/nsh-md2-ioam/export-nsh-md2-ioam/nsh_md2_ioam_export.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ *------------------------------------------------------------------
+ * nsh_md2_ioam_export.c - ioam export API / debug CLI handling
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/plugin/plugin.h>
+#include <ioam/export-common/ioam_export.h>
+
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+
+#include <nsh/nsh.h>
+#include <nsh/nsh-md2-ioam/nsh_md2_ioam.h>
+
+
+ioam_export_main_t nsh_md2_ioam_export_main;
+
+
+extern vlib_node_registration_t nsh_md2_ioam_export_node;
+extern void nsh_md2_set_next_ioam_export_override (uword next);
+/* Action function shared between message handler and debug CLI */
+int
+nsh_md2_ioam_export_enable_disable (ioam_export_main_t * em,
+ u8 is_disable,
+ ip4_address_t * collector_address,
+ ip4_address_t * src_address)
+{
+ vlib_main_t *vm = em->vlib_main;
+ u32 node_index = nsh_md2_ioam_export_node.index;
+ vlib_node_t *nsh_input_node = NULL;
+
+ if (is_disable == 0)
+ {
+ if (em->my_hbh_slot == ~0)
+ {
+ /* Hook this export node to nsh-input */
+ nsh_input_node = vlib_get_node_by_name (vm, (u8 *) "nsh-input");
+ if (!nsh_input_node)
+ {
+ /* node does not exist give up */
+ return (-1);
+ }
+ em->my_hbh_slot =
+ vlib_node_add_next (vm, nsh_input_node->index, node_index);
+ }
+ if (1 == ioam_export_header_create (em, collector_address, src_address))
+ {
+ ioam_export_thread_buffer_init (em, vm);
+ nsh_md2_set_next_ioam_export_override (em->my_hbh_slot);
+ /* Turn on the export buffer check process */
+ vlib_process_signal_event (vm, em->export_process_node_index, 1, 0);
+
+ }
+ else
+ {
+ return (-2);
+ }
+ }
+ else
+ {
+      nsh_md2_set_next_ioam_export_override (0);	// 0 restores the default next slot (old comment was copied from vxlan-gpe)
+ ioam_export_header_cleanup (em, collector_address, src_address);
+ ioam_export_thread_buffer_free (em);
+ /* Turn off the export buffer check process */
+ vlib_process_signal_event (vm, em->export_process_node_index, 2, 0);
+
+ }
+
+ return 0;
+}
+
+
+
+static clib_error_t *
+set_nsh_md2_ioam_export_ipfix_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ ioam_export_main_t *em = &nsh_md2_ioam_export_main;
+ ip4_address_t collector, src;
+ u8 is_disable = 0;
+
+ collector.as_u32 = 0;
+ src.as_u32 = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "collector %U", unformat_ip4_address, &collector))
+ ;
+ else if (unformat (input, "src %U", unformat_ip4_address, &src))
+ ;
+ else if (unformat (input, "disable"))
+ is_disable = 1;
+ else
+ break;
+ }
+
+ if (collector.as_u32 == 0)
+ return clib_error_return (0, "collector address required");
+
+ if (src.as_u32 == 0)
+ return clib_error_return (0, "src address required");
+
+ em->ipfix_collector.as_u32 = collector.as_u32;
+ em->src_address.as_u32 = src.as_u32;
+
+ vlib_cli_output (vm, "Collector %U, src address %U",
+ format_ip4_address, &em->ipfix_collector,
+ format_ip4_address, &em->src_address);
+
+ /* Turn on the export timer process */
+ // vlib_process_signal_event (vm, flow_report_process_node.index,
+ //1, 0);
+ if (0 !=
+ nsh_md2_ioam_export_enable_disable (em, is_disable, &collector, &src))
+ {
+ return clib_error_return (0, "Unable to set ioam nsh-md2 export");
+ }
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_nsh_md2_ioam_ipfix_command, static) =
+{
+.path = "set nsh-md2-ioam export ipfix",
+.short_help = "set nsh-md2-ioam export ipfix collector <ip4-address> src <ip4-address>",
+.function = set_nsh_md2_ioam_export_ipfix_command_fn,
+};
+/* *INDENT-ON* */
+
+
+#define IPFIX_NSH_MD2_IOAM_EXPORT_ID 274 // TODO: Move this to ioam/ioam_export.h
+static clib_error_t *
+nsh_md2_ioam_export_init (vlib_main_t * vm)
+{
+ ioam_export_main_t *em = &nsh_md2_ioam_export_main;
+ clib_error_t *error = 0;
+
+ em->set_id = IPFIX_NSH_MD2_IOAM_EXPORT_ID;
+ em->unix_time_0 = (u32) time (0); /* Store starting time */
+ em->vlib_time_0 = vlib_time_now (vm);
+
+ em->my_hbh_slot = ~0;
+ em->vlib_main = vm;
+ em->vnet_main = vnet_get_main ();
+ ioam_export_reset_next_node (em);
+
+ return error;
+}
+
+VLIB_INIT_FUNCTION (nsh_md2_ioam_export_init);
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/nsh/nsh-md2-ioam/export-nsh-md2-ioam/nsh_md2_ioam_export_thread.c b/src/plugins/nsh/nsh-md2-ioam/export-nsh-md2-ioam/nsh_md2_ioam_export_thread.c
new file mode 100644
index 00000000000..c5dd1bc1130
--- /dev/null
+++ b/src/plugins/nsh/nsh-md2-ioam/export-nsh-md2-ioam/nsh_md2_ioam_export_thread.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * nsh_md2_ioam_export_thread.c
+ */
+#include <vnet/api_errno.h>
+#include <vppinfra/pool.h>
+#include <ioam/export-common/ioam_export.h>
+
+static vlib_node_registration_t nsh_md2_ioam_export_process_node;
+extern ioam_export_main_t nsh_md2_ioam_export_main;
+
+static uword
+nsh_md2_ioam_export_process (vlib_main_t * vm,
+ vlib_node_runtime_t * rt, vlib_frame_t * f)
+{
+ return (ioam_export_process_common (&nsh_md2_ioam_export_main,
+ vm, rt, f,
+ nsh_md2_ioam_export_process_node.index));
+}
+
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (nsh_md2_ioam_export_process_node, static) =
+{
+ .function = nsh_md2_ioam_export_process,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "nsh-md2-ioam-export-process",
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/nsh/nsh-md2-ioam/export-nsh-md2-ioam/nsh_md2_ioam_node.c b/src/plugins/nsh/nsh-md2-ioam/export-nsh-md2-ioam/nsh_md2_ioam_node.c
new file mode 100644
index 00000000000..f2910c3d9fb
--- /dev/null
+++ b/src/plugins/nsh/nsh-md2-ioam/export-nsh-md2-ioam/nsh_md2_ioam_node.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vppinfra/error.h>
+#include <vnet/ip/ip.h>
+#include <nsh/nsh.h>
+#include <nsh/nsh_packet.h>
+#include <ioam/export-common/ioam_export.h>
+
+typedef struct
+{
+ u32 next_index;
+ u32 flow_label;
+} export_trace_t;
+
+extern ioam_export_main_t nsh_md2_ioam_export_main;
+vlib_node_registration_t export_node;
+/* packet trace format function */
+static u8 *
+format_export_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ export_trace_t *t = va_arg (*args, export_trace_t *);
+
+ s = format (s, "EXPORT: flow_label %d, next index %d",
+ t->flow_label, t->next_index);
+ return s;
+}
+
+vlib_node_registration_t nsh_md2_ioam_export_node;
+
+#define foreach_export_error \
+_(RECORDED, "Packets recorded for export")
+
+typedef enum
+{
+#define _(sym,str) EXPORT_ERROR_##sym,
+ foreach_export_error
+#undef _
+ EXPORT_N_ERROR,
+} export_error_t;
+
+static char *export_error_strings[] = {
+#define _(sym,string) string,
+ foreach_export_error
+#undef _
+};
+
+typedef enum
+{
+ EXPORT_NEXT_NSH_MD2_IOAM_INPUT,
+ EXPORT_N_NEXT,
+} export_next_t;
+
+always_inline void
+copy3cachelines (void *dst, const void *src, size_t n)
+{
+
+ u64 *copy_dst, *copy_src;
+ int i;
+ copy_dst = (u64 *) dst;
+ copy_src = (u64 *) src;
+ if (PREDICT_FALSE (n < DEFAULT_EXPORT_SIZE))
+ {
+ for (i = 0; i < n / 64; i++)
+ {
+ copy_dst[0] = copy_src[0];
+ copy_dst[1] = copy_src[1];
+ copy_dst[2] = copy_src[2];
+ copy_dst[3] = copy_src[3];
+ copy_dst[4] = copy_src[4];
+ copy_dst[5] = copy_src[5];
+ copy_dst[6] = copy_src[6];
+ copy_dst[7] = copy_src[7];
+ copy_dst += 8;
+ copy_src += 8;
+ }
+ return;
+ }
+ for (i = 0; i < 3; i++)
+ {
+ copy_dst[0] = copy_src[0];
+ copy_dst[1] = copy_src[1];
+ copy_dst[2] = copy_src[2];
+ copy_dst[3] = copy_src[3];
+ copy_dst[4] = copy_src[4];
+ copy_dst[5] = copy_src[5];
+ copy_dst[6] = copy_src[6];
+ copy_dst[7] = copy_src[7];
+ copy_dst += 8;
+ copy_src += 8;
+ }
+}
+
+static void
+nsh_md2_ioam_export_fixup_func (vlib_buffer_t * export_buf,
+ vlib_buffer_t * pak_buf)
+{
+ /* Todo: on implementing analyse */
+}
+
+static uword
+nsh_md2_ioam_export_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ ioam_export_main_t *em = &nsh_md2_ioam_export_main;
+ ioam_export_node_common (em, vm, node, frame, ip4_header_t, length,
+ ip_version_and_header_length,
+ EXPORT_NEXT_NSH_MD2_IOAM_INPUT,
+ nsh_md2_ioam_export_fixup_func);
+ return frame->n_vectors;
+}
+
+/*
+ * Node for iOAM export
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (nsh_md2_ioam_export_node) =
+{
+ .function = nsh_md2_ioam_export_node_fn,
+ .name = "nsh-md2-ioam-export",
+ .vector_size = sizeof (u32),
+ .format_trace = format_export_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN (export_error_strings),
+ .error_strings = export_error_strings,
+ .n_next_nodes = EXPORT_N_NEXT,
+ /* edit / add dispositions here */
+ .next_nodes =
+ {[EXPORT_NEXT_NSH_MD2_IOAM_INPUT] = "nsh-pop"},
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/nsh/nsh-md2-ioam/md2_ioam_transit.c b/src/plugins/nsh/nsh-md2-ioam/md2_ioam_transit.c
new file mode 100644
index 00000000000..9ea5447eba8
--- /dev/null
+++ b/src/plugins/nsh/nsh-md2-ioam/md2_ioam_transit.c
@@ -0,0 +1,195 @@
+ /*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/udp/udp.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/lisp-gpe/lisp_gpe.h>
+#include <vnet/lisp-gpe/lisp_gpe_packet.h>
+#include <nsh/nsh.h>
+#include <nsh/nsh_packet.h>
+#include <nsh/nsh-md2-ioam/nsh_md2_ioam.h>
+#include <nsh/nsh-md2-ioam/nsh_md2_ioam_util.h>
+#include <vnet/fib/ip6_fib.h>
+#include <vnet/fib/ip4_fib.h>
+#include <vnet/fib/fib_entry.h>
+
+/* Statistics (not really errors) */
+#define foreach_nsh_md2_ioam_encap_transit_error \
+_(ENCAPSULATED, "good packets encapsulated")
+
+static char *nsh_md2_ioam_encap_transit_error_strings[] = {
+#define _(sym,string) string,
+ foreach_nsh_md2_ioam_encap_transit_error
+#undef _
+};
+
+typedef enum
+{
+#define _(sym,str) NSH_MD2_IOAM_ENCAP_TRANSIT_IOAM_ERROR_##sym,
+ foreach_nsh_md2_ioam_encap_transit_error
+#undef _
+ NSH_MD2_IOAM_ENCAP_TRANSIT_IOAM_N_ERROR,
+} nsh_md2_ioam_encap_transit_error_t;
+
+typedef enum
+{
+ NSH_MD2_IOAM_ENCAP_TRANSIT_IOAM_NEXT_OUTPUT,
+ NSH_MD2_IOAM_ENCAP_TRANSIT_IOAM_NEXT_DROP,
+ NSH_MD2_IOAM_ENCAP_TRANSIT_IOAM_N_NEXT
+} nsh_md2_ioam_encap_transit_next_t;
+
+
+/* *INDENT-OFF* */
+VNET_FEATURE_INIT (nsh_md2_ioam_encap_transit, static) =
+{
+ .arc_name = "ip4-output",
+ .node_name = "nsh-md2-ioam-encap-transit",
+ .runs_before = VNET_FEATURES ("adj-midchain-tx"),
+};
+/* *INDENT-ON* */
+
+
+static uword
+nsh_md2_ioam_encap_transit (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ u32 n_left_from, next_index, *from, *to_next;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0 = NSH_MD2_IOAM_ENCAP_TRANSIT_IOAM_NEXT_OUTPUT;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+ ip4_header_t *ip0;
+ u32 iph_offset = 0;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ iph_offset = vnet_buffer (b0)->ip.save_rewrite_length;
+ ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b0)
+ + iph_offset);
+
+	  /* only ipv4 packets are inspected; non-ipv4 packets are just forwarded */
+ if (PREDICT_FALSE
+ ((ip0->ip_version_and_header_length & 0xF0) == 0x40))
+ {
+ /* ipv4 packets */
+ udp_header_t *udp_hdr0 = (udp_header_t *) (ip0 + 1);
+ if (PREDICT_FALSE
+ ((ip0->protocol == IP_PROTOCOL_UDP) &&
+ (clib_net_to_host_u16 (udp_hdr0->dst_port) ==
+ UDP_DST_PORT_lisp_gpe)))
+ {
+
+ /* Check the iOAM header */
+ lisp_gpe_header_t *lisp_gpe_hdr0 =
+ (lisp_gpe_header_t *) (udp_hdr0 + 1);
+ nsh_base_header_t *nsh_hdr =
+ (nsh_base_header_t *) (lisp_gpe_hdr0 + 1);
+
+ if (PREDICT_FALSE
+ (lisp_gpe_hdr0->next_protocol ==
+ LISP_GPE_NEXT_PROTO_NSH) && (nsh_hdr->md_type == 2))
+ {
+ uword *t = NULL;
+ nsh_md2_ioam_main_t *hm = &nsh_md2_ioam_main;
+ fib_prefix_t key4;
+ memset (&key4, 0, sizeof (key4));
+ key4.fp_proto = FIB_PROTOCOL_IP4;
+ key4.fp_addr.ip4.as_u32 = ip0->dst_address.as_u32;
+ t = hash_get_mem (hm->dst_by_ip4, &key4);
+ if (t)
+ {
+ vlib_buffer_advance (b0,
+ (word) (sizeof
+ (ethernet_header_t)));
+ nsh_md2_ioam_encap_decap_ioam_v4_one_inline (vm,
+ node,
+ b0,
+ &next0,
+ NSH_MD2_IOAM_ENCAP_TRANSIT_IOAM_NEXT_DROP,
+ 1
+ /* use_adj */
+ );
+ vlib_buffer_advance (b0,
+ -(word) (sizeof
+ (ethernet_header_t)));
+ }
+ }
+ }
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return from_frame->n_vectors;
+}
+
+extern u8 *format_nsh_node_map_trace (u8 * s, va_list * args);
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (nsh_md2_ioam_encap_transit_node) = {
+ .function = nsh_md2_ioam_encap_transit,
+ .name = "nsh-md2-ioam-encap-transit",
+ .vector_size = sizeof (u32),
+ .format_trace = format_nsh_node_map_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(nsh_md2_ioam_encap_transit_error_strings),
+ .error_strings = nsh_md2_ioam_encap_transit_error_strings,
+
+ .n_next_nodes = NSH_MD2_IOAM_ENCAP_TRANSIT_IOAM_N_NEXT,
+
+ .next_nodes = {
+ [NSH_MD2_IOAM_ENCAP_TRANSIT_IOAM_NEXT_OUTPUT] = "interface-output",
+ [NSH_MD2_IOAM_ENCAP_TRANSIT_IOAM_NEXT_DROP] = "error-drop",
+ },
+
+};
+/* *INDENT-ON* */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/nsh/nsh-md2-ioam/nsh_md2_ioam.c b/src/plugins/nsh/nsh-md2-ioam/nsh_md2_ioam.c
new file mode 100644
index 00000000000..1fa1c55b00f
--- /dev/null
+++ b/src/plugins/nsh/nsh-md2-ioam/nsh_md2_ioam.c
@@ -0,0 +1,508 @@
+/*
+ * nsh_md2_ioam.c - NSH iOAM functions for MD type 2
+ *
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/plugin/plugin.h>
+#include <nsh/nsh.h>
+#include <nsh/nsh_packet.h>
+#include <vnet/ip/ip.h>
+#include <nsh/nsh-md2-ioam/nsh_md2_ioam.h>
+
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+
+#include <vnet/fib/ip6_fib.h>
+#include <vnet/fib/ip4_fib.h>
+#include <vnet/fib/fib_entry.h>
+
+/* define message structures */
+#define vl_typedefs
+#include <nsh/nsh.api.h>
+#undef vl_typedefs
+
+/* define generated endian-swappers */
+#define vl_endianfun
+#include <nsh/nsh.api.h>
+#undef vl_endianfun
+
+nsh_md2_ioam_main_t nsh_md2_ioam_main;
+
+static void
+nsh_md2_ioam_set_clear_output_feature_on_intf (vlib_main_t * vm,
+ u32 sw_if_index0,
+ u8 is_add)
+{
+
+
+
+ vnet_feature_enable_disable ("ip4-output",
+ "nsh-md2-ioam-encap-transit",
+ sw_if_index0, is_add,
+ 0 /* void *feature_config */ ,
+ 0 /* u32 n_feature_config_bytes */ );
+ return;
+}
+
+void
+nsh_md2_ioam_clear_output_feature_on_all_intfs (vlib_main_t * vm)
+{
+ vnet_sw_interface_t *si = 0;
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_interface_main_t *im = &vnm->interface_main;
+
+ pool_foreach (si, im->sw_interfaces, (
+ {
+ nsh_md2_ioam_set_clear_output_feature_on_intf
+ (vm, si->sw_if_index, 0);
+ }));
+ return;
+}
+
+
+extern fib_forward_chain_type_t
+fib_entry_get_default_chain_type (const fib_entry_t * fib_entry);
+
+int
+nsh_md2_ioam_enable_disable_for_dest (vlib_main_t * vm,
+ ip46_address_t dst_addr,
+ u32 outer_fib_index,
+ u8 is_ipv4, u8 is_add)
+{
+ nsh_md2_ioam_main_t *hm = &nsh_md2_ioam_main;
+ u32 fib_index0 = 0;
+
+ fib_node_index_t fei = ~0;
+ u32 *sw_if_index0 = NULL;
+#if 0
+ fib_entry_t *fib_entry;
+ u32 adj_index0;
+ ip_adjacency_t *adj0;
+ load_balance_t *lb_m, *lb_b;
+ const dpo_id_t *dpo0, *dpo1;
+ u32 i, j, k;
+#endif
+ u32 *intf_list = NULL;
+ fib_prefix_t fib_prefix;
+
+ if (is_ipv4)
+ {
+ memset (&fib_prefix, 0, sizeof (fib_prefix_t));
+ fib_prefix.fp_len = 32;
+ fib_prefix.fp_proto = FIB_PROTOCOL_IP4;
+#define TRANSIT_UNIT_TEST_HACK 1
+#ifdef TRANSIT_UNIT_TEST_HACK
+ memset(&dst_addr, 0, sizeof(dst_addr));
+ dst_addr.ip4.as_u32 = clib_net_to_host_u32(0x14020102);
+#endif
+ fib_prefix.fp_addr = dst_addr;
+ }
+ else
+ {
+ return 0;
+ }
+
+ fei = fib_table_lookup (fib_index0, &fib_prefix);
+#if 0
+ fib_entry = fib_entry_get (fei);
+
+
+ if (!dpo_id_is_valid (&fib_entry->fe_lb))
+ {
+ return (-1);
+ }
+
+ lb_m = load_balance_get (fib_entry->fe_lb.dpoi_index);
+
+ for (i = 0; i < lb_m->lb_n_buckets; i++)
+ {
+ dpo0 = load_balance_get_bucket_i (lb_m, i);
+
+ if (dpo0->dpoi_type == DPO_LOAD_BALANCE ||
+ dpo0->dpoi_type == DPO_ADJACENCY)
+ {
+ if (dpo0->dpoi_type == DPO_ADJACENCY)
+ {
+ k = 1;
+ }
+ else
+ {
+ lb_b = load_balance_get (dpo0->dpoi_index);
+ k = lb_b->lb_n_buckets;
+ }
+
+ for (j = 0; j < k; j++)
+ {
+ if (dpo0->dpoi_type == DPO_ADJACENCY)
+ {
+ dpo1 = dpo0;
+ }
+ else
+ {
+ dpo1 = load_balance_get_bucket_i (lb_b, j);
+ }
+
+ if (dpo1->dpoi_type == DPO_ADJACENCY)
+ {
+ adj_index0 = dpo1->dpoi_index;
+
+ if (ADJ_INDEX_INVALID == adj_index0)
+ {
+ continue;
+ }
+
+ adj0 =
+ ip_get_adjacency (&(ip4_main.lookup_main), adj_index0);
+ sw_if_index0 = adj0->rewrite_header.sw_if_index;
+
+ if (~0 == sw_if_index0)
+ {
+ continue;
+ }
+
+
+ if (is_add)
+ {
+ vnet_feature_enable_disable ("ip4-output",
+ "nsh-md2-ioam-encap-transit",
+ sw_if_index0, is_add, 0,
+ /* void *feature_config */
+ 0 /* u32 n_feature_config_bytes */
+ );
+
+ vec_validate_init_empty (hm->bool_ref_by_sw_if_index,
+ sw_if_index0, ~0);
+ hm->bool_ref_by_sw_if_index[sw_if_index0] = 1;
+ }
+ else
+ {
+ hm->bool_ref_by_sw_if_index[sw_if_index0] = ~0;
+ }
+ }
+ }
+ }
+ }
+#else
+
+u32 fib_path_get_resolving_interface (fib_node_index_t path_index);
+ vec_add1(intf_list, fib_path_get_resolving_interface(fei));
+ vec_foreach(sw_if_index0, intf_list)
+ if (is_add)
+ {
+ vnet_feature_enable_disable ("ip4-output",
+ "nsh-md2-ioam-encap-transit",
+ *sw_if_index0, is_add, 0,
+ /* void *feature_config */
+ 0 /* u32 n_feature_config_bytes */
+ );
+
+ vec_validate_init_empty (hm->bool_ref_by_sw_if_index,
+ *sw_if_index0, ~0);
+ hm->bool_ref_by_sw_if_index[*sw_if_index0] = 1;
+ }
+ else
+ {
+ hm->bool_ref_by_sw_if_index[*sw_if_index0] = ~0;
+ }
+
+#endif
+
+ if (is_ipv4)
+ {
+ uword *t = NULL;
+ nsh_md2_ioam_dest_tunnels_t *t1;
+ fib_prefix_t key4, *key4_copy;
+ hash_pair_t *hp;
+ memset (&key4, 0, sizeof (key4));
+ key4.fp_proto = FIB_PROTOCOL_IP4;
+ key4.fp_addr.ip4.as_u32 = fib_prefix.fp_addr.ip4.as_u32;
+ t = hash_get_mem (hm->dst_by_ip4, &key4);
+ if (is_add)
+ {
+ if (t)
+ {
+ return 0;
+ }
+ pool_get_aligned (hm->dst_tunnels, t1, CLIB_CACHE_LINE_BYTES);
+ memset (t1, 0, sizeof (*t1));
+ t1->fp_proto = FIB_PROTOCOL_IP4;
+ t1->dst_addr.ip4.as_u32 = fib_prefix.fp_addr.ip4.as_u32;
+ key4_copy = clib_mem_alloc (sizeof (*key4_copy));
+ memset(key4_copy, 0, sizeof(*key4_copy));
+ clib_memcpy (key4_copy, &key4, sizeof (*key4_copy));
+ hash_set_mem (hm->dst_by_ip4, key4_copy, t1 - hm->dst_tunnels);
+ /*
+	   * Attach to the FIB entry for the NSH-MD2 transit destination
+ * and become its child. The dest route will invoke a callback
+ * when the fib entry changes, it can be used to
+ * re-program the output feature on the egress interface.
+ */
+
+ const fib_prefix_t tun_dst_pfx = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {.ip4 = t1->dst_addr.ip4,}
+ };
+
+ t1->fib_entry_index =
+ fib_table_entry_special_add (outer_fib_index,
+ &tun_dst_pfx,
+ FIB_SOURCE_RR,
+ FIB_ENTRY_FLAG_NONE);
+ t1->sibling_index =
+ fib_entry_child_add (t1->fib_entry_index,
+ hm->fib_entry_type, t1 - hm->dst_tunnels);
+ t1->outer_fib_index = outer_fib_index;
+
+ }
+ else
+ {
+ if (!t)
+ {
+ return 0;
+ }
+ t1 = pool_elt_at_index (hm->dst_tunnels, t[0]);
+ hp = hash_get_pair (hm->dst_by_ip4, &key4);
+ key4_copy = (void *) (hp->key);
+ hash_unset_mem (hm->dst_by_ip4, &key4);
+ clib_mem_free (key4_copy);
+ pool_put (hm->dst_tunnels, t1);
+ }
+ }
+ else
+ {
+ // TBD for IPv6
+ }
+
+ return 0;
+}
+
+void
+nsh_md2_ioam_refresh_output_feature_on_all_dest (void)
+{
+ nsh_md2_ioam_main_t *hm = &nsh_md2_ioam_main;
+ nsh_main_t *gm = &nsh_main;
+ nsh_md2_ioam_dest_tunnels_t *t;
+ u32 i;
+
+ if (pool_elts (hm->dst_tunnels) == 0)
+ return;
+
+ nsh_md2_ioam_clear_output_feature_on_all_intfs (gm->vlib_main);
+ i = vec_len (hm->bool_ref_by_sw_if_index);
+ vec_free (hm->bool_ref_by_sw_if_index);
+ vec_validate_init_empty (hm->bool_ref_by_sw_if_index, i, ~0);
+ pool_foreach (t, hm->dst_tunnels, (
+ {
+ nsh_md2_ioam_enable_disable_for_dest
+ (gm->vlib_main,
+ t->dst_addr,
+ t->outer_fib_index,
+ (t->fp_proto == FIB_PROTOCOL_IP4), 1
+ /* is_add */
+ );
+ }
+ ));
+ return;
+}
+
+void
+nsh_md2_ioam_clear_output_feature_on_select_intfs (void)
+{
+ nsh_md2_ioam_main_t *hm = &nsh_md2_ioam_main;
+ nsh_main_t *gm = &nsh_main;
+
+ u32 sw_if_index0 = 0;
+ for (sw_if_index0 = 0;
+ sw_if_index0 < vec_len (hm->bool_ref_by_sw_if_index); sw_if_index0++)
+ {
+ if (hm->bool_ref_by_sw_if_index[sw_if_index0] == 0xFF)
+ {
+ nsh_md2_ioam_set_clear_output_feature_on_intf
+ (gm->vlib_main, sw_if_index0, 0);
+ }
+ }
+
+ return;
+}
+
+
+
+
+clib_error_t *
+nsh_md2_ioam_enable_disable (int has_trace_option, int has_pot_option,
+ int has_ppc_option)
+{
+ nsh_md2_ioam_main_t *hm = &nsh_md2_ioam_main;
+
+ hm->has_trace_option = has_trace_option;
+ hm->has_pot_option = has_pot_option;
+ hm->has_ppc_option = has_ppc_option;
+
+ if (hm->has_trace_option)
+ {
+ nsh_md2_ioam_trace_profile_setup ();
+ }
+ else if (!hm->has_trace_option)
+ {
+ nsh_md2_ioam_trace_profile_cleanup ();
+ }
+
+ return 0;
+}
+
+
+int nsh_md2_ioam_disable_for_dest
+ (vlib_main_t * vm, ip46_address_t dst_addr, u32 outer_fib_index,
+ u8 ipv4_set)
+{
+ nsh_md2_ioam_dest_tunnels_t *t;
+ nsh_md2_ioam_main_t *hm = &nsh_md2_ioam_main;
+ nsh_main_t *gm = &nsh_main;
+
+ nsh_md2_ioam_enable_disable_for_dest (gm->vlib_main,
+ dst_addr, outer_fib_index,
+ ipv4_set, 0);
+ if (pool_elts (hm->dst_tunnels) == 0)
+ {
+ nsh_md2_ioam_clear_output_feature_on_select_intfs ();
+ return 0;
+ }
+
+ pool_foreach (t, hm->dst_tunnels, (
+ {
+ nsh_md2_ioam_enable_disable_for_dest
+ (gm->vlib_main,
+ t->dst_addr,
+ t->outer_fib_index,
+ (t->fp_proto ==
+ FIB_PROTOCOL_IP4), 1 /* is_add */ );
+ }
+ ));
+ nsh_md2_ioam_clear_output_feature_on_select_intfs ();
+ return (0);
+
+}
+
+static clib_error_t *nsh_md2_ioam_set_transit_rewrite_command_fn
+ (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ nsh_main_t *gm = &nsh_main;
+ ip46_address_t dst_addr;
+ u8 dst_addr_set = 0;
+ u8 ipv4_set = 0;
+ u8 ipv6_set = 0;
+ u8 disable = 0;
+ clib_error_t *rv = 0;
+ u32 outer_fib_index = 0;
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "dst-ip %U", unformat_ip4_address, &dst_addr.ip4))
+ {
+ dst_addr_set = 1;
+ ipv4_set = 1;
+ }
+ else
+ if (unformat
+ (input, "dst-ip %U", unformat_ip6_address, &dst_addr.ip6))
+ {
+ dst_addr_set = 1;
+ ipv6_set = 1;
+ }
+ else if (unformat (input, "outer-fib-index %d", &outer_fib_index))
+ {
+ }
+
+ else if (unformat (input, "disable"))
+ disable = 1;
+ else
+ break;
+ }
+
+ if (dst_addr_set == 0)
+ return clib_error_return (0,
+ "LISP-GPE Tunnel destination address not specified");
+ if (ipv4_set && ipv6_set)
+ return clib_error_return (0, "both IPv4 and IPv6 addresses specified");
+ if (!disable)
+ {
+ nsh_md2_ioam_enable_disable_for_dest (gm->vlib_main,
+ dst_addr, outer_fib_index,
+ ipv4_set, 1);
+ }
+ else
+ {
+ nsh_md2_ioam_disable_for_dest
+ (vm, dst_addr, outer_fib_index, ipv4_set);
+ }
+ return rv;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (nsh_md2_ioam_set_transit_rewrite_cmd, static) = {
+ .path = "set nsh-md2-ioam-transit",
+  .short_help = "set nsh-md2-ioam-transit dst-ip <dst_ip> [outer-fib-index <outer_fib_index>] [disable]",
+ .function = nsh_md2_ioam_set_transit_rewrite_command_fn,
+};
+
+/**
+ * Function definition to backwalk a FIB node
+ */
+static fib_node_back_walk_rc_t
+nsh_md2_ioam_back_walk (fib_node_t * node, fib_node_back_walk_ctx_t * ctx)
+{
+ nsh_md2_ioam_refresh_output_feature_on_all_dest ();
+ return (FIB_NODE_BACK_WALK_CONTINUE);
+}
+
+/**
+ * Function definition to get a FIB node from its index
+ */
+static fib_node_t *
+nsh_md2_ioam_fib_node_get (fib_node_index_t index)
+{
+ nsh_md2_ioam_main_t *hm = &nsh_md2_ioam_main;
+ return (&hm->node);
+}
+
+/**
+ * Function definition to inform the FIB node that its last lock has gone.
+ */
+static void
+nsh_md2_ioam_last_lock_gone (fib_node_t * node)
+{
+ ASSERT (0);
+}
+
+
+/*
+ * Virtual function table registered by MPLS GRE tunnels
+ * for participation in the FIB object graph.
+ */
+const static fib_node_vft_t nsh_md2_ioam_vft = {
+ .fnv_get = nsh_md2_ioam_fib_node_get,
+ .fnv_last_lock = nsh_md2_ioam_last_lock_gone,
+ .fnv_back_walk = nsh_md2_ioam_back_walk,
+};
+
+void
+nsh_md2_ioam_interface_init (void)
+{
+ nsh_md2_ioam_main_t *hm = &nsh_md2_ioam_main;
+ hm->fib_entry_type = fib_node_register_new_type (&nsh_md2_ioam_vft);
+ return;
+}
+
diff --git a/src/plugins/nsh/nsh-md2-ioam/nsh_md2_ioam.h b/src/plugins/nsh/nsh-md2-ioam/nsh_md2_ioam.h
new file mode 100644
index 00000000000..3d48fde6ecd
--- /dev/null
+++ b/src/plugins/nsh/nsh-md2-ioam/nsh_md2_ioam.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_nsh_md2_ioam_h__
+#define __included_nsh_md2_ioam_h__
+
+#include <nsh/nsh.h>
+#include <nsh/nsh_packet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip6_packet.h>
+
+
+typedef struct nsh_md2_ioam_sw_interface_
+{
+ u32 sw_if_index;
+} nsh_md2_ioam_sw_interface_t;
+
+typedef struct nsh_md2_ioam_dest_tunnels_
+{
+ ip46_address_t dst_addr;
+ u32 fp_proto;
+ u32 sibling_index;
+ fib_node_index_t fib_entry_index;
+ u32 outer_fib_index;
+} nsh_md2_ioam_dest_tunnels_t;
+
+typedef struct nsh_md2_ioam_main_
+{
+ /**
+ * Linkage into the FIB object graph
+ */
+ fib_node_t node;
+
+ /* time scale transform. Joy. */
+ u32 unix_time_0;
+ f64 vlib_time_0;
+
+
+ /* Trace option */
+ u8 has_trace_option;
+
+ /* Pot option */
+ u8 has_pot_option;
+
+#define PPC_NONE 0
+#define PPC_ENCAP 1
+#define PPC_DECAP 2
+ u8 has_ppc_option;
+
+#define TSP_SECONDS 0
+#define TSP_MILLISECONDS 1
+#define TSP_MICROSECONDS 2
+#define TSP_NANOSECONDS 3
+
+
+ /* API message ID base */
+ u16 msg_id_base;
+
+ /* Override to export for iOAM */
+ uword decap_v4_next_override;
+ uword decap_v6_next_override;
+
+ /* sequence of node graph for encap */
+ uword encap_v4_next_node;
+ uword encap_v6_next_node;
+
+ /* Software interfaces. */
+ nsh_md2_ioam_sw_interface_t *sw_interfaces;
+
+ /* hash ip4/ip6 -> list of destinations for doing transit iOAM operation */
+ nsh_md2_ioam_dest_tunnels_t *dst_tunnels;
+ uword *dst_by_ip4;
+ uword *dst_by_ip6;
+
+ /** per sw_if_index, to maintain bitmap */
+ u8 *bool_ref_by_sw_if_index;
+ fib_node_type_t fib_entry_type;
+
+
+} nsh_md2_ioam_main_t;
+extern nsh_md2_ioam_main_t nsh_md2_ioam_main;
+
+/*
+ * Primary h-b-h handler trace support
+ */
+typedef struct
+{
+ u32 next_index;
+ u32 trace_len;
+ u8 option_data[256];
+} ioam_trace_t;
+
+
+clib_error_t *nsh_md2_ioam_enable_disable (int has_trace_option,
+ int has_pot_option,
+ int has_ppc_option);
+
+
+
+int nsh_md2_ioam_trace_profile_setup (void);
+
+int nsh_md2_ioam_trace_profile_cleanup (void);
+extern void nsh_md2_ioam_interface_init (void);
+
+
+
+#endif
diff --git a/src/plugins/nsh/nsh-md2-ioam/nsh_md2_ioam_api.c b/src/plugins/nsh/nsh-md2-ioam/nsh_md2_ioam_api.c
new file mode 100644
index 00000000000..9ed835bd98f
--- /dev/null
+++ b/src/plugins/nsh/nsh-md2-ioam/nsh_md2_ioam_api.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ *-----------------------------------------------------------------------
+ * nsh_md2_ioam_api.c - iOAM for NSH/LISP-GPE related APIs to create
+ * and maintain profiles
+ *-----------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vlib/unix/plugin.h>
+#include <vnet/plugin/plugin.h>
+#include <nsh/nsh-md2-ioam/nsh_md2_ioam.h>
+
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+
+/* define message structures */
+#define vl_typedefs
+#include <nsh/nsh.api.h>
+#undef vl_typedefs
+
+/* define generated endian-swappers */
+#define vl_endianfun
+#include <nsh/nsh.api.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <nsh/nsh.api.h>
+#undef vl_printfun
+
+u8 *nsh_trace_main = NULL;
+/* Plugin init: resolve the ioam plugin's trace_main symbol and set up
+ * the destination lookup hashes.  Silently does nothing when the ioam
+ * plugin is not loaded (iOAM support is optional). */
+static clib_error_t *
+nsh_md2_ioam_init (vlib_main_t * vm)
+{
+  nsh_md2_ioam_main_t *sm = &nsh_md2_ioam_main;
+  clib_error_t *error = 0;
+
+  nsh_trace_main =
+    (u8 *) vlib_get_plugin_symbol ("ioam_plugin.so", "trace_main");
+
+  if (!nsh_trace_main)
+    return error;
+
+  /* The original called vec_new() here and discarded the result,
+   * leaking the allocation with no observable effect; removed. */
+
+  /* Both hashes are keyed by fib_prefix_t -> index into dst_tunnels. */
+  sm->dst_by_ip4 = hash_create_mem (0, sizeof (fib_prefix_t), sizeof (uword));
+  sm->dst_by_ip6 = hash_create_mem (0, sizeof (fib_prefix_t), sizeof (uword));
+
+  nsh_md2_ioam_interface_init ();
+
+  return error;
+}
+
+VLIB_INIT_FUNCTION (nsh_md2_ioam_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/nsh/nsh-md2-ioam/nsh_md2_ioam_trace.c b/src/plugins/nsh/nsh-md2-ioam/nsh_md2_ioam_trace.c
new file mode 100644
index 00000000000..1baa2365b15
--- /dev/null
+++ b/src/plugins/nsh/nsh-md2-ioam/nsh_md2_ioam_trace.c
@@ -0,0 +1,464 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vppinfra/error.h>
+
+#include <vppinfra/hash.h>
+#include <vppinfra/error.h>
+#include <vppinfra/elog.h>
+
+#include <ioam/lib-trace/trace_util.h>
+#include <nsh/nsh-md2-ioam/nsh_md2_ioam.h>
+#include <nsh/nsh_packet.h>
+
+/* Timestamp precision multipliers for seconds, milliseconds, microseconds
+ * and nanoseconds respectively.
+ */
+static f64 trace_tsp_mul[4] = { 1, 1e3, 1e6, 1e9 };
+
+#define NSH_MD2_IOAM_TRACE_SIZE_DUMMY 20
+
+typedef union
+{
+ u64 as_u64;
+ u32 as_u32[2];
+} time_u64_t;
+
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED(struct {
+ u16 class;
+ u8 type;
+ u8 length;
+ u8 data_list_elts_left;
+ u16 ioam_trace_type;
+ u8 reserve;
+ u32 elts[0]; /* Variable type. So keep it generic */
+}) nsh_md2_ioam_trace_option_t;
+/* *INDENT-ON* */
+
+
+#define foreach_nsh_md2_ioam_trace_stats \
+ _(SUCCESS, "Pkts updated with TRACE records") \
+ _(FAILED, "Errors in TRACE due to lack of TRACE records")
+
+static char *nsh_md2_ioam_trace_stats_strings[] = {
+#define _(sym,string) string,
+ foreach_nsh_md2_ioam_trace_stats
+#undef _
+};
+
+typedef enum
+{
+#define _(sym,str) NSH_MD2_IOAM_TRACE_##sym,
+ foreach_nsh_md2_ioam_trace_stats
+#undef _
+ NSH_MD2_IOAM_TRACE_N_STATS,
+} nsh_md2_ioam_trace_stats_t;
+
+
+typedef struct
+{
+ /* stats */
+ u64 counters[ARRAY_LEN (nsh_md2_ioam_trace_stats_strings)];
+
+ /* convenience */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+} nsh_md2_ioam_trace_main_t;
+
+nsh_md2_ioam_trace_main_t nsh_md2_ioam_trace_main;
+
+/*
+ * Find a trace profile
+ */
+
+extern u8 *nsh_trace_main;
+/* Return the single trace profile owned by the ioam plugin.
+ * nsh_trace_main is resolved from ioam_plugin.so at init time. */
+always_inline trace_profile *
+nsh_trace_profile_find (void)
+{
+  trace_main_t *tm = (trace_main_t *) nsh_trace_main;
+
+  return &tm->profile;
+}
+
+
+/* Bump one of the trace statistics counters by 'increment'. */
+always_inline void
+nsh_md2_ioam_trace_stats_increment_counter (u32 counter_index, u64 increment)
+{
+  nsh_md2_ioam_trace_main_t *tm = &nsh_md2_ioam_trace_main;
+
+  tm->counters[counter_index] += increment;
+}
+
+
+/* Format one per-hop trace record.  Consumes one 32-bit word per bit
+ * set in *trace_type_p, in the same order the data-list handler writes
+ * them.  varargs: (u32 *elt, u8 *trace_type_p). */
+static u8 *
+format_ioam_data_list_element (u8 * s, va_list * args)
+{
+  u32 *elt = va_arg (*args, u32 *);
+  u8 *trace_type_p = va_arg (*args, u8 *);
+  u8 trace_type = *trace_type_p;
+
+  if (trace_type & BIT_TTL_NODEID)
+    {
+      u32 ttl_node_id_host_byte_order = clib_net_to_host_u32 (*elt);
+      s = format (s, "ttl 0x%x node id 0x%x ",
+		  ttl_node_id_host_byte_order >> 24,
+		  ttl_node_id_host_byte_order & 0x00FFFFFF);
+      elt++;
+    }
+
+  /* Ingress and egress are packed into a single word under
+   * BIT_ING_INTERFACE (see the data-list handler).  The original
+   * condition redundantly tested BIT_ING_INTERFACE twice. */
+  if (trace_type & BIT_ING_INTERFACE)
+    {
+      u32 ingress_host_byte_order = clib_net_to_host_u32 (*elt);
+      s = format (s, "ingress 0x%x egress 0x%x ",
+		  ingress_host_byte_order >> 16,
+		  ingress_host_byte_order & 0xFFFF);
+      elt++;
+    }
+
+  if (trace_type & BIT_TIMESTAMP)
+    {
+      u32 ts_in_host_byte_order = clib_net_to_host_u32 (*elt);
+      s = format (s, "ts 0x%x \n", ts_in_host_byte_order);
+      elt++;
+    }
+
+  if (trace_type & BIT_APPDATA)
+    {
+      u32 appdata_in_host_byte_order = clib_net_to_host_u32 (*elt);
+      s = format (s, "app 0x%x ", appdata_in_host_byte_order);
+      elt++;
+    }
+
+  return s;
+}
+
+
+
+/* Build the initial (empty) trace option into rewrite_string and report
+ * its total size via rewrite_size.  Returns 0 on success, -1 when no
+ * trace profile is configured or rewrite_string is NULL. */
+int
+nsh_md2_ioam_trace_rewrite_handler (u8 * rewrite_string, u8 * rewrite_size)
+{
+  nsh_md2_ioam_trace_option_t *trace_option = NULL;
+  u8 trace_data_size = 0;
+  u8 trace_option_elts = 0;
+  trace_profile *profile = NULL;
+
+  profile = nsh_trace_profile_find ();
+
+  if (PREDICT_FALSE (!profile))
+    {
+      return (-1);
+    }
+
+  if (PREDICT_FALSE (!rewrite_string))
+    return -1;
+
+  trace_option_elts = profile->num_elts;
+  trace_data_size = fetch_trace_data_size (profile->trace_type);
+
+  /* Class 0x9 is the iOAM trace class, stored in network order. */
+  trace_option = (nsh_md2_ioam_trace_option_t *) rewrite_string;
+  trace_option->class = clib_host_to_net_u16 (0x9);
+  trace_option->type = NSH_MD2_IOAM_OPTION_TYPE_TRACE;
+  /* +4 covers data_list_elts_left, ioam_trace_type and reserve, which
+   * follow the TLV header but are counted in 'length'. */
+  trace_option->length = (trace_option_elts * trace_data_size) + 4;
+  trace_option->data_list_elts_left = trace_option_elts;
+  trace_option->ioam_trace_type =
+    clib_host_to_net_u16 (profile->trace_type & TRACE_TYPE_MASK);
+
+  *rewrite_size =
+    sizeof (nsh_md2_ioam_trace_option_t) +
+    (trace_option_elts * trace_data_size);
+
+  return 0;
+}
+
+
+/* Per-hop handler: record this node's trace data (node id/TTL,
+ * interfaces, timestamp, app data - as selected by the profile's
+ * trace_type) into the next free slot of the trace option in b.
+ * Slots are filled back-to-front via data_list_elts_left. */
+int
+nsh_md2_ioam_trace_data_list_handler (vlib_buffer_t * b,
+				      nsh_tlv_header_t * opt)
+{
+  u8 elt_index = 0;
+  nsh_md2_ioam_trace_option_t *trace =
+    (nsh_md2_ioam_trace_option_t *) ((u8 *) opt);
+  time_u64_t time_u64;
+  u32 *elt;
+  int rv = 0;
+  trace_profile *profile = NULL;
+  nsh_md2_ioam_main_t *hm = &nsh_md2_ioam_main;
+  nsh_main_t *gm = &nsh_main;
+  u16 ioam_trace_type = 0;
+
+  profile = nsh_trace_profile_find ();
+
+  if (PREDICT_FALSE (!profile))
+    {
+      return (-1);
+    }
+
+  ioam_trace_type = profile->trace_type & TRACE_TYPE_MASK;
+  time_u64.as_u64 = 0;
+
+  if (PREDICT_TRUE (trace->data_list_elts_left))
+    {
+      trace->data_list_elts_left--;
+      /* fetch_trace_data_size returns in bytes. Convert it to 4-bytes
+       * to skip to this node's location.
+       */
+      elt_index =
+	trace->data_list_elts_left *
+	fetch_trace_data_size (ioam_trace_type) / 4;
+      elt = &trace->elts[elt_index];
+      if (ioam_trace_type & BIT_TTL_NODEID)
+	{
+	  /* NOTE(review): assumes the buffer's current position is an
+	   * outer ip4 header - TTL is read from there.  Confirm for
+	   * any v6 caller. */
+	  ip4_header_t *ip0 = vlib_buffer_get_current (b);
+	  *elt = clib_host_to_net_u32 (((ip0->ttl - 1) << 24) |
+				       profile->node_id);
+	  elt++;
+	}
+
+      if (ioam_trace_type & BIT_ING_INTERFACE)
+	{
+	  /* Pack RX sw_if_index in the high 16 bits and TX sw_if_index
+	   * in the low 16 bits of a single word. */
+	  u16 tx_if = vnet_buffer (b)->sw_if_index[VLIB_TX];
+
+	  *elt =
+	    (vnet_buffer (b)->sw_if_index[VLIB_RX] & 0xFFFF) << 16 | tx_if;
+	  *elt = clib_host_to_net_u32 (*elt);
+	  elt++;
+	}
+
+      if (ioam_trace_type & BIT_TIMESTAMP)
+	{
+	  /* Send least significant 32 bits */
+	  f64 time_f64 =
+	    (f64) (((f64) hm->unix_time_0) +
+		   (vlib_time_now (gm->vlib_main) - hm->vlib_time_0));
+
+	  time_u64.as_u64 = time_f64 * trace_tsp_mul[profile->trace_tsp];
+	  *elt = clib_host_to_net_u32 (time_u64.as_u32[0]);
+	  elt++;
+	}
+
+      if (ioam_trace_type & BIT_APPDATA)
+	{
+	  /* $$$ set elt0->app_data */
+	  *elt = clib_host_to_net_u32 (profile->app_data);
+	  elt++;
+	}
+      nsh_md2_ioam_trace_stats_increment_counter
+	(NSH_MD2_IOAM_TRACE_SUCCESS, 1);
+    }
+  else
+    {
+      /* All slots already consumed by upstream hops. */
+      nsh_md2_ioam_trace_stats_increment_counter
+	(NSH_MD2_IOAM_TRACE_FAILED, 1);
+    }
+  return (rv);
+}
+
+
+
+/* Format every recorded element of a trace option for packet-trace
+ * output.  'opt' points at the TLV header of the trace option. */
+u8 *
+nsh_md2_ioam_trace_data_list_trace_handler (u8 * s, nsh_tlv_header_t * opt)
+{
+  nsh_md2_ioam_trace_option_t *trace;
+  u8 trace_data_size_in_words = 0;
+  u32 *elt;
+  int elt_index = 0;
+  u16 ioam_trace_type = 0;
+
+  trace = (nsh_md2_ioam_trace_option_t *) ((u8 *) opt);
+  ioam_trace_type = clib_net_to_host_u16 (trace->ioam_trace_type);
+  trace_data_size_in_words = fetch_trace_data_size (ioam_trace_type) / 4;
+  elt = &trace->elts[0];
+  s =
+    format (s, " Trace Type 0x%x , %d elts left\n", ioam_trace_type,
+	    trace->data_list_elts_left);
+  /* -4 skips the data_list_elts_left, ioam_trace_type and reserve
+   * fields that precede elts[] but are counted in 'length'.
+   * (The old "-2" comment was stale - the code subtracts 4.) */
+  while ((u8 *) elt < ((u8 *) (&trace->elts[0]) + trace->length - 4))
+    {
+      s = format (s, " [%d] %U\n", elt_index,
+		  format_ioam_data_list_element, elt, &ioam_trace_type);
+      elt_index++;
+      elt += trace_data_size_in_words;
+    }
+  return (s);
+}
+
+/* SWAP handler: copy the old trace option into the new header, then
+ * update it in place for this hop.
+ * NOTE(review): the copy length is read from new_opt->length, i.e. the
+ * destination is assumed to already carry a valid TLV header of the
+ * same length as old_opt - confirm against the rewrite path. */
+int
+nsh_md2_ioam_trace_swap_handler (vlib_buffer_t * b,
+				 nsh_tlv_header_t * old_opt,
+				 nsh_tlv_header_t * new_opt)
+{
+
+  clib_memcpy (new_opt, old_opt, new_opt->length + sizeof (nsh_tlv_header_t));
+  return nsh_md2_ioam_trace_data_list_handler (b, new_opt);
+}
+
+/* CLI handler: print the per-counter trace statistics. */
+static clib_error_t *
+nsh_md2_ioam_show_ioam_trace_cmd_fn (vlib_main_t * vm,
+				     unformat_input_t * input,
+				     vlib_cli_command_t * cmd)
+{
+  nsh_md2_ioam_trace_main_t *tm = &nsh_md2_ioam_trace_main;
+  u8 *out = 0;
+  int i;
+
+  for (i = 0; i < NSH_MD2_IOAM_TRACE_N_STATS; i++)
+    out = format (out, " %s - %lu\n", nsh_md2_ioam_trace_stats_strings[i],
+		  tm->counters[i]);
+
+  vlib_cli_output (vm, "%v", out);
+  vec_free (out);
+  return 0;
+}
+
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (nsh_md2_ioam_show_ioam_trace_cmd, static) = {
+ .path = "show ioam nsh-lisp-gpe trace",
+ .short_help = "iOAM trace statistics",
+ .function = nsh_md2_ioam_show_ioam_trace_cmd_fn,
+};
+/* *INDENT-ON* */
+
+
+/* POP handler: give the trace option one final per-hop update before
+ * the NSH header (and the option with it) is removed from the packet. */
+int
+nsh_md2_ioam_trace_pop_handler (vlib_buffer_t * b, nsh_tlv_header_t * opt)
+{
+  return nsh_md2_ioam_trace_data_list_handler (b, opt);
+}
+
+/* Plugin init: register the iOAM trace TLV (class 0x9) with the NSH
+ * MD2 option framework, after nsh and nsh_md2_ioam have initialized. */
+static clib_error_t *
+nsh_md2_ioam_trace_init (vlib_main_t * vm)
+{
+  nsh_md2_ioam_trace_main_t *hm = &nsh_md2_ioam_trace_main;
+  nsh_md2_ioam_main_t *gm = &nsh_md2_ioam_main;
+  clib_error_t *error;
+
+  /* Enforce init ordering on the modules we depend on. */
+  if ((error = vlib_call_init_function (vm, nsh_init)))
+    return (error);
+
+  if ((error = vlib_call_init_function (vm, nsh_md2_ioam_init)))
+    return (error);
+
+  hm->vlib_main = vm;
+  hm->vnet_main = vnet_get_main ();
+  gm->unix_time_0 = (u32) time (0);	/* Store starting time */
+  gm->vlib_time_0 = vlib_time_now (vm);
+
+  memset (hm->counters, 0, sizeof (hm->counters));
+
+  /* Class is passed in network order; 0x9 is the iOAM trace class. */
+  if (nsh_md2_register_option
+      (clib_host_to_net_u16 (0x9),
+       NSH_MD2_IOAM_OPTION_TYPE_TRACE,
+       NSH_MD2_IOAM_TRACE_SIZE_DUMMY,
+       nsh_md2_ioam_trace_rewrite_handler,
+       nsh_md2_ioam_trace_data_list_handler,
+       nsh_md2_ioam_trace_swap_handler,
+       nsh_md2_ioam_trace_pop_handler,
+       nsh_md2_ioam_trace_data_list_trace_handler) < 0)
+    return (clib_error_create
+	    ("registration of NSH_MD2_IOAM_OPTION_TYPE_TRACE failed"));
+
+  return (0);
+}
+
+VLIB_INIT_FUNCTION (nsh_md2_ioam_trace_init);
+
+/* Tear down the trace profile: zero the advertised option size so the
+ * trace TLV is no longer added to rewrites. */
+int
+nsh_md2_ioam_trace_profile_cleanup (void)
+{
+  nsh_main_t *nm = &nsh_main;
+
+  nm->options_size[NSH_MD2_IOAM_OPTION_TYPE_TRACE] = 0;
+  return 0;
+}
+
+/* Compute the full rewrite size of the trace option for the current
+ * profile: TLV header + fixed trace fields + num_elts data records.
+ * Returns 0 with *result set on success; -1 when no profile exists,
+ * VNET_API_ERROR_INVALID_VALUE when the profile cannot fit. */
+static int
+nsh_md2_ioam_trace_get_sizeof_handler (u32 * result)
+{
+  u16 size = 0;
+  u8 trace_data_size = 0;
+  trace_profile *profile = NULL;
+
+  *result = 0;
+
+  profile = nsh_trace_profile_find ();
+
+  if (PREDICT_FALSE (!profile))
+    {
+      return (-1);
+    }
+
+  trace_data_size = fetch_trace_data_size (profile->trace_type);
+  if (PREDICT_FALSE (trace_data_size == 0))
+    return VNET_API_ERROR_INVALID_VALUE;
+
+  /* The option's one-byte length field caps the payload at 254. */
+  if (PREDICT_FALSE (profile->num_elts * trace_data_size > 254))
+    return VNET_API_ERROR_INVALID_VALUE;
+
+  size +=
+    sizeof (nsh_md2_ioam_trace_option_t) +
+    profile->num_elts * trace_data_size;
+  *result = size;
+
+  return 0;
+}
+
+
+/* Activate the trace profile: compute the option's rewrite size and
+ * publish it so the NSH encap path starts adding the trace TLV.
+ * Returns 0 on success, -1 on any failure. */
+int
+nsh_md2_ioam_trace_profile_setup (void)
+{
+  nsh_main_t *nm = &nsh_main;
+  trace_profile *profile = nsh_trace_profile_find ();
+  u32 trace_size = 0;
+
+  if (PREDICT_FALSE (!profile))
+    return -1;
+
+  if (nsh_md2_ioam_trace_get_sizeof_handler (&trace_size) < 0)
+    return -1;
+
+  nm->options_size[NSH_MD2_IOAM_OPTION_TYPE_TRACE] = trace_size;
+  return 0;
+}
+
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/nsh/nsh-md2-ioam/nsh_md2_ioam_util.h b/src/plugins/nsh/nsh-md2-ioam/nsh_md2_ioam_util.h
new file mode 100644
index 00000000000..8fbb3b7aaf7
--- /dev/null
+++ b/src/plugins/nsh/nsh-md2-ioam/nsh_md2_ioam_util.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_nsh_md2_ioam_util_h__
+#define __included_nsh_md2_ioam_util_h__
+
+#include <vnet/lisp-gpe/lisp_gpe.h>
+#include <vnet/lisp-gpe/lisp_gpe_packet.h>
+#include <vnet/ip/ip.h>
+#include <nsh/nsh.h>
+#include <nsh/nsh-md2-ioam/nsh_md2_ioam.h>
+#include <nsh/nsh_packet.h>
+
+
+extern nsh_option_map_t *nsh_md2_lookup_option (u16 class, u8 type);
+
+
+typedef struct
+{
+ u8 trace_data[256];
+} nsh_transit_trace_t;
+
+/* Walk the MD2 TLV list of the NSH header inside an ip4/udp/lisp-gpe
+ * packet, invoking the registered handler for each known option.
+ * Sets *next0 to drop_node_val on any validation or handler failure. */
+always_inline void
+nsh_md2_ioam_encap_decap_ioam_v4_one_inline (vlib_main_t * vm,
+					     vlib_node_runtime_t * node,
+					     vlib_buffer_t * b0,
+					     u32 * next0, u32 drop_node_val,
+					     u8 use_adj)
+{
+  ip4_header_t *ip0;
+  udp_header_t *udp_hdr0;
+  lisp_gpe_header_t *lisp_gpe_hdr0;
+  nsh_base_header_t *nsh_hdr;
+  nsh_tlv_header_t *opt0;
+  nsh_tlv_header_t *limit0;
+  nsh_main_t *hm = &nsh_main;
+  nsh_option_map_t *nsh_option;
+
+  /* Locate the NSH header behind the outer encapsulation. */
+  ip0 = vlib_buffer_get_current (b0);
+  udp_hdr0 = (udp_header_t *) (ip0 + 1);
+  lisp_gpe_hdr0 = (lisp_gpe_header_t *) (udp_hdr0 + 1);
+  nsh_hdr = (nsh_base_header_t *) (lisp_gpe_hdr0 + 1);
+  opt0 = (nsh_tlv_header_t *) (nsh_hdr + 1);
+  limit0 =
+    (nsh_tlv_header_t *) ((u8 *) opt0 + (nsh_hdr->length * 4) -
+			  sizeof (nsh_base_header_t));
+
+  /*
+   * Basic validity checks
+   */
+  if ((nsh_hdr->length * 4) > clib_net_to_host_u16 (ip0->length))
+    {
+      *next0 = drop_node_val;
+      return;
+    }
+
+  /* Only MD type 2 carries TLV options. */
+  if (nsh_hdr->md_type != 2)
+    {
+      *next0 = drop_node_val;
+      return;
+    }
+
+  /* Scan the set of h-b-h options, process ones that we understand */
+  while (opt0 < limit0)
+    {
+      u8 type0;
+      type0 = opt0->type;
+      switch (type0)
+	{
+	case 0:		/* Pad1 */
+	  /* Pad1 is a single byte: advance exactly one byte.  The
+	   * original cast bound as ((nsh_tlv_header_t *) p) + 1, which
+	   * skipped sizeof (nsh_tlv_header_t) bytes instead of 1 and
+	   * could jump past following options. */
+	  opt0 = (nsh_tlv_header_t *) (((u8 *) opt0) + 1);
+	  continue;
+	case 1:		/* PadN */
+	  break;
+	default:
+	  nsh_option = nsh_md2_lookup_option (opt0->class, opt0->type);
+	  if ((nsh_option != NULL) && (hm->options[nsh_option->option_id]))
+	    {
+	      if ((*hm->options[nsh_option->option_id]) (b0, opt0) < 0)
+		{
+		  *next0 = drop_node_val;
+		  return;
+		}
+	    }
+	  break;
+	}
+      opt0 =
+	(nsh_tlv_header_t *) (((u8 *) opt0) + opt0->length +
+			      sizeof (nsh_tlv_header_t));
+    }
+
+  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+    {
+      nsh_transit_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
+      clib_memcpy (&(tr->trace_data), nsh_hdr, (nsh_hdr->length * 4));
+    }
+}
+
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/nsh/nsh.api b/src/plugins/nsh/nsh.api
new file mode 100644
index 00000000000..6beed00d4e1
--- /dev/null
+++ b/src/plugins/nsh/nsh.api
@@ -0,0 +1,123 @@
+
+
+/** \brief Set or delete an NSH header entry keyed by NSP/NSI
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add address if non-zero, else delete
+ @param nsp_nsi - Key for nsh_header_t entry to map to. : 24bit NSP 8bit NSI
+ @param md_type - metadata type [1|2] - only MDType1 supported today
+ @param ver_o_c - version, O-bit and C-bit (see nsh_packet.h)
+ @param ttl - indicates the maximum SFF hops for an SFP
+ @param length - header length in n x 32bits, should be 6 for MDtype1
+ @param next_protocol - next protocol encapsulated behind NSH header: 1=Ethernet, 2=IP4, 3=IP6
+ @param c1 - 32bit Metadata type1 field (context1)
+ @param c2 - 32bit Metadata type1 field (context2)
+ @param c3 - 32bit Metadata type1 field (context3)
+ @param c4 - 32bit Metadata type1 field (context4)
+ @param tlvs - Metadata Type 2 only, Type Length Value metadata.
+*/
+define nsh_add_del_entry {
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u32 nsp_nsi;
+ u8 md_type;
+ u8 ver_o_c;
+ u8 ttl;
+ u8 length;
+ u8 next_protocol;
+ u32 c1;
+ u32 c2;
+ u32 c3;
+ u32 c4;
+ u8 tlv_length;
+ u8 tlv[248];
+};
+
+/** \brief Reply from adding NSH entry (nsh_add_del_entry)
+ @param context - sender context, to match reply w/ request
+ @param retval - 0 means all ok
+*/
+define nsh_add_del_entry_reply {
+ u32 context;
+ i32 retval;
+ u32 entry_index;
+};
+
+
+define nsh_entry_dump {
+ u32 client_index;
+ u32 context;
+ u32 entry_index;
+};
+
+define nsh_entry_details {
+ u32 context;
+ u32 entry_index;
+ u32 nsp_nsi;
+ u8 md_type;
+ u8 ver_o_c;
+ u8 ttl;
+ u8 length;
+ u8 next_protocol;
+ u32 c1;
+ u32 c2;
+ u32 c3;
+ u32 c4;
+ u8 tlv_length;
+ u8 tlv[248];
+};
+
+/** \brief Set or delete a mapping from one NSH header to another and its egress (decap to inner packet, encap NSH with outer header)
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add address if non-zero, else delete
+ @param nsh_nsi - Key for nsh_header_t entry to map to. : 24bit NSP 8bit NSI
+ @param mapped_nsp_nsi - Key for nsh_header_t entry to map to. : 24bit NSP 8bit NSI this may be ~0
+ if next action is to decap to NSH next protocol
+ Note the following heuristic:
+ - if nsp_nsi == mapped_nsp_nsi then use-case is like SFC SFF
+ - if nsp_nsi != mapped_nsp_nsi then use-case is like SFC SF
+ Note: these are heuristics. Rules about NSI decrement are out of scope
+ @param sw_if_index - index number of outer encap for NSH egress
+ @param next_node - explicitly which node to send to
+ Note the above parameters are instantiated by "encap-gre-intf <x>" means sw_if_index x, next_node gre-input
+*/
+define nsh_add_del_map {
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u32 nsp_nsi;
+ u32 mapped_nsp_nsi;
+ u32 nsh_action;
+ u32 sw_if_index;
+ u32 rx_sw_if_index;
+ u32 next_node;
+};
+
+/** \brief Reply from adding NSH map (nsh_add_del_map)
+ @param context - sender context, to match reply w/ request
+ @param retval - 0 means all ok
+*/
+define nsh_add_del_map_reply {
+ u32 context;
+ i32 retval;
+ u32 map_index;
+};
+
+define nsh_map_dump {
+ u32 client_index;
+ u32 context;
+ u32 map_index;
+};
+
+define nsh_map_details {
+ u32 context;
+ u32 map_index;
+ u32 nsp_nsi;
+ u32 mapped_nsp_nsi;
+ u32 nsh_action;
+ u32 sw_if_index;
+ u32 rx_sw_if_index;
+ u32 next_node;
+};
diff --git a/src/plugins/nsh/nsh.c b/src/plugins/nsh/nsh.c
new file mode 100644
index 00000000000..20e4603b449
--- /dev/null
+++ b/src/plugins/nsh/nsh.c
@@ -0,0 +1,2545 @@
+/*
+ * nsh.c - nsh mapping
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/plugin/plugin.h>
+#include <nsh/nsh.h>
+#include <vnet/gre/gre.h>
+#include <vnet/vxlan/vxlan.h>
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <vnet/l2/l2_classify.h>
+#include <vnet/adj/adj.h>
+
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+#include <vpp/app/version.h>
+
+/* define message IDs */
+#define vl_msg_id(n,h) n,
+typedef enum
+{
+#include <nsh/nsh.api.h>
+ /* We'll want to know how many messages IDs we need... */
+ VL_MSG_FIRST_AVAILABLE,
+} vl_msg_id_t;
+#undef vl_msg_id
+
+/* define message structures */
+#define vl_typedefs
+#include <nsh/nsh.api.h>
+#undef vl_typedefs
+
+/* define generated endian-swappers */
+#define vl_endianfun
+#include <nsh/nsh.api.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <nsh/nsh.api.h>
+#undef vl_printfun
+
+/* Get the API version number */
+#define vl_api_version(n,v) static u32 api_version=(v);
+#include <nsh/nsh.api.h>
+#undef vl_api_version
+
+#define vl_msg_name_crc_list
+#include <nsh/nsh.api.h>
+#undef vl_msg_name_crc_list
+
+/*
+ * A handy macro to set up a message reply.
+ * Assumes that the following variables are available:
+ * mp - pointer to request message
+ * rmp - pointer to reply message type
+ * rv - return value
+ */
+
+#define REPLY_MACRO(t) \
+ do { \
+ unix_shared_memory_queue_t * q = \
+ vl_api_client_index_to_input_queue (mp->client_index); \
+ if (!q) \
+ return; \
+ \
+ rmp = vl_msg_api_alloc (sizeof (*rmp)); \
+ rmp->_vl_msg_id = ntohs((t)+nm->msg_id_base); \
+ rmp->context = mp->context; \
+ rmp->retval = ntohl(rv); \
+ \
+ vl_msg_api_send_shmem (q, (u8 *)&rmp); \
+ } while(0);
+
+#define REPLY_MACRO2(t, body) \
+ do { \
+ unix_shared_memory_queue_t * q; \
+ rv = vl_msg_api_pd_handler (mp, rv); \
+ q = vl_api_client_index_to_input_queue (mp->client_index); \
+ if (!q) \
+ return; \
+ \
+ rmp = vl_msg_api_alloc (sizeof (*rmp)); \
+ rmp->_vl_msg_id = ntohs((t)+nm->msg_id_base); \
+ rmp->context = mp->context; \
+ rmp->retval = ntohl(rv); \
+ do {body;} while (0); \
+ vl_msg_api_send_shmem (q, (u8 *)&rmp); \
+ } while(0);
+
+#define FINISH \
+ vec_add1 (s, 0); \
+ vl_print (handle, (char *)s); \
+ vec_free (s); \
+ return handle;
+
+/* List of message types that this plugin understands */
+
+#define foreach_nsh_plugin_api_msg \
+ _(NSH_ADD_DEL_ENTRY, nsh_add_del_entry) \
+ _(NSH_ENTRY_DUMP, nsh_entry_dump) \
+ _(NSH_ADD_DEL_MAP, nsh_add_del_map) \
+ _(NSH_MAP_DUMP, nsh_map_dump)
+
+ /* Uses network order's class and type to register */
+/* Register a set of MD2 option handlers, keyed by (class, type) in
+ * network order.  Returns 0 on success, -1 if the pair is already
+ * registered or option_size exceeds the supported maximum. */
+int
+nsh_md2_register_option (u16 class,
+			 u8 type,
+			 u8 option_size,
+			 int add_options (u8 * opt,
+					  u8 * opt_size),
+			 int options (vlib_buffer_t * b,
+				      nsh_tlv_header_t * opt),
+			 int swap_options (vlib_buffer_t * b,
+					   nsh_tlv_header_t * old_opt,
+					   nsh_tlv_header_t * new_opt),
+			 int pop_options (vlib_buffer_t * b,
+					  nsh_tlv_header_t * opt),
+			 u8 * trace (u8 * s, nsh_tlv_header_t * opt))
+{
+  nsh_main_t *nm = &nsh_main;
+  nsh_option_map_by_key_t key, *key_copy;
+  uword *p;
+  nsh_option_map_t *nsh_option;
+
+  /* Validate the size before touching any state.  The original checked
+   * after the pool/hash insertions, leaking a half-registered entry
+   * (and blocking future registration of the pair) on failure. */
+  if (option_size > (MAX_NSH_OPTION_LEN + sizeof (nsh_tlv_header_t)))
+    {
+      return (-1);
+    }
+
+  key.class = class;
+  key.type = type;
+  key.pad = 0;
+
+  p = hash_get_mem (nm->nsh_option_map_by_key, &key);
+  /* Already registered */
+  if (p != 0)
+    {
+      return (-1);
+    }
+
+  pool_get_aligned (nm->nsh_option_mappings, nsh_option,
+		    CLIB_CACHE_LINE_BYTES);
+  memset (nsh_option, 0, sizeof (*nsh_option));
+  nsh_option->option_id = nsh_option - nm->nsh_option_mappings;
+
+  /* The hash stores a pointer to the key, so it must be heap-owned. */
+  key_copy = clib_mem_alloc (sizeof (*key_copy));
+  clib_memcpy (key_copy, &key, sizeof (*key_copy));
+  hash_set_mem (nm->nsh_option_map_by_key, key_copy,
+		nsh_option - nm->nsh_option_mappings);
+
+  nm->options_size[nsh_option->option_id] = option_size;
+  nm->add_options[nsh_option->option_id] = add_options;
+  nm->options[nsh_option->option_id] = options;
+  nm->swap_options[nsh_option->option_id] = swap_options;
+  nm->pop_options[nsh_option->option_id] = pop_options;
+  nm->trace[nsh_option->option_id] = trace;
+
+  return (0);
+}
+
+/* Uses network order's class and type to lookup */
+/* Look up a registered MD2 option by (class, type), both in network
+ * order.  Returns NULL when no handler set is registered. */
+nsh_option_map_t *
+nsh_md2_lookup_option (u16 class, u8 type)
+{
+  nsh_main_t *nm = &nsh_main;
+  uword *result;
+  nsh_option_map_by_key_t key = {
+    .class = class,
+    .type = type,
+    .pad = 0,
+  };
+
+  result = hash_get_mem (nm->nsh_option_map_by_key, &key);
+  if (result == 0)
+    return NULL;
+
+  return pool_elt_at_index (nm->nsh_option_mappings, result[0]);
+}
+
+/* Uses network order's class and type to unregister */
+/* Unregister the MD2 option handlers for (class, type), both in
+ * network order.  Returns 0 on success, -1 if not registered. */
+int
+nsh_md2_unregister_option (u16 class,
+			   u8 type,
+			   int options (vlib_buffer_t * b,
+					nsh_tlv_header_t * opt),
+			   u8 * trace (u8 * s, nsh_tlv_header_t * opt))
+{
+  nsh_main_t *nm = &nsh_main;
+  nsh_option_map_by_key_t key, *key_copy;
+  uword *p;
+  hash_pair_t *hp;
+  nsh_option_map_t *nsh_option;
+
+  key.class = class;
+  key.type = type;
+  key.pad = 0;
+
+  p = hash_get_mem (nm->nsh_option_map_by_key, &key);
+  /* not registered */
+  if (p == 0)
+    {
+      return (-1);
+    }
+
+  nsh_option = pool_elt_at_index (nm->nsh_option_mappings, p[0]);
+  nm->options[nsh_option->option_id] = NULL;
+  nm->add_options[nsh_option->option_id] = NULL;
+  /* Also clear swap_options - the original left it dangling after
+   * pool_put, risking a call through a stale pointer on re-use. */
+  nm->swap_options[nsh_option->option_id] = NULL;
+  nm->pop_options[nsh_option->option_id] = NULL;
+  nm->trace[nsh_option->option_id] = NULL;
+
+  /* Free the heap-owned key and remove the hash entry.  Unset by key
+   * value: the original passed '&key_copy' (the address of the local
+   * pointer), which hashed the pointer's bytes and never removed the
+   * entry, leaving it pointing at a freed pool element. */
+  hp = hash_get_pair (nm->nsh_option_map_by_key, &key);
+  key_copy = (void *) (hp->key);
+  hash_unset_mem (nm->nsh_option_map_by_key, &key);
+  clib_mem_free (key_copy);
+
+  pool_put (nm->nsh_option_mappings, nsh_option);
+
+  return (0);
+}
+
+/* format from network order */
+/* Format an NSH header (network byte order) for display: base header
+ * fields, then MD1 context words or MD2 TLVs via registered tracers.
+ * varargs: (u8 *header). */
+u8 *
+format_nsh_header (u8 * s, va_list * args)
+{
+  nsh_main_t *nm = &nsh_main;
+  nsh_md2_data_t *opt0;
+  nsh_md2_data_t *limit0;
+  nsh_option_map_t *nsh_option;
+  u8 option_len = 0;
+
+  u8 *header = va_arg (*args, u8 *);
+  nsh_base_header_t *nsh_base = (nsh_base_header_t *) header;
+  nsh_md1_data_t *nsh_md1 = (nsh_md1_data_t *) (nsh_base + 1);
+  nsh_md2_data_t *nsh_md2 = (nsh_md2_data_t *) (nsh_base + 1);
+  opt0 = (nsh_md2_data_t *) nsh_md2;
+  /* 'length' is in 4-byte words and includes the base header. */
+  limit0 = (nsh_md2_data_t *) ((u8 *) nsh_md2 +
+			       ((nsh_base->length & NSH_LEN_MASK) * 4
+				- sizeof (nsh_base_header_t)));
+
+  s = format (s, "nsh ver %d ", (nsh_base->ver_o_c >> 6));
+  if (nsh_base->ver_o_c & NSH_O_BIT)
+    s = format (s, "O-set ");
+
+  if (nsh_base->ver_o_c & NSH_C_BIT)
+    s = format (s, "C-set ");
+
+  /* TTL is split: high 4 bits in ver_o_c, low 2 bits in the length byte. */
+  s = format (s, "ttl %d ", (nsh_base->ver_o_c & NSH_TTL_H4_MASK) << 2 |
+	      (nsh_base->length & NSH_TTL_L2_MASK) >> 6);
+
+  s = format (s, "len %d (%d bytes) md_type %d next_protocol %d\n",
+	      (nsh_base->length & NSH_LEN_MASK),
+	      (nsh_base->length & NSH_LEN_MASK) * 4,
+	      nsh_base->md_type, nsh_base->next_protocol);
+
+  s = format (s, " service path %d service index %d\n",
+	      (clib_net_to_host_u32 (nsh_base->nsp_nsi) >> NSH_NSP_SHIFT) &
+	      NSH_NSP_MASK,
+	      clib_net_to_host_u32 (nsh_base->nsp_nsi) & NSH_NSI_MASK);
+
+  if (nsh_base->md_type == 1)
+    {
+      /* MD type 1: four fixed 32-bit context words. */
+      s = format (s, " c1 %d c2 %d c3 %d c4 %d\n",
+		  clib_net_to_host_u32 (nsh_md1->c1),
+		  clib_net_to_host_u32 (nsh_md1->c2),
+		  clib_net_to_host_u32 (nsh_md1->c3),
+		  clib_net_to_host_u32 (nsh_md1->c4));
+    }
+  else if (nsh_base->md_type == 2)
+    {
+      s = format (s, " Supported TLVs: \n");
+
+      /* Scan the set of variable metadata, network order */
+      while (opt0 < limit0)
+	{
+	  nsh_option = nsh_md2_lookup_option (opt0->class, opt0->type);
+	  if (nsh_option != NULL)
+	    {
+	      if (nm->trace[nsh_option->option_id] != NULL)
+		{
+		  s = (*nm->trace[nsh_option->option_id]) (s, opt0);
+		}
+	      else
+		{
+		  s =
+		    format (s, "\n untraced option %d length %d",
+			    opt0->type, opt0->length);
+		}
+	    }
+	  else
+	    {
+	      s =
+		format (s, "\n unrecognized option %d length %d",
+			opt0->type, opt0->length);
+	    }
+
+	  /* round to 4-byte */
+	  option_len = ((opt0->length + 3) >> 2) << 2;
+	  opt0 =
+	    (nsh_md2_data_t *) (((u8 *) opt0) + sizeof (nsh_md2_data_t) +
+				option_len);
+	}
+    }
+
+  return s;
+}
+
+static u8 *
+format_nsh_action (u8 * s, va_list * args)
+{
+ u32 nsh_action = va_arg (*args, u32);
+
+ switch (nsh_action)
+ {
+ case NSH_ACTION_SWAP:
+ return format (s, "swap");
+ case NSH_ACTION_PUSH:
+ return format (s, "push");
+ case NSH_ACTION_POP:
+ return format (s, "pop");
+ default:
+ return format (s, "unknown %d", nsh_action);
+ }
+ return s;
+}
+
+u8 *
+format_nsh_map (u8 * s, va_list * args)
+{
+ nsh_map_t *map = va_arg (*args, nsh_map_t *);
+
+ s = format (s, "nsh entry nsp: %d nsi: %d ",
+ (map->nsp_nsi >> NSH_NSP_SHIFT) & NSH_NSP_MASK,
+ map->nsp_nsi & NSH_NSI_MASK);
+ s = format (s, "maps to nsp: %d nsi: %d ",
+ (map->mapped_nsp_nsi >> NSH_NSP_SHIFT) & NSH_NSP_MASK,
+ map->mapped_nsp_nsi & NSH_NSI_MASK);
+
+ s = format (s, " nsh_action %U\n", format_nsh_action, map->nsh_action);
+
+ switch (map->next_node)
+ {
+ case NSH_NODE_NEXT_ENCAP_GRE4:
+ {
+ s = format (s, "encapped by GRE4 intf: %d", map->sw_if_index);
+ break;
+ }
+ case NSH_NODE_NEXT_ENCAP_GRE6:
+ {
+ s = format (s, "encapped by GRE6 intf: %d", map->sw_if_index);
+ break;
+ }
+ case NSH_NODE_NEXT_ENCAP_VXLANGPE:
+ {
+ s = format (s, "encapped by VXLAN GPE intf: %d", map->sw_if_index);
+ break;
+ }
+ case NSH_NODE_NEXT_ENCAP_VXLAN4:
+ {
+ s = format (s, "encapped by VXLAN4 intf: %d", map->sw_if_index);
+ break;
+ }
+ case NSH_NODE_NEXT_ENCAP_VXLAN6:
+ {
+ s = format (s, "encapped by VXLAN6 intf: %d", map->sw_if_index);
+ break;
+ }
+ case NSH_NODE_NEXT_DECAP_ETH_INPUT:
+ {
+ s = format (s, "encap-none");
+ break;
+ }
+ case NSH_NODE_NEXT_ENCAP_LISP_GPE:
+ {
+ s = format (s, "encapped by LISP GPE intf: %d", map->sw_if_index);
+ break;
+ }
+ case NSH_NODE_NEXT_ENCAP_ETHERNET:
+ {
+ s = format (s, "encapped by Ethernet intf: %d", map->sw_if_index);
+ break;
+ }
+ default:
+ s = format (s, "only GRE and VXLANGPE support in this rev");
+ }
+
+ return s;
+}
+
+u8 *
+format_nsh_node_map_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ nsh_input_trace_t *t = va_arg (*args, nsh_input_trace_t *);
+
+ s = format (s, "\n %U", format_nsh_header, &(t->trace_data));
+
+ return s;
+}
+
+/**
+ * @brief Naming for NSH tunnel
+ *
+ * @param *s formatting string
+ * @param *args
+ *
+ * @return *s formatted string
+ *
+ */
+static u8 *
+format_nsh_name (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ return format (s, "nsh_tunnel%d", dev_instance);
+}
+
+/**
+ * @brief CLI function for NSH admin up/down
+ *
+ * @param *vnm
+ * @param nsh_hw_if
+ * @param flag
+ *
+ * @return *rc
+ *
+ */
+static clib_error_t *
+nsh_interface_admin_up_down (vnet_main_t * vnm, u32 nsh_hw_if, u32 flags)
+{
+ if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
+ vnet_hw_interface_set_flags (vnm, nsh_hw_if,
+ VNET_HW_INTERFACE_FLAG_LINK_UP);
+ else
+ vnet_hw_interface_set_flags (vnm, nsh_hw_if, 0);
+
+ return 0;
+}
+
+static uword
+dummy_interface_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ clib_warning ("you shouldn't be here, leaking buffers...");
+ return frame->n_vectors;
+}
+
/* Device class for NSH tunnel interfaces.  TX should never run
 * (dummy_interface_tx only warns); real output goes through the
 * interface's output_node_index set in nsh_add_del_map(). */
VNET_DEVICE_CLASS (nsh_device_class, static) =
{
.name = "NSH",.format_device_name = format_nsh_name,.tx_function =
  dummy_interface_tx,.admin_up_down_function =
  nsh_interface_admin_up_down,};
+
+/**
+ * @brief Formatting function for tracing VXLAN GPE with length
+ *
+ * @param *s
+ * @param *args
+ *
+ * @return *s
+ *
+ */
+static u8 *
+format_nsh_tunnel_with_length (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ s = format (s, "unimplemented dev %u", dev_instance);
+ return s;
+}
+
/* Hardware interface class for NSH tunnels: point-to-point links using
 * the default rewrite builder and a placeholder header formatter. */
VNET_HW_INTERFACE_CLASS (nsh_hw_class) =
{
.name = "NSH",.format_header =
  format_nsh_tunnel_with_length,.build_rewrite =
  default_build_rewrite,.flags = VNET_HW_INTERFACE_CLASS_FLAG_P2P,};
+
+
+/**
+ * Action function to add or del an nsh map.
+ * Shared by both CLI and binary API
+ **/
+
+int
+nsh_add_del_map (nsh_add_del_map_args_t * a, u32 * map_indexp)
+{
+ nsh_main_t *nm = &nsh_main;
+ vnet_main_t *vnm = nm->vnet_main;
+ nsh_map_t *map = 0;
+ u32 key, *key_copy;
+ uword *entry;
+ hash_pair_t *hp;
+ u32 map_index = ~0;
+ vnet_hw_interface_t *hi;
+ u32 nsh_hw_if = ~0;
+ u32 nsh_sw_if = ~0;
+
+ /* net order, so data plane could use nsh header to lookup directly */
+ key = clib_host_to_net_u32 (a->map.nsp_nsi);
+
+ entry = hash_get_mem (nm->nsh_mapping_by_key, &key);
+
+ if (a->is_add)
+ {
+ /* adding an entry, must not already exist */
+ if (entry)
+ return -1; //TODO API_ERROR_INVALID_VALUE;
+
+ pool_get_aligned (nm->nsh_mappings, map, CLIB_CACHE_LINE_BYTES);
+ memset (map, 0, sizeof (*map));
+
+ /* copy from arg structure */
+ map->nsp_nsi = a->map.nsp_nsi;
+ map->mapped_nsp_nsi = a->map.mapped_nsp_nsi;
+ map->nsh_action = a->map.nsh_action;
+ map->sw_if_index = a->map.sw_if_index;
+ map->rx_sw_if_index = a->map.rx_sw_if_index;
+ map->next_node = a->map.next_node;
+ map->adj_index = a->map.adj_index;
+
+
+ key_copy = clib_mem_alloc (sizeof (*key_copy));
+ clib_memcpy (key_copy, &key, sizeof (*key_copy));
+
+ hash_set_mem (nm->nsh_mapping_by_key, key_copy, map - nm->nsh_mappings);
+ map_index = map - nm->nsh_mappings;
+
+ if (vec_len (nm->free_nsh_tunnel_hw_if_indices) > 0)
+ {
+ nsh_hw_if = nm->free_nsh_tunnel_hw_if_indices
+ [vec_len (nm->free_nsh_tunnel_hw_if_indices) - 1];
+ _vec_len (nm->free_nsh_tunnel_hw_if_indices) -= 1;
+
+ hi = vnet_get_hw_interface (vnm, nsh_hw_if);
+ hi->dev_instance = map_index;
+ hi->hw_instance = hi->dev_instance;
+ }
+ else
+ {
+ nsh_hw_if = vnet_register_interface
+ (vnm, nsh_device_class.index, map_index, nsh_hw_class.index,
+ map_index);
+ hi = vnet_get_hw_interface (vnm, nsh_hw_if);
+ hi->output_node_index = nsh_aware_vnf_proxy_node.index;
+ }
+
+ map->nsh_hw_if = nsh_hw_if;
+ map->nsh_sw_if = nsh_sw_if = hi->sw_if_index;
+ vec_validate_init_empty (nm->tunnel_index_by_sw_if_index, nsh_sw_if,
+ ~0);
+ nm->tunnel_index_by_sw_if_index[nsh_sw_if] = key;
+
+ vnet_sw_interface_set_flags (vnm, hi->sw_if_index,
+ VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+ }
+ else
+ {
+ if (!entry)
+ return -2; //TODO API_ERROR_NO_SUCH_ENTRY;
+
+ map = pool_elt_at_index (nm->nsh_mappings, entry[0]);
+
+ vnet_sw_interface_set_flags (vnm, map->nsh_sw_if,
+ VNET_SW_INTERFACE_FLAG_ADMIN_DOWN);
+ vec_add1 (nm->free_nsh_tunnel_hw_if_indices, map->nsh_sw_if);
+ nm->tunnel_index_by_sw_if_index[map->nsh_sw_if] = ~0;
+
+ hp = hash_get_pair (nm->nsh_mapping_by_key, &key);
+ key_copy = (void *) (hp->key);
+ hash_unset_mem (nm->nsh_mapping_by_key, &key);
+ clib_mem_free (key_copy);
+
+ pool_put (nm->nsh_mappings, map);
+ }
+
+ if (map_indexp)
+ *map_indexp = map_index;
+
+ return 0;
+}
+
+/**
+ * Action function to add or del an nsh-proxy-session.
+ * Shared by both CLI and binary API
+ **/
+
+int
+nsh_add_del_proxy_session (nsh_add_del_map_args_t * a)
+{
+ nsh_main_t *nm = &nsh_main;
+ nsh_proxy_session_t *proxy = 0;
+ nsh_proxy_session_by_key_t key, *key_copy;
+ uword *entry;
+ hash_pair_t *hp;
+ u32 nsp = 0, nsi = 0;
+
+ memset (&key, 0, sizeof (key));
+ key.transport_type = a->map.next_node;
+ key.transport_index = a->map.sw_if_index;
+
+ entry = hash_get_mem (nm->nsh_proxy_session_by_key, &key);
+
+ if (a->is_add)
+ {
+ /* adding an entry, must not already exist */
+ if (entry)
+ return -1; //TODO API_ERROR_INVALID_VALUE;
+
+ pool_get_aligned (nm->nsh_proxy_sessions, proxy, CLIB_CACHE_LINE_BYTES);
+ memset (proxy, 0, sizeof (*proxy));
+
+ /* Nsi needs to minus 1 within NSH-Proxy */
+ nsp = (a->map.nsp_nsi >> NSH_NSP_SHIFT) & NSH_NSP_MASK;
+ nsi = a->map.nsp_nsi & NSH_NSI_MASK;
+ if (nsi == 0)
+ return -1;
+
+ nsi = nsi - 1;
+ /* net order, so could use it to lookup nsh map table directly */
+ proxy->nsp_nsi = clib_host_to_net_u32 ((nsp << NSH_NSP_SHIFT) | nsi);
+
+ key_copy = clib_mem_alloc (sizeof (*key_copy));
+ clib_memcpy (key_copy, &key, sizeof (*key_copy));
+
+ hash_set_mem (nm->nsh_proxy_session_by_key, key_copy,
+ proxy - nm->nsh_proxy_sessions);
+ }
+ else
+ {
+ if (!entry)
+ return -2; //TODO API_ERROR_NO_SUCH_ENTRY;
+
+ proxy = pool_elt_at_index (nm->nsh_proxy_sessions, entry[0]);
+ hp = hash_get_pair (nm->nsh_proxy_session_by_key, &key);
+ key_copy = (void *) (hp->key);
+ hash_unset_mem (nm->nsh_proxy_session_by_key, &key);
+ clib_mem_free (key_copy);
+
+ pool_put (nm->nsh_proxy_sessions, proxy);
+ }
+
+ return 0;
+}
+
+/**
+ * CLI command for NSH map
+ */
+
+static uword
+unformat_nsh_action (unformat_input_t * input, va_list * args)
+{
+ u32 *result = va_arg (*args, u32 *);
+ u32 tmp;
+
+ if (unformat (input, "swap"))
+ *result = NSH_ACTION_SWAP;
+ else if (unformat (input, "push"))
+ *result = NSH_ACTION_PUSH;
+ else if (unformat (input, "pop"))
+ *result = NSH_ACTION_POP;
+ else if (unformat (input, "%d", &tmp))
+ *result = tmp;
+ else
+ return 0;
+
+ return 1;
+}
+
/* Linear scan of the global adjacency pool; returns the first adjacency
 * whose sw_if_index matches, or ~0 when none is found.
 * NOTE(review): O(pool size) per call — acceptable here since it only
 * runs from the CLI/config path, not the data plane. */
static adj_index_t
nsh_get_adj_by_sw_if_index (u32 sw_if_index)
{
  adj_index_t ai = ~0;

  /* *INDENT-OFF* */
  pool_foreach_index(ai, adj_pool,
  ({
    if (sw_if_index == adj_get_sw_if_index(ai))
      {
	return ai;   /* early return from inside the foreach macro body */
      }
  }));
  /* *INDENT-ON* */

  return ~0;
}
+
+static clib_error_t *
+nsh_add_del_map_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 is_add = 1;
+ u32 nsp, nsi, mapped_nsp, mapped_nsi, nsh_action;
+ int nsp_set = 0, nsi_set = 0, mapped_nsp_set = 0, mapped_nsi_set = 0;
+ int nsh_action_set = 0;
+ u32 next_node = ~0;
+ u32 adj_index = ~0;
+ u32 sw_if_index = ~0; // temporary requirement to get this moved over to NSHSFC
+ u32 rx_sw_if_index = ~0; // temporary requirement to get this moved over to NSHSFC
+ nsh_add_del_map_args_t _a, *a = &_a;
+ u32 map_index;
+ int rv;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "del"))
+ is_add = 0;
+ else if (unformat (line_input, "nsp %d", &nsp))
+ nsp_set = 1;
+ else if (unformat (line_input, "nsi %d", &nsi))
+ nsi_set = 1;
+ else if (unformat (line_input, "mapped-nsp %d", &mapped_nsp))
+ mapped_nsp_set = 1;
+ else if (unformat (line_input, "mapped-nsi %d", &mapped_nsi))
+ mapped_nsi_set = 1;
+ else if (unformat (line_input, "nsh_action %U", unformat_nsh_action,
+ &nsh_action))
+ nsh_action_set = 1;
+ else if (unformat (line_input, "encap-gre4-intf %d", &sw_if_index))
+ next_node = NSH_NODE_NEXT_ENCAP_GRE4;
+ else if (unformat (line_input, "encap-gre6-intf %d", &sw_if_index))
+ next_node = NSH_NODE_NEXT_ENCAP_GRE6;
+ else if (unformat (line_input, "encap-vxlan-gpe-intf %d", &sw_if_index))
+ next_node = NSH_NODE_NEXT_ENCAP_VXLANGPE;
+ else if (unformat (line_input, "encap-lisp-gpe-intf %d", &sw_if_index))
+ next_node = NSH_NODE_NEXT_ENCAP_LISP_GPE;
+ else if (unformat (line_input, "encap-vxlan4-intf %d", &sw_if_index))
+ next_node = NSH_NODE_NEXT_ENCAP_VXLAN4;
+ else if (unformat (line_input, "encap-vxlan6-intf %d", &sw_if_index))
+ next_node = NSH_NODE_NEXT_ENCAP_VXLAN6;
+ else if (unformat (line_input, "encap-eth-intf %d", &sw_if_index))
+ {
+ next_node = NSH_NODE_NEXT_ENCAP_ETHERNET;
+ adj_index = nsh_get_adj_by_sw_if_index (sw_if_index);
+ }
+ else
+ if (unformat
+ (line_input, "encap-none %d %d", &sw_if_index, &rx_sw_if_index))
+ next_node = NSH_NODE_NEXT_DECAP_ETH_INPUT;
+ else
+ return clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+ }
+
+ unformat_free (line_input);
+
+ if (nsp_set == 0 || nsi_set == 0)
+ return clib_error_return (0, "nsp nsi pair required. Key: for NSH entry");
+
+ if (mapped_nsp_set == 0 || mapped_nsi_set == 0)
+ return clib_error_return (0,
+ "mapped-nsp mapped-nsi pair required. Key: for NSH entry");
+
+ if (nsh_action_set == 0)
+ return clib_error_return (0, "nsh_action required: swap|push|pop.");
+
+ if (next_node == ~0)
+ return clib_error_return (0,
+ "must specific action: [encap-gre-intf <nn> | encap-vxlan-gpe-intf <nn> | encap-lisp-gpe-intf <nn> | encap-none <tx_sw_if_index> <rx_sw_if_index>]");
+
+ memset (a, 0, sizeof (*a));
+
+ /* set args structure */
+ a->is_add = is_add;
+ a->map.nsp_nsi = (nsp << NSH_NSP_SHIFT) | nsi;
+ a->map.mapped_nsp_nsi = (mapped_nsp << NSH_NSP_SHIFT) | mapped_nsi;
+ a->map.nsh_action = nsh_action;
+ a->map.sw_if_index = sw_if_index;
+ a->map.rx_sw_if_index = rx_sw_if_index;
+ a->map.next_node = next_node;
+ a->map.adj_index = adj_index;
+
+ rv = nsh_add_del_map (a, &map_index);
+
+ switch (rv)
+ {
+ case 0:
+ break;
+ case -1: //TODO API_ERROR_INVALID_VALUE:
+ return clib_error_return (0,
+ "mapping already exists. Remove it first.");
+
+ case -2: // TODO API_ERROR_NO_SUCH_ENTRY:
+ return clib_error_return (0, "mapping does not exist.");
+
+ default:
+ return clib_error_return (0, "nsh_add_del_map returned %d", rv);
+ }
+
+ if ((a->map.next_node == NSH_NODE_NEXT_ENCAP_VXLAN4)
+ | (a->map.next_node == NSH_NODE_NEXT_ENCAP_VXLAN6))
+ {
+ rv = nsh_add_del_proxy_session (a);
+
+ switch (rv)
+ {
+ case 0:
+ break;
+ case -1: //TODO API_ERROR_INVALID_VALUE:
+ return clib_error_return (0,
+ "nsh-proxy-session already exists. Remove it first.");
+
+ case -2: // TODO API_ERROR_NO_SUCH_ENTRY:
+ return clib_error_return (0, "nsh-proxy-session does not exist.");
+
+ default:
+ return clib_error_return
+ (0, "nsh_add_del_proxy_session() returned %d", rv);
+ }
+ }
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (create_nsh_map_command, static) =
+{
+.path = "create nsh map",.short_help =
+ "create nsh map nsp <nn> nsi <nn> [del] mapped-nsp <nn> mapped-nsi <nn> nsh_action [swap|push|pop] "
+ "[encap-gre4-intf <nn> | encap-gre4-intf <nn> | encap-vxlan-gpe-intf <nn> | encap-lisp-gpe-intf <nn> "
+ " encap-vxlan4-intf <nn> | encap-vxlan6-intf <nn>| encap-eth-intf <nn> | encap-none]\n",.function
+ = nsh_add_del_map_command_fn,};
+
+/** API message handler */
+static void
+vl_api_nsh_add_del_map_t_handler (vl_api_nsh_add_del_map_t * mp)
+{
+ vl_api_nsh_add_del_map_reply_t *rmp;
+ nsh_main_t *nm = &nsh_main;
+ int rv;
+ nsh_add_del_map_args_t _a, *a = &_a;
+ u32 map_index = ~0;
+
+ a->is_add = mp->is_add;
+ a->map.nsp_nsi = ntohl (mp->nsp_nsi);
+ a->map.mapped_nsp_nsi = ntohl (mp->mapped_nsp_nsi);
+ a->map.nsh_action = ntohl (mp->nsh_action);
+ a->map.sw_if_index = ntohl (mp->sw_if_index);
+ a->map.rx_sw_if_index = ntohl (mp->rx_sw_if_index);
+ a->map.next_node = ntohl (mp->next_node);
+
+ rv = nsh_add_del_map (a, &map_index);
+
+ if ((a->map.next_node == NSH_NODE_NEXT_ENCAP_VXLAN4)
+ | (a->map.next_node == NSH_NODE_NEXT_ENCAP_VXLAN6))
+ {
+ rv = nsh_add_del_proxy_session (a);
+ }
+
+ REPLY_MACRO2 (VL_API_NSH_ADD_DEL_MAP_REPLY, (
+ {
+ rmp->map_index =
+ htonl (map_index);
+ }
+ ));
+}
+
/**
 * CLI command for showing the mapping between NSH entries
 *
 * Prints every map in the nsh_mappings pool via format_nsh_map, or a
 * placeholder line when the pool is empty.
 */
static clib_error_t *
show_nsh_map_command_fn (vlib_main_t * vm,
			 unformat_input_t * input, vlib_cli_command_t * cmd)
{
  nsh_main_t *nm = &nsh_main;
  nsh_map_t *map;

  if (pool_elts (nm->nsh_mappings) == 0)
    vlib_cli_output (vm, "No nsh maps configured.");

  /* old-style pool_foreach: the body is a ({ ... }) statement expression */
  pool_foreach (map, nm->nsh_mappings, (
					{
					vlib_cli_output (vm, "%U",
							 format_nsh_map,
							 map);
					}
	       ));

  return 0;
}
+
/* CLI: "show nsh map" — list all configured NSH maps */
VLIB_CLI_COMMAND (show_nsh_map_command, static) =
{
.path = "show nsh map",.function = show_nsh_map_command_fn,};
+
+int
+nsh_header_rewrite (nsh_entry_t * nsh_entry)
+{
+ u8 *rw = 0;
+ int len = 0;
+ nsh_base_header_t *nsh_base;
+ nsh_md1_data_t *nsh_md1;
+ nsh_main_t *nm = &nsh_main;
+ nsh_md2_data_t *opt0;
+ nsh_md2_data_t *limit0;
+ nsh_md2_data_t *nsh_md2;
+ nsh_option_map_t _nsh_option, *nsh_option = &_nsh_option;
+ u8 old_option_size = 0;
+ u8 new_option_size = 0;
+
+ vec_free (nsh_entry->rewrite);
+ if (nsh_entry->nsh_base.md_type == 1)
+ {
+ len = sizeof (nsh_base_header_t) + sizeof (nsh_md1_data_t);
+ }
+ else if (nsh_entry->nsh_base.md_type == 2)
+ {
+ /* set to maxim, maybe dataplane will add more TLVs */
+ len = MAX_NSH_HEADER_LEN;
+ }
+ vec_validate_aligned (rw, len - 1, CLIB_CACHE_LINE_BYTES);
+ memset (rw, 0, len);
+
+ nsh_base = (nsh_base_header_t *) rw;
+ nsh_base->ver_o_c = nsh_entry->nsh_base.ver_o_c;
+ nsh_base->length = nsh_entry->nsh_base.length;
+ nsh_base->md_type = nsh_entry->nsh_base.md_type;
+ nsh_base->next_protocol = nsh_entry->nsh_base.next_protocol;
+ nsh_base->nsp_nsi = clib_host_to_net_u32 (nsh_entry->nsh_base.nsp_nsi);
+
+ if (nsh_base->md_type == 1)
+ {
+ nsh_md1 = (nsh_md1_data_t *) (rw + sizeof (nsh_base_header_t));
+ nsh_md1->c1 = clib_host_to_net_u32 (nsh_entry->md.md1_data.c1);
+ nsh_md1->c2 = clib_host_to_net_u32 (nsh_entry->md.md1_data.c2);
+ nsh_md1->c3 = clib_host_to_net_u32 (nsh_entry->md.md1_data.c3);
+ nsh_md1->c4 = clib_host_to_net_u32 (nsh_entry->md.md1_data.c4);
+ nsh_entry->rewrite_size = 24;
+ }
+ else if (nsh_base->md_type == 2)
+ {
+ opt0 = (nsh_md2_data_t *) (nsh_entry->tlvs_data);
+ limit0 = (nsh_md2_data_t *) ((u8 *) opt0 + nsh_entry->tlvs_len);
+
+ nsh_md2 = (nsh_md2_data_t *) (rw + sizeof (nsh_base_header_t));
+ nsh_entry->rewrite_size = sizeof (nsh_base_header_t);
+
+ while (opt0 < limit0)
+ {
+ old_option_size = sizeof (nsh_md2_data_t) + opt0->length;
+ /* round to 4-byte */
+ old_option_size = ((old_option_size + 3) >> 2) << 2;
+
+ nsh_option = nsh_md2_lookup_option (opt0->class, opt0->type);
+ if (nsh_option == NULL)
+ {
+ goto next_tlv_md2;
+ }
+
+ if (nm->add_options[nsh_option->option_id] != NULL)
+ {
+ if (0 != nm->add_options[nsh_option->option_id] ((u8 *) nsh_md2,
+ &new_option_size))
+ {
+ goto next_tlv_md2;
+ }
+
+ /* round to 4-byte */
+ new_option_size = ((new_option_size + 3) >> 2) << 2;
+
+ nsh_entry->rewrite_size += new_option_size;
+ nsh_md2 =
+ (nsh_md2_data_t *) (((u8 *) nsh_md2) + new_option_size);
+ opt0 = (nsh_md2_data_t *) (((u8 *) opt0) + old_option_size);
+ }
+ else
+ {
+ next_tlv_md2:
+ opt0 = (nsh_md2_data_t *) (((u8 *) opt0) + old_option_size);
+ }
+
+ }
+ }
+
+ nsh_entry->rewrite = rw;
+ nsh_base->length = (nsh_base->length & NSH_TTL_L2_MASK) |
+ ((nsh_entry->rewrite_size >> 2) & NSH_LEN_MASK);
+
+ return 0;
+}
+
+
/**
 * Action function for adding an NSH entry
 * nsh_add_del_entry_args_t *a: host order
 *
 * Keyed by host-order nsp_nsi (the nsh map table stores the same).
 * On add, takes ownership of a->nsh_entry.tlvs_data for md-type 2
 * (copies it, then frees the caller's vector) and builds the rewrite
 * via nsh_header_rewrite().  On delete, frees the entry's tlvs_data,
 * rewrite, and the hash's owned key copy.
 *
 * @return 0 on success, -1 on duplicate add, -2 on missing delete
 */

int
nsh_add_del_entry (nsh_add_del_entry_args_t * a, u32 * entry_indexp)
{
  nsh_main_t *nm = &nsh_main;
  nsh_entry_t *nsh_entry = 0;
  u32 key, *key_copy;
  uword *entry_id;
  hash_pair_t *hp;
  u32 entry_index = ~0;
  u8 tlvs_len = 0;
  u8 *data = 0;

  /* host order, because nsh map table stores nsp_nsi in host order */
  key = a->nsh_entry.nsh_base.nsp_nsi;

  entry_id = hash_get_mem (nm->nsh_entry_by_key, &key);

  if (a->is_add)
    {
      /* adding an entry, must not already exist */
      if (entry_id)
	return -1;		// TODO VNET_API_ERROR_INVALID_VALUE;

      pool_get_aligned (nm->nsh_entries, nsh_entry, CLIB_CACHE_LINE_BYTES);
      memset (nsh_entry, 0, sizeof (*nsh_entry));

      /* copy from arg structure */
#define _(x) nsh_entry->nsh_base.x = a->nsh_entry.nsh_base.x;
      foreach_copy_nsh_base_hdr_field;
#undef _

      if (a->nsh_entry.nsh_base.md_type == 1)
	{
	  nsh_entry->md.md1_data.c1 = a->nsh_entry.md.md1_data.c1;
	  nsh_entry->md.md1_data.c2 = a->nsh_entry.md.md1_data.c2;
	  nsh_entry->md.md1_data.c3 = a->nsh_entry.md.md1_data.c3;
	  nsh_entry->md.md1_data.c4 = a->nsh_entry.md.md1_data.c4;
	}
      else if (a->nsh_entry.nsh_base.md_type == 2)
	{
	  /* NOTE(review): nsh_entry was just memset to zero, so this
	   * vec_free is a no-op kept for safety */
	  vec_free (nsh_entry->tlvs_data);
	  tlvs_len = a->nsh_entry.tlvs_len;
	  vec_validate_aligned (data, tlvs_len - 1, CLIB_CACHE_LINE_BYTES);

	  /* deep-copy the TLVs, then release the caller's vector */
	  clib_memcpy (data, a->nsh_entry.tlvs_data, tlvs_len);
	  nsh_entry->tlvs_data = data;
	  nsh_entry->tlvs_len = tlvs_len;
	  vec_free (a->nsh_entry.tlvs_data);
	}

      /* precompute the encap rewrite for the data plane */
      nsh_header_rewrite (nsh_entry);

      /* the hash owns its own copy of the key */
      key_copy = clib_mem_alloc (sizeof (*key_copy));
      clib_memcpy (key_copy, &key, sizeof (*key_copy));

      hash_set_mem (nm->nsh_entry_by_key, key_copy,
		    nsh_entry - nm->nsh_entries);
      entry_index = nsh_entry - nm->nsh_entries;
    }
  else
    {
      if (!entry_id)
	return -2;		//TODO API_ERROR_NO_SUCH_ENTRY;

      nsh_entry = pool_elt_at_index (nm->nsh_entries, entry_id[0]);
      /* free the hash's owned key copy before removing the pair */
      hp = hash_get_pair (nm->nsh_entry_by_key, &key);
      key_copy = (void *) (hp->key);
      hash_unset_mem (nm->nsh_entry_by_key, &key);
      clib_mem_free (key_copy);

      vec_free (nsh_entry->tlvs_data);
      vec_free (nsh_entry->rewrite);
      pool_put (nm->nsh_entries, nsh_entry);
    }

  if (entry_indexp)
    *entry_indexp = entry_index;

  return 0;
}
+
+
+/**
+ * CLI command for adding NSH entry
+ */
+
+static clib_error_t *
+nsh_add_del_entry_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 is_add = 1;
+ u8 ver_o_c = 0;
+ u8 ttl = 63;
+ u8 length = 0;
+ u8 md_type = 0;
+ u8 next_protocol = 1; /* default: ip4 */
+ u32 nsp;
+ u8 nsp_set = 0;
+ u32 nsi;
+ u8 nsi_set = 0;
+ u32 nsp_nsi;
+ u32 c1 = 0;
+ u32 c2 = 0;
+ u32 c3 = 0;
+ u32 c4 = 0;
+ u8 *data = 0;
+ nsh_tlv_header_t tlv_header;
+ u8 cur_len = 0, tlvs_len = 0;
+ u8 *current;
+ nsh_main_t *nm = &nsh_main;
+ nsh_option_map_t _nsh_option, *nsh_option = &_nsh_option;
+ u8 option_size = 0;
+ u32 tmp;
+ int rv;
+ u32 entry_index;
+ nsh_add_del_entry_args_t _a, *a = &_a;
+ u8 has_ioam_trace_option = 0;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "del"))
+ is_add = 0;
+ else if (unformat (line_input, "version %d", &tmp))
+ ver_o_c |= (tmp & 3) << 6;
+ else if (unformat (line_input, "o-bit %d", &tmp))
+ ver_o_c |= (tmp & 1) << 5;
+ else if (unformat (line_input, "c-bit %d", &tmp))
+ ver_o_c |= (tmp & 1) << 4;
+ else if (unformat (line_input, "ttl %d", &ttl))
+ ver_o_c |= (ttl & NSH_LEN_MASK) >> 2;
+ else if (unformat (line_input, "md-type %d", &tmp))
+ md_type = tmp;
+ else if (unformat (line_input, "next-ip4"))
+ next_protocol = 1;
+ else if (unformat (line_input, "next-ip6"))
+ next_protocol = 2;
+ else if (unformat (line_input, "next-ethernet"))
+ next_protocol = 3;
+ else if (unformat (line_input, "c1 %d", &c1))
+ ;
+ else if (unformat (line_input, "c2 %d", &c2))
+ ;
+ else if (unformat (line_input, "c3 %d", &c3))
+ ;
+ else if (unformat (line_input, "c4 %d", &c4))
+ ;
+ else if (unformat (line_input, "nsp %d", &nsp))
+ nsp_set = 1;
+ else if (unformat (line_input, "nsi %d", &nsi))
+ nsi_set = 1;
+ else if (unformat (line_input, "tlv-ioam-trace"))
+ has_ioam_trace_option = 1;
+ else
+ return clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+ }
+
+ unformat_free (line_input);
+
+ if (nsp_set == 0)
+ return clib_error_return (0, "nsp not specified");
+
+ if (nsi_set == 0)
+ return clib_error_return (0, "nsi not specified");
+
+ if (md_type == 1 && has_ioam_trace_option == 1)
+ return clib_error_return (0, "Invalid MD Type");
+
+ nsp_nsi = (nsp << 8) | nsi;
+
+ memset (a, 0, sizeof (*a));
+ a->is_add = is_add;
+
+ if (md_type == 1)
+ {
+ a->nsh_entry.md.md1_data.c1 = c1;
+ a->nsh_entry.md.md1_data.c2 = c2;
+ a->nsh_entry.md.md1_data.c3 = c3;
+ a->nsh_entry.md.md1_data.c4 = c4;
+ length = (sizeof (nsh_base_header_t) + sizeof (nsh_md1_data_t)) >> 2;
+ }
+ else if (md_type == 2)
+ {
+ length = sizeof (nsh_base_header_t) >> 2;
+
+ vec_free (a->nsh_entry.tlvs_data);
+ tlvs_len = (MAX_METADATA_LEN << 2);
+ vec_validate_aligned (data, tlvs_len - 1, CLIB_CACHE_LINE_BYTES);
+ a->nsh_entry.tlvs_data = data;
+ current = data;
+
+ if (has_ioam_trace_option)
+ {
+ tlv_header.class = clib_host_to_net_u16 (NSH_MD2_IOAM_CLASS);
+ tlv_header.type = NSH_MD2_IOAM_OPTION_TYPE_TRACE;
+ /* Uses network order's class and type to lookup */
+ nsh_option =
+ nsh_md2_lookup_option (tlv_header.class, tlv_header.type);
+ if (nsh_option == NULL)
+ return clib_error_return (0, "iOAM Trace not registered");
+
+ if (nm->add_options[nsh_option->option_id] != NULL)
+ {
+ if (0 != nm->add_options[nsh_option->option_id] ((u8 *) current,
+ &option_size))
+ {
+ return clib_error_return (0, "Invalid MD Type");
+ }
+ }
+
+ nm->options_size[nsh_option->option_id] = option_size;
+ /* round to 4-byte */
+ option_size = (((option_size + 3) >> 2) << 2);
+
+ cur_len += option_size;
+ current = data + option_size;
+ }
+
+ /* Add more options' parsing */
+
+ a->nsh_entry.tlvs_len = cur_len;
+ length += (cur_len >> 2);
+ }
+ length = (length & NSH_LEN_MASK) | ((ttl & 0x3) << 6);
+
+#define _(x) a->nsh_entry.nsh_base.x = x;
+ foreach_copy_nsh_base_hdr_field;
+#undef _
+
+ rv = nsh_add_del_entry (a, &entry_index);
+
+ switch (rv)
+ {
+ case 0:
+ break;
+ default:
+ return clib_error_return (0, "nsh_add_del_entry returned %d", rv);
+ }
+
+ return 0;
+}
+
/* CLI: create/delete an NSH entry (the header template used for encap) */
VLIB_CLI_COMMAND (create_nsh_entry_command, static) =
{
.path = "create nsh entry",.short_help =
  "create nsh entry {nsp <nn> nsi <nn>} [ttl <nn>] [md-type <nn>]"
  " [c1 <nn> c2 <nn> c3 <nn> c4 <nn>] [tlv-ioam-trace] [del]\n",.function
  = nsh_add_del_entry_command_fn,};
+
+/** API message handler */
+static void vl_api_nsh_add_del_entry_t_handler
+ (vl_api_nsh_add_del_entry_t * mp)
+{
+ vl_api_nsh_add_del_entry_reply_t *rmp;
+ nsh_main_t *nm = &nsh_main;
+ int rv;
+ nsh_add_del_entry_args_t _a, *a = &_a;
+ u32 entry_index = ~0;
+ u8 tlvs_len = 0;
+ u8 *data = 0;
+
+ a->is_add = mp->is_add;
+ a->nsh_entry.nsh_base.ver_o_c =
+ (mp->ver_o_c & 0xF0) | ((mp->ttl & NSH_LEN_MASK) >> 2);
+ a->nsh_entry.nsh_base.length =
+ (mp->length & NSH_LEN_MASK) | ((mp->ttl & 0x3) << 6);
+ a->nsh_entry.nsh_base.md_type = mp->md_type;
+ a->nsh_entry.nsh_base.next_protocol = mp->next_protocol;
+ a->nsh_entry.nsh_base.nsp_nsi = ntohl (mp->nsp_nsi);
+ if (mp->md_type == 1)
+ {
+ a->nsh_entry.md.md1_data.c1 = ntohl (mp->c1);
+ a->nsh_entry.md.md1_data.c2 = ntohl (mp->c2);
+ a->nsh_entry.md.md1_data.c3 = ntohl (mp->c3);
+ a->nsh_entry.md.md1_data.c4 = ntohl (mp->c4);
+ }
+ else if (mp->md_type == 2)
+ {
+ tlvs_len = mp->tlv_length;
+ vec_validate_aligned (data, tlvs_len - 1, CLIB_CACHE_LINE_BYTES);
+
+ clib_memcpy (data, mp->tlv, tlvs_len);
+ a->nsh_entry.tlvs_data = data;
+ a->nsh_entry.tlvs_len = tlvs_len;
+ }
+
+ rv = nsh_add_del_entry (a, &entry_index);
+
+ REPLY_MACRO2 (VL_API_NSH_ADD_DEL_ENTRY_REPLY, (
+ {
+ rmp->entry_index =
+ htonl (entry_index);
+ }
+ ));
+}
+
/* Serialize one NSH entry into an nsh_entry_details API message and send
 * it to the client queue q (used by the entry dump handler). */
static void send_nsh_entry_details
  (nsh_entry_t * t, unix_shared_memory_queue_t * q, u32 context)
{
  vl_api_nsh_entry_details_t *rmp;
  nsh_main_t *nm = &nsh_main;

  rmp = vl_msg_api_alloc (sizeof (*rmp));
  memset (rmp, 0, sizeof (*rmp));

  rmp->_vl_msg_id = ntohs ((VL_API_NSH_ENTRY_DETAILS) + nm->msg_id_base);
  rmp->ver_o_c = t->nsh_base.ver_o_c;
  /* reassemble the 6-bit TTL from its high-4 (ver_o_c) and low-2 (length)
   * wire fragments */
  rmp->ttl = (t->nsh_base.ver_o_c & NSH_TTL_H4_MASK) << 2 |
    (t->nsh_base.length & NSH_TTL_L2_MASK) >> 6;
  rmp->length = t->nsh_base.length & NSH_LEN_MASK;
  rmp->md_type = t->nsh_base.md_type;
  rmp->next_protocol = t->nsh_base.next_protocol;
  rmp->nsp_nsi = htonl (t->nsh_base.nsp_nsi);

  if (t->nsh_base.md_type == 1)
    {
      rmp->tlv_length = 4;
      rmp->c1 = htonl (t->md.md1_data.c1);
      rmp->c2 = htonl (t->md.md1_data.c2);
      rmp->c3 = htonl (t->md.md1_data.c3);
      rmp->c4 = htonl (t->md.md1_data.c4);
    }
  else if (t->nsh_base.md_type == 2)
    {
      rmp->tlv_length = t->tlvs_len;
      clib_memcpy (rmp->tlv, t->tlvs_data, t->tlvs_len);
    }

  rmp->context = context;

  vl_msg_api_send_shmem (q, (u8 *) & rmp);
}
+
/* API dump handler: entry_index == ~0 dumps every NSH entry, otherwise
 * just the requested one. */
static void
vl_api_nsh_entry_dump_t_handler (vl_api_nsh_entry_dump_t * mp)
{
  unix_shared_memory_queue_t *q;
  nsh_main_t *nm = &nsh_main;
  nsh_entry_t *t;
  u32 entry_index;

  q = vl_api_client_index_to_input_queue (mp->client_index);
  if (q == 0)
    {
      return;
    }

  entry_index = ntohl (mp->entry_index);

  if (~0 == entry_index)
    {
      pool_foreach (t, nm->nsh_entries, (
					  {
					  send_nsh_entry_details (t, q,
								  mp->context);
					  }
		    ));
    }
  else
    {
      /* NOTE(review): indexes the pool directly with only a vec_len bounds
       * check; a freed pool slot within range would still be returned —
       * consider pool_is_free_index().  TODO confirm */
      if (entry_index >= vec_len (nm->nsh_entries))
	{
	  return;
	}
      t = &nm->nsh_entries[entry_index];
      send_nsh_entry_details (t, q, mp->context);
    }
}
+
/* Serialize one NSH map into an nsh_map_details API message and send it
 * to the client queue q (used by the map dump handler). */
static void send_nsh_map_details
  (nsh_map_t * t, unix_shared_memory_queue_t * q, u32 context)
{
  vl_api_nsh_map_details_t *rmp;
  nsh_main_t *nm = &nsh_main;

  rmp = vl_msg_api_alloc (sizeof (*rmp));
  memset (rmp, 0, sizeof (*rmp));

  rmp->_vl_msg_id = ntohs ((VL_API_NSH_MAP_DETAILS) + nm->msg_id_base);
  rmp->nsp_nsi = htonl (t->nsp_nsi);
  rmp->mapped_nsp_nsi = htonl (t->mapped_nsp_nsi);
  rmp->nsh_action = htonl (t->nsh_action);
  rmp->sw_if_index = htonl (t->sw_if_index);
  rmp->rx_sw_if_index = htonl (t->rx_sw_if_index);
  rmp->next_node = htonl (t->next_node);

  rmp->context = context;

  vl_msg_api_send_shmem (q, (u8 *) & rmp);
}
+
/* API dump handler: map_index == ~0 dumps every NSH map, otherwise just
 * the requested one.
 * NOTE(review): same direct pool indexing pattern as the entry dump —
 * a freed slot within range would still be returned.  TODO confirm */
static void
vl_api_nsh_map_dump_t_handler (vl_api_nsh_map_dump_t * mp)
{
  unix_shared_memory_queue_t *q;
  nsh_main_t *nm = &nsh_main;
  nsh_map_t *t;
  u32 map_index;

  q = vl_api_client_index_to_input_queue (mp->client_index);
  if (q == 0)
    {
      return;
    }

  map_index = ntohl (mp->map_index);

  if (~0 == map_index)
    {
      pool_foreach (t, nm->nsh_mappings, (
					   {
					   send_nsh_map_details (t, q,
								 mp->context);
					   }
		    ));
    }
  else
    {
      if (map_index >= vec_len (nm->nsh_mappings))
	{
	  return;
	}
      t = &nm->nsh_mappings[map_index];
      send_nsh_map_details (t, q, mp->context);
    }
}
+
/* CLI handler for "show nsh entry": prints each configured entry's
 * rewrite header (via format_nsh_header) and its rewrite size. */
static clib_error_t *
show_nsh_entry_command_fn (vlib_main_t * vm,
			   unformat_input_t * input, vlib_cli_command_t * cmd)
{
  nsh_main_t *nm = &nsh_main;
  nsh_entry_t *nsh_entry;

  if (pool_elts (nm->nsh_entries) == 0)
    vlib_cli_output (vm, "No nsh entries configured.");

  pool_foreach (nsh_entry, nm->nsh_entries, (
					      {
					      vlib_cli_output (vm, "%U",
							       format_nsh_header,
							       nsh_entry->rewrite);
					      vlib_cli_output (vm,
							       "  rewrite_size: %d bytes",
							       nsh_entry->rewrite_size);
					      }
		));

  return 0;
}
+
/* CLI: "show nsh entry" — list all configured NSH entries */
VLIB_CLI_COMMAND (show_nsh_entry_command, static) =
{
.path = "show nsh entry",.function = show_nsh_entry_command_fn,};
+
+
/* Set up the API message handling tables: register handler/endian/print
 * callbacks for every message in foreach_nsh_plugin_api_msg, offset by
 * this plugin's dynamically assigned message-id base. */
static clib_error_t *
nsh_plugin_api_hookup (vlib_main_t * vm)
{
  nsh_main_t *nm __attribute__ ((unused)) = &nsh_main;
#define _(N,n) \
  vl_msg_api_set_handlers((VL_API_##N + nm->msg_id_base), \
			  #n, \
			  vl_api_##n##_t_handler, \
			  vl_noop_handler, \
			  vl_api_##n##_t_endian, \
			  vl_api_##n##_t_print, \
			  sizeof(vl_api_##n##_t), 1);
  foreach_nsh_plugin_api_msg;
#undef _

  return 0;
}
+
/* Register each plugin API message's name+CRC with the API main so
 * clients can resolve message ids at runtime. */
static void
setup_message_id_table (nsh_main_t * nm, api_main_t * am)
{
#define _(id,n,crc) \
  vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id + nm->msg_id_base);
  foreach_vl_msg_name_crc_nsh;
#undef _
}
+
/* Serialize md-type 2 TLV options into a freshly pushed NSH header.
 *
 * Walks the entry's TLV template (tlvs_data/tlvs_len); for every option
 * with a registered per-option callback in nm->options[], lets the
 * callback write the live option after 'hdr'.  Unknown or rejected
 * options are skipped.  Finally patches the length field (in 4-byte
 * words) of the entry's rewrite template.
 *
 * NOTE(review): mutates nsh_entry->rewrite_size and the shared rewrite
 * template on the per-packet path — confirm this is safe when multiple
 * workers encap with the same entry concurrently. */
always_inline void
nsh_md2_encap (vlib_buffer_t * b, nsh_base_header_t * hdr,
	       nsh_entry_t * nsh_entry)
{
  nsh_main_t *nm = &nsh_main;
  nsh_base_header_t *nsh_base;
  nsh_tlv_header_t *opt0;
  nsh_tlv_header_t *limit0;
  nsh_tlv_header_t *nsh_md2;
  nsh_option_map_t *nsh_option;
  u8 old_option_size = 0;
  u8 new_option_size = 0;

  /* Populate the NSH Header */
  opt0 = (nsh_tlv_header_t *) (nsh_entry->tlvs_data);
  limit0 = (nsh_tlv_header_t *) (nsh_entry->tlvs_data + nsh_entry->tlvs_len);

  nsh_md2 = (nsh_tlv_header_t *) ((u8 *) hdr /*nsh_entry->rewrite */  +
				  sizeof (nsh_base_header_t));
  nsh_entry->rewrite_size = sizeof (nsh_base_header_t);

  /* Scan the set of variable metadata, process ones that we understand */
  while (opt0 < limit0)
    {
      old_option_size = sizeof (nsh_tlv_header_t) + opt0->length;
      /* round to 4-byte */
      old_option_size = ((old_option_size + 3) >> 2) << 2;

      nsh_option = nsh_md2_lookup_option (opt0->class, opt0->type);
      if (nsh_option == NULL)
	{
	  goto next_tlv_md2;
	}

      if (nm->options[nsh_option->option_id])
	{
	  if ((*nm->options[nsh_option->option_id]) (b, nsh_md2))
	    {
	      goto next_tlv_md2;
	    }

	  /* option length may be varied */
	  new_option_size = sizeof (nsh_tlv_header_t) + nsh_md2->length;
	  /* round to 4-byte */
	  new_option_size = ((new_option_size + 3) >> 2) << 2;
	  nsh_entry->rewrite_size += new_option_size;

	  nsh_md2 = (nsh_tlv_header_t *) (((u8 *) nsh_md2) + new_option_size);
	  opt0 = (nsh_tlv_header_t *) (((u8 *) opt0) + old_option_size);

	}
      else
	{
	next_tlv_md2:
	  opt0 = (nsh_tlv_header_t *) (((u8 *) opt0) + old_option_size);
	}
    }

  /* update nsh header's length */
  nsh_base = (nsh_base_header_t *) nsh_entry->rewrite;
  nsh_base->length = (nsh_base->length & NSH_TTL_L2_MASK) |
    ((nsh_entry->rewrite_size >> 2) & NSH_LEN_MASK);
  return;
}
+
+/* Re-generate the MD-type-2 TLV section when SWAPping an NSH header.
+ *
+ * Walks the TLVs of the incoming header ('hdr', 'header_len' bytes total)
+ * and, for each recognized {class,type} with a registered swap handler,
+ * writes the (possibly re-sized) replacement TLV into nsh_entry->rewrite
+ * just after the base header.  Unknown TLVs, and TLVs whose handler
+ * declines (returns non-zero), are dropped from the output.  The rewrite's
+ * base-header length field is updated to the new total size.
+ *
+ * Fix: opt0/limit0 were initialized via casts to nsh_md2_data_t * while
+ * being declared and used as nsh_tlv_header_t * -- an incompatible-pointer
+ * assignment.  Cast to the declared type instead (matching nsh_md2_encap).
+ *
+ * NOTE(review): 'next' and 'drop_node_val' are currently unused; they are
+ * kept to preserve the existing call signature. */
+always_inline void
+nsh_md2_swap (vlib_buffer_t * b,
+	      nsh_base_header_t * hdr,
+	      u32 header_len,
+	      nsh_entry_t * nsh_entry, u32 * next, u32 drop_node_val)
+{
+  nsh_main_t *nm = &nsh_main;
+  nsh_base_header_t *nsh_base;
+  nsh_tlv_header_t *opt0;    /* cursor over incoming TLVs */
+  nsh_tlv_header_t *limit0;  /* one past the end of incoming TLVs */
+  nsh_tlv_header_t *nsh_md2; /* output cursor in the rewrite buffer */
+  nsh_option_map_t *nsh_option;
+  u8 old_option_size = 0;
+  u8 new_option_size = 0;
+
+  /* Populate the NSH Header */
+  opt0 = (nsh_tlv_header_t *) (hdr + 1);
+  limit0 = (nsh_tlv_header_t *) ((u8 *) hdr + header_len);
+
+  nsh_md2 =
+    (nsh_tlv_header_t *) (nsh_entry->rewrite + sizeof (nsh_base_header_t));
+  nsh_entry->rewrite_size = sizeof (nsh_base_header_t);
+
+  /* Scan the set of variable metadata, process ones that we understand */
+  while (opt0 < limit0)
+    {
+      old_option_size = sizeof (nsh_tlv_header_t) + opt0->length;
+      /* round to 4-byte */
+      old_option_size = ((old_option_size + 3) >> 2) << 2;
+
+      nsh_option = nsh_md2_lookup_option (opt0->class, opt0->type);
+      if (nsh_option == NULL)
+	{
+	  goto next_tlv_md2;
+	}
+
+      if (nm->swap_options[nsh_option->option_id])
+	{
+	  if ((*nm->swap_options[nsh_option->option_id]) (b, opt0, nsh_md2))
+	    {
+	      goto next_tlv_md2;
+	    }
+
+	  /* option length may be varied */
+	  new_option_size = sizeof (nsh_tlv_header_t) + nsh_md2->length;
+	  /* round to 4-byte */
+	  new_option_size = ((new_option_size + 3) >> 2) << 2;
+	  nsh_entry->rewrite_size += new_option_size;
+	  nsh_md2 = (nsh_tlv_header_t *) (((u8 *) nsh_md2) + new_option_size);
+
+	  opt0 = (nsh_tlv_header_t *) (((u8 *) opt0) + old_option_size);
+
+	}
+      else
+	{
+	next_tlv_md2:
+	  opt0 = (nsh_tlv_header_t *) (((u8 *) opt0) + old_option_size);
+	}
+    }
+
+  /* update nsh header's length (6-bit word count), preserving TTL bits */
+  nsh_base = (nsh_base_header_t *) nsh_entry->rewrite;
+  nsh_base->length = (nsh_base->length & NSH_TTL_L2_MASK) |
+    ((nsh_entry->rewrite_size >> 2) & NSH_LEN_MASK);
+  return;
+}
+
+/* Validate and consume MD-type-2 TLVs while POPping an NSH header.
+ *
+ * Walks the TLVs that follow the base header; an unknown {class,type}, or
+ * a pop handler returning non-zero, sets *next to drop_node_val and bails
+ * out.  When the iOAM export override (nm->decap_v4_next_override) is set,
+ * *next is redirected to it and *header_len is zeroed so the header is not
+ * stripped before export.
+ *
+ * NOTE(review): the override assignments sit inside the TLV loop, so they
+ * are (idempotently) re-executed once per option, and are never applied
+ * when the header carries zero TLVs -- confirm whether the latter is
+ * intended before hoisting them out of the loop. */
+always_inline void
+nsh_md2_decap (vlib_buffer_t * b,
+	       nsh_base_header_t * hdr,
+	       u32 * header_len, u32 * next, u32 drop_node_val)
+{
+  nsh_main_t *nm = &nsh_main;
+  nsh_md2_data_t *opt0;   /* cursor over incoming TLVs */
+  nsh_md2_data_t *limit0; /* one past the end of the NSH header */
+  nsh_option_map_t *nsh_option;
+  u8 option_len = 0;
+
+  /* Populate the NSH Header */
+  opt0 = (nsh_md2_data_t *) (hdr + 1);
+  limit0 = (nsh_md2_data_t *) ((u8 *) hdr + *header_len);
+
+  /* Scan the set of variable metadata, process ones that we understand */
+  while (opt0 < limit0)
+    {
+      nsh_option = nsh_md2_lookup_option (opt0->class, opt0->type);
+      if (nsh_option == NULL)
+	{
+	  *next = drop_node_val;
+	  return;
+	}
+
+      if (nm->pop_options[nsh_option->option_id])
+	{
+	  if ((*nm->pop_options[nsh_option->option_id]) (b, opt0))
+	    {
+	      *next = drop_node_val;
+	      return;
+	    }
+	}
+      /* round to 4-byte */
+      option_len = ((opt0->length + 3) >> 2) << 2;
+      opt0 =
+	(nsh_md2_data_t *) (((u8 *) opt0) + sizeof (nsh_md2_data_t) +
+			    option_len);
+      /* iOAM export override: redirect and keep the header in place */
+      *next =
+	(nm->decap_v4_next_override) ? (nm->decap_v4_next_override) : (*next);
+      *header_len = (nm->decap_v4_next_override) ? 0 : (*header_len);
+    }
+
+  return;
+}
+
+/* Write the well-known dummy Ethernet header used on the
+ * NSH-AWARE-VNF-PROXY path.
+ *
+ * Bug fix: the original used memset() with multi-byte fill values, e.g.
+ * memset (&dst_address[0], 0x11223344, 4).  memset converts its fill value
+ * to unsigned char, so only the low byte was replicated and the intended
+ * 11:22:33:44:55:66 / 77:88:99:aa:bb:cc MACs were never written.  Write
+ * each byte explicitly instead. */
+always_inline void
+nsh_fill_dummy_eth (ethernet_header_t * eth)
+{
+  eth->dst_address[0] = 0x11;
+  eth->dst_address[1] = 0x22;
+  eth->dst_address[2] = 0x33;
+  eth->dst_address[3] = 0x44;
+  eth->dst_address[4] = 0x55;
+  eth->dst_address[5] = 0x66;
+  eth->src_address[0] = 0x77;
+  eth->src_address[1] = 0x88;
+  eth->src_address[2] = 0x99;
+  eth->src_address[3] = 0xaa;
+  eth->src_address[4] = 0xbb;
+  eth->src_address[5] = 0xcc;
+  /* ethertype goes on the wire: store IPv4 (0x0800) in network order */
+  eth->type = clib_host_to_net_u16 (0x0800);
+}
+
+/* Common worker for the nsh-input / nsh-proxy / nsh-classifier /
+ * nsh-aware-vnf-proxy graph nodes.  'node_type' selects how the NSP/NSI
+ * lookup key is derived from each packet; the packet's NSH header is then
+ * swapped, pushed or popped according to the matched mapping.
+ *
+ * Fixes vs. original:
+ *  - dual-loop packet 1 used map0->rx_sw_if_index instead of
+ *    map1->rx_sw_if_index on the MD2 POP path (copy/paste bug);
+ *  - dummy Ethernet header construction (see nsh_fill_dummy_eth above).
+ *
+ * NOTE(review): on the NSH_INPUT_TYPE path the TTL is decremented into a
+ * local only to detect expiry; the decremented value is never written back
+ * into the header before forwarding -- confirm whether that is intended. */
+static uword
+nsh_input_map (vlib_main_t * vm,
+	       vlib_node_runtime_t * node,
+	       vlib_frame_t * from_frame, u32 node_type)
+{
+  u32 n_left_from, next_index, *from, *to_next;
+  nsh_main_t *nm = &nsh_main;
+
+  from = vlib_frame_vector_args (from_frame);
+  n_left_from = from_frame->n_vectors;
+
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Dual loop: process packets two at a time */
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+	  u32 bi0, bi1;
+	  vlib_buffer_t *b0, *b1;
+	  u32 next0 = NSH_NODE_NEXT_DROP, next1 = NSH_NODE_NEXT_DROP;
+	  uword *entry0, *entry1;
+	  nsh_base_header_t *hdr0 = 0, *hdr1 = 0;
+	  u32 header_len0 = 0, header_len1 = 0;
+	  u32 nsp_nsi0, nsp_nsi1;
+	  u32 ttl0, ttl1;
+	  u32 error0, error1;
+	  nsh_map_t *map0 = 0, *map1 = 0;
+	  nsh_entry_t *nsh_entry0 = 0, *nsh_entry1 = 0;
+	  nsh_base_header_t *encap_hdr0 = 0, *encap_hdr1 = 0;
+	  u32 encap_hdr_len0 = 0, encap_hdr_len1 = 0;
+	  nsh_proxy_session_by_key_t key0, key1;
+	  uword *p0, *p1;
+	  nsh_proxy_session_t *proxy0, *proxy1;
+	  u32 sw_if_index0 = 0, sw_if_index1 = 0;
+	  ethernet_header_t dummy_eth0, dummy_eth1;
+
+	  /* Prefetch next iteration. */
+	  {
+	    vlib_buffer_t *p2, *p3;
+
+	    p2 = vlib_get_buffer (vm, from[2]);
+	    p3 = vlib_get_buffer (vm, from[3]);
+
+	    vlib_prefetch_buffer_header (p2, LOAD);
+	    vlib_prefetch_buffer_header (p3, LOAD);
+
+	    CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+	    CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+	  }
+
+	  bi0 = from[0];
+	  bi1 = from[1];
+	  to_next[0] = bi0;
+	  to_next[1] = bi1;
+	  from += 2;
+	  to_next += 2;
+	  n_left_from -= 2;
+	  n_left_to_next -= 2;
+
+	  error0 = 0;
+	  error1 = 0;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+	  b1 = vlib_get_buffer (vm, bi1);
+	  hdr0 = vlib_buffer_get_current (b0);
+	  /* Derive the NSP/NSI lookup key for packet 0 */
+	  if (node_type == NSH_INPUT_TYPE)
+	    {
+	      nsp_nsi0 = hdr0->nsp_nsi;
+	      header_len0 = (hdr0->length & NSH_LEN_MASK) * 4;
+	      ttl0 = (hdr0->ver_o_c & NSH_TTL_H4_MASK) << 2 |
+		(hdr0->length & NSH_TTL_L2_MASK) >> 6;
+	      ttl0 = ttl0 - 1;
+	      if (PREDICT_FALSE (ttl0 == 0))
+		{
+		  error0 = NSH_NODE_ERROR_INVALID_TTL;
+		  goto trace0;
+		}
+	    }
+	  else if (node_type == NSH_CLASSIFIER_TYPE)
+	    {
+	      nsp_nsi0 =
+		clib_host_to_net_u32 (vnet_buffer (b0)->
+				      l2_classify.opaque_index);
+	    }
+	  else if (node_type == NSH_AWARE_VNF_PROXY_TYPE)
+	    {
+	      /* Push dummy Eth header */
+	      nsh_fill_dummy_eth (&dummy_eth0);
+	      vlib_buffer_advance (b0, -(word) sizeof (ethernet_header_t));
+	      hdr0 = vlib_buffer_get_current (b0);
+	      clib_memcpy (hdr0, &dummy_eth0,
+			   (word) sizeof (ethernet_header_t));
+
+	      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+	      nsp_nsi0 = nm->tunnel_index_by_sw_if_index[sw_if_index0];
+	    }
+	  else
+	    {
+	      memset (&key0, 0, sizeof (key0));
+	      key0.transport_type = NSH_NODE_NEXT_ENCAP_VXLAN4;
+	      key0.transport_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+	      p0 = hash_get_mem (nm->nsh_proxy_session_by_key, &key0);
+	      if (PREDICT_FALSE (p0 == 0))
+		{
+		  error0 = NSH_NODE_ERROR_NO_PROXY;
+		  goto trace0;
+		}
+
+	      proxy0 = pool_elt_at_index (nm->nsh_proxy_sessions, p0[0]);
+	      if (PREDICT_FALSE (proxy0 == 0))
+		{
+		  error0 = NSH_NODE_ERROR_NO_PROXY;
+		  goto trace0;
+		}
+	      nsp_nsi0 = proxy0->nsp_nsi;
+	    }
+
+	  hdr1 = vlib_buffer_get_current (b1);
+	  /* Derive the NSP/NSI lookup key for packet 1 */
+	  if (node_type == NSH_INPUT_TYPE)
+	    {
+	      nsp_nsi1 = hdr1->nsp_nsi;
+	      header_len1 = (hdr1->length & NSH_LEN_MASK) * 4;
+	      ttl1 = (hdr1->ver_o_c & NSH_TTL_H4_MASK) << 2 |
+		(hdr1->length & NSH_TTL_L2_MASK) >> 6;
+	      ttl1 = ttl1 - 1;
+	      if (PREDICT_FALSE (ttl1 == 0))
+		{
+		  error1 = NSH_NODE_ERROR_INVALID_TTL;
+		  goto trace1;
+		}
+	    }
+	  else if (node_type == NSH_CLASSIFIER_TYPE)
+	    {
+	      nsp_nsi1 =
+		clib_host_to_net_u32 (vnet_buffer (b1)->
+				      l2_classify.opaque_index);
+	    }
+	  else if (node_type == NSH_AWARE_VNF_PROXY_TYPE)
+	    {
+	      /* Push dummy Eth header */
+	      nsh_fill_dummy_eth (&dummy_eth1);
+	      vlib_buffer_advance (b1, -(word) sizeof (ethernet_header_t));
+	      hdr1 = vlib_buffer_get_current (b1);
+	      clib_memcpy (hdr1, &dummy_eth1,
+			   (word) sizeof (ethernet_header_t));
+
+	      sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
+	      nsp_nsi1 = nm->tunnel_index_by_sw_if_index[sw_if_index1];
+	    }
+	  else
+	    {
+	      memset (&key1, 0, sizeof (key1));
+	      key1.transport_type = NSH_NODE_NEXT_ENCAP_VXLAN4;
+	      key1.transport_index = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+
+	      p1 = hash_get_mem (nm->nsh_proxy_session_by_key, &key1);
+	      if (PREDICT_FALSE (p1 == 0))
+		{
+		  error1 = NSH_NODE_ERROR_NO_PROXY;
+		  goto trace1;
+		}
+
+	      proxy1 = pool_elt_at_index (nm->nsh_proxy_sessions, p1[0]);
+	      if (PREDICT_FALSE (proxy1 == 0))
+		{
+		  error1 = NSH_NODE_ERROR_NO_PROXY;
+		  goto trace1;
+		}
+	      nsp_nsi1 = proxy1->nsp_nsi;
+	    }
+
+	  /* Process packet 0 */
+	  entry0 = hash_get_mem (nm->nsh_mapping_by_key, &nsp_nsi0);
+	  if (PREDICT_FALSE (entry0 == 0))
+	    {
+	      error0 = NSH_NODE_ERROR_NO_MAPPING;
+	      goto trace0;
+	    }
+
+	  /* Entry should point to a mapping ... */
+	  map0 = pool_elt_at_index (nm->nsh_mappings, entry0[0]);
+	  if (PREDICT_FALSE (map0 == 0))
+	    {
+	      error0 = NSH_NODE_ERROR_NO_MAPPING;
+	      goto trace0;
+	    }
+
+	  /* set up things for next node to transmit ie which node to handle it and where */
+	  next0 = map0->next_node;
+	  vnet_buffer (b0)->sw_if_index[VLIB_TX] = map0->sw_if_index;
+	  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = map0->adj_index;
+
+	  if (PREDICT_FALSE (map0->nsh_action == NSH_ACTION_POP))
+	    {
+	      /* Manipulate MD2 */
+	      if (PREDICT_FALSE (hdr0->md_type == 2))
+		{
+		  nsh_md2_decap (b0, hdr0, &header_len0, &next0,
+				 NSH_NODE_NEXT_DROP);
+		  if (PREDICT_FALSE (next0 == NSH_NODE_NEXT_DROP))
+		    {
+		      error0 = NSH_NODE_ERROR_INVALID_OPTIONS;
+		      goto trace0;
+		    }
+		  vnet_buffer (b0)->sw_if_index[VLIB_RX] =
+		    map0->rx_sw_if_index;
+		}
+
+	      /* Pop NSH header */
+	      vlib_buffer_advance (b0, (word) header_len0);
+	      goto trace0;
+	    }
+
+	  entry0 = hash_get_mem (nm->nsh_entry_by_key, &map0->mapped_nsp_nsi);
+	  if (PREDICT_FALSE (entry0 == 0))
+	    {
+	      error0 = NSH_NODE_ERROR_NO_ENTRY;
+	      goto trace0;
+	    }
+
+	  nsh_entry0 =
+	    (nsh_entry_t *) pool_elt_at_index (nm->nsh_entries, entry0[0]);
+	  encap_hdr0 = (nsh_base_header_t *) (nsh_entry0->rewrite);
+	  /* rewrite_size should equal to (encap_hdr0->length * 4) */
+	  encap_hdr_len0 = nsh_entry0->rewrite_size;
+
+	  if (PREDICT_TRUE (map0->nsh_action == NSH_ACTION_SWAP))
+	    {
+	      /* Manipulate MD2 */
+	      if (PREDICT_FALSE (hdr0->md_type == 2))
+		{
+		  nsh_md2_swap (b0, hdr0, header_len0, nsh_entry0,
+				&next0, NSH_NODE_NEXT_DROP);
+		  if (PREDICT_FALSE (next0 == NSH_NODE_NEXT_DROP))
+		    {
+		      error0 = NSH_NODE_ERROR_INVALID_OPTIONS;
+		      goto trace0;
+		    }
+		}
+
+	      /* Pop old NSH header */
+	      vlib_buffer_advance (b0, (word) header_len0);
+
+	      /* After processing, md2's length may be varied */
+	      encap_hdr_len0 = nsh_entry0->rewrite_size;
+	      /* Push new NSH header */
+	      vlib_buffer_advance (b0, -(word) encap_hdr_len0);
+	      hdr0 = vlib_buffer_get_current (b0);
+	      clib_memcpy (hdr0, encap_hdr0, (word) encap_hdr_len0);
+
+	      goto trace0;
+	    }
+
+	  if (PREDICT_TRUE (map0->nsh_action == NSH_ACTION_PUSH))
+	    {
+	      /* After processing, md2's length may be varied */
+	      encap_hdr_len0 = nsh_entry0->rewrite_size;
+	      /* Push new NSH header */
+	      vlib_buffer_advance (b0, -(word) encap_hdr_len0);
+	      hdr0 = vlib_buffer_get_current (b0);
+	      clib_memcpy (hdr0, encap_hdr0, (word) encap_hdr_len0);
+
+	      /* Manipulate MD2 */
+	      if (PREDICT_FALSE (nsh_entry0->nsh_base.md_type == 2))
+		{
+		  nsh_md2_encap (b0, hdr0, nsh_entry0);
+		}
+
+	    }
+
+	trace0:b0->error = error0 ? node->errors[error0] : 0;
+
+	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+	    {
+	      nsh_input_trace_t *tr =
+		vlib_add_trace (vm, node, b0, sizeof (*tr));
+	      clib_memcpy (&(tr->trace_data), hdr0,
+			   ((hdr0->length & NSH_LEN_MASK) * 4));
+	    }
+
+	  /* Process packet 1 */
+	  entry1 = hash_get_mem (nm->nsh_mapping_by_key, &nsp_nsi1);
+	  if (PREDICT_FALSE (entry1 == 0))
+	    {
+	      error1 = NSH_NODE_ERROR_NO_MAPPING;
+	      goto trace1;
+	    }
+
+	  /* Entry should point to a mapping ... */
+	  map1 = pool_elt_at_index (nm->nsh_mappings, entry1[0]);
+	  if (PREDICT_FALSE (map1 == 0))
+	    {
+	      error1 = NSH_NODE_ERROR_NO_MAPPING;
+	      goto trace1;
+	    }
+
+	  /* set up things for next node to transmit ie which node to handle it and where */
+	  next1 = map1->next_node;
+	  vnet_buffer (b1)->sw_if_index[VLIB_TX] = map1->sw_if_index;
+	  vnet_buffer (b1)->ip.adj_index[VLIB_TX] = map1->adj_index;
+
+	  if (PREDICT_FALSE (map1->nsh_action == NSH_ACTION_POP))
+	    {
+	      /* Manipulate MD2 */
+	      if (PREDICT_FALSE (hdr1->md_type == 2))
+		{
+		  nsh_md2_decap (b1, hdr1, &header_len1, &next1,
+				 NSH_NODE_NEXT_DROP);
+		  if (PREDICT_FALSE (next1 == NSH_NODE_NEXT_DROP))
+		    {
+		      error1 = NSH_NODE_ERROR_INVALID_OPTIONS;
+		      goto trace1;
+		    }
+		  /* fixed: was map0->rx_sw_if_index (copy/paste bug) */
+		  vnet_buffer (b1)->sw_if_index[VLIB_RX] =
+		    map1->rx_sw_if_index;
+		}
+
+	      /* Pop NSH header */
+	      vlib_buffer_advance (b1, (word) header_len1);
+	      goto trace1;
+	    }
+
+	  entry1 = hash_get_mem (nm->nsh_entry_by_key, &map1->mapped_nsp_nsi);
+	  if (PREDICT_FALSE (entry1 == 0))
+	    {
+	      error1 = NSH_NODE_ERROR_NO_ENTRY;
+	      goto trace1;
+	    }
+
+	  nsh_entry1 =
+	    (nsh_entry_t *) pool_elt_at_index (nm->nsh_entries, entry1[0]);
+	  encap_hdr1 = (nsh_base_header_t *) (nsh_entry1->rewrite);
+	  /* rewrite_size should equal to (encap_hdr1->length * 4) */
+	  encap_hdr_len1 = nsh_entry1->rewrite_size;
+
+	  if (PREDICT_TRUE (map1->nsh_action == NSH_ACTION_SWAP))
+	    {
+	      /* Manipulate MD2 */
+	      if (PREDICT_FALSE (hdr1->md_type == 2))
+		{
+		  nsh_md2_swap (b1, hdr1, header_len1, nsh_entry1,
+				&next1, NSH_NODE_NEXT_DROP);
+		  if (PREDICT_FALSE (next1 == NSH_NODE_NEXT_DROP))
+		    {
+		      error1 = NSH_NODE_ERROR_INVALID_OPTIONS;
+		      goto trace1;
+		    }
+		}
+
+	      /* Pop old NSH header */
+	      vlib_buffer_advance (b1, (word) header_len1);
+
+	      /* After processing, md2's length may be varied */
+	      encap_hdr_len1 = nsh_entry1->rewrite_size;
+	      /* Push new NSH header */
+	      vlib_buffer_advance (b1, -(word) encap_hdr_len1);
+	      hdr1 = vlib_buffer_get_current (b1);
+	      clib_memcpy (hdr1, encap_hdr1, (word) encap_hdr_len1);
+
+	      goto trace1;
+	    }
+
+	  if (PREDICT_FALSE (map1->nsh_action == NSH_ACTION_PUSH))
+	    {
+	      /* After processing, md2's length may be varied */
+	      encap_hdr_len1 = nsh_entry1->rewrite_size;
+	      /* Push new NSH header */
+	      vlib_buffer_advance (b1, -(word) encap_hdr_len1);
+	      hdr1 = vlib_buffer_get_current (b1);
+	      clib_memcpy (hdr1, encap_hdr1, (word) encap_hdr_len1);
+
+	      /* Manipulate MD2 */
+	      if (PREDICT_FALSE (nsh_entry1->nsh_base.md_type == 2))
+		{
+		  nsh_md2_encap (b1, hdr1, nsh_entry1);
+		}
+
+	    }
+
+	trace1:b1->error = error1 ? node->errors[error1] : 0;
+
+	  if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
+	    {
+	      nsh_input_trace_t *tr =
+		vlib_add_trace (vm, node, b1, sizeof (*tr));
+	      clib_memcpy (&(tr->trace_data), hdr1,
+			   ((hdr1->length & NSH_LEN_MASK) * 4));
+	    }
+
+	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
+					   n_left_to_next, bi0, bi1, next0,
+					   next1);
+
+	}
+
+      /* Single loop: remaining packets */
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  u32 bi0 = 0;
+	  vlib_buffer_t *b0 = NULL;
+	  u32 next0 = NSH_NODE_NEXT_DROP;
+	  uword *entry0;
+	  nsh_base_header_t *hdr0 = 0;
+	  u32 header_len0 = 0;
+	  u32 nsp_nsi0;
+	  u32 ttl0;
+	  u32 error0;
+	  nsh_map_t *map0 = 0;
+	  nsh_entry_t *nsh_entry0 = 0;
+	  nsh_base_header_t *encap_hdr0 = 0;
+	  u32 encap_hdr_len0 = 0;
+	  nsh_proxy_session_by_key_t key0;
+	  uword *p0;
+	  nsh_proxy_session_t *proxy0 = 0;
+	  u32 sw_if_index0 = 0;
+	  ethernet_header_t dummy_eth0;
+
+	  bi0 = from[0];
+	  to_next[0] = bi0;
+	  from += 1;
+	  to_next += 1;
+	  n_left_from -= 1;
+	  n_left_to_next -= 1;
+	  error0 = 0;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+	  hdr0 = vlib_buffer_get_current (b0);
+
+	  if (node_type == NSH_INPUT_TYPE)
+	    {
+	      nsp_nsi0 = hdr0->nsp_nsi;
+	      header_len0 = (hdr0->length & NSH_LEN_MASK) * 4;
+	      ttl0 = (hdr0->ver_o_c & NSH_TTL_H4_MASK) << 2 |
+		(hdr0->length & NSH_TTL_L2_MASK) >> 6;
+	      ttl0 = ttl0 - 1;
+	      if (PREDICT_FALSE (ttl0 == 0))
+		{
+		  error0 = NSH_NODE_ERROR_INVALID_TTL;
+		  goto trace00;
+		}
+	    }
+	  else if (node_type == NSH_CLASSIFIER_TYPE)
+	    {
+	      nsp_nsi0 =
+		clib_host_to_net_u32 (vnet_buffer (b0)->
+				      l2_classify.opaque_index);
+	    }
+	  else if (node_type == NSH_AWARE_VNF_PROXY_TYPE)
+	    {
+	      /* Push dummy Eth header */
+	      nsh_fill_dummy_eth (&dummy_eth0);
+	      vlib_buffer_advance (b0, -(word) sizeof (ethernet_header_t));
+	      hdr0 = vlib_buffer_get_current (b0);
+	      clib_memcpy (hdr0, &dummy_eth0,
+			   (word) sizeof (ethernet_header_t));
+
+	      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+	      nsp_nsi0 = nm->tunnel_index_by_sw_if_index[sw_if_index0];
+	    }
+	  else
+	    {
+	      memset (&key0, 0, sizeof (key0));
+	      key0.transport_type = NSH_NODE_NEXT_ENCAP_VXLAN4;
+	      key0.transport_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+	      p0 = hash_get_mem (nm->nsh_proxy_session_by_key, &key0);
+	      if (PREDICT_FALSE (p0 == 0))
+		{
+		  error0 = NSH_NODE_ERROR_NO_PROXY;
+		  goto trace00;
+		}
+
+	      proxy0 = pool_elt_at_index (nm->nsh_proxy_sessions, p0[0]);
+	      if (PREDICT_FALSE (proxy0 == 0))
+		{
+		  error0 = NSH_NODE_ERROR_NO_PROXY;
+		  goto trace00;
+		}
+	      nsp_nsi0 = proxy0->nsp_nsi;
+	    }
+
+	  entry0 = hash_get_mem (nm->nsh_mapping_by_key, &nsp_nsi0);
+
+	  if (PREDICT_FALSE (entry0 == 0))
+	    {
+	      error0 = NSH_NODE_ERROR_NO_MAPPING;
+	      goto trace00;
+	    }
+
+	  /* Entry should point to a mapping ... */
+	  map0 = pool_elt_at_index (nm->nsh_mappings, entry0[0]);
+
+	  if (PREDICT_FALSE (map0 == 0))
+	    {
+	      error0 = NSH_NODE_ERROR_NO_MAPPING;
+	      goto trace00;
+	    }
+
+	  /* set up things for next node to transmit ie which node to handle it and where */
+	  next0 = map0->next_node;
+	  vnet_buffer (b0)->sw_if_index[VLIB_TX] = map0->sw_if_index;
+	  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = map0->adj_index;
+	  vnet_buffer (b0)->sw_if_index[VLIB_RX] = map0->nsh_sw_if;
+
+	  if (PREDICT_FALSE (map0->nsh_action == NSH_ACTION_POP))
+	    {
+	      /* Manipulate MD2 */
+	      if (PREDICT_FALSE (hdr0->md_type == 2))
+		{
+		  nsh_md2_decap (b0, hdr0, &header_len0, &next0,
+				 NSH_NODE_NEXT_DROP);
+		  if (PREDICT_FALSE (next0 == NSH_NODE_NEXT_DROP))
+		    {
+		      error0 = NSH_NODE_ERROR_INVALID_OPTIONS;
+		      goto trace00;
+		    }
+		  vnet_buffer (b0)->sw_if_index[VLIB_RX] =
+		    map0->rx_sw_if_index;
+		}
+
+	      /* Pop NSH header */
+	      vlib_buffer_advance (b0, (word) header_len0);
+	      goto trace00;
+	    }
+
+	  entry0 = hash_get_mem (nm->nsh_entry_by_key, &map0->mapped_nsp_nsi);
+	  if (PREDICT_FALSE (entry0 == 0))
+	    {
+	      error0 = NSH_NODE_ERROR_NO_ENTRY;
+	      goto trace00;
+	    }
+
+	  nsh_entry0 =
+	    (nsh_entry_t *) pool_elt_at_index (nm->nsh_entries, entry0[0]);
+	  encap_hdr0 = (nsh_base_header_t *) (nsh_entry0->rewrite);
+	  /* rewrite_size should equal to (encap_hdr0->length * 4) */
+	  encap_hdr_len0 = nsh_entry0->rewrite_size;
+
+	  if (PREDICT_TRUE (map0->nsh_action == NSH_ACTION_SWAP))
+	    {
+	      /* Manipulate MD2 */
+	      if (PREDICT_FALSE (hdr0->md_type == 2))
+		{
+		  nsh_md2_swap (b0, hdr0, header_len0, nsh_entry0,
+				&next0, NSH_NODE_NEXT_DROP);
+		  if (PREDICT_FALSE (next0 == NSH_NODE_NEXT_DROP))
+		    {
+		      error0 = NSH_NODE_ERROR_INVALID_OPTIONS;
+		      goto trace00;
+		    }
+		}
+
+	      /* Pop old NSH header */
+	      vlib_buffer_advance (b0, (word) header_len0);
+
+	      /* After processing, md2's length may be varied */
+	      encap_hdr_len0 = nsh_entry0->rewrite_size;
+	      /* Push new NSH header */
+	      vlib_buffer_advance (b0, -(word) encap_hdr_len0);
+	      hdr0 = vlib_buffer_get_current (b0);
+	      clib_memcpy (hdr0, encap_hdr0, (word) encap_hdr_len0);
+
+	      goto trace00;
+	    }
+
+	  if (PREDICT_TRUE (map0->nsh_action == NSH_ACTION_PUSH))
+	    {
+	      /* After processing, md2's length may be varied */
+	      encap_hdr_len0 = nsh_entry0->rewrite_size;
+	      /* Push new NSH header */
+	      vlib_buffer_advance (b0, -(word) encap_hdr_len0);
+	      hdr0 = vlib_buffer_get_current (b0);
+	      clib_memcpy (hdr0, encap_hdr0, (word) encap_hdr_len0);
+	      /* Manipulate MD2 */
+	      if (PREDICT_FALSE (nsh_entry0->nsh_base.md_type == 2))
+		{
+		  nsh_md2_encap (b0, hdr0, nsh_entry0);
+		}
+
+	    }
+
+	trace00:b0->error = error0 ? node->errors[error0] : 0;
+
+	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+	    {
+	      nsh_input_trace_t *tr =
+		vlib_add_trace (vm, node, b0, sizeof (*tr));
+	      clib_memcpy (&(tr->trace_data[0]), hdr0,
+			   ((hdr0->length & NSH_LEN_MASK) * 4));
+	    }
+
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+					   n_left_to_next, bi0, next0);
+	}
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+
+    }
+
+  return from_frame->n_vectors;
+}
+
+/**
+ * @brief Graph processing dispatch function for NSH Input
+ *
+ * Thin wrapper: runs the shared worker with NSH_INPUT_TYPE, i.e. the
+ * NSP/NSI key is read from the packet's own NSH base header.
+ *
+ * @node nsh_input
+ * @param *vm
+ * @param *node
+ * @param *from_frame
+ *
+ * @return from_frame->n_vectors
+ *
+ */
+static uword
+nsh_input (vlib_main_t * vm, vlib_node_runtime_t * node,
+	   vlib_frame_t * from_frame)
+{
+  return nsh_input_map (vm, node, from_frame, NSH_INPUT_TYPE);
+}
+
+/**
+ * @brief Graph processing dispatch function for NSH-Proxy
+ *
+ * Thin wrapper: runs the shared worker with NSH_PROXY_TYPE, i.e. the
+ * NSP/NSI key comes from the proxy-session table keyed by RX interface.
+ *
+ * @node nsh_proxy
+ * @param *vm
+ * @param *node
+ * @param *from_frame
+ *
+ * @return from_frame->n_vectors
+ *
+ */
+static uword
+nsh_proxy (vlib_main_t * vm, vlib_node_runtime_t * node,
+	   vlib_frame_t * from_frame)
+{
+  return nsh_input_map (vm, node, from_frame, NSH_PROXY_TYPE);
+}
+
+/**
+ * @brief Graph processing dispatch function for NSH Classifier
+ *
+ * Thin wrapper: runs the shared worker with NSH_CLASSIFIER_TYPE, i.e. the
+ * NSP/NSI key comes from the classifier's l2_classify.opaque_index.
+ *
+ * @node nsh_classifier
+ * @param *vm
+ * @param *node
+ * @param *from_frame
+ *
+ * @return from_frame->n_vectors
+ *
+ */
+static uword
+nsh_classifier (vlib_main_t * vm, vlib_node_runtime_t * node,
+		vlib_frame_t * from_frame)
+{
+  return nsh_input_map (vm, node, from_frame, NSH_CLASSIFIER_TYPE);
+}
+
+/**
+ * @brief Graph processing dispatch function for NSH-AWARE-VNF-PROXY
+ *
+ * Thin wrapper: runs the shared worker with NSH_AWARE_VNF_PROXY_TYPE,
+ * i.e. a dummy Ethernet header is pushed and the NSP/NSI key is looked up
+ * by the buffer's TX interface.
+ *
+ * @node nsh_aware_vnf_proxy
+ * @param *vm
+ * @param *node
+ * @param *from_frame
+ *
+ * @return from_frame->n_vectors
+ *
+ */
+static uword
+nsh_aware_vnf_proxy (vlib_main_t * vm, vlib_node_runtime_t * node,
+		     vlib_frame_t * from_frame)
+{
+  return nsh_input_map (vm, node, from_frame, NSH_AWARE_VNF_PROXY_TYPE);
+}
+
+/* Error strings indexed by nsh_input_error_t; shared by all four nodes
+ * registered below. */
+static char *nsh_node_error_strings[] = {
+#define _(sym,string) string,
+  foreach_nsh_node_error
+#undef _
+};
+
+/* The four graph nodes below share one worker (nsh_input_map), the same
+ * error-string table and the same next-node set; only the dispatch
+ * function (and hence the key-derivation mode) differs. */
+/* register nsh-input node */
+VLIB_REGISTER_NODE (nsh_input_node) =
+{
+  .function = nsh_input,.name = "nsh-input",.vector_size =
+    sizeof (u32),.format_trace = format_nsh_node_map_trace,.format_buffer =
+    format_nsh_header,.type = VLIB_NODE_TYPE_INTERNAL,.n_errors =
+    ARRAY_LEN (nsh_node_error_strings),.error_strings =
+    nsh_node_error_strings,.n_next_nodes = NSH_NODE_N_NEXT,.next_nodes =
+  {
+#define _(s,n) [NSH_NODE_NEXT_##s] = n,
+    foreach_nsh_node_next
+#undef _
+  }
+,};
+
+VLIB_NODE_FUNCTION_MULTIARCH (nsh_input_node, nsh_input);
+
+/* register nsh-proxy node */
+VLIB_REGISTER_NODE (nsh_proxy_node) =
+{
+  .function = nsh_proxy,.name = "nsh-proxy",.vector_size =
+    sizeof (u32),.format_trace = format_nsh_node_map_trace,.format_buffer =
+    format_nsh_header,.type = VLIB_NODE_TYPE_INTERNAL,.n_errors =
+    ARRAY_LEN (nsh_node_error_strings),.error_strings =
+    nsh_node_error_strings,.n_next_nodes = NSH_NODE_N_NEXT,.next_nodes =
+  {
+#define _(s,n) [NSH_NODE_NEXT_##s] = n,
+    foreach_nsh_node_next
+#undef _
+  }
+,};
+
+VLIB_NODE_FUNCTION_MULTIARCH (nsh_proxy_node, nsh_proxy);
+
+/* register nsh-classifier node */
+VLIB_REGISTER_NODE (nsh_classifier_node) =
+{
+  .function = nsh_classifier,.name = "nsh-classifier",.vector_size =
+    sizeof (u32),.format_trace = format_nsh_node_map_trace,.format_buffer =
+    format_nsh_header,.type = VLIB_NODE_TYPE_INTERNAL,.n_errors =
+    ARRAY_LEN (nsh_node_error_strings),.error_strings =
+    nsh_node_error_strings,.n_next_nodes = NSH_NODE_N_NEXT,.next_nodes =
+  {
+#define _(s,n) [NSH_NODE_NEXT_##s] = n,
+    foreach_nsh_node_next
+#undef _
+  }
+,};
+
+VLIB_NODE_FUNCTION_MULTIARCH (nsh_classifier_node, nsh_classifier);
+
+/* register nsh-aware-vnf-proxy node */
+VLIB_REGISTER_NODE (nsh_aware_vnf_proxy_node) =
+{
+  .function = nsh_aware_vnf_proxy,.name = "nsh-aware-vnf-proxy",.vector_size =
+    sizeof (u32),.format_trace = format_nsh_node_map_trace,.format_buffer =
+    format_nsh_header,.type = VLIB_NODE_TYPE_INTERNAL,.n_errors =
+    ARRAY_LEN (nsh_node_error_strings),.error_strings =
+    nsh_node_error_strings,.n_next_nodes = NSH_NODE_N_NEXT,.next_nodes =
+  {
+#define _(s,n) [NSH_NODE_NEXT_##s] = n,
+    foreach_nsh_node_next
+#undef _
+  }
+,};
+
+VLIB_NODE_FUNCTION_MULTIARCH (nsh_aware_vnf_proxy_node, nsh_aware_vnf_proxy);
+
+/* Install the next-node override used by the NSH-MD2 iOAM export feature;
+ * nsh_md2_decap() redirects decapped packets there when it is non-zero. */
+void
+nsh_md2_set_next_ioam_export_override (uword next)
+{
+  nsh_main_t *nm = &nsh_main;
+
+  nm->decap_v4_next_override = next;
+}
+
+
+/* Plugin init: create the lookup tables, register the binary API messages,
+ * and wire this plugin's nodes as next-nodes of every tunnel/classify
+ * input node that can deliver NSH traffic. */
+clib_error_t *
+nsh_init (vlib_main_t * vm)
+{
+  nsh_main_t *nm = &nsh_main;
+  clib_error_t *error = 0;
+  u8 *name;
+  uword next_node;
+
+  /* Init the main structures from VPP */
+  nm->vlib_main = vm;
+  nm->vnet_main = vnet_get_main ();
+
+  /* Various state maintenance mappings */
+  nm->nsh_mapping_by_key = hash_create_mem (0, sizeof (u32), sizeof (uword));
+
+  nm->nsh_mapping_by_mapped_key
+    = hash_create_mem (0, sizeof (u32), sizeof (uword));
+
+  nm->nsh_entry_by_key = hash_create_mem (0, sizeof (u32), sizeof (uword));
+
+  nm->nsh_proxy_session_by_key
+    =
+    hash_create_mem (0, sizeof (nsh_proxy_session_by_key_t), sizeof (uword));
+
+  nm->nsh_option_map_by_key
+    = hash_create_mem (0, sizeof (nsh_option_map_by_key_t), sizeof (uword));
+
+  /* version-stamped base name used to claim a message-id range */
+  name = format (0, "nsh_%08x%c", api_version, 0);
+
+  /* Set up the API */
+  nm->msg_id_base = vl_msg_api_get_msg_ids
+    ((char *) name, VL_MSG_FIRST_AVAILABLE);
+
+  error = nsh_plugin_api_hookup (vm);
+
+  /* Add our API messages to the global name_crc hash table */
+  setup_message_id_table (nm, &api_main);
+
+  /* Add dispositions to nodes that feed nsh-input */
+  //alagalah - validate we don't really need to use the node value
+  next_node =
+    vlib_node_add_next (vm, vxlan4_gpe_input_node.index,
+			nsh_input_node.index);
+  vlib_node_add_next (vm, vxlan4_gpe_input_node.index, nsh_proxy_node.index);
+  vlib_node_add_next (vm, vxlan4_gpe_input_node.index,
+		      nsh_aware_vnf_proxy_node.index);
+  /* NOTE(review): only the vxlan4 next-node index is registered with the
+   * GPE decap protocol handler; the vxlan6/gre next indices are added but
+   * their return values are discarded -- confirm intended. */
+  vxlan_gpe_register_decap_protocol (VXLAN_GPE_PROTOCOL_NSH, next_node);
+
+  vlib_node_add_next (vm, vxlan6_gpe_input_node.index, nsh_input_node.index);
+  vlib_node_add_next (vm, vxlan6_gpe_input_node.index, nsh_proxy_node.index);
+  vlib_node_add_next (vm, vxlan6_gpe_input_node.index,
+		      nsh_aware_vnf_proxy_node.index);
+
+  vlib_node_add_next (vm, gre4_input_node.index, nsh_input_node.index);
+  vlib_node_add_next (vm, gre4_input_node.index, nsh_proxy_node.index);
+  vlib_node_add_next (vm, gre4_input_node.index,
+		      nsh_aware_vnf_proxy_node.index);
+
+  vlib_node_add_next (vm, gre6_input_node.index, nsh_input_node.index);
+  vlib_node_add_next (vm, gre6_input_node.index, nsh_proxy_node.index);
+  vlib_node_add_next (vm, gre6_input_node.index,
+		      nsh_aware_vnf_proxy_node.index);
+
+  /* Add NSH-Proxy support */
+  vlib_node_add_next (vm, vxlan4_input_node.index, nsh_proxy_node.index);
+  vlib_node_add_next (vm, vxlan6_input_node.index, nsh_proxy_node.index);
+
+  /* Add NSH-Classifier support */
+  vlib_node_add_next (vm, ip4_classify_node.index, nsh_classifier_node.index);
+  vlib_node_add_next (vm, ip6_classify_node.index, nsh_classifier_node.index);
+  vlib_node_add_next (vm, l2_input_classify_node.index,
+		      nsh_classifier_node.index);
+
+  /* Add Ethernet+NSH support */
+  ethernet_register_input_type (vm, ETHERNET_TYPE_NSH, nsh_input_node.index);
+
+  vec_free (name);
+
+  return error;
+}
+
+VLIB_INIT_FUNCTION (nsh_init);
+
+/* *INDENT-OFF* */
+VLIB_PLUGIN_REGISTER () = {
+ .version = VPP_BUILD_VER,
+ .description = "Network Service Header",
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/nsh/nsh.h b/src/plugins/nsh/nsh.h
new file mode 100644
index 00000000000..1b14567078b
--- /dev/null
+++ b/src/plugins/nsh/nsh.h
@@ -0,0 +1,271 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef included_nsh_h
+#define included_nsh_h
+
+#include <vnet/vnet.h>
+#include <nsh/nsh_packet.h>
+#include <vnet/ip/ip4_packet.h>
+
+/* Hash key identifying an MD-type-2 option: TLV class + type. */
+typedef struct {
+  u16 class;
+  u8 type;
+  u8 pad;
+} nsh_option_map_t_by_key_compat_comment; /* NOTE(review): keep name below */
+typedef struct {
+  u16 class;
+  u8 type;
+  u8 pad;
+} nsh_option_map_by_key_t;
+
+/* Value stored for a registered option: index into the per-option
+ * handler arrays in nsh_main_t. */
+typedef struct {
+  u32 option_id;
+} nsh_option_map_t;
+
+#define MAX_METADATA_LEN 62
+/** Note:
+ * rewrite and rewrite_size used to support varied nsh header
+ */
+typedef struct {
+  /* Required for pool_get_aligned */
+  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+
+  nsh_base_header_t nsh_base;
+  union {
+    nsh_md1_data_t md1_data;
+    nsh_md2_data_t md2_data;
+  } md;
+  u8 tlvs_len;    /* configured md2 metadata's length, unit: byte */
+  u8 * tlvs_data; /* configured md2 metadata, network order */
+
+  /** Rewrite string. network order
+   * contains base header and metadata */
+  u8 * rewrite;
+  u8 rewrite_size; /* unit: byte */
+} nsh_entry_t;
+
+/* API/CLI argument bundle for adding or deleting an nsh_entry_t. */
+typedef struct {
+  u8 is_add;
+  nsh_entry_t nsh_entry;
+} nsh_add_del_entry_args_t;
+
+typedef struct {
+  /* Required for pool_get_aligned */
+  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+
+  /** Key for nsh_header_t entry: 24bit NSP 8bit NSI */
+  u32 nsp_nsi;
+  /** Key for nsh_header_t entry to map to. : 24bit NSP 8bit NSI
+   * This may be ~0 if next action is to decap to NSH next protocol
+   * Note the following heuristic:
+   * if nsp_nsi == mapped_nsp_nsi then use-case is like SFC SFF
+   * if nsp_nsi != mapped_nsp_nsi then use-case is like SFC SF
+   * Note: these are heuristics. Rules about NSI decrement are out of scope
+   */
+  u32 mapped_nsp_nsi;
+  /* NSH Header action: swap, push and pop (nsh_action_type) */
+  u32 nsh_action;
+
+  /** vnet intfc hw_if_index */
+  u32 nsh_hw_if;
+  /* vnet intfc sw_if_index */
+  u32 nsh_sw_if;
+
+  /* encap if index */
+  u32 sw_if_index;
+  u32 rx_sw_if_index;
+  /* graph next-node (nsh_node_next_t) to dispatch matched packets to */
+  u32 next_node;
+  u32 adj_index;
+} nsh_map_t;
+
+/* API/CLI argument bundle for adding or deleting an nsh_map_t. */
+typedef struct {
+  u8 is_add;
+  nsh_map_t map;
+} nsh_add_del_map_args_t;
+
+/* Hash key for NSH-Proxy sessions: transport kind + interface. */
+typedef struct {
+  u32 transport_type;  /* 1:vxlan; */
+  u32 transport_index; /* transport's sw_if_index */
+} nsh_proxy_session_by_key_t;
+
+typedef struct {
+  /* 24bit NSP 8bit NSI */
+  u32 nsp_nsi;
+} nsh_proxy_session_t;
+
+#define MAX_MD2_OPTIONS 256
+
+/* Per-plugin global state, shared by all nodes and the API handlers. */
+typedef struct {
+  /* API message ID base */
+  u16 msg_id_base;
+
+  /* vector of nsh_header entry instances */
+  nsh_entry_t *nsh_entries;
+
+  /* hash lookup nsh header by key: {u32: nsp_nsi} */
+  uword * nsh_entry_by_key;
+
+  /* vector of nsh_mappings */
+  nsh_map_t *nsh_mappings;
+
+  /* hash lookup nsh mapping by key: {u32: nsp_nsi} */
+  uword * nsh_mapping_by_key;
+  uword * nsh_mapping_by_mapped_key; // for use in NSHSFC
+
+  /* vector of nsh_proxy */
+  nsh_proxy_session_t *nsh_proxy_sessions;
+
+  /* hash lookup nsh_proxy by key */
+  uword * nsh_proxy_session_by_key;
+
+  /** Free vlib hw_if_indices */
+  u32 * free_nsh_tunnel_hw_if_indices;
+  /** Mapping from sw_if_index to tunnel index */
+  u32 * tunnel_index_by_sw_if_index;
+
+  /* vector of nsh_option_map */
+  nsh_option_map_t * nsh_option_mappings;
+  /* hash lookup nsh_option_map by key */
+  uword * nsh_option_map_by_key;
+
+  /* Array of function pointers to process MD-Type 2 handling routines */
+  /*
+   * For API or CLI configuration and construct the rewrite buffer, invokes add_options() function.
+   * In the encap node, i.e. when performing PUSH nsh header, invokes options() function.
+   * In the swap node, i.e. when performing SWAP nsh header, invokes swap_options() function.
+   * In the decap node, i.e. when performing POP nsh header, invokes pop_options() function.
+   */
+  u8 options_size[MAX_MD2_OPTIONS]; /* sum of header and metadata */
+  int (*add_options[MAX_MD2_OPTIONS]) (u8 * opt,
+                                       u8 * opt_size);
+  int (*options[MAX_MD2_OPTIONS]) (vlib_buffer_t * b,
+                                   nsh_tlv_header_t * opt);
+  int (*swap_options[MAX_MD2_OPTIONS]) (vlib_buffer_t * b,
+                                        nsh_tlv_header_t * old_opt,
+                                        nsh_tlv_header_t * new_opt);
+  int (*pop_options[MAX_MD2_OPTIONS]) (vlib_buffer_t * b,
+                                       nsh_tlv_header_t * opt);
+  u8 *(*trace[MAX_MD2_OPTIONS]) (u8 * s, nsh_tlv_header_t * opt);
+  /* when non-zero, nsh_md2_decap() redirects decapped packets here */
+  uword decap_v4_next_override;
+
+  /* Feature arc indices */
+  u8 input_feature_arc_index;
+  u8 output_feature_arc_index;
+
+  /* convenience */
+  vlib_main_t * vlib_main;
+  vnet_main_t * vnet_main;
+} nsh_main_t;
+
+/* NOTE(review): this is a (tentative) definition, not an 'extern'
+ * declaration, so every translation unit including nsh.h emits a
+ * definition of nsh_main and relies on common-symbol merging.  Consider
+ * 'extern' here plus a single definition in nsh.c -- confirm against the
+ * build before changing. */
+nsh_main_t nsh_main;
+
+extern vlib_node_registration_t nsh_aware_vnf_proxy_node;
+extern vlib_node_registration_t nsh_eth_output_node;
+
+/* Per-packet trace record: raw copy of the NSH header (length taken from
+ * the base header's length field, capped by this buffer). */
+typedef struct {
+  u8 trace_data[256];
+} nsh_input_trace_t;
+
+u8 * format_nsh_input_map_trace (u8 * s, va_list * args);
+u8 * format_nsh_header_with_length (u8 * s, va_list * args);
+
+/* Helper macros used in nsh.c and nsh_test.c */
+#define foreach_copy_nsh_base_hdr_field \
+_(ver_o_c) \
+_(length) \
+_(md_type) \
+_(next_protocol) \
+_(nsp_nsi)
+
+/* Statistics (not really errors) */
+#define foreach_nsh_node_error \
+_(MAPPED, "NSH header found and mapped") \
+_(NO_MAPPING, "no mapping for nsh key") \
+_(NO_ENTRY, "no entry for nsh key") \
+_(NO_PROXY, "no proxy for transport key") \
+_(INVALID_NEXT_PROTOCOL, "invalid next protocol") \
+_(INVALID_OPTIONS, "invalid md2 options") \
+_(INVALID_TTL, "ttl equals zero") \
+
+typedef enum {
+#define _(sym,str) NSH_NODE_ERROR_##sym,
+  foreach_nsh_node_error
+#undef _
+  NSH_NODE_N_ERROR,
+
+} nsh_input_error_t;
+
+#define foreach_nsh_node_next \
+  _(DROP, "error-drop") \
+  _(ENCAP_GRE4, "gre4-input" ) \
+  _(ENCAP_GRE6, "gre6-input" ) \
+  _(ENCAP_VXLANGPE, "vxlan-gpe-encap" ) \
+  _(ENCAP_VXLAN4, "vxlan4-encap" ) \
+  _(ENCAP_VXLAN6, "vxlan6-encap" ) \
+  _(DECAP_ETH_INPUT, "ethernet-input" ) \
+  _(ENCAP_LISP_GPE, "interface-output" ) \
+  _(ENCAP_ETHERNET, "nsh-eth-output") \
+/* _(DECAP_IP4_INPUT, "ip4-input") \ */
+/* _(DECAP_IP6_INPUT, "ip6-input" ) \ */
+
+typedef enum {
+#define _(s,n) NSH_NODE_NEXT_##s,
+  foreach_nsh_node_next
+#undef _
+  NSH_NODE_N_NEXT,
+} nsh_node_next_t;
+
+/* Action applied to a matched packet's NSH header (nsh_map_t.nsh_action) */
+typedef enum {
+  NSH_ACTION_SWAP,
+  NSH_ACTION_PUSH,
+  NSH_ACTION_POP,
+} nsh_action_type;
+
+/* Key-derivation mode passed to nsh_input_map() by the dispatch nodes */
+typedef enum {
+  NSH_INPUT_TYPE,
+  NSH_PROXY_TYPE,
+  NSH_CLASSIFIER_TYPE,
+  NSH_AWARE_VNF_PROXY_TYPE,
+} nsh_entity_type;
+
+/* NOTE(review): shadows vnet's interface-flag namespace with value 0 --
+ * confirm this local redefinition is intentional. */
+#define VNET_SW_INTERFACE_FLAG_ADMIN_DOWN 0
+
+/* md2 class and type definition */
+#define NSH_MD2_IOAM_CLASS 0x9
+#define NSH_MD2_IOAM_OPTION_TYPE_TRACE 0x3B
+#define NSH_MD2_IOAM_OPTION_TYPE_PROOF_OF_TRANSIT 0x3C
+
+#define NSH_MD2_IOAM_TRACE_DUMMY_LEN 0x8
+
+#define MAX_NSH_HEADER_LEN 256
+#define MAX_NSH_OPTION_LEN 128
+
+/* Register the per-{class,type} MD2 handler set (see the function-pointer
+ * arrays in nsh_main_t for when each callback is invoked). */
+int
+nsh_md2_register_option (u16 class,
+                         u8 type,
+                         u8 option_size,
+                         int add_options (u8 * opt,
+                                          u8 * opt_size),
+                         int options(vlib_buffer_t * b,
+                                     nsh_tlv_header_t * opt),
+                         int swap_options (vlib_buffer_t * b,
+                                           nsh_tlv_header_t * old_opt,
+                                           nsh_tlv_header_t * new_opt),
+                         int pop_options (vlib_buffer_t * b,
+                                          nsh_tlv_header_t * opt),
+                         u8 * trace (u8 * s,
+                                     nsh_tlv_header_t * opt));
+
+/* Minimal stand-in used where only the output feature arc is needed. */
+typedef struct _nsh_main_dummy
+{
+  u8 output_feature_arc_index;
+} nsh_main_dummy_t;
+
+#endif /* included_nsh_h */
diff --git a/src/plugins/nsh/nsh_error.def b/src/plugins/nsh/nsh_error.def
new file mode 100644
index 00000000000..c54e3b895c8
--- /dev/null
+++ b/src/plugins/nsh/nsh_error.def
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+nsh_input_error (DECAPSULATED, "good packets decapsulated")
+nsh_input_error (NO_MAPPING, "no mapping for nsh key")
+nsh_input_error (INVALID_NEXT_PROTOCOL, "invalid next protocol") \ No newline at end of file
diff --git a/src/plugins/nsh/nsh_output.c b/src/plugins/nsh/nsh_output.c
new file mode 100644
index 00000000000..f0ac673cf61
--- /dev/null
+++ b/src/plugins/nsh/nsh_output.c
@@ -0,0 +1,520 @@
+/*
+ * nsh_output.c: NSH Adj rewrite
+ *
+ * Copyright (c) 2017-2019 Intel and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ip/ip.h>
+#include <nsh/nsh.h>
+
+/* Per-packet trace record for the nsh-eth-output / nsh-midchain nodes. */
+typedef struct {
+ /* Adjacency taken. */
+ u32 adj_index;
+ u32 flow_hash;
+
+ /* Packet data, possibly *after* rewrite. */
+ /* NOTE(review): the struct holds TWO u32 fields but only subtracts
+ * 1*sizeof(u32) from 64 here, and the trace handlers below never copy
+ * packet bytes into this field — confirm whether packet_data is dead. */
+ u8 packet_data[64 - 1*sizeof(u32)];
+} nsh_output_trace_t;
+
+/* Next nodes for NSH output: drop on error, otherwise hand the rewritten
+ * frame to interface-output. */
+#define foreach_nsh_output_next \
+_(DROP, "error-drop") \
+_(INTERFACE, "interface-output" )
+
+typedef enum {
+#define _(s,n) NSH_OUTPUT_NEXT_##s,
+ foreach_nsh_output_next
+#undef _
+ NSH_OUTPUT_N_NEXT,
+} nsh_output_next_t;
+
+/* Trace formatter for nsh-eth-output/nsh-midchain: prints the adjacency
+ * taken, the flow hash, and the (supposedly rewritten) packet data. */
+static u8 *
+format_nsh_output_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ nsh_output_trace_t * t = va_arg (*args, nsh_output_trace_t *);
+ uword indent = format_get_indent (s);
+
+ s = format (s, "adj-idx %d : %U flow hash: 0x%08x",
+ t->adj_index,
+ format_ip_adjacency, t->adj_index, FORMAT_IP_ADJACENCY_NONE,
+ t->flow_hash);
+ s = format (s, "\n%U%U",
+ format_white_space, indent,
+ format_ip_adjacency_packet_data,
+ t->adj_index, t->packet_data, sizeof (t->packet_data));
+ return s;
+}
+
+/* Shared worker for nsh-eth-output and nsh-midchain: applies the
+ * adjacency's Ethernet rewrite in front of the NSH header, forces the
+ * ethertype to NSH, bumps adjacency counters, enforces the adjacency
+ * MTU, optionally runs the midchain fixup, and enqueues to
+ * interface-output (or drop on MTU failure).  Dual-loop with a
+ * single-packet cleanup loop. */
+static inline uword
+nsh_output_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame,
+ int is_midchain)
+{
+ u32 n_left_from, next_index, * from, * to_next, thread_index;
+ vlib_node_runtime_t * error_node;
+ u32 n_left_to_next;
+ nsh_main_t *nm;
+
+ thread_index = vlib_get_thread_index();
+ /* NOTE(review): errors[] below is indexed with IP4_ERROR_* values but
+ * nsh_eth_output_node registers no error strings — confirm the intended
+ * error node (ip4-input?) for correct counter attribution. */
+ error_node = vlib_node_get_runtime (vm, nsh_eth_output_node.index);
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+ next_index = node->cached_next_index;
+ nm = &nsh_main;
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ ip_adjacency_t * adj0;
+ nsh_base_header_t *hdr0;
+ ethernet_header_t * eth_hdr0;
+ vlib_buffer_t * p0;
+ u32 pi0, rw_len0, adj_index0, next0, error0;
+
+ ip_adjacency_t * adj1;
+ nsh_base_header_t *hdr1;
+ ethernet_header_t * eth_hdr1;
+ vlib_buffer_t * p1;
+ u32 pi1, rw_len1, adj_index1, next1, error1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, * p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, STORE);
+ vlib_prefetch_buffer_header (p3, STORE);
+
+ CLIB_PREFETCH (p2->data, sizeof (hdr0[0]), STORE);
+ CLIB_PREFETCH (p3->data, sizeof (hdr1[0]), STORE);
+ }
+
+ pi0 = to_next[0] = from[0];
+ pi1 = to_next[1] = from[1];
+
+ from += 2;
+ n_left_from -= 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ p1 = vlib_get_buffer (vm, pi1);
+
+ adj_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
+ adj_index1 = vnet_buffer (p1)->ip.adj_index[VLIB_TX];
+
+ adj0 = adj_get(adj_index0);
+ adj1 = adj_get(adj_index1);
+ hdr0 = vlib_buffer_get_current (p0);
+ hdr1 = vlib_buffer_get_current (p1);
+
+ /* Guess we are only writing on simple Ethernet header. */
+ vnet_rewrite_two_headers (adj0[0], adj1[0], hdr0, hdr1,
+ sizeof (ethernet_header_t));
+
+ /* The adjacency rewrite wrote a generic L2 header in front of the
+ * NSH base header; patch its ethertype to NSH. */
+ eth_hdr0 = (ethernet_header_t*)((u8 *)hdr0-sizeof(ethernet_header_t));
+ eth_hdr0->type = clib_host_to_net_u16(ETHERNET_TYPE_NSH);
+ eth_hdr1 = (ethernet_header_t*)((u8 *)hdr1-sizeof(ethernet_header_t));
+ eth_hdr1->type = clib_host_to_net_u16(ETHERNET_TYPE_NSH);
+
+ /* Update packet buffer attributes/set output interface. */
+ rw_len0 = adj0[0].rewrite_header.data_bytes;
+ rw_len1 = adj1[0].rewrite_header.data_bytes;
+
+ /* Bump the adj counters for packet and bytes */
+ vlib_increment_combined_counter
+ (&adjacency_counters,
+ thread_index,
+ adj_index0,
+ 1,
+ vlib_buffer_length_in_chain (vm, p0) + rw_len0);
+ vlib_increment_combined_counter
+ (&adjacency_counters,
+ thread_index,
+ adj_index1,
+ 1,
+ vlib_buffer_length_in_chain (vm, p1) + rw_len1);
+
+ /* Check MTU of outgoing interface. */
+ if (PREDICT_TRUE(vlib_buffer_length_in_chain (vm, p0) <=
+ adj0[0].rewrite_header.max_l3_packet_bytes))
+ {
+ /* Expose the rewrite bytes written above. */
+ p0->current_data -= rw_len0;
+ p0->current_length += rw_len0;
+
+ vnet_buffer (p0)->sw_if_index[VLIB_TX] =
+ adj0[0].rewrite_header.sw_if_index;
+ next0 = NSH_OUTPUT_NEXT_INTERFACE;
+ error0 = IP4_ERROR_NONE;
+
+ if (PREDICT_FALSE(adj0[0].rewrite_header.flags & VNET_REWRITE_HAS_FEATURES))
+ vnet_feature_arc_start (nm->output_feature_arc_index,
+ adj0[0].rewrite_header.sw_if_index,
+ &next0, p0);
+ }
+ else
+ {
+ error0 = IP4_ERROR_MTU_EXCEEDED;
+ next0 = NSH_OUTPUT_NEXT_DROP;
+ }
+ if (PREDICT_TRUE(vlib_buffer_length_in_chain (vm, p1) <=
+ adj1[0].rewrite_header.max_l3_packet_bytes))
+ {
+ p1->current_data -= rw_len1;
+ p1->current_length += rw_len1;
+
+ vnet_buffer (p1)->sw_if_index[VLIB_TX] =
+ adj1[0].rewrite_header.sw_if_index;
+ next1 = NSH_OUTPUT_NEXT_INTERFACE;
+ error1 = IP4_ERROR_NONE;
+
+ if (PREDICT_FALSE(adj1[0].rewrite_header.flags & VNET_REWRITE_HAS_FEATURES))
+ vnet_feature_arc_start (nm->output_feature_arc_index,
+ adj1[0].rewrite_header.sw_if_index,
+ &next1, p1);
+ }
+ else
+ {
+ error1 = IP4_ERROR_MTU_EXCEEDED;
+ next1 = NSH_OUTPUT_NEXT_DROP;
+ }
+ /* Midchain adjacencies carry a per-adj fixup (e.g. tunnel header
+ * finalization); run it after the rewrite. */
+ if (is_midchain)
+ {
+ adj0->sub_type.midchain.fixup_func
+ (vm, adj0, p0, adj0->sub_type.midchain.fixup_data);
+ adj1->sub_type.midchain.fixup_func
+ (vm, adj1, p1, adj1->sub_type.midchain.fixup_data);
+ }
+
+ p0->error = error_node->errors[error0];
+ p1->error = error_node->errors[error1];
+
+ if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ nsh_output_trace_t *tr = vlib_add_trace (vm, node,
+ p0, sizeof (*tr));
+ tr->adj_index = vnet_buffer(p0)->ip.adj_index[VLIB_TX];
+ tr->flow_hash = vnet_buffer(p0)->ip.flow_hash;
+ }
+ if (PREDICT_FALSE(p1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ nsh_output_trace_t *tr = vlib_add_trace (vm, node,
+ p1, sizeof (*tr));
+ tr->adj_index = vnet_buffer(p1)->ip.adj_index[VLIB_TX];
+ tr->flow_hash = vnet_buffer(p1)->ip.flow_hash;
+ }
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ pi0, pi1, next0, next1);
+ }
+
+ /* Single-packet cleanup loop; same logic as above for one buffer. */
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ ip_adjacency_t * adj0;
+ nsh_base_header_t *hdr0;
+ ethernet_header_t * eth_hdr0;
+ vlib_buffer_t * p0;
+ u32 pi0, rw_len0, adj_index0, next0, error0;
+
+ pi0 = to_next[0] = from[0];
+
+ p0 = vlib_get_buffer (vm, pi0);
+
+ adj_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
+
+ adj0 = adj_get(adj_index0);
+ hdr0 = vlib_buffer_get_current (p0);
+
+ /* Guess we are only writing on simple Ethernet header. */
+ vnet_rewrite_one_header (adj0[0], hdr0,
+ sizeof (ethernet_header_t));
+
+ eth_hdr0 = (ethernet_header_t*)((u8 *)hdr0-sizeof(ethernet_header_t));
+ eth_hdr0->type = clib_host_to_net_u16(ETHERNET_TYPE_NSH);
+
+ /* Update packet buffer attributes/set output interface. */
+ rw_len0 = adj0[0].rewrite_header.data_bytes;
+
+ vlib_increment_combined_counter
+ (&adjacency_counters,
+ thread_index,
+ adj_index0,
+ 1,
+ vlib_buffer_length_in_chain (vm, p0) + rw_len0);
+
+ /* Check MTU of outgoing interface. */
+ if (PREDICT_TRUE(vlib_buffer_length_in_chain (vm, p0) <=
+ adj0[0].rewrite_header.max_l3_packet_bytes))
+ {
+ p0->current_data -= rw_len0;
+ p0->current_length += rw_len0;
+
+ vnet_buffer (p0)->sw_if_index[VLIB_TX] =
+ adj0[0].rewrite_header.sw_if_index;
+ next0 = NSH_OUTPUT_NEXT_INTERFACE;
+ error0 = IP4_ERROR_NONE;
+
+ if (PREDICT_FALSE(adj0[0].rewrite_header.flags & VNET_REWRITE_HAS_FEATURES))
+ vnet_feature_arc_start (nm->output_feature_arc_index,
+ adj0[0].rewrite_header.sw_if_index,
+ &next0, p0);
+ }
+ else
+ {
+ error0 = IP4_ERROR_MTU_EXCEEDED;
+ next0 = NSH_OUTPUT_NEXT_DROP;
+ }
+ if (is_midchain)
+ {
+ adj0->sub_type.midchain.fixup_func
+ (vm, adj0, p0, adj0->sub_type.midchain.fixup_data);
+ }
+
+ p0->error = error_node->errors[error0];
+
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ nsh_output_trace_t *tr = vlib_add_trace (vm, node,
+ p0, sizeof (*tr));
+ tr->adj_index = vnet_buffer(p0)->ip.adj_index[VLIB_TX];
+ tr->flow_hash = vnet_buffer(p0)->ip.flow_hash;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ pi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return from_frame->n_vectors;
+}
+
+/* nsh-midchain has a single registered next (drop); normal forwarding
+ * goes through the feature arc / interface-output path instead. */
+typedef enum nsh_midchain_next_t_
+{
+ NSH_MIDCHAIN_NEXT_DROP,
+} nsh_midchain_next_t;
+
+/* Node function: non-midchain variant of nsh_output_inline. */
+static inline uword
+nsh_eth_output (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return (nsh_output_inline(vm, node, from_frame, /* is_midchain */ 0));
+}
+
+VLIB_REGISTER_NODE (nsh_eth_output_node) = {
+ .function = nsh_eth_output,
+ .name = "nsh-eth-output",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+ .n_next_nodes = NSH_OUTPUT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [NSH_OUTPUT_NEXT_##s] = n,
+ foreach_nsh_output_next
+#undef _
+ },
+
+ .format_trace = format_nsh_output_trace,
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (nsh_eth_output_node, nsh_eth_output)
+
+/* Node function: midchain variant of nsh_output_inline (runs the
+ * adjacency's midchain fixup after the rewrite). */
+static inline uword
+nsh_midchain (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return (nsh_output_inline(vm, node, from_frame, /* is_midchain */ 1));
+}
+
+VLIB_REGISTER_NODE (nsh_midchain_node) = {
+ .function = nsh_midchain,
+ .name = "nsh-midchain",
+ .vector_size = sizeof (u32),
+ .format_trace = format_nsh_output_trace,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [NSH_MIDCHAIN_NEXT_DROP] = "error-drop",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (nsh_midchain_node, nsh_midchain)
+
+/* Built-in nsh tx feature path definition */
+VNET_FEATURE_INIT (nsh_interface_output, static) = {
+ .arc_name = "nsh-eth-output",
+ .node_name = "interface-output",
+ .runs_before = 0, /* not before any other features */
+};
+
+/* Built-in ip4 tx feature path definition */
+/* *INDENT-OFF* */
+/* Feature arc starting at nsh-midchain; output_feature_arc_index in
+ * nsh_main refers to this arc. */
+VNET_FEATURE_ARC_INIT (nsh_eth_output, static) =
+{
+ .arc_name = "nsh-eth-output",
+ .start_nodes = VNET_FEATURES ("nsh-midchain"),
+};
+
+VNET_FEATURE_INIT (nsh_eth_tx_drop, static) =
+{
+ .arc_name = "nsh-eth-output",
+ .node_name = "error-drop",
+ .runs_before = 0, /* not before any other features */
+};
+/* *INDENT-ON* */
+/**
+ * @brief Next index values from the NSH incomplete adj node
+ */
+#define foreach_nsh_adj_incomplete_next \
+_(DROP, "error-drop") \
+_(IP4, "ip4-arp") \
+_(IP6, "ip6-discover-neighbor")
+
+/* Next indices for nsh-adj-incomplete: resolve via ARP (v4) or ND (v6). */
+typedef enum {
+#define _(s,n) NSH_ADJ_INCOMPLETE_NEXT_##s,
+ foreach_nsh_adj_incomplete_next
+#undef _
+ NSH_ADJ_INCOMPLETE_N_NEXT,
+} nsh_adj_incomplete_next_t;
+
+/**
+ * @brief A struct to hold tracing information for the NSH label imposition
+ * node.
+ */
+typedef struct nsh_adj_incomplete_trace_t_
+{
+ u32 next; /* chosen next index (IP4/IP6 resolution path) */
+} nsh_adj_incomplete_trace_t;
+
+
+/**
+ * @brief Graph node for incomplete NSH adjacency.
+ * This node will push traffic to either the v4-arp or v6-nd node
+ * based on the next-hop proto of the adj.
+ * We pay a cost for this 'routing' node, but an incomplete adj is the
+ * exception case.
+ */
+static inline uword
+nsh_adj_incomplete (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ u32 n_left_from, next_index, * from, * to_next;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 pi0, next0, adj_index0;
+ ip_adjacency_t * adj0;
+ vlib_buffer_t * p0;
+
+ pi0 = to_next[0] = from[0];
+ p0 = vlib_get_buffer (vm, pi0);
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ adj_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
+
+ adj0 = adj_get(adj_index0);
+
+ /* Route by the adjacency's next-hop protocol: v4 -> ARP,
+ * anything else -> v6 neighbor discovery. */
+ if (PREDICT_TRUE(FIB_PROTOCOL_IP4 == adj0->ia_nh_proto))
+ {
+ next0 = NSH_ADJ_INCOMPLETE_NEXT_IP4;
+ }
+ else
+ {
+ next0 = NSH_ADJ_INCOMPLETE_NEXT_IP6;
+ }
+
+ if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ nsh_adj_incomplete_trace_t *tr =
+ vlib_add_trace (vm, node, p0, sizeof (*tr));
+ tr->next = next0;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ pi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return from_frame->n_vectors;
+}
+
+/* Trace formatter for nsh-adj-incomplete: prints the chosen next index. */
+static u8 *
+format_nsh_adj_incomplete_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ nsh_adj_incomplete_trace_t * t;
+ uword indent;
+
+ t = va_arg (*args, nsh_adj_incomplete_trace_t *);
+ indent = format_get_indent (s);
+
+ s = format (s, "%Unext:%d",
+ format_white_space, indent,
+ t->next);
+ return (s);
+}
+
+VLIB_REGISTER_NODE (nsh_adj_incomplete_node) = {
+ .function = nsh_adj_incomplete,
+ .name = "nsh-adj-incomplete",
+ .format_trace = format_nsh_adj_incomplete_trace,
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+ .n_next_nodes = NSH_ADJ_INCOMPLETE_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [NSH_ADJ_INCOMPLETE_NEXT_##s] = n,
+ foreach_nsh_adj_incomplete_next
+#undef _
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (nsh_adj_incomplete_node,
+ nsh_adj_incomplete)
diff --git a/src/plugins/nsh/nsh_packet.h b/src/plugins/nsh/nsh_packet.h
new file mode 100644
index 00000000000..9d6237af551
--- /dev/null
+++ b/src/plugins/nsh/nsh_packet.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef included_nsh_packet_h
+#define included_nsh_packet_h
+
+/*
+ * NSH packet format from draft-quinn-sfc-nsh-03.txt
+ *
+ * NSH Base Header
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |Ver|O|C| TTL | Length | MD Type | Next Protocol |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ *
+ * Base Header Field Descriptions:
+ *
+ * Version: The version field is used to ensure backward compatibility
+ * going forward with future NSH updates.
+ *
+ * O bit: Indicates that this packet is an operations and management
+ * (OAM) packet. SFF and SFs nodes MUST examine the payload and take
+ * appropriate action (e.g. return status information).
+ *
+ * OAM message specifics and handling details are outside the scope of
+ * this document.
+ *
+ * C bit: Indicates that a critical metadata TLV is present (see section
+ * 7). This bit acts as an indication for hardware implementers to
+ * decide how to handle the presence of a critical TLV without
+ * necessarily needing to parse all TLVs present. The C bit MUST be set
+ * to 1 if one or more critical TLVs are present.
+ *
+ * All other flag fields are reserved.
+ *
+ * Length: total length, in 4 byte words, of the NSH header, including
+ * optional variable TLVs. Length must be equal or greater than 6.
+ *
+ * MD Type: indicates the format of NSH beyond the base header and the
+ * type of metadata being carried. This typing is used to describe the
+ * use for the metadata. A new registry will be requested from IANA for
+ * the MD Type. NSH defines one type, type = 0x1 which indicates that
+ * the format of the header is as per this draft.
+ *
+ * The format of the base header is invariant, and not described by MD
+ * Type.
+ *
+ * Next Protocol: indicates the protocol type of the original packet. A
+ * new IANA registry will be created for protocol type.
+ *
+ * This draft defines the following Next Protocol values:
+ *
+ * 0x1 : IPv4
+ * 0x2 : IPv6
+ * 0x3 : Ethernet
+ */
+
+/* NSH base header (4+4 bytes, network byte order).  The TTL field is
+ * split across ver_o_c (low bits, see NSH_TTL_H4_MASK) and length
+ * (high bits, see NSH_TTL_L2_MASK); the usable length is the low 6
+ * bits of `length` (NSH_LEN_MASK), in 4-byte words. */
+typedef CLIB_PACKED(struct {
+ u8 ver_o_c; //TTL: high 4 bit
+ u8 length; //TTL: low 2 bit
+ u8 md_type;
+ u8 next_protocol;
+ u32 nsp_nsi; // nsp 24 bits, nsi 8 bits
+}) nsh_base_header_t;
+
+/* MD type 1: four fixed context words following the base header. */
+typedef CLIB_PACKED(struct {
+ /* Context headers, always present */
+ u32 c1;
+ u32 c2;
+ u32 c3;
+ u32 c4;
+}) nsh_md1_data_t;
+
+/* MD type 2: variable-length TLV header (class/type/length). */
+typedef CLIB_PACKED(struct {
+ u16 class;
+ u8 type;
+ u8 length;
+}) nsh_tlv_header_t;
+
+typedef nsh_tlv_header_t nsh_md2_data_t;
+
+/* Base header plus the start of either metadata format. */
+typedef CLIB_PACKED(struct {
+ nsh_base_header_t nsh_base;
+ union {
+ nsh_md1_data_t md1_data;
+ nsh_md2_data_t md2_data;
+ } md;
+}) nsh_header_t;
+
+#define NSH_VERSION (0<<6)
+#define NSH_O_BIT (1<<5)
+#define NSH_C_BIT (1<<4)
+
+#define NSH_TTL_H4_MASK 0xF
+#define NSH_TTL_L2_MASK 0xC0
+#define NSH_LEN_MASK 0x3F
+
+/* Network byte order shift / mask */
+#define NSH_NSI_MASK 0xFF
+#define NSH_NSP_MASK (0x00FFFFFF)
+#define NSH_NSP_SHIFT 8
+
+#endif /* included_nsh_packet_h */
diff --git a/src/plugins/nsh/nsh_pop.c b/src/plugins/nsh/nsh_pop.c
new file mode 100644
index 00000000000..cd25bb9e3f7
--- /dev/null
+++ b/src/plugins/nsh/nsh_pop.c
@@ -0,0 +1,365 @@
+/*
+ * nsh_pop.c - nsh POP only processing
+ *
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/plugin/plugin.h>
+#include <nsh/nsh.h>
+#include <vnet/gre/gre.h>
+#include <vnet/vxlan/vxlan.h>
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <vnet/l2/l2_classify.h>
+
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+
+extern nsh_option_map_t * nsh_md2_lookup_option (u16 class, u8 type);
+
+extern u8 * format_nsh_header (u8 * s, va_list * args);
+extern u8 * format_nsh_node_map_trace (u8 * s, va_list * args);
+
+/* format from network order */
+/* Thin wrapper so the nsh-pop node can reuse the shared header formatter. */
+u8 * format_nsh_pop_header (u8 * s, va_list * args)
+{
+ return format_nsh_header(s, args);
+}
+
+
+
+/* Thin wrapper reusing the shared node-map trace formatter. */
+u8 * format_nsh_pop_node_map_trace (u8 * s, va_list * args)
+{
+ return format_nsh_node_map_trace(s, args);
+}
+
+
+/* Pop-only NSH processing: look up the (nsp,nsi) key in the mapping
+ * table, and for NSH_ACTION_POP strip the NSH header by advancing the
+ * buffer past it; the mapping's next_node decides where the inner
+ * packet goes.  Dual-loop with single-packet cleanup loop; errors are
+ * recorded on the buffer and traced. */
+static uword
+nsh_pop_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ u32 n_left_from, next_index, *from, *to_next;
+ nsh_main_t * nm = &nsh_main;
+
+ from = vlib_frame_vector_args(from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t * b0, *b1;
+ u32 next0 = NSH_NODE_NEXT_DROP, next1 = NSH_NODE_NEXT_DROP;
+ uword * entry0, *entry1;
+ nsh_base_header_t * hdr0 = 0, *hdr1 = 0;
+ u32 header_len0 = 0, header_len1 = 0;
+ u32 nsp_nsi0, nsp_nsi1;
+ u32 error0, error1;
+ nsh_map_t * map0 = 0, *map1 = 0;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, *p3;
+
+ p2 = vlib_get_buffer(vm, from[2]);
+ p3 = vlib_get_buffer(vm, from[3]);
+
+ vlib_prefetch_buffer_header(p2, LOAD);
+ vlib_prefetch_buffer_header(p3, LOAD);
+
+ CLIB_PREFETCH(p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH(p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
+ bi0 = from[0];
+ bi1 = from[1];
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ error0 = 0;
+ error1 = 0;
+
+ b0 = vlib_get_buffer(vm, bi0);
+ b1 = vlib_get_buffer(vm, bi1);
+ hdr0 = vlib_buffer_get_current(b0);
+ nsp_nsi0 = hdr0->nsp_nsi;
+ /* NOTE(review): `length * 4` is used unmasked; per nsh_packet.h the
+ * top 2 bits of `length` carry TTL (NSH_LEN_MASK = 0x3F) — confirm
+ * those bits are always clear when packets reach this node. */
+ header_len0 = hdr0->length * 4;
+
+ hdr1 = vlib_buffer_get_current(b1);
+ nsp_nsi1 = hdr1->nsp_nsi;
+ header_len1 = hdr1->length * 4;
+
+ /* Process packet 0 */
+ entry0 = hash_get_mem(nm->nsh_mapping_by_key, &nsp_nsi0);
+ if (PREDICT_FALSE(entry0 == 0))
+ {
+ error0 = NSH_NODE_ERROR_NO_MAPPING;
+ goto trace0;
+ }
+
+ /* Entry should point to a mapping ...*/
+ map0 = pool_elt_at_index(nm->nsh_mappings, entry0[0]);
+ if (PREDICT_FALSE(map0 == 0))
+ {
+ error0 = NSH_NODE_ERROR_NO_MAPPING;
+ goto trace0;
+ }
+
+ /* set up things for next node to transmit ie which node to handle it and where */
+ next0 = map0->next_node;
+ //vnet_buffer(b0)->sw_if_index[VLIB_TX] = map0->sw_if_index;
+
+ if(PREDICT_FALSE(map0->nsh_action == NSH_ACTION_POP))
+ {
+ /* Manipulate MD2 */
+ if(PREDICT_FALSE(hdr0->md_type == 2))
+ {
+ if (PREDICT_FALSE(next0 == NSH_NODE_NEXT_DROP))
+ {
+ error0 = NSH_NODE_ERROR_INVALID_OPTIONS;
+ goto trace0;
+ }
+ //vnet_buffer(b0)->sw_if_index[VLIB_RX] = map0->sw_if_index;
+ }
+
+ /* Pop NSH header */
+ vlib_buffer_advance(b0, (word)header_len0);
+ goto trace0;
+ }
+
+ /* Non-POP action: only validate that an NSH entry exists for the
+ * mapped key; the entry itself is not used further here. */
+ entry0 = hash_get_mem(nm->nsh_entry_by_key, &map0->mapped_nsp_nsi);
+ if (PREDICT_FALSE(entry0 == 0))
+ {
+ error0 = NSH_NODE_ERROR_NO_ENTRY;
+ goto trace0;
+ }
+
+ trace0: b0->error = error0 ? node->errors[error0] : 0;
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ nsh_input_trace_t *tr = vlib_add_trace(vm, node, b0, sizeof(*tr));
+ clib_memcpy ( &(tr->trace_data), hdr0, (hdr0->length*4) );
+ }
+
+ /* Process packet 1 */
+ entry1 = hash_get_mem(nm->nsh_mapping_by_key, &nsp_nsi1);
+ if (PREDICT_FALSE(entry1 == 0))
+ {
+ error1 = NSH_NODE_ERROR_NO_MAPPING;
+ goto trace1;
+ }
+
+ /* Entry should point to a mapping ...*/
+ map1 = pool_elt_at_index(nm->nsh_mappings, entry1[0]);
+ if (PREDICT_FALSE(map1 == 0))
+ {
+ error1 = NSH_NODE_ERROR_NO_MAPPING;
+ goto trace1;
+ }
+
+ /* set up things for next node to transmit ie which node to handle it and where */
+ next1 = map1->next_node;
+ //vnet_buffer(b1)->sw_if_index[VLIB_TX] = map1->sw_if_index;
+
+ if(PREDICT_FALSE(map1->nsh_action == NSH_ACTION_POP))
+ {
+ /* Manipulate MD2 */
+ if(PREDICT_FALSE(hdr1->md_type == 2))
+ {
+ if (PREDICT_FALSE(next1 == NSH_NODE_NEXT_DROP))
+ {
+ error1 = NSH_NODE_ERROR_INVALID_OPTIONS;
+ goto trace1;
+ }
+ //vnet_buffer(b1)->sw_if_index[VLIB_RX] = map1->sw_if_index;
+ }
+
+ /* Pop NSH header */
+ vlib_buffer_advance(b1, (word)header_len1);
+ goto trace1;
+ }
+
+ entry1 = hash_get_mem(nm->nsh_entry_by_key, &map1->mapped_nsp_nsi);
+ if (PREDICT_FALSE(entry1 == 0))
+ {
+ error1 = NSH_NODE_ERROR_NO_ENTRY;
+ goto trace1;
+ }
+
+
+ trace1: b1->error = error1 ? node->errors[error1] : 0;
+
+ if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ nsh_input_trace_t *tr = vlib_add_trace(vm, node, b1, sizeof(*tr));
+ clib_memcpy ( &(tr->trace_data), hdr1, (hdr1->length*4) );
+ }
+
+ vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
+ n_left_to_next, bi0, bi1, next0, next1);
+
+ }
+
+ /* Single-packet cleanup loop; same logic as above for one buffer. */
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0 = 0;
+ vlib_buffer_t * b0 = NULL;
+ u32 next0 = NSH_NODE_NEXT_DROP;
+ uword * entry0;
+ nsh_base_header_t * hdr0 = 0;
+ u32 header_len0 = 0;
+ u32 nsp_nsi0;
+ u32 error0;
+ nsh_map_t * map0 = 0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+ error0 = 0;
+
+ b0 = vlib_get_buffer(vm, bi0);
+ hdr0 = vlib_buffer_get_current(b0);
+
+ nsp_nsi0 = hdr0->nsp_nsi;
+ header_len0 = hdr0->length * 4;
+
+ entry0 = hash_get_mem(nm->nsh_mapping_by_key, &nsp_nsi0);
+
+ if (PREDICT_FALSE(entry0 == 0))
+ {
+ error0 = NSH_NODE_ERROR_NO_MAPPING;
+ goto trace00;
+ }
+
+ /* Entry should point to a mapping ...*/
+ map0 = pool_elt_at_index(nm->nsh_mappings, entry0[0]);
+
+ if (PREDICT_FALSE(map0 == 0))
+ {
+ error0 = NSH_NODE_ERROR_NO_MAPPING;
+ goto trace00;
+ }
+
+ /* set up things for next node to transmit ie which node to handle it and where */
+ next0 = map0->next_node;
+ //vnet_buffer(b0)->sw_if_index[VLIB_TX] = map0->sw_if_index;
+
+ if(PREDICT_FALSE(map0->nsh_action == NSH_ACTION_POP))
+ {
+ /* Manipulate MD2 */
+ if(PREDICT_FALSE(hdr0->md_type == 2))
+ {
+ if (PREDICT_FALSE(next0 == NSH_NODE_NEXT_DROP))
+ {
+ error0 = NSH_NODE_ERROR_INVALID_OPTIONS;
+ goto trace00;
+ }
+ //vnet_buffer(b0)->sw_if_index[VLIB_RX] = map0->sw_if_index;
+ }
+
+ /* Pop NSH header */
+ vlib_buffer_advance(b0, (word)header_len0);
+ goto trace00;
+ }
+
+ entry0 = hash_get_mem(nm->nsh_entry_by_key, &map0->mapped_nsp_nsi);
+ if (PREDICT_FALSE(entry0 == 0))
+ {
+ error0 = NSH_NODE_ERROR_NO_ENTRY;
+ goto trace00;
+ }
+
+ trace00: b0->error = error0 ? node->errors[error0] : 0;
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ nsh_input_trace_t *tr = vlib_add_trace(vm, node, b0, sizeof(*tr));
+ clib_memcpy ( &(tr->trace_data[0]), hdr0, (hdr0->length*4) );
+ }
+
+ vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+
+ vlib_put_next_frame(vm, node, next_index, n_left_to_next);
+
+ }
+
+ return from_frame->n_vectors;
+}
+
+/**
+ * @brief Graph processing dispatch function for NSH Input
+ *
+ * @node nsh_input
+ * @param *vm
+ * @param *node
+ * @param *from_frame
+ *
+ * @return from_frame->n_vectors
+ *
+ */
+static uword
+nsh_pop (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return nsh_pop_inline (vm, node, from_frame);
+}
+
+/* Error strings expanded from the shared foreach_nsh_node_error list. */
+static char * nsh_pop_node_error_strings[] = {
+#define _(sym,string) string,
+ foreach_nsh_node_error
+#undef _
+};
+
+/* register nsh-input node */
+VLIB_REGISTER_NODE (nsh_pop_node) = {
+ .function = nsh_pop,
+ .name = "nsh-pop",
+ .vector_size = sizeof (u32),
+ .format_trace = format_nsh_pop_node_map_trace,
+ .format_buffer = format_nsh_pop_header,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(nsh_pop_node_error_strings),
+ .error_strings = nsh_pop_node_error_strings,
+
+ .n_next_nodes = NSH_NODE_N_NEXT,
+
+ .next_nodes = {
+#define _(s,n) [NSH_NODE_NEXT_##s] = n,
+ foreach_nsh_node_next
+#undef _
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (nsh_pop_node, nsh_pop);
+
+
diff --git a/src/plugins/nsh/nsh_test.c b/src/plugins/nsh/nsh_test.c
new file mode 100644
index 00000000000..396e84ca30d
--- /dev/null
+++ b/src/plugins/nsh/nsh_test.c
@@ -0,0 +1,411 @@
+/*
+ * nsh_test.c - NSH vpp-api-test plug-in
+ *
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vat/vat.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+#include <vppinfra/error.h>
+#include <nsh/nsh.h>
+
+uword unformat_sw_if_index (unformat_input_t * input, va_list * args);
+
+/* define message IDs */
+#define vl_msg_id(n,h) n,
+typedef enum {
+#include <nsh/nsh.api.h>
+ /* We'll want to know how many messages IDs we need... */
+ VL_MSG_FIRST_AVAILABLE,
+} vl_msg_id_t;
+#undef vl_msg_id
+
+/* define message structures */
+#define vl_typedefs
+#include <nsh/nsh.api.h>
+#undef vl_typedefs
+
+/* define generated endian-swappers */
+#define vl_endianfun
+#include <nsh/nsh.api.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <nsh/nsh.api.h>
+#undef vl_printfun
+
+/* Get the API version number */
+#define vl_api_version(n,v) static u32 api_version=(v);
+#include <nsh/nsh.api.h>
+#undef vl_api_version
+
+#define vl_msg_name_crc_list
+#include <nsh/nsh.api.h>
+#undef vl_msg_name_crc_list
+
+
+/* Per-process state for the NSH VAT test plug-in */
+typedef struct {
+  /* API message ID base */
+  u16 msg_id_base;
+  /* Back-pointer to the VAT main structure */
+  vat_main_t *vat_main;
+} nsh_test_main_t;
+
+nsh_test_main_t nsh_test_main;
+
+/* Generate a boilerplate reply handler for each reply message that
+ * carries only a retval; it records the result and, in synchronous
+ * mode, sets result_ready to release the W wait loop below. */
+#define foreach_standard_reply_retval_handler \
+_(nsh_add_del_entry_reply) \
+_(nsh_add_del_map_reply) \
+
+#define _(n) \
+    static void vl_api_##n##_t_handler \
+    (vl_api_##n##_t * mp) \
+    { \
+        vat_main_t * vam = nsh_test_main.vat_main; \
+        i32 retval = ntohl(mp->retval); \
+        if (vam->async_mode) { \
+            vam->async_errors += (retval < 0); \
+        } else { \
+            vam->retval = retval; \
+            vam->result_ready = 1; \
+        } \
+    }
+foreach_standard_reply_retval_handler;
+#undef _
+
+/*
+ * Table of message reply handlers, must include boilerplate handlers
+ * we just generated.  IDs are rebased by msg_id_base when registered
+ * in vat_api_hookup().
+ */
+#define foreach_vpe_api_reply_msg \
+_(NSH_ADD_DEL_ENTRY_REPLY, nsh_add_del_entry_reply) \
+_(NSH_ENTRY_DETAILS, nsh_entry_details) \
+_(NSH_ADD_DEL_MAP_REPLY, nsh_add_del_map_reply) \
+_(NSH_MAP_DETAILS, nsh_map_details)
+
+
+/* M: construct, but don't yet send a message.
+ * NOTE: M, M2, S and W all expand inline and capture the surrounding
+ * locals `vam', `sm', `mp' and `timeout' by name; every caller must
+ * declare them with exactly those names. */
+
+#define M(T,t) \
+do { \
+    vam->result_ready = 0; \
+    mp = vl_msg_api_alloc(sizeof(*mp)); \
+    memset (mp, 0, sizeof (*mp)); \
+    mp->_vl_msg_id = ntohs (VL_API_##T + sm->msg_id_base); \
+    mp->client_index = vam->my_client_index; \
+} while(0);
+
+/* M2: like M, but reserve n extra bytes of variable-length payload */
+#define M2(T,t,n) \
+do { \
+    vam->result_ready = 0; \
+    mp = vl_msg_api_alloc(sizeof(*mp)+(n)); \
+    memset (mp, 0, sizeof (*mp)); \
+    mp->_vl_msg_id = ntohs (VL_API_##T + sm->msg_id_base); \
+    mp->client_index = vam->my_client_index; \
+} while(0);
+
+/* S: send a message */
+#define S (vl_msg_api_send_shmem (vam->vl_input_queue, (u8 *)&mp))
+
+/* W: wait for results, with timeout.
+ * Busy-polls for up to 1 second and RETURNS from the enclosing
+ * function: the reply's retval on success, -99 on timeout. */
+#define W \
+do { \
+    timeout = vat_time_now (vam) + 1.0; \
+    \
+    while (vat_time_now (vam) < timeout) { \
+        if (vam->result_ready == 1) { \
+            return (vam->retval); \
+        } \
+    } \
+    return -99; \
+} while(0);
+
+/**
+ * @brief VAT CLI handler: add or delete an NSH header entry.
+ *
+ * Parses "nsp <nn> nsi <nn> [version <nn>] [o-bit <nn>] [c-bit <nn>]
+ * [md-type <nn>] [next-ip4|next-ip6|next-ethernet] [c1..c4 <nn>] [del]"
+ * and sends an NSH_ADD_DEL_ENTRY request to the NSH plugin.
+ *
+ * NOTE: the M/S/W macros above capture the locals `sm', `vam', `mp'
+ * and `timeout' by name, and W returns from this function.
+ *
+ * @return 0 on success, negative error code on parse failure/timeout.
+ */
+static int api_nsh_add_del_entry (vat_main_t * vam)
+{
+  nsh_test_main_t * sm = &nsh_test_main;
+  unformat_input_t * line_input = vam->input;
+  f64 timeout;
+  u8 is_add = 1;
+  u8 ver_o_c = 0;
+  u8 length = 0;
+  u8 md_type = 0;
+  u8 next_protocol = 1; /* default: ip4 */
+  u32 nsp;
+  u8 nsp_set = 0;
+  u32 nsi;
+  u8 nsi_set = 0;
+  u32 nsp_nsi;
+  u32 c1 = 0;
+  u32 c2 = 0;
+  u32 c3 = 0;
+  u32 c4 = 0;
+  u32 tmp;
+  vl_api_nsh_add_del_entry_t * mp;
+
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) {
+    if (unformat (line_input, "del"))
+      is_add = 0;
+    else if (unformat (line_input, "version %d", &tmp))
+      ver_o_c |= (tmp & 3) << 6;
+    else if (unformat (line_input, "o-bit %d", &tmp))
+      ver_o_c |= (tmp & 1) << 5;
+    else if (unformat (line_input, "c-bit %d", &tmp))
+      ver_o_c |= (tmp & 1) << 4;
+    else if (unformat (line_input, "md-type %d", &tmp))
+      md_type = tmp;
+    else if (unformat(line_input, "next-ip4"))
+      next_protocol = 1;
+    else if (unformat(line_input, "next-ip6"))
+      next_protocol = 2;
+    else if (unformat(line_input, "next-ethernet"))
+      next_protocol = 3;
+    else if (unformat (line_input, "c1 %d", &c1))
+      ;
+    else if (unformat (line_input, "c2 %d", &c2))
+      ;
+    else if (unformat (line_input, "c3 %d", &c3))
+      ;
+    else if (unformat (line_input, "c4 %d", &c4))
+      ;
+    else if (unformat (line_input, "nsp %d", &nsp))
+      nsp_set = 1;
+    else if (unformat (line_input, "nsi %d", &nsi))
+      nsi_set = 1;
+    else
+      return -99; // PARSE ERROR;
+  }
+
+  unformat_free (line_input);
+
+  if (nsp_set == 0)
+    return -1; //TODO Error type for this cond: clib_error_return (0, "nsp not specified");
+
+  if (nsi_set == 0)
+    return -2; //TODO Error type for this cond:clib_error_return (0, "nsi not specified");
+
+  /* header length in 4-octet words for the fixed-size MD types:
+   * MD1 = base (2) + mandatory context (4); MD2 starts at base only */
+  if (md_type == 1)
+    length = 6;
+  else if (md_type == 2)
+    length = 2; /* base header length */
+
+  /* use the shared shift constant so the entry key format stays
+   * consistent with api_nsh_add_del_map() and the data plane */
+  nsp_nsi = (nsp << NSH_NSP_SHIFT) | nsi;
+
+  /* Construct the API message */
+  M(NSH_ADD_DEL_ENTRY, nsh_add_del_entry);
+  mp->is_add = is_add;
+
+#define _(x) mp->x = x;
+  foreach_copy_nsh_base_hdr_field;
+#undef _
+
+
+  /* send it... */
+  S;
+
+  /* Wait for a reply... */
+  W;
+}
+
+/**
+ * @brief Print one NSH_ENTRY_DETAILS reply as a table row.
+ *
+ * Field widths must track the "%11s%11s%15s%14s%14s%13s%13s%13s%13s"
+ * header printed by api_nsh_entry_dump(), otherwise the columns
+ * misalign (the original used %11d/%14d throughout).
+ */
+static void vl_api_nsh_entry_details_t_handler
+(vl_api_nsh_entry_details_t * mp)
+{
+  vat_main_t * vam = &vat_main;
+
+  fformat(vam->ofp, "%11d%11d%15d%14d%14d%13d%13d%13d%13d\n",
+	  mp->ver_o_c,
+	  mp->length,
+	  mp->md_type,
+	  mp->next_protocol,
+	  ntohl(mp->nsp_nsi),
+	  ntohl(mp->c1),
+	  ntohl(mp->c2),
+	  ntohl(mp->c3),
+	  ntohl(mp->c4));
+}
+
+/**
+ * @brief VAT CLI handler: dump all NSH entries.
+ *
+ * Prints a column header (plain-text mode only) and sends
+ * NSH_ENTRY_DUMP; each row is printed by the details handler above.
+ * NOTE: M/S/W capture `sm', `vam', `mp' and `timeout' by name,
+ * and W returns from this function.
+ */
+static int api_nsh_entry_dump (vat_main_t * vam)
+{
+  nsh_test_main_t * sm = &nsh_test_main;
+  vl_api_nsh_entry_dump_t *mp;
+  f64 timeout;
+
+  if (!vam->json_output) {
+    fformat(vam->ofp, "%11s%11s%15s%14s%14s%13s%13s%13s%13s\n",
+	    "ver_o_c", "length", "md_type", "next_protocol",
+	    "nsp_nsi", "c1", "c2", "c3", "c4");
+  }
+
+  /* Get list of nsh entries */
+  M(NSH_ENTRY_DUMP, nsh_entry_dump);
+
+  /* send it... */
+  S;
+
+  /* Wait for a reply... */
+  W;
+}
+
+/**
+ * @brief VAT CLI handler: add or delete an NSH map.
+ *
+ * Parses "nsp <nn> nsi <nn> mapped-nsp <nn> mapped-nsi <nn>
+ * [encap-gre4-intf <nn> | encap-gre6-intf <nn> |
+ *  encap-vxlan-gpe-intf <nn> | encap-none] [del]"
+ * and sends an NSH_ADD_DEL_MAP request to the NSH plugin.
+ *
+ * NOTE: M/S/W capture `sm', `vam', `mp' and `timeout' by name,
+ * and W returns from this function.
+ */
+static int api_nsh_add_del_map (vat_main_t * vam)
+{
+  nsh_test_main_t * sm = &nsh_test_main;
+  unformat_input_t * line_input = vam->input;
+  f64 timeout;
+  u8 is_add = 1;
+  u32 nsp, nsi, mapped_nsp, mapped_nsi;
+  int nsp_set = 0, nsi_set = 0, mapped_nsp_set = 0, mapped_nsi_set = 0;
+  u32 next_node = ~0;
+  u32 sw_if_index = ~0; // temporary requirement to get this moved over to NSHSFC
+  vl_api_nsh_add_del_map_t * mp;
+
+
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) {
+    if (unformat (line_input, "del"))
+      is_add = 0;
+    else if (unformat (line_input, "nsp %d", &nsp))
+      nsp_set = 1;
+    else if (unformat (line_input, "nsi %d", &nsi))
+      nsi_set = 1;
+    else if (unformat (line_input, "mapped-nsp %d", &mapped_nsp))
+      mapped_nsp_set = 1;
+    else if (unformat (line_input, "mapped-nsi %d", &mapped_nsi))
+      mapped_nsi_set = 1;
+    else if (unformat (line_input, "encap-gre4-intf %d", &sw_if_index))
+      next_node = NSH_NODE_NEXT_ENCAP_GRE4;
+    else if (unformat (line_input, "encap-gre6-intf %d", &sw_if_index))
+      next_node = NSH_NODE_NEXT_ENCAP_GRE6;
+    else if (unformat (line_input, "encap-vxlan-gpe-intf %d", &sw_if_index))
+      next_node = NSH_NODE_NEXT_ENCAP_VXLANGPE;
+    else if (unformat (line_input, "encap-none"))
+      next_node = NSH_NODE_NEXT_DROP; // Once moved to NSHSFC see nsh.h:foreach_nsh_input_next to handle this case
+    else
+      return -99; //TODO clib_error_return (0, "parse error: '%U'",
+  }
+
+  unformat_free (line_input);
+
+  if (nsp_set == 0 || nsi_set == 0)
+    return -1; // TODO create return value: clib_error_return (0, "nsp nsi pair required. Key: for NSH entry");
+
+  if (mapped_nsp_set == 0 || mapped_nsi_set == 0)
+    return -2; // TODO create return valuee clib_error_return (0, "mapped-nsp mapped-nsi pair required. Key: for NSH entry");
+
+  if (next_node == ~0)
+    return -3; //TODO clib_error_return (0, "must specific action: [encap-gre-intf <nn> | encap-vxlan-gpe-intf <nn> | encap-none]");
+
+
+  M(NSH_ADD_DEL_MAP, nsh_add_del_map);
+  /* set args structure */
+  mp->is_add = is_add;
+  mp->nsp_nsi = (nsp<< NSH_NSP_SHIFT) | nsi;
+  mp->mapped_nsp_nsi = (mapped_nsp<< NSH_NSP_SHIFT) | mapped_nsi;
+  mp->sw_if_index = sw_if_index;
+  mp->next_node = next_node;
+
+  /* send it... */
+  S;
+
+  /* Wait for a reply... */
+  W;
+
+
+}
+
+/**
+ * @brief Print one NSH_MAP_DETAILS reply as a table row.
+ *
+ * Field widths must track the "%16s%16s%13s%13s" header printed by
+ * api_nsh_map_dump(), otherwise the columns misalign (the original
+ * used %14d for every field).
+ */
+static void vl_api_nsh_map_details_t_handler
+(vl_api_nsh_map_details_t * mp)
+{
+  vat_main_t * vam = &vat_main;
+
+  fformat(vam->ofp, "%16d%16d%13d%13d\n",
+	  ntohl(mp->nsp_nsi),
+	  ntohl(mp->mapped_nsp_nsi),
+	  ntohl(mp->sw_if_index),
+	  ntohl(mp->next_node));
+}
+
+/**
+ * @brief VAT CLI handler: dump all NSH maps.
+ *
+ * Prints a column header (plain-text mode only) and sends
+ * NSH_MAP_DUMP; each row is printed by the details handler above.
+ * NOTE: M/S/W capture `sm', `vam', `mp' and `timeout' by name,
+ * and W returns from this function.
+ */
+static int api_nsh_map_dump (vat_main_t * vam)
+{
+  nsh_test_main_t * sm = &nsh_test_main;
+  vl_api_nsh_map_dump_t *mp;
+  f64 timeout;
+
+  if (!vam->json_output) {
+    fformat(vam->ofp, "%16s%16s%13s%13s\n",
+	    "nsp_nsi", "mapped_nsp_nsi", "sw_if_index", "next_node");
+  }
+
+  /* Get list of nsh entries */
+  M(NSH_MAP_DUMP, nsh_map_dump);
+
+  /* send it... */
+  S;
+
+  /* Wait for a reply... */
+  W;
+}
+
+/*
+ * List of messages that the api test plugin sends,
+ * and that the data plane plugin processes.
+ * The second argument is the help string registered per command;
+ * the nsh_add_del_map text now matches the tokens the parser
+ * actually accepts (encap-gre4-intf / encap-gre6-intf, not the
+ * nonexistent encap-gre-intf the original advertised).
+ */
+#define foreach_vpe_api_msg \
+_(nsh_add_del_entry, "{nsp <nn> nsi <nn>} c1 <nn> c2 <nn> c3 <nn> c4 <nn> [md-type <nn>] [tlv <xx>] [del]") \
+_(nsh_entry_dump, "") \
+_(nsh_add_del_map, "nsp <nn> nsi <nn> [del] mapped-nsp <nn> mapped-nsi <nn> [encap-gre4-intf <nn> | encap-gre6-intf <nn> | encap-vxlan-gpe-intf <nn> | encap-none]") \
+_(nsh_map_dump, "")
+
+/**
+ * @brief Register this plugin's reply handlers, CLI commands and
+ * help strings with the VAT framework.
+ *
+ * Message IDs are rebased by sm->msg_id_base, which is obtained from
+ * the engine before this is called (see vat_plugin_register()).
+ */
+void vat_api_hookup (vat_main_t *vam)
+{
+  nsh_test_main_t * sm = &nsh_test_main;
+  /* Hook up handlers for replies from the data plane plug-in */
+#define _(N,n) \
+  vl_msg_api_set_handlers((VL_API_##N + sm->msg_id_base), \
+			  #n, \
+			  vl_api_##n##_t_handler, \
+			  vl_noop_handler, \
+			  vl_api_##n##_t_endian, \
+			  vl_api_##n##_t_print, \
+			  sizeof(vl_api_##n##_t), 1);
+  foreach_vpe_api_reply_msg;
+#undef _
+
+  /* API messages we can send */
+#define _(n,h) hash_set_mem (vam->function_by_name, #n, api_##n);
+  foreach_vpe_api_msg;
+#undef _
+
+  /* Help strings */
+#define _(n,h) hash_set_mem (vam->help_by_name, #n, h);
+  foreach_vpe_api_msg;
+#undef _
+}
+
+/**
+ * @brief VAT entry point: look up this plugin's message-ID base and,
+ * if the NSH plugin is loaded in the engine, hook up its handlers.
+ *
+ * @param vam VAT main structure
+ * @return always 0 (registration failure is silently tolerated)
+ */
+clib_error_t * vat_plugin_register (vat_main_t *vam)
+{
+  nsh_test_main_t * sm = &nsh_test_main;
+  u8 * plugin_name;
+
+  sm->vat_main = vam;
+
+  /* Ask the vpp engine for the first assigned message-id */
+  plugin_name = format (0, "nsh_%08x%c", api_version, 0);
+  sm->msg_id_base =
+    vl_client_get_first_plugin_msg_id ((char *) plugin_name);
+  vec_free (plugin_name);
+
+  /* (u16)~0 means the data-plane plugin is not loaded; skip hookup */
+  if (sm->msg_id_base != (u16) ~0)
+    vat_api_hookup (vam);
+
+  return 0;
+}