summaryrefslogtreecommitdiffstats
path: root/src/plugins/ioam/lib-vxlan-gpe
diff options
context:
space:
mode:
authorDamjan Marion <damarion@cisco.com>2016-12-28 18:38:59 +0100
committerDamjan Marion <damarion@cisco.com>2017-01-01 18:11:43 +0100
commitcb034b9b374927c7552e36dcbc306d8456b2a0cb (patch)
tree9ff64f9792560630c8cf8faa2f74fc20671c30f1 /src/plugins/ioam/lib-vxlan-gpe
parentfdc62abdc113ea63dc867375bd49ef3043dcd290 (diff)
Move java,lua api and remaining plugins to src/
Change-Id: I1c3b87e886603678368428ae56a6bd3327cbc90d Signed-off-by: Damjan Marion <damarion@cisco.com>
Diffstat (limited to 'src/plugins/ioam/lib-vxlan-gpe')
-rw-r--r--src/plugins/ioam/lib-vxlan-gpe/ioam_decap.c223
-rw-r--r--src/plugins/ioam/lib-vxlan-gpe/ioam_encap.c194
-rw-r--r--src/plugins/ioam/lib-vxlan-gpe/ioam_pop.c353
-rw-r--r--src/plugins/ioam/lib-vxlan-gpe/ioam_transit.c188
-rw-r--r--src/plugins/ioam/lib-vxlan-gpe/ioam_vxlan_gpe.api181
-rw-r--r--src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_all_api_h.h16
-rw-r--r--src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_api.c378
-rw-r--r--src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.c773
-rw-r--r--src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h183
-rw-r--r--src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_packet.h61
-rw-r--r--src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_trace.c552
-rw-r--r--src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_util.h172
-rw-r--r--src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_msg_enum.h28
-rw-r--r--src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_test.c600
14 files changed, 3902 insertions, 0 deletions
diff --git a/src/plugins/ioam/lib-vxlan-gpe/ioam_decap.c b/src/plugins/ioam/lib-vxlan-gpe/ioam_decap.c
new file mode 100644
index 00000000000..fd3086571eb
--- /dev/null
+++ b/src/plugins/ioam/lib-vxlan-gpe/ioam_decap.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam_packet.h>
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h>
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam_util.h>
+
+/* Statistics (not really errors) */
+#define foreach_vxlan_gpe_decap_ioam_v4_error \
+_(DECAPSULATED, "good packets decapsulated")
+
+/* Human-readable counter strings, generated from the error list above. */
+static char *vxlan_gpe_decap_ioam_v4_error_strings[] = {
+#define _(sym,string) string,
+  foreach_vxlan_gpe_decap_ioam_v4_error
+#undef _
+};
+
+/* Counter indices matching the strings; _N_ERROR is the total count. */
+typedef enum
+{
+#define _(sym,str) VXLAN_GPE_DECAP_IOAM_V4_ERROR_##sym,
+  foreach_vxlan_gpe_decap_ioam_v4_error
+#undef _
+  VXLAN_GPE_DECAP_IOAM_V4_N_ERROR,
+} vxlan_gpe_decap_ioam_v4_error_t;
+
+
+/* Run decap-side iOAM processing on two buffers.  Each buffer's next
+ * index defaults to the configured decap override and may be rewritten
+ * (e.g. to DROP) by the shared per-buffer helper. */
+always_inline void
+vxlan_gpe_decap_ioam_v4_two_inline (vlib_main_t * vm,
+				    vlib_node_runtime_t * node,
+				    vxlan_gpe_main_t * ngm,
+				    vlib_buffer_t * b0, vlib_buffer_t * b1,
+				    u32 * next0, u32 * next1)
+{
+  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+
+  next0[0] = next1[0] = hm->decap_v4_next_override;
+  vxlan_gpe_encap_decap_ioam_v4_one_inline (vm, node, b0, &next0[0],
+					    VXLAN_GPE_DECAP_IOAM_V4_NEXT_DROP,
+					    0 /* use_adj */ );
+  /* BUG FIX: the second buffer's next index must be written through
+   * next1.  The old code passed &next0[1], which indexes past the
+   * single u32 next0 points at (undefined behavior) and leaves *next1
+   * unset. */
+  vxlan_gpe_encap_decap_ioam_v4_one_inline (vm, node, b1, &next1[0],
+					    VXLAN_GPE_DECAP_IOAM_V4_NEXT_DROP,
+					    0 /* use_adj */ );
+}
+
+
+
+/* Decap-side iOAM frame worker: for each buffer, rewind past the outer
+ * IPv4 + UDP + VxLAN-GPE headers (already stepped over by the decap
+ * node) so the shared iOAM handler can parse them, then enqueue on the
+ * next index the handler selects (default: hm->decap_v4_next_override).
+ * NOTE(review): is_ipv6 is accepted but never used — only IPv4 header
+ * sizes are rewound; confirm IPv6 support is intentionally absent. */
+static uword
+vxlan_gpe_decap_ioam (vlib_main_t * vm,
+		      vlib_node_runtime_t * node,
+		      vlib_frame_t * from_frame, u8 is_ipv6)
+{
+  u32 n_left_from, next_index, *from, *to_next;
+  vxlan_gpe_main_t *ngm = &vxlan_gpe_main;
+  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+
+  from = vlib_frame_vector_args (from_frame);
+  n_left_from = from_frame->n_vectors;
+
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Dual-buffer loop; needs >= 4 so we can prefetch two ahead. */
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+	  u32 bi0, bi1;
+	  vlib_buffer_t *b0, *b1;
+	  u32 next0, next1;
+
+	  next0 = next1 = hm->decap_v4_next_override;
+
+	  /* Prefetch next iteration. */
+	  {
+	    vlib_buffer_t *p2, *p3;
+
+	    p2 = vlib_get_buffer (vm, from[2]);
+	    p3 = vlib_get_buffer (vm, from[3]);
+
+	    vlib_prefetch_buffer_header (p2, LOAD);
+	    vlib_prefetch_buffer_header (p3, LOAD);
+
+	    CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+	    CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+	  }
+
+	  bi0 = from[0];
+	  bi1 = from[1];
+	  to_next[0] = bi0;
+	  to_next[1] = bi1;
+	  from += 2;
+	  to_next += 2;
+	  n_left_to_next -= 2;
+	  n_left_from -= 2;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+	  b1 = vlib_get_buffer (vm, bi1);
+
+	  /* Rewind to the start of the outer IPv4 header so the iOAM
+	   * handler can parse ip4/udp/vxlan-gpe. */
+	  vlib_buffer_advance (b0,
+			       -(word) (sizeof (udp_header_t) +
+					sizeof (ip4_header_t) +
+					sizeof (vxlan_gpe_header_t)));
+	  vlib_buffer_advance (b1,
+			       -(word) (sizeof (udp_header_t) +
+					sizeof (ip4_header_t) +
+					sizeof (vxlan_gpe_header_t)));
+
+	  vxlan_gpe_decap_ioam_v4_two_inline (vm, node, ngm, b0, b1,
+					      &next0, &next1);
+
+
+	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
+					   n_left_to_next, bi0, bi1, next0,
+					   next1);
+
+	  /* NOTE(review): only b0 is traced here (b1 is not), and the
+	   * trace record is left zero-filled — tr is otherwise unused;
+	   * confirm this is intentional. */
+	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+	    {
+	      vxlan_gpe_ioam_v4_trace_t *tr = vlib_add_trace (vm, node, b0,
+							      sizeof (*tr));
+	    }
+	}
+
+      /* Single-buffer cleanup loop for the frame remainder. */
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  u32 bi0;
+	  vlib_buffer_t *b0;
+	  u32 next0 = hm->decap_v4_next_override;
+
+	  bi0 = from[0];
+	  to_next[0] = bi0;
+	  from += 1;
+	  to_next += 1;
+	  n_left_from -= 1;
+	  n_left_to_next -= 1;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+
+
+	  /* Rewind to the outer IPv4 header (same as the dual loop). */
+	  vlib_buffer_advance (b0,
+			       -(word) (sizeof (udp_header_t) +
+					sizeof (ip4_header_t) +
+					sizeof (vxlan_gpe_header_t)));
+
+	  next0 = hm->decap_v4_next_override;
+	  vxlan_gpe_encap_decap_ioam_v4_one_inline (vm, node, b0,
+						    &next0,
+						    VXLAN_GPE_DECAP_IOAM_V4_NEXT_DROP,
+						    0 /* use_adj */ );
+
+	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+	    {
+	      vxlan_gpe_ioam_v4_trace_t *tr = vlib_add_trace (vm, node, b0,
+							      sizeof (*tr));
+	    }
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+					   n_left_to_next, bi0, next0);
+	}
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  return from_frame->n_vectors;
+}
+
+
+/* IPv4 variant: dispatch straight to the shared decap worker. */
+static uword
+vxlan_gpe_decap_ioam_v4 (vlib_main_t * vm,
+			 vlib_node_runtime_t * node,
+			 vlib_frame_t * from_frame)
+{
+  return vxlan_gpe_decap_ioam (vm, node, from_frame, 0 /* is_ipv6 */ );
+}
+
+
+/* *INDENT-OFF* */
+/* Graph node performing iOAM processing on decapsulated IPv4
+ * VxLAN-GPE packets; packets then continue to the pop node (or drop). */
+VLIB_REGISTER_NODE (vxlan_gpe_decap_ioam_v4_node) = {
+  .function = vxlan_gpe_decap_ioam_v4,
+  .name = "vxlan-gpe-decap-ioam-v4",
+  .vector_size = sizeof (u32),
+  .format_trace = format_vxlan_gpe_ioam_v4_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+
+  .n_errors = ARRAY_LEN(vxlan_gpe_decap_ioam_v4_error_strings),
+  .error_strings = vxlan_gpe_decap_ioam_v4_error_strings,
+
+  .n_next_nodes = VXLAN_GPE_DECAP_IOAM_V4_N_NEXT,
+
+  .next_nodes = {
+    [VXLAN_GPE_DECAP_IOAM_V4_NEXT_POP] = "vxlan-gpe-pop-ioam-v4",
+    [VXLAN_GPE_DECAP_IOAM_V4_NEXT_DROP] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/ioam/lib-vxlan-gpe/ioam_encap.c b/src/plugins/ioam/lib-vxlan-gpe/ioam_encap.c
new file mode 100644
index 00000000000..4b18bfea533
--- /dev/null
+++ b/src/plugins/ioam/lib-vxlan-gpe/ioam_encap.c
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam_packet.h>
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h>
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam_util.h>
+
+/* Statistics (not really errors) */
+#define foreach_vxlan_gpe_encap_ioam_v4_error \
+_(ENCAPSULATED, "good packets encapsulated")
+
+/* Human-readable counter strings, generated from the error list above. */
+static char *vxlan_gpe_encap_ioam_v4_error_strings[] = {
+#define _(sym,string) string,
+  foreach_vxlan_gpe_encap_ioam_v4_error
+#undef _
+};
+
+/* Counter indices matching the strings; _N_ERROR is the total count. */
+typedef enum
+{
+#define _(sym,str) VXLAN_GPE_ENCAP_IOAM_V4_ERROR_##sym,
+  foreach_vxlan_gpe_encap_ioam_v4_error
+#undef _
+  VXLAN_GPE_ENCAP_IOAM_V4_N_ERROR,
+} vxlan_gpe_encap_ioam_v4_error_t;
+
+/* Next-node indices for the encap iOAM node. */
+typedef enum
+{
+  VXLAN_GPE_ENCAP_IOAM_V4_NEXT_IP4_LOOKUP,
+  VXLAN_GPE_ENCAP_IOAM_V4_NEXT_DROP,
+  VXLAN_GPE_ENCAP_IOAM_V4_N_NEXT
+} vxlan_gpe_encap_ioam_v4_next_t;
+
+
+/* Encap-side iOAM processing for two buffers: default each next index
+ * to ip4-lookup, then let the shared encap/decap iOAM helper rewrite
+ * it (e.g. to DROP) per buffer. */
+always_inline void
+vxlan_gpe_encap_ioam_v4_two_inline (vlib_main_t * vm,
+				    vlib_node_runtime_t * node,
+				    vxlan_gpe_main_t * ngm,
+				    vlib_buffer_t * b0, vlib_buffer_t * b1,
+				    u32 * next0, u32 * next1)
+{
+  next0[0] = VXLAN_GPE_ENCAP_IOAM_V4_NEXT_IP4_LOOKUP;
+  vxlan_gpe_encap_decap_ioam_v4_one_inline (vm, node, b0, next0,
+					    VXLAN_GPE_ENCAP_IOAM_V4_NEXT_DROP,
+					    0 /* use_adj */ );
+
+  next1[0] = VXLAN_GPE_ENCAP_IOAM_V4_NEXT_IP4_LOOKUP;
+  vxlan_gpe_encap_decap_ioam_v4_one_inline (vm, node, b1, next1,
+					    VXLAN_GPE_ENCAP_IOAM_V4_NEXT_DROP,
+					    0 /* use_adj */ );
+}
+
+
+/* Encap-side iOAM frame worker (IPv4): run the shared iOAM handler on
+ * each buffer and forward to ip4-lookup unless the handler overrides
+ * the next index. */
+static uword
+vxlan_gpe_encap_ioam_v4 (vlib_main_t * vm,
+			 vlib_node_runtime_t * node,
+			 vlib_frame_t * from_frame)
+{
+  u32 n_left_from, next_index, *from, *to_next;
+  vxlan_gpe_main_t *ngm = &vxlan_gpe_main;
+
+  from = vlib_frame_vector_args (from_frame);
+  n_left_from = from_frame->n_vectors;
+
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Dual-buffer loop; needs >= 4 so we can prefetch two ahead. */
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+	  u32 bi0, bi1;
+	  vlib_buffer_t *b0, *b1;
+	  u32 next0, next1;
+
+	  next0 = next1 = VXLAN_GPE_ENCAP_IOAM_V4_NEXT_IP4_LOOKUP;
+
+	  /* Prefetch next iteration. */
+	  {
+	    vlib_buffer_t *p2, *p3;
+
+	    p2 = vlib_get_buffer (vm, from[2]);
+	    p3 = vlib_get_buffer (vm, from[3]);
+
+	    vlib_prefetch_buffer_header (p2, LOAD);
+	    vlib_prefetch_buffer_header (p3, LOAD);
+
+	    CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+	    CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+	  }
+
+	  bi0 = from[0];
+	  bi1 = from[1];
+	  to_next[0] = bi0;
+	  to_next[1] = bi1;
+	  from += 2;
+	  to_next += 2;
+	  n_left_to_next -= 2;
+	  n_left_from -= 2;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+	  b1 = vlib_get_buffer (vm, bi1);
+
+	  vxlan_gpe_encap_ioam_v4_two_inline (vm, node, ngm, b0, b1,
+					      &next0, &next1);
+
+
+	  /* NOTE(review): unlike the single loop below, the dual loop
+	   * never adds trace records — confirm intentional. */
+	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
+					   n_left_to_next, bi0, bi1, next0,
+					   next1);
+	}
+
+      /* Single-buffer cleanup loop for the frame remainder. */
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  u32 bi0;
+	  vlib_buffer_t *b0;
+	  u32 next0 = VXLAN_GPE_ENCAP_IOAM_V4_NEXT_IP4_LOOKUP;
+
+	  bi0 = from[0];
+	  to_next[0] = bi0;
+	  from += 1;
+	  to_next += 1;
+	  n_left_from -= 1;
+	  n_left_to_next -= 1;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+
+	  vxlan_gpe_encap_decap_ioam_v4_one_inline (vm, node, b0,
+						    &next0,
+						    VXLAN_GPE_ENCAP_IOAM_V4_NEXT_DROP,
+						    0 /* use_adj */ );
+
+	  /* Trace record is zero-filled; tr is otherwise unused. */
+	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+	    {
+	      vxlan_gpe_ioam_v4_trace_t *tr = vlib_add_trace (vm, node, b0,
+							      sizeof (*tr));
+	    }
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+					   n_left_to_next, bi0, next0);
+	}
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  return from_frame->n_vectors;
+}
+
+
+
+/* *INDENT-OFF* */
+/* Graph node performing iOAM processing on freshly encapsulated IPv4
+ * VxLAN-GPE packets before they continue to ip4-lookup. */
+VLIB_REGISTER_NODE (vxlan_gpe_encap_ioam_v4_node) = {
+  .function = vxlan_gpe_encap_ioam_v4,
+  .name = "vxlan-gpe-encap-ioam-v4",
+  .vector_size = sizeof (u32),
+  .format_trace = format_vxlan_gpe_ioam_v4_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+
+  .n_errors = ARRAY_LEN(vxlan_gpe_encap_ioam_v4_error_strings),
+  .error_strings = vxlan_gpe_encap_ioam_v4_error_strings,
+
+  .n_next_nodes = VXLAN_GPE_ENCAP_IOAM_V4_N_NEXT,
+
+  .next_nodes = {
+    [VXLAN_GPE_ENCAP_IOAM_V4_NEXT_IP4_LOOKUP] = "ip4-lookup",
+    [VXLAN_GPE_ENCAP_IOAM_V4_NEXT_DROP] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/ioam/lib-vxlan-gpe/ioam_pop.c b/src/plugins/ioam/lib-vxlan-gpe/ioam_pop.c
new file mode 100644
index 00000000000..55c33b144a1
--- /dev/null
+++ b/src/plugins/ioam/lib-vxlan-gpe/ioam_pop.c
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h>
+
+/* Statistics (not really errors) */
+#define foreach_vxlan_gpe_pop_ioam_v4_error \
+_(POPPED, "good packets popped")
+
+/* Human-readable counter strings, generated from the error list above. */
+static char *vxlan_gpe_pop_ioam_v4_error_strings[] = {
+#define _(sym,string) string,
+  foreach_vxlan_gpe_pop_ioam_v4_error
+#undef _
+};
+
+/* Counter indices matching the strings; _N_ERROR is the total count. */
+typedef enum
+{
+#define _(sym,str) VXLAN_GPE_POP_IOAM_V4_ERROR_##sym,
+  foreach_vxlan_gpe_pop_ioam_v4_error
+#undef _
+  VXLAN_GPE_POP_IOAM_V4_N_ERROR,
+} vxlan_gpe_pop_ioam_v4_error_t;
+
+/* Per-packet trace record: a verbatim capture of the iOAM header and
+ * its option TLVs, formatted by format_vxlan_gpe_pop_ioam_v4_trace. */
+typedef struct
+{
+  ioam_trace_t fmt_trace;
+} vxlan_gpe_pop_ioam_v4_trace_t;
+
+
+/* Format an iOAM pop trace record: print the header fields, then walk
+ * the captured option TLVs and dispatch each to the registered
+ * per-option-type trace formatter. */
+u8 *
+format_vxlan_gpe_pop_ioam_v4_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  vxlan_gpe_pop_ioam_v4_trace_t *t1
+    = va_arg (*args, vxlan_gpe_pop_ioam_v4_trace_t *);
+  ioam_trace_t *t = &(t1->fmt_trace);
+  vxlan_gpe_ioam_option_t *fmt_trace0;
+  vxlan_gpe_ioam_option_t *opt0, *limit0;
+  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+
+  u8 type0;
+
+  fmt_trace0 = (vxlan_gpe_ioam_option_t *) t->option_data;
+
+  s = format (s, "VXLAN_GPE_IOAM_POP: next_index %d len %d traced %d",
+	      t->next_index, fmt_trace0->length, t->trace_len);
+
+  opt0 = (vxlan_gpe_ioam_option_t *) (fmt_trace0 + 1);
+  /* BUG FIX: trace_len is a byte count, so it must be added to the
+   * u8 * BEFORE casting back to option *.  The old code added it to
+   * the cast result, scaling it by sizeof (vxlan_gpe_ioam_option_t)
+   * and running limit0 far past the captured data. */
+  limit0 = (vxlan_gpe_ioam_option_t *) (((u8 *) fmt_trace0) + t->trace_len);
+
+  while (opt0 < limit0)
+    {
+      type0 = opt0->type;
+      switch (type0)
+	{
+	case 0:		/* Pad, just stop */
+	  /* BUG FIX: a pad is one byte; add 1 to the u8 * before the
+	   * cast (the old cast-then-+1 advanced a whole option size). */
+	  opt0 = (vxlan_gpe_ioam_option_t *) (((u8 *) opt0) + 1);
+	  break;
+
+	default:
+	  if (hm->trace[type0])
+	    {
+	      s = (*hm->trace[type0]) (s, opt0);
+	    }
+	  else
+	    {
+	      s =
+		format (s, "\n unrecognized option %d length %d", type0,
+			opt0->length);
+	    }
+	  opt0 =
+	    (vxlan_gpe_ioam_option_t *) (((u8 *) opt0) + opt0->length +
+					 sizeof (vxlan_gpe_ioam_option_t));
+	  break;
+	}
+    }
+
+  return s;
+}
+
+/* Strip the outer ip4/udp/vxlan-gpe headers plus the iOAM header that
+ * follows them, leaving b0's current data pointer on the inner payload. */
+always_inline void
+vxlan_gpe_ioam_pop_v4 (vlib_main_t * vm, vlib_node_runtime_t * node,
+		       vlib_buffer_t * b0)
+{
+  ip4_header_t *outer_ip4 = vlib_buffer_get_current (b0);
+  udp_header_t *udp = (udp_header_t *) (outer_ip4 + 1);
+  vxlan_gpe_header_t *gpe = (vxlan_gpe_header_t *) (udp + 1);
+  vxlan_gpe_ioam_hdr_t *ioam_hdr = (vxlan_gpe_ioam_hdr_t *) (gpe + 1);
+
+  /* Advance past all outer headers and the variable-length iOAM data
+   * in a single step. */
+  vlib_buffer_advance (b0,
+		       (word) (sizeof (ip4_header_t) +
+			       sizeof (udp_header_t) +
+			       sizeof (vxlan_gpe_header_t) +
+			       ioam_hdr->length));
+}
+
+
+
+/* Parse and consume the iOAM header of one VxLAN-GPE packet: validate
+ * the length, run each registered pop handler over the option TLVs,
+ * choose the next node from the encapsulated protocol, record a trace
+ * if enabled, and finally strip the iOAM bytes from the buffer. */
+always_inline void
+vxlan_gpe_pop_ioam_v4_one_inline (vlib_main_t * vm,
+				  vlib_node_runtime_t * node,
+				  vxlan_gpe_main_t * ngm,
+				  vlib_buffer_t * b0, u32 * next0)
+{
+  CLIB_UNUSED (ip4_header_t * ip0);
+  CLIB_UNUSED (udp_header_t * udp_hdr0);
+  CLIB_UNUSED (vxlan_gpe_header_t * gpe_hdr0);
+  CLIB_UNUSED (vxlan_gpe_ioam_hdr_t * gpe_ioam0);
+  CLIB_UNUSED (vxlan_gpe_ioam_option_t * opt0);
+  CLIB_UNUSED (vxlan_gpe_ioam_option_t * limit0);
+  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+
+
+  /* Locate the iOAM header behind ip4/udp/vxlan-gpe. */
+  ip0 = vlib_buffer_get_current (b0);
+  udp_hdr0 = (udp_header_t *) (ip0 + 1);
+  gpe_hdr0 = (vxlan_gpe_header_t *) (udp_hdr0 + 1);
+  gpe_ioam0 = (vxlan_gpe_ioam_hdr_t *) (gpe_hdr0 + 1);
+  opt0 = (vxlan_gpe_ioam_option_t *) (gpe_ioam0 + 1);
+  limit0 = (vxlan_gpe_ioam_option_t *) ((u8 *) gpe_ioam0 + gpe_ioam0->length);
+
+  /*
+   * Basic validity checks
+   */
+  if (gpe_ioam0->length > clib_net_to_host_u16 (ip0->length))
+    {
+      next0[0] = VXLAN_GPE_INPUT_NEXT_DROP;
+      goto trace00;
+    }
+
+  /* Scan the set of h-b-h options, process ones that we understand */
+  while (opt0 < limit0)
+    {
+      u8 type0;
+      type0 = opt0->type;
+      switch (type0)
+	{
+	case 0:		/* Pad1 */
+	  /* BUG FIX: a Pad1 option is exactly one byte, so add 1 to
+	   * the u8 * BEFORE casting back.  The old cast-then-+1
+	   * advanced by sizeof (vxlan_gpe_ioam_option_t), skipping
+	   * real options after padding. */
+	  opt0 = (vxlan_gpe_ioam_option_t *) (((u8 *) opt0) + 1);
+	  continue;
+	case 1:		/* PadN */
+	  break;
+	default:
+	  if (hm->pop_options[type0])
+	    {
+	      if ((*hm->pop_options[type0]) (ip0, opt0) < 0)
+		{
+		  next0[0] = VXLAN_GPE_INPUT_NEXT_DROP;
+		  goto trace00;
+		}
+	    }
+	  break;
+	}
+      /* Skip this option's payload plus its TLV header. */
+      opt0 =
+	(vxlan_gpe_ioam_option_t *) (((u8 *) opt0) + opt0->length +
+				     sizeof (vxlan_gpe_ioam_hdr_t));
+    }
+
+
+  /* Steer by the encapsulated protocol; unknown protocols are dropped. */
+  next0[0] =
+    (gpe_ioam0->protocol < VXLAN_GPE_PROTOCOL_MAX) ?
+    ngm->
+    decap_next_node_list[gpe_ioam0->protocol] : VXLAN_GPE_INPUT_NEXT_DROP;
+
+trace00:
+  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+    {
+      vxlan_gpe_pop_ioam_v4_trace_t *t =
+	vlib_add_trace (vm, node, b0, sizeof (*t));
+      u32 trace_len = gpe_ioam0->length;
+      t->fmt_trace.next_index = next0[0];
+      /* Capture the h-b-h option verbatim, clamped to the trace buffer */
+      trace_len =
+	trace_len <
+	ARRAY_LEN (t->fmt_trace.
+		   option_data) ? trace_len : ARRAY_LEN (t->fmt_trace.
+							 option_data);
+      t->fmt_trace.trace_len = trace_len;
+      clib_memcpy (&(t->fmt_trace.option_data), gpe_ioam0, trace_len);
+    }
+
+  /* Remove the iOAM header inside the VxLAN-GPE header */
+  vxlan_gpe_ioam_pop_v4 (vm, node, b0);
+  return;
+}
+
+/* Dual-buffer pop: the two buffers are independent, so simply run the
+ * single-buffer handler on each in turn. */
+always_inline void
+vxlan_gpe_pop_ioam_v4_two_inline (vlib_main_t * vm,
+				  vlib_node_runtime_t * node,
+				  vxlan_gpe_main_t * ngm,
+				  vlib_buffer_t * b0, vlib_buffer_t * b1,
+				  u32 * next0, u32 * next1)
+{
+  vxlan_gpe_pop_ioam_v4_one_inline (vm, node, ngm, b0, next0);
+  vxlan_gpe_pop_ioam_v4_one_inline (vm, node, ngm, b1, next1);
+}
+
+
+
+/* Frame worker for the iOAM pop node: process and strip the iOAM
+ * header from each buffer, then enqueue on the next index chosen from
+ * the encapsulated protocol.
+ * NOTE(review): is_ipv6 is accepted but unused — v4-only today. */
+static uword
+vxlan_gpe_pop_ioam (vlib_main_t * vm,
+		    vlib_node_runtime_t * node,
+		    vlib_frame_t * from_frame, u8 is_ipv6)
+{
+  u32 n_left_from, next_index, *from, *to_next;
+  vxlan_gpe_main_t *ngm = &vxlan_gpe_main;
+
+  from = vlib_frame_vector_args (from_frame);
+  n_left_from = from_frame->n_vectors;
+
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Dual-buffer loop; needs >= 4 so we can prefetch two ahead. */
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+	  u32 bi0, bi1;
+	  vlib_buffer_t *b0, *b1;
+	  u32 next0, next1;	/* always written by _two_inline below */
+
+	  /* Prefetch next iteration. */
+	  {
+	    vlib_buffer_t *p2, *p3;
+
+	    p2 = vlib_get_buffer (vm, from[2]);
+	    p3 = vlib_get_buffer (vm, from[3]);
+
+	    vlib_prefetch_buffer_header (p2, LOAD);
+	    vlib_prefetch_buffer_header (p3, LOAD);
+
+	    CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+	    CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+	  }
+
+	  bi0 = from[0];
+	  bi1 = from[1];
+	  to_next[0] = bi0;
+	  to_next[1] = bi1;
+	  from += 2;
+	  to_next += 2;
+	  n_left_to_next -= 2;
+	  n_left_from -= 2;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+	  b1 = vlib_get_buffer (vm, bi1);
+
+	  vxlan_gpe_pop_ioam_v4_two_inline (vm, node, ngm, b0, b1, &next0,
+					    &next1);
+
+
+	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
+					   n_left_to_next, bi0, bi1, next0,
+					   next1);
+	}
+
+      /* Single-buffer cleanup loop for the frame remainder. */
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  u32 bi0;
+	  vlib_buffer_t *b0;
+	  u32 next0;		/* written by _one_inline below */
+
+	  bi0 = from[0];
+	  to_next[0] = bi0;
+	  from += 1;
+	  to_next += 1;
+	  n_left_from -= 1;
+	  n_left_to_next -= 1;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+
+	  vxlan_gpe_pop_ioam_v4_one_inline (vm, node, ngm, b0, &next0);
+
+
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+					   n_left_to_next, bi0, next0);
+	}
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  return from_frame->n_vectors;
+}
+
+
+/* IPv4 flavour of the generic pop worker. */
+static uword
+vxlan_gpe_pop_ioam_v4 (vlib_main_t * vm,
+		       vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+{
+  return vxlan_gpe_pop_ioam (vm, node, from_frame, 0 /* is_ipv6 */ );
+}
+
+/* *INDENT-OFF* */
+/* Graph node that pops the iOAM header from received IPv4 VxLAN-GPE
+ * packets; reuses the vxlan-gpe input next-node set for dispatch. */
+VLIB_REGISTER_NODE (vxlan_gpe_pop_ioam_v4_node) = {
+  .function = vxlan_gpe_pop_ioam_v4,
+  .name = "vxlan-gpe-pop-ioam-v4",
+  .vector_size = sizeof (u32),
+  .format_trace = format_vxlan_gpe_pop_ioam_v4_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+
+  .n_errors = ARRAY_LEN(vxlan_gpe_pop_ioam_v4_error_strings),
+  .error_strings = vxlan_gpe_pop_ioam_v4_error_strings,
+
+  .n_next_nodes = VXLAN_GPE_INPUT_N_NEXT,
+
+  .next_nodes = {
+#define _(s,n) [VXLAN_GPE_INPUT_NEXT_##s] = n,
+    foreach_vxlan_gpe_input_next
+#undef _
+  },
+};
+/* *INDENT-ON* */
+
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/ioam/lib-vxlan-gpe/ioam_transit.c b/src/plugins/ioam/lib-vxlan-gpe/ioam_transit.c
new file mode 100644
index 00000000000..b42c357c79b
--- /dev/null
+++ b/src/plugins/ioam/lib-vxlan-gpe/ioam_transit.c
@@ -0,0 +1,188 @@
+ /*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ip/udp.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam_packet.h>
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h>
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam_util.h>
+#include <vnet/fib/ip6_fib.h>
+#include <vnet/fib/ip4_fib.h>
+#include <vnet/fib/fib_entry.h>
+
+/* Statistics (not really errors) */
+#define foreach_vxlan_gpe_transit_ioam_error \
+_(ENCAPSULATED, "good packets encapsulated")
+
+/* Human-readable counter strings, generated from the error list above. */
+static char *vxlan_gpe_transit_ioam_error_strings[] = {
+#define _(sym,string) string,
+  foreach_vxlan_gpe_transit_ioam_error
+#undef _
+};
+
+/* Counter indices matching the strings; _N_ERROR is the total count. */
+typedef enum
+{
+#define _(sym,str) VXLAN_GPE_TRANSIT_IOAM_ERROR_##sym,
+  foreach_vxlan_gpe_transit_ioam_error
+#undef _
+  VXLAN_GPE_TRANSIT_IOAM_N_ERROR,
+} vxlan_gpe_transit_ioam_error_t;
+
+/* Next-node indices for the transit iOAM feature node. */
+typedef enum
+{
+  VXLAN_GPE_TRANSIT_IOAM_NEXT_OUTPUT,
+  VXLAN_GPE_TRANSIT_IOAM_NEXT_DROP,
+  VXLAN_GPE_TRANSIT_IOAM_N_NEXT
+} vxlan_gpe_transit_ioam_next_t;
+
+
+/* *INDENT-OFF* */
+/* Register this node on the "ip4-output" feature arc so forwarded
+ * traffic passes through it just before interface-output. */
+VNET_FEATURE_INIT (vxlan_gpe_transit_ioam, static) =
+{
+  .arc_name = "ip4-output",
+  .node_name = "vxlan-gpe-transit-ioam",
+  .runs_before = VNET_FEATURES ("interface-output"),
+};
+/* *INDENT-ON* */
+
+/* ip4-output feature node: inspect outbound traffic for IPv4/UDP
+ * VxLAN-GPE packets whose next-protocol is iOAM and whose destination
+ * VTEP is registered in hm->dst_by_ip4; run transit iOAM processing on
+ * those before they leave via interface-output. */
+static uword
+vxlan_gpe_transit_ioam (vlib_main_t * vm,
+			vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+{
+  u32 n_left_from, next_index, *from, *to_next;
+
+  from = vlib_frame_vector_args (from_frame);
+  n_left_from = from_frame->n_vectors;
+
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  u32 bi0;
+	  vlib_buffer_t *b0;
+	  u32 next0 = VXLAN_GPE_TRANSIT_IOAM_NEXT_OUTPUT;
+
+	  bi0 = from[0];
+	  to_next[0] = bi0;
+	  from += 1;
+	  to_next += 1;
+	  n_left_from -= 1;
+	  n_left_to_next -= 1;
+	  ip4_header_t *ip0;
+	  u32 iph_offset = 0;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+	  /* The L2 rewrite precedes the IP header at this point on the
+	   * output arc; skip over it to reach the IP header. */
+	  iph_offset = vnet_buffer (b0)->ip.save_rewrite_length;
+	  ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b0)
+				  + iph_offset);
+
+	  /* Only IPv4 packets are inspected; anything else is just
+	   * forwarded.  NOTE(review): PREDICT_FALSE on the IPv4 test
+	   * looks inverted for an ip4-output feature — confirm. */
+	  if (PREDICT_FALSE
+	      ((ip0->ip_version_and_header_length & 0xF0) == 0x40))
+	    {
+	      /* ipv4 packets */
+	      udp_header_t *udp_hdr0 = (udp_header_t *) (ip0 + 1);
+	      if (PREDICT_FALSE
+		  ((ip0->protocol == IP_PROTOCOL_UDP) &&
+		   (clib_net_to_host_u16 (udp_hdr0->dst_port) ==
+		    UDP_DST_PORT_vxlan_gpe)))
+		{
+
+		  /* Check the iOAM header */
+		  vxlan_gpe_header_t *gpe_hdr0 =
+		    (vxlan_gpe_header_t *) (udp_hdr0 + 1);
+
+		  if (PREDICT_FALSE
+		      (gpe_hdr0->protocol == VXLAN_GPE_PROTOCOL_IOAM))
+		    {
+		      uword *t = NULL;
+		      vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+		      fib_prefix_t key4;
+		      memset (&key4, 0, sizeof (key4));
+		      key4.fp_proto = FIB_PROTOCOL_IP4;
+		      key4.fp_addr.ip4.as_u32 = ip0->dst_address.as_u32;
+		      /* Only tunnels registered via the transit-enable
+		       * API are processed. */
+		      t = hash_get_mem (hm->dst_by_ip4, &key4);
+		      if (t)
+			{
+
+			  /* assumes the rewrite is a plain ethernet
+			   * header — TODO confirm for other link types */
+			  vlib_buffer_advance (b0,
+					       (word) (sizeof
+						       (ethernet_header_t)));
+			  vxlan_gpe_encap_decap_ioam_v4_one_inline (vm, node,
+								    b0,
+								    &next0,
+								    VXLAN_GPE_TRANSIT_IOAM_NEXT_DROP,
+								    1
+								    /* use_adj */
+			    );
+			  vlib_buffer_advance (b0,
+					       -(word) (sizeof
+							(ethernet_header_t)));
+			}
+		    }
+		}
+	    }
+
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+					   n_left_to_next, bi0, next0);
+	}
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  return from_frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+/* Graph node registration for the transit iOAM feature node. */
+VLIB_REGISTER_NODE (vxlan_gpe_transit_ioam_node) = {
+  .function = vxlan_gpe_transit_ioam,
+  .name = "vxlan-gpe-transit-ioam",
+  .vector_size = sizeof (u32),
+  .format_trace = format_vxlan_gpe_ioam_v4_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+
+  .n_errors = ARRAY_LEN(vxlan_gpe_transit_ioam_error_strings),
+  .error_strings = vxlan_gpe_transit_ioam_error_strings,
+
+  .n_next_nodes = VXLAN_GPE_TRANSIT_IOAM_N_NEXT,
+
+  .next_nodes = {
+    [VXLAN_GPE_TRANSIT_IOAM_NEXT_OUTPUT] = "interface-output",
+    [VXLAN_GPE_TRANSIT_IOAM_NEXT_DROP] = "error-drop",
+  },
+
+};
+/* *INDENT-ON* */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/ioam/lib-vxlan-gpe/ioam_vxlan_gpe.api b/src/plugins/ioam/lib-vxlan-gpe/ioam_vxlan_gpe.api
new file mode 100644
index 00000000000..056529a4e8a
--- /dev/null
+++ b/src/plugins/ioam/lib-vxlan-gpe/ioam_vxlan_gpe.api
@@ -0,0 +1,181 @@
+/* Hey Emacs use -*- mode: C -*- */
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/** \brief iOAM Over VxLAN-GPE - Set iOAM transport for VxLAN-GPE
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param id - profile id
+ @param trace_ppc - Trace PPC (none/encap/decap)
+ @param pow_enable - Proof of Work enabled or not flag
+ @param trace_enable - iOAM Trace enabled or not flag
+
+*/
+define vxlan_gpe_ioam_enable {
+ u32 client_index;
+ u32 context;
+ u16 id;
+ u8 trace_ppc;
+ u8 pow_enable;
+ u8 trace_enable;
+};
+
+/** \brief iOAM Over VxLAN-GPE - Set iOAM transport for VXLAN-GPE reply
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+*/
+define vxlan_gpe_ioam_enable_reply {
+ u32 context;
+ i32 retval;
+};
+
+
+/** \brief iOAM for VxLAN-GPE disable
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param id - profile id
+*/
+define vxlan_gpe_ioam_disable
+{
+ u32 client_index;
+ u32 context;
+ u16 id;
+};
+
+/** \brief vxlan_gpe_ioam disable response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+*/
+define vxlan_gpe_ioam_disable_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Enable iOAM for a VNI (VXLAN-GPE)
+    @param client_index - opaque cookie to identify the sender
+    @param context - sender context, to match reply w/ request
+    @param vni - VXLAN-GPE VNI
+    @param local - IPv4/6 Address of the local VTEP
+    @param remote - IPv4/6 Address of the remote VTEP
+    @param is_ipv6 - local/remote carry IPv6 addresses when non-zero
+
+*/
+define vxlan_gpe_ioam_vni_enable {
+  u32 client_index;
+  u32 context;
+  u32 vni;
+  u8 local[16];
+  u8 remote[16];
+  u8 is_ipv6;
+};
+
+/** \brief Reply to enable iOAM for a VNI (VXLAN-GPE)
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+
+*/
+define vxlan_gpe_ioam_vni_enable_reply {
+ u32 client_index;
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Disable iOAM for a VNI (VXLAN-GPE)
+    @param client_index - opaque cookie to identify the sender
+    @param context - sender context, to match reply w/ request
+    @param vni - VXLAN-GPE VNI
+    @param local - IPv4/6 Address of the local VTEP
+    @param remote - IPv4/6 Address of the remote VTEP
+    @param is_ipv6 - local/remote carry IPv6 addresses when non-zero
+
+*/
+define vxlan_gpe_ioam_vni_disable {
+  u32 client_index;
+  u32 context;
+  u32 vni;
+  u8 local[16];
+  u8 remote[16];
+  u8 is_ipv6;
+};
+
+/** \brief Reply to disable iOAM for a VNI (VXLAN-GPE)
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+
+*/
+define vxlan_gpe_ioam_vni_disable_reply {
+ u32 client_index;
+ u32 context;
+ i32 retval;
+};
+
+
+/** \brief Enable iOAM for a VXLAN-GPE transit
+    @param client_index - opaque cookie to identify the sender
+    @param context - sender context, to match reply w/ request
+    @param dst_addr - IPv4/6 Address of the local VTEP
+    @param outer_fib_index- FIB index
+    @param is_ipv6 - dst_addr carries an IPv6 address when non-zero
+
+*/
+define vxlan_gpe_ioam_transit_enable {
+  u32 client_index;
+  u32 context;
+  u32 outer_fib_index;
+  u8 dst_addr[16];
+  u8 is_ipv6;
+};
+
+/** \brief Reply to enable iOAM for VXLAN-GPE transit
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+
+*/
+define vxlan_gpe_ioam_transit_enable_reply {
+ u32 client_index;
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Disable iOAM for VXLAN-GPE transit
+    @param client_index - opaque cookie to identify the sender
+    @param context - sender context, to match reply w/ request
+    @param dst_addr - IPv4/6 Address of the local VTEP
+    @param outer_fib_index- FIB index
+    @param is_ipv6 - dst_addr carries an IPv6 address when non-zero
+
+*/
+define vxlan_gpe_ioam_transit_disable {
+  u32 client_index;
+  u32 context;
+  u32 outer_fib_index;
+  u8 dst_addr[16];
+  u8 is_ipv6;
+};
+
+/** \brief Reply to disable iOAM for VXLAN-GPE transit
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+
+*/
+define vxlan_gpe_ioam_transit_disable_reply {
+ u32 client_index;
+ u32 context;
+ i32 retval;
+};
+
+
diff --git a/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_all_api_h.h b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_all_api_h.h
new file mode 100644
index 00000000000..06fa0d2cd34
--- /dev/null
+++ b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_all_api_h.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/* Include the generated file, see BUILT_SOURCES in Makefile.am */
+#include <ioam/lib-vxlan-gpe/ioam_vxlan_gpe.api.h>
diff --git a/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_api.c b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_api.c
new file mode 100644
index 00000000000..68752365f82
--- /dev/null
+++ b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_api.c
@@ -0,0 +1,378 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ *------------------------------------------------------------------
+ * vxlan_gpe_api.c - iOAM VxLAN-GPE related APIs to create
+ * and maintain profiles
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/plugin/plugin.h>
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h>
+
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+#include <vlibsocket/api.h>
+
+/* define message IDs */
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_msg_enum.h>
+
+/* define message structures */
+#define vl_typedefs
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_all_api_h.h>
+#undef vl_typedefs
+
+/* define generated endian-swappers */
+#define vl_endianfun
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_all_api_h.h>
+#undef vl_printfun
+
+/* Get the API version number */
+#define vl_api_version(n,v) static u32 api_version=(v);
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_all_api_h.h>
+#undef vl_api_version
+
+/*
+ * A handy macro to set up a message reply.
+ * Assumes that the following variables are available:
+ * mp - pointer to request message
+ * rmp - pointer to reply message type
+ * rv - return value
+ *
+ * NOTE(review): the macro body ends with "} while(0);" — the trailing
+ * semicolon inside the #define defeats the do/while(0) idiom (the call
+ * site's own ';' then forms an empty statement).  Harmless in the
+ * statement contexts used here, but fragile in an if/else without
+ * braces.  Also, retval is converted with ntohl() on a host-order
+ * value; htonl() was intended (numerically identical, semantically
+ * misleading).
+ */
+
+#define VXLAN_GPE_REPLY_MACRO(t) \
+do { \
+ unix_shared_memory_queue_t * q = \
+ vl_api_client_index_to_input_queue (mp->client_index); \
+ if (!q) \
+ return; \
+ \
+ rmp = vl_msg_api_alloc (sizeof (*rmp)); \
+ rmp->_vl_msg_id = ntohs((t)+sm->msg_id_base); \
+ rmp->context = mp->context; \
+ rmp->retval = ntohl(rv); \
+ \
+ vl_msg_api_send_shmem (q, (u8 *)&rmp); \
+} while(0);
+
+/* *INDENT-OFF* */
+/* Variant that runs an extra statement block before sending the reply.
+ * NOTE(review): unlike VXLAN_GPE_REPLY_MACRO above, this does NOT add
+ * sm->msg_id_base to (t) — confirm any caller passes a fully-offset
+ * message id, or replies will be mis-dispatched. */
+#define VXLAN_GPE_REPLY_MACRO2(t, body) \
+do { \
+ unix_shared_memory_queue_t * q; \
+ rv = vl_msg_api_pd_handler (mp, rv); \
+ q = vl_api_client_index_to_input_queue (mp->client_index); \
+ if (!q) \
+ return; \
+ \
+ rmp = vl_msg_api_alloc (sizeof (*rmp)); \
+ rmp->_vl_msg_id = ntohs((t)); \
+ rmp->context = mp->context; \
+ rmp->retval = ntohl(rv); \
+ do {body;} while (0); \
+ vl_msg_api_send_shmem (q, (u8 *)&rmp); \
+} while(0);
+/* *INDENT-ON* */
+
+/* List of message types that this plugin understands */
+
+#define foreach_vxlan_gpe_plugin_api_msg \
+_(VXLAN_GPE_IOAM_ENABLE, vxlan_gpe_ioam_enable) \
+_(VXLAN_GPE_IOAM_DISABLE, vxlan_gpe_ioam_disable) \
+_(VXLAN_GPE_IOAM_VNI_ENABLE, vxlan_gpe_ioam_vni_enable) \
+_(VXLAN_GPE_IOAM_VNI_DISABLE, vxlan_gpe_ioam_vni_disable) \
+_(VXLAN_GPE_IOAM_TRANSIT_ENABLE, vxlan_gpe_ioam_transit_enable) \
+_(VXLAN_GPE_IOAM_TRANSIT_DISABLE, vxlan_gpe_ioam_transit_disable) \
+
+
+/* Handler for VXLAN_GPE_IOAM_ENABLE: turn on the requested iOAM
+ * options (trace / PoT / per-packet-counters) globally for the plugin
+ * and send a reply carrying the clib error code (0 on success). */
+static void vl_api_vxlan_gpe_ioam_enable_t_handler
+ (vl_api_vxlan_gpe_ioam_enable_t * mp)
+{
+ int rv = 0;
+ vl_api_vxlan_gpe_ioam_enable_reply_t *rmp;
+ clib_error_t *error;
+ vxlan_gpe_ioam_main_t *sm = &vxlan_gpe_ioam_main;
+
+ /* Ignoring the profile id as currently a single profile
+ * is supported */
+ error =
+ vxlan_gpe_ioam_enable (mp->trace_enable, mp->pow_enable, mp->trace_ppc);
+ if (error)
+ {
+ clib_error_report (error);
+ rv = clib_error_get_code (error);
+ }
+
+ VXLAN_GPE_REPLY_MACRO (VL_API_VXLAN_GPE_IOAM_ENABLE_REPLY);
+}
+
+/* Handler for VXLAN_GPE_IOAM_DISABLE: turn off all iOAM options.
+ * Note the request's own flags are ignored — all three options are
+ * forced to 0 regardless of what the client sent. */
+static void vl_api_vxlan_gpe_ioam_disable_t_handler
+ (vl_api_vxlan_gpe_ioam_disable_t * mp)
+{
+ int rv = 0;
+ vl_api_vxlan_gpe_ioam_disable_reply_t *rmp;
+ clib_error_t *error;
+ vxlan_gpe_ioam_main_t *sm = &vxlan_gpe_ioam_main;
+
+ /* Ignoring the profile id as currently a single profile
+ * is supported */
+ error = vxlan_gpe_ioam_disable (0, 0, 0);
+ if (error)
+ {
+ clib_error_report (error);
+ rv = clib_error_get_code (error);
+ }
+
+ VXLAN_GPE_REPLY_MACRO (VL_API_VXLAN_GPE_IOAM_DISABLE_REPLY);
+}
+
+/* Handler for VXLAN_GPE_IOAM_VNI_ENABLE: look up the IPv4 VXLAN-GPE
+ * tunnel keyed by (local, remote, vni) and install the iOAM rewrite on
+ * it using the globally-enabled option flags.
+ * NOTE(review): IPv6 tunnels are unsupported — an IPv6 request (and a
+ * failed tunnel lookup) returns WITHOUT sending a reply, leaving the
+ * API client waiting.  Consider replying with an error instead. */
+static void vl_api_vxlan_gpe_ioam_vni_enable_t_handler
+ (vl_api_vxlan_gpe_ioam_vni_enable_t * mp)
+{
+ int rv = 0;
+ vl_api_vxlan_gpe_ioam_vni_enable_reply_t *rmp;
+ clib_error_t *error;
+ vxlan_gpe_ioam_main_t *sm = &vxlan_gpe_ioam_main;
+ vxlan4_gpe_tunnel_key_t key4;
+ uword *p = NULL;
+ vxlan_gpe_main_t *gm = &vxlan_gpe_main;
+ vxlan_gpe_tunnel_t *t = 0;
+ vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+ u32 vni;
+
+
+ if (!mp->is_ipv6)
+ {
+ clib_memcpy (&key4.local, &mp->local, sizeof (key4.local));
+ clib_memcpy (&key4.remote, &mp->remote, sizeof (key4.remote));
+ /* VNI sits in the upper 24 bits of the key, matching tunnel encap */
+ vni = clib_net_to_host_u32 (mp->vni);
+ key4.vni = clib_host_to_net_u32 (vni << 8);
+ key4.pad = 0;
+
+ p = hash_get_mem (gm->vxlan4_gpe_tunnel_by_key, &key4);
+ }
+ else
+ {
+ return;
+ }
+
+ if (!p)
+ return;
+
+ t = pool_elt_at_index (gm->tunnels, p[0]);
+
+ error = vxlan_gpe_ioam_set (t, hm->has_trace_option,
+ hm->has_pot_option,
+ hm->has_ppc_option, mp->is_ipv6);
+
+
+ if (error)
+ {
+ clib_error_report (error);
+ rv = clib_error_get_code (error);
+ }
+
+ VXLAN_GPE_REPLY_MACRO (VL_API_VXLAN_GPE_IOAM_VNI_ENABLE_REPLY);
+}
+
+
+/* Handler for VXLAN_GPE_IOAM_VNI_DISABLE: look up the IPv4 VXLAN-GPE
+ * tunnel keyed by (local, remote, vni) and strip its iOAM rewrite.
+ * NOTE(review): as with vni_enable, an IPv6 request or a failed tunnel
+ * lookup returns WITHOUT sending a reply. */
+static void vl_api_vxlan_gpe_ioam_vni_disable_t_handler
+ (vl_api_vxlan_gpe_ioam_vni_disable_t * mp)
+{
+ int rv = 0;
+ /* Fix: use the *_vni_disable_reply_t type — the original declared
+ * the *_vni_enable_reply_t type here.  The layouts happen to match,
+ * but the declaration was wrong and would break silently if the two
+ * reply messages ever diverge. */
+ vl_api_vxlan_gpe_ioam_vni_disable_reply_t *rmp;
+ clib_error_t *error;
+ vxlan_gpe_ioam_main_t *sm = &vxlan_gpe_ioam_main;
+ vxlan4_gpe_tunnel_key_t key4;
+ uword *p = NULL;
+ vxlan_gpe_main_t *gm = &vxlan_gpe_main;
+ vxlan_gpe_tunnel_t *t = 0;
+ u32 vni;
+
+
+ if (!mp->is_ipv6)
+ {
+ clib_memcpy (&key4.local, &mp->local, sizeof (key4.local));
+ clib_memcpy (&key4.remote, &mp->remote, sizeof (key4.remote));
+ vni = clib_net_to_host_u32 (mp->vni);
+ key4.vni = clib_host_to_net_u32 (vni << 8);
+ key4.pad = 0;
+
+ p = hash_get_mem (gm->vxlan4_gpe_tunnel_by_key, &key4);
+ }
+ else
+ {
+ return;
+ }
+
+ if (!p)
+ return;
+
+ t = pool_elt_at_index (gm->tunnels, p[0]);
+
+ error = vxlan_gpe_ioam_clear (t, 0, 0, 0, 0);
+
+
+ if (error)
+ {
+ clib_error_report (error);
+ rv = clib_error_get_code (error);
+ }
+
+
+ VXLAN_GPE_REPLY_MACRO (VL_API_VXLAN_GPE_IOAM_VNI_DISABLE_REPLY);
+}
+
+/* Handler for VXLAN_GPE_IOAM_TRANSIT_ENABLE: enable the transit iOAM
+ * output feature for traffic towards dst_addr (IPv4 only; for an IPv6
+ * request dst_addr stays zeroed and the helper is a no-op). */
+static void vl_api_vxlan_gpe_ioam_transit_enable_t_handler
+ (vl_api_vxlan_gpe_ioam_transit_enable_t * mp)
+{
+ int rv = 0;
+ vl_api_vxlan_gpe_ioam_transit_enable_reply_t *rmp;
+ vxlan_gpe_ioam_main_t *sm = &vxlan_gpe_ioam_main;
+ ip46_address_t dst_addr;
+
+ memset (&dst_addr.ip4, 0, sizeof (dst_addr.ip4));
+ if (!mp->is_ipv6)
+ {
+ clib_memcpy (&dst_addr.ip4, &mp->dst_addr, sizeof (dst_addr.ip4));
+ }
+ /* is_ipv4 argument is the inverse of the wire flag */
+ rv = vxlan_gpe_enable_disable_ioam_for_dest (sm->vlib_main,
+ dst_addr,
+ ntohl (mp->outer_fib_index),
+ mp->is_ipv6 ? 0 : 1,
+ 1 /* is_add */ );
+
+ VXLAN_GPE_REPLY_MACRO (VL_API_VXLAN_GPE_IOAM_TRANSIT_ENABLE_REPLY);
+}
+
+/* Handler for VXLAN_GPE_IOAM_TRANSIT_DISABLE: remove the transit iOAM
+ * output feature for traffic towards dst_addr (IPv4 only). */
+static void vl_api_vxlan_gpe_ioam_transit_disable_t_handler
+ (vl_api_vxlan_gpe_ioam_transit_disable_t * mp)
+{
+ int rv = 0;
+ vl_api_vxlan_gpe_ioam_transit_disable_reply_t *rmp;
+ vxlan_gpe_ioam_main_t *sm = &vxlan_gpe_ioam_main;
+ ip46_address_t dst_addr;
+
+ memset (&dst_addr.ip4, 0, sizeof (dst_addr.ip4));
+ if (!mp->is_ipv6)
+ {
+ clib_memcpy (&dst_addr.ip4, &mp->dst_addr, sizeof (dst_addr.ip4));
+ }
+
+ rv = vxlan_gpe_ioam_disable_for_dest (sm->vlib_main,
+ dst_addr,
+ ntohl (mp->outer_fib_index),
+ mp->is_ipv6 ? 0 : 1);
+ VXLAN_GPE_REPLY_MACRO (VL_API_VXLAN_GPE_IOAM_TRANSIT_DISABLE_REPLY);
+}
+
+
+/*
+ * This routine exists to convince the vlib plugin framework that
+ * we haven't accidentally copied a random .dll into the plugin directory.
+ *
+ * Also collects global variable pointers passed from the vpp engine
+ */
+
+clib_error_t *
+vlib_plugin_register (vlib_main_t * vm, vnet_plugin_handoff_t * h,
+ int from_early_init)
+{
+ vxlan_gpe_ioam_main_t *sm = &vxlan_gpe_ioam_main;
+ clib_error_t *error = 0;
+
+ /* Cache main-structure pointers and the wall-clock/vlib time origin
+ * pair used later for timestamp conversion */
+ sm->vlib_main = vm;
+ sm->vnet_main = h->vnet_main;
+ sm->unix_time_0 = (u32) time (0); /* Store starting time */
+ sm->vlib_time_0 = vlib_time_now (vm);
+ return error;
+}
+
+/* Set up the API message handling tables.  Each message in
+ * foreach_vxlan_gpe_plugin_api_msg is registered at the id offset by
+ * this plugin's msg_id_base (allocated in vxlan_gpe_init). */
+static clib_error_t *
+vxlan_gpe_plugin_api_hookup (vlib_main_t * vm)
+{
+ vxlan_gpe_ioam_main_t *sm = &vxlan_gpe_ioam_main;
+#define _(N,n) \
+ vl_msg_api_set_handlers((VL_API_##N + sm->msg_id_base), \
+ #n, \
+ vl_api_##n##_t_handler, \
+ vl_noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_vxlan_gpe_plugin_api_msg;
+#undef _
+
+ return 0;
+}
+
+/* Plugin init: allocate the API message-id block, hook up the message
+ * handlers, splice the iOAM encap/decap nodes into the vxlan-gpe graph,
+ * create the per-destination hash tables and register the FIB node
+ * type.
+ * Fixes vs. original: (1) a vec_new() whose result was discarded (a
+ * pure allocation leak with no observable effect) has been removed;
+ * (2) a failure from vxlan_gpe_plugin_api_hookup() is now returned
+ * instead of being ignored before graph wiring. */
+static clib_error_t *
+vxlan_gpe_init (vlib_main_t * vm)
+{
+ vxlan_gpe_ioam_main_t *sm = &vxlan_gpe_ioam_main;
+ clib_error_t *error = 0;
+ u8 *name;
+ u32 encap_node_index = vxlan_gpe_encap_ioam_v4_node.index;
+ u32 decap_node_index = vxlan_gpe_decap_ioam_v4_node.index;
+ vlib_node_t *vxlan_gpe_encap_node = NULL;
+ vlib_node_t *vxlan_gpe_decap_node = NULL;
+ uword next_node = 0;
+
+ name = format (0, "ioam_vxlan_gpe_%08x%c", api_version, 0);
+
+ /* Ask for a correctly-sized block of API message decode slots */
+ sm->msg_id_base = vl_msg_api_get_msg_ids
+ ((char *) name, VL_MSG_FIRST_AVAILABLE);
+
+ error = vxlan_gpe_plugin_api_hookup (vm);
+ if (error)
+ {
+ vec_free (name);
+ return error;
+ }
+
+ /* Hook the ioam-encap node to vxlan-gpe-encap */
+ vxlan_gpe_encap_node = vlib_get_node_by_name (vm, (u8 *) "vxlan-gpe-encap");
+ sm->encap_v4_next_node =
+ vlib_node_add_next (vm, vxlan_gpe_encap_node->index, encap_node_index);
+
+ /* Hook the ioam-decap node behind vxlan4-gpe-input and claim the
+ * iOAM next-protocol value for it */
+ vxlan_gpe_decap_node =
+ vlib_get_node_by_name (vm, (u8 *) "vxlan4-gpe-input");
+ next_node =
+ vlib_node_add_next (vm, vxlan_gpe_decap_node->index, decap_node_index);
+ vxlan_gpe_register_decap_protocol (VXLAN_GPE_PROTOCOL_IOAM, next_node);
+
+ /* Per-destination transit state, keyed by fib_prefix_t */
+ sm->dst_by_ip4 = hash_create_mem (0, sizeof (fib_prefix_t), sizeof (uword));
+
+ sm->dst_by_ip6 = hash_create_mem (0, sizeof (fib_prefix_t), sizeof (uword));
+
+ vxlan_gpe_ioam_interface_init ();
+ vec_free (name);
+
+ return error;
+}
+
+VLIB_INIT_FUNCTION (vxlan_gpe_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.c b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.c
new file mode 100644
index 00000000000..6c04d9af210
--- /dev/null
+++ b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.c
@@ -0,0 +1,773 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <vnet/vxlan-gpe/vxlan_gpe_packet.h>
+#include <vnet/ip/format.h>
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h>
+#include <vnet/fib/ip6_fib.h>
+#include <vnet/fib/ip4_fib.h>
+#include <vnet/fib/fib_entry.h>
+
+/* Singleton state for the iOAM VXLAN-GPE plugin */
+vxlan_gpe_ioam_main_t vxlan_gpe_ioam_main;
+
+/* Build the tunnel's encap rewrite with an iOAM header (and the trace
+ * and/or PoT options that are registered) appended after the VXLAN-GPE
+ * header.  Returns 0 on success, -1 when no option is requested or the
+ * trace option writer fails.
+ * NOTE(review): a PoT option-writer failure is silently skipped even
+ * though its size was already counted into the header length — confirm
+ * intended. */
+int
+vxlan_gpe_ioam_set_rewrite (vxlan_gpe_tunnel_t * t, int has_trace_option,
+ int has_pot_option, int has_ppc_option,
+ u8 ipv6_set)
+{
+ vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+ u32 size;
+ vxlan_gpe_ioam_hdr_t *vxlan_gpe_ioam_hdr;
+ u8 *current;
+ u8 trace_data_size = 0;
+ u8 pot_data_size = 0;
+
+ if (has_trace_option == 0 && has_pot_option == 0)
+ return -1;
+
+ /* Work out how much space we need */
+ size = sizeof (vxlan_gpe_ioam_hdr_t);
+
+ if (has_trace_option
+ && hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_TRACE] != 0)
+ {
+ size += sizeof (vxlan_gpe_ioam_option_t);
+ size += hm->options_size[VXLAN_GPE_OPTION_TYPE_IOAM_TRACE];
+ }
+ if (has_pot_option
+ && hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT] != 0)
+ {
+ size += sizeof (vxlan_gpe_ioam_option_t);
+ size += hm->options_size[VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT];
+ }
+
+ t->rewrite_size = size;
+
+ /* Rebuild the encap rewrite with next-protocol = iOAM, then locate
+ * the iOAM header just past the outer IP/UDP/VXLAN-GPE headers */
+ if (!ipv6_set)
+ {
+ vxlan4_gpe_rewrite (t, size, VXLAN_GPE_PROTOCOL_IOAM,
+ hm->encap_v4_next_node);
+ vxlan_gpe_ioam_hdr =
+ (vxlan_gpe_ioam_hdr_t *) (t->rewrite +
+ sizeof (ip4_vxlan_gpe_header_t));
+ }
+ else
+ {
+ vxlan6_gpe_rewrite (t, size, VXLAN_GPE_PROTOCOL_IOAM,
+ VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP);
+ vxlan_gpe_ioam_hdr =
+ (vxlan_gpe_ioam_hdr_t *) (t->rewrite +
+ sizeof (ip6_vxlan_gpe_header_t));
+ }
+
+
+ vxlan_gpe_ioam_hdr->type = VXLAN_GPE_PROTOCOL_IOAM;
+ /* Length of the header in octets */
+ vxlan_gpe_ioam_hdr->length = size;
+ /* Preserve the tunnel's original next-protocol after the options */
+ vxlan_gpe_ioam_hdr->protocol = t->protocol;
+ current = (u8 *) vxlan_gpe_ioam_hdr + sizeof (vxlan_gpe_ioam_hdr_t);
+
+ /* Append each registered option; writers return 0 on success */
+ if (has_trace_option
+ && hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_TRACE] != 0)
+ {
+ if (0 != hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_TRACE] (current,
+ &trace_data_size))
+ return -1;
+ current += trace_data_size;
+ }
+ if (has_pot_option
+ && hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT] != 0)
+ {
+ pot_data_size =
+ hm->options_size[VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT];
+ if (0 ==
+ hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT]
+ (current, &pot_data_size))
+ current += pot_data_size;
+ }
+
+ return 0;
+}
+
+/* Restore the tunnel's plain (non-iOAM) encap rewrite.  The option
+ * flags and ipv6_set parameter are accepted for signature symmetry with
+ * vxlan_gpe_ioam_set_rewrite but are unused — the IPv4/IPv6 choice is
+ * taken from ipv6_set only.  Always returns 0. */
+int
+vxlan_gpe_ioam_clear_rewrite (vxlan_gpe_tunnel_t * t, int has_trace_option,
+ int has_pot_option, int has_ppc_option,
+ u8 ipv6_set)
+{
+
+ t->rewrite_size = 0;
+
+ if (!ipv6_set)
+ {
+ vxlan4_gpe_rewrite (t, 0, 0, VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP);
+ }
+ else
+ {
+ vxlan6_gpe_rewrite (t, 0, 0, VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP);
+ }
+
+
+ return 0;
+}
+
+/* clib_error_t wrapper around vxlan_gpe_ioam_clear_rewrite.
+ * NOTE(review): all four option/ipv6 parameters are ignored — zeros are
+ * passed through, so an IPv6 tunnel is cleared via the IPv4 path;
+ * confirm intended. */
+clib_error_t *
+vxlan_gpe_ioam_clear (vxlan_gpe_tunnel_t * t,
+ int has_trace_option, int has_pot_option,
+ int has_ppc_option, u8 ipv6_set)
+{
+ int rv;
+ rv = vxlan_gpe_ioam_clear_rewrite (t, 0, 0, 0, 0);
+
+ if (rv == 0)
+ {
+ return (0);
+ }
+ else
+ {
+ return clib_error_return_code (0, rv, 0,
+ "vxlan_gpe_ioam_clear_rewrite returned %d",
+ rv);
+ }
+
+}
+
+
+/* clib_error_t wrapper around vxlan_gpe_ioam_set_rewrite: install the
+ * iOAM rewrite on tunnel t with the given option flags. */
+clib_error_t *
+vxlan_gpe_ioam_set (vxlan_gpe_tunnel_t * t,
+ int has_trace_option, int has_pot_option,
+ int has_ppc_option, u8 ipv6_set)
+{
+ int rv;
+ rv = vxlan_gpe_ioam_set_rewrite (t, has_trace_option,
+ has_pot_option, has_ppc_option, ipv6_set);
+
+ if (rv == 0)
+ {
+ return (0);
+ }
+ else
+ {
+ return clib_error_return_code (0, rv, 0,
+ "vxlan_gpe_ioam_set_rewrite returned %d",
+ rv);
+ }
+
+}
+
+/* Enable (is_add=1) or disable (is_add=0) the "vxlan-gpe-transit-ioam"
+ * feature on the ip4-output arc of a single interface. */
+static void
+vxlan_gpe_set_clear_output_feature_on_intf (vlib_main_t * vm,
+ u32 sw_if_index0, u8 is_add)
+{
+
+
+
+ vnet_feature_enable_disable ("ip4-output", "vxlan-gpe-transit-ioam",
+ sw_if_index0, is_add,
+ 0 /* void *feature_config */ ,
+ 0 /* u32 n_feature_config_bytes */ );
+ return;
+}
+
+/* Disable the transit iOAM output feature on every software interface
+ * in the system (used before re-programming after a FIB change). */
+void
+vxlan_gpe_clear_output_feature_on_all_intfs (vlib_main_t * vm)
+{
+ vnet_sw_interface_t *si = 0;
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_interface_main_t *im = &vnm->interface_main;
+
+ pool_foreach (si, im->sw_interfaces, (
+ {
+ vxlan_gpe_set_clear_output_feature_on_intf
+ (vm, si->sw_if_index, 0);
+ }));
+ return;
+}
+
+
+extern fib_forward_chain_type_t
+fib_entry_get_default_chain_type (const fib_entry_t * fib_entry);
+
+/* Enable (is_add=1) or disable (is_add=0) transit iOAM for traffic
+ * towards dst_addr.  Resolves dst_addr/32 in the FIB, walks the
+ * (possibly two-level) load-balance to each adjacency, toggles the
+ * ip4-output "vxlan-gpe-transit-ioam" feature on every egress
+ * interface found, and maintains dst_by_ip4 / dst_tunnels so the
+ * feature can be re-programmed on FIB changes via the registered FIB
+ * back-walk.  IPv6 destinations are not supported (returns 0 early);
+ * returns -1 when the FIB entry has no valid load-balance.
+ * NOTE(review): the lookup uses fib_index0 (always 0) while the
+ * attached special entry uses outer_fib_index — confirm intended. */
+int
+vxlan_gpe_enable_disable_ioam_for_dest (vlib_main_t * vm,
+ ip46_address_t dst_addr,
+ u32 outer_fib_index,
+ u8 is_ipv4, u8 is_add)
+{
+ vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+ u32 fib_index0 = 0;
+ u32 sw_if_index0 = ~0;
+
+ fib_node_index_t fei = ~0;
+ fib_entry_t *fib_entry;
+ u32 adj_index0;
+ ip_adjacency_t *adj0;
+ fib_prefix_t fib_prefix;
+ //fib_forward_chain_type_t fct;
+ load_balance_t *lb_m, *lb_b;
+ const dpo_id_t *dpo0, *dpo1;
+ u32 i, j;
+ //vnet_hw_interface_t *hw;
+
+ if (is_ipv4)
+ {
+ memset (&fib_prefix, 0, sizeof (fib_prefix_t));
+ fib_prefix.fp_len = 32;
+ fib_prefix.fp_proto = FIB_PROTOCOL_IP4;
+ fib_prefix.fp_addr = dst_addr;
+ }
+ else
+ {
+ return 0;
+ }
+
+ fei = fib_table_lookup (fib_index0, &fib_prefix);
+ fib_entry = fib_entry_get (fei);
+
+ //fct = fib_entry_get_default_chain_type (fib_entry);
+
+ if (!dpo_id_is_valid (&fib_entry->fe_lb /*[fct] */ ))
+ {
+ return (-1);
+ }
+
+ lb_m = load_balance_get (fib_entry->fe_lb /*[fct] */ .dpoi_index);
+
+ /* Walk top-level load-balance buckets; only nested load-balances
+ * holding plain adjacencies are programmed (other DPO types are
+ * skipped) */
+ for (i = 0; i < lb_m->lb_n_buckets; i++)
+ {
+ dpo0 = load_balance_get_bucket_i (lb_m, i);
+
+ if (dpo0->dpoi_type == DPO_LOAD_BALANCE)
+ {
+ lb_b = load_balance_get (dpo0->dpoi_index);
+
+ for (j = 0; j < lb_b->lb_n_buckets; j++)
+ {
+ dpo1 = load_balance_get_bucket_i (lb_b, j);
+
+ if (dpo1->dpoi_type == DPO_ADJACENCY)
+ {
+ adj_index0 = dpo1->dpoi_index;
+
+ if (ADJ_INDEX_INVALID == adj_index0)
+ {
+ continue;
+ }
+
+ adj0 =
+ ip_get_adjacency (&(ip4_main.lookup_main), adj_index0);
+ sw_if_index0 = adj0->rewrite_header.sw_if_index;
+
+ if (~0 == sw_if_index0)
+ {
+ continue;
+ }
+
+
+ if (is_add)
+ {
+ vnet_feature_enable_disable ("ip4-output",
+ "vxlan-gpe-transit-ioam",
+ sw_if_index0, is_add, 0
+ /* void *feature_config */
+ , 0 /* u32 n_feature_config_bytes */
+ );
+
+ vec_validate_init_empty (hm->bool_ref_by_sw_if_index,
+ sw_if_index0, ~0);
+ hm->bool_ref_by_sw_if_index[sw_if_index0] = 1;
+ }
+ else
+ {
+ /* Mark ~0 (0xFF) so a later
+ * vxlan_gpe_clear_output_feature_on_select_intfs pass
+ * removes the feature from this interface */
+ hm->bool_ref_by_sw_if_index[sw_if_index0] = ~0;
+ }
+ }
+ }
+ }
+ }
+
+ /* Book-keeping: track the destination so the FIB back-walk can
+ * refresh feature programming when the route changes */
+ if (is_ipv4)
+ {
+
+ uword *t = NULL;
+ vxlan_gpe_ioam_dest_tunnels_t *t1;
+ fib_prefix_t key4, *key4_copy;
+ hash_pair_t *hp;
+ memset (&key4, 0, sizeof (key4));
+ key4.fp_proto = FIB_PROTOCOL_IP4;
+ key4.fp_addr.ip4.as_u32 = fib_prefix.fp_addr.ip4.as_u32;
+ t = hash_get_mem (hm->dst_by_ip4, &key4);
+ if (is_add)
+ {
+ if (t)
+ {
+ return 0;
+ }
+ pool_get_aligned (hm->dst_tunnels, t1, CLIB_CACHE_LINE_BYTES);
+ memset (t1, 0, sizeof (*t1));
+ t1->fp_proto = FIB_PROTOCOL_IP4;
+ t1->dst_addr.ip4.as_u32 = fib_prefix.fp_addr.ip4.as_u32;
+ key4_copy = clib_mem_alloc (sizeof (*key4_copy));
+ clib_memcpy (key4_copy, &key4, sizeof (*key4_copy));
+ hash_set_mem (hm->dst_by_ip4, key4_copy, t1 - hm->dst_tunnels);
+ /*
+ * Attach to the FIB entry for the VxLAN-GPE destination
+ * and become its child. The dest route will invoke a callback
+ * when the fib entry changes, it can be used to
+ * re-program the output feature on the egress interface.
+ */
+
+ const fib_prefix_t tun_dst_pfx = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {.ip4 = t1->dst_addr.ip4,}
+ };
+
+ t1->fib_entry_index =
+ fib_table_entry_special_add (outer_fib_index,
+ &tun_dst_pfx,
+ FIB_SOURCE_RR,
+ FIB_ENTRY_FLAG_NONE,
+ ADJ_INDEX_INVALID);
+ t1->sibling_index =
+ fib_entry_child_add (t1->fib_entry_index,
+ hm->fib_entry_type, t1 - hm->dst_tunnels);
+ t1->outer_fib_index = outer_fib_index;
+
+ }
+ else
+ {
+ if (!t)
+ {
+ return 0;
+ }
+ /* NOTE(review): removal frees the hash key and pool entry but
+ * does not fib_entry_child_remove/special_remove the entry
+ * added above — confirm against later upstream fixes */
+ t1 = pool_elt_at_index (hm->dst_tunnels, t[0]);
+ hp = hash_get_pair (hm->dst_by_ip4, &key4);
+ key4_copy = (void *) (hp->key);
+ hash_unset_mem (hm->dst_by_ip4, &key4);
+ clib_mem_free (key4_copy);
+ pool_put (hm->dst_tunnels, t1);
+ }
+ }
+ else
+ {
+ // TBD for IPv6
+ }
+
+ return 0;
+}
+
+/* Re-program the transit iOAM output feature for every tracked
+ * destination: clear the feature everywhere, reset the per-interface
+ * reference vector, then re-enable per destination.  Invoked from the
+ * FIB back-walk when a tracked route changes. */
+void
+vxlan_gpe_refresh_output_feature_on_all_dest (void)
+{
+ vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+ vxlan_gpe_ioam_dest_tunnels_t *t;
+ u32 i;
+ if (pool_elts (hm->dst_tunnels) == 0)
+ return;
+ vxlan_gpe_clear_output_feature_on_all_intfs (hm->vlib_main);
+ /* Rebuild the reference vector at its previous length, all ~0 */
+ i = vec_len (hm->bool_ref_by_sw_if_index);
+ vec_free (hm->bool_ref_by_sw_if_index);
+ vec_validate_init_empty (hm->bool_ref_by_sw_if_index, i, ~0);
+ pool_foreach (t, hm->dst_tunnels, (
+ {
+ vxlan_gpe_enable_disable_ioam_for_dest
+ (hm->vlib_main,
+ t->dst_addr,
+ t->outer_fib_index,
+ (t->fp_proto == FIB_PROTOCOL_IP4), 1
+ /* is_add */
+ );
+ }
+ ));
+ return;
+}
+
+/* Disable the transit iOAM output feature only on interfaces whose
+ * reference entry was marked ~0 (0xFF) by a prior disable pass in
+ * vxlan_gpe_enable_disable_ioam_for_dest. */
+void
+vxlan_gpe_clear_output_feature_on_select_intfs (void)
+{
+ vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+ u32 sw_if_index0 = 0;
+ for (sw_if_index0 = 0;
+ sw_if_index0 < vec_len (hm->bool_ref_by_sw_if_index); sw_if_index0++)
+ {
+ /* 0xFF is the u8 value of the ~0 "marked for removal" sentinel */
+ if (hm->bool_ref_by_sw_if_index[sw_if_index0] == 0xFF)
+ {
+ vxlan_gpe_set_clear_output_feature_on_intf
+ (hm->vlib_main, sw_if_index0, 0);
+ }
+ }
+
+ return;
+}
+
+/* CLI handler for "set vxlan-gpe-ioam": look up the tunnel identified
+ * by local/remote/vni and install (default) or remove ("disable") the
+ * iOAM rewrite on it using the globally-enabled option flags.
+ * Note: for each of local/remote the IPv4 parse is attempted before
+ * the IPv6 parse. */
+static clib_error_t *
+vxlan_gpe_set_ioam_rewrite_command_fn (vlib_main_t *
+ vm,
+ unformat_input_t
+ * input, vlib_cli_command_t * cmd)
+{
+ vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+ ip46_address_t local, remote;
+ u8 local_set = 0;
+ u8 remote_set = 0;
+ u8 ipv4_set = 0;
+ u8 ipv6_set = 0;
+ u32 vni;
+ u8 vni_set = 0;
+ u8 disable = 0;
+ clib_error_t *rv = 0;
+ vxlan4_gpe_tunnel_key_t key4;
+ vxlan6_gpe_tunnel_key_t key6;
+ uword *p;
+ vxlan_gpe_main_t *gm = &vxlan_gpe_main;
+ vxlan_gpe_tunnel_t *t = 0;
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "local %U", unformat_ip4_address, &local.ip4))
+ {
+ local_set = 1;
+ ipv4_set = 1;
+ }
+ else
+ if (unformat (input, "remote %U", unformat_ip4_address, &remote.ip4))
+ {
+ remote_set = 1;
+ ipv4_set = 1;
+ }
+ else if (unformat (input, "local %U", unformat_ip6_address, &local.ip6))
+ {
+ local_set = 1;
+ ipv6_set = 1;
+ }
+ else
+ if (unformat (input, "remote %U", unformat_ip6_address, &remote.ip6))
+ {
+ remote_set = 1;
+ ipv6_set = 1;
+ }
+ else if (unformat (input, "vni %d", &vni))
+ vni_set = 1;
+ else if (unformat (input, "disable"))
+ disable = 1;
+ else
+ break;
+ }
+
+ /* Validate the collected arguments before the tunnel lookup */
+ if (local_set == 0)
+ return clib_error_return (0, "tunnel local address not specified");
+ if (remote_set == 0)
+ return clib_error_return (0, "tunnel remote address not specified");
+ if (ipv4_set && ipv6_set)
+ return clib_error_return (0, "both IPv4 and IPv6 addresses specified");
+ if ((ipv4_set
+ && memcmp (&local.ip4, &remote.ip4,
+ sizeof (local.ip4)) == 0) || (ipv6_set
+ &&
+ memcmp
+ (&local.ip6,
+ &remote.ip6,
+ sizeof (local.ip6)) == 0))
+ return clib_error_return (0, "src and dst addresses are identical");
+ if (vni_set == 0)
+ return clib_error_return (0, "vni not specified");
+ /* Build the hash key the same way tunnel creation does (VNI in the
+ * upper 24 bits, network order) */
+ if (!ipv6_set)
+ {
+ key4.local = local.ip4.as_u32;
+ key4.remote = remote.ip4.as_u32;
+ key4.vni = clib_host_to_net_u32 (vni << 8);
+ key4.pad = 0;
+ p = hash_get_mem (gm->vxlan4_gpe_tunnel_by_key, &key4);
+ }
+ else
+ {
+ key6.local.as_u64[0] = local.ip6.as_u64[0];
+ key6.local.as_u64[1] = local.ip6.as_u64[1];
+ key6.remote.as_u64[0] = remote.ip6.as_u64[0];
+ key6.remote.as_u64[1] = remote.ip6.as_u64[1];
+ key6.vni = clib_host_to_net_u32 (vni << 8);
+ p = hash_get_mem (gm->vxlan6_gpe_tunnel_by_key, &key6);
+ }
+
+ if (!p)
+ return clib_error_return (0, "VxLAN Tunnel not found");
+ t = pool_elt_at_index (gm->tunnels, p[0]);
+ if (!disable)
+ {
+ rv =
+ vxlan_gpe_ioam_set (t, hm->has_trace_option,
+ hm->has_pot_option, hm->has_ppc_option, ipv6_set);
+ }
+ else
+ {
+ rv = vxlan_gpe_ioam_clear (t, 0, 0, 0, 0);
+ }
+ return rv;
+}
+
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (vxlan_gpe_set_ioam_rewrite_cmd, static) = {
+ .path = "set vxlan-gpe-ioam",
+ .short_help = "set vxlan-gpe-ioam vxlan <src-ip> <dst_ip> <vnid> [disable]",
+ .function = vxlan_gpe_set_ioam_rewrite_command_fn,
+};
+/* *INDENT-ON* */
+
+
+
+/* Record the globally-enabled iOAM option flags and set up the trace
+ * profile when tracing is requested.  Always returns 0. */
+clib_error_t *
+vxlan_gpe_ioam_enable (int has_trace_option,
+ int has_pot_option, int has_ppc_option)
+{
+ vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+ hm->has_trace_option = has_trace_option;
+ hm->has_pot_option = has_pot_option;
+ hm->has_ppc_option = has_ppc_option;
+ if (hm->has_trace_option)
+ {
+ vxlan_gpe_trace_profile_setup ();
+ }
+
+ return 0;
+}
+
+/* Overwrite the global iOAM option flags with the given values and
+ * tear down the trace profile when tracing ends up disabled.  All
+ * in-tree callers pass (0, 0, 0), i.e. "disable everything".
+ * NOTE(review): passing a non-zero has_trace_option here would *keep*
+ * tracing enabled despite the function's name — confirm intended. */
+clib_error_t *
+vxlan_gpe_ioam_disable (int
+ has_trace_option,
+ int has_pot_option, int has_ppc_option)
+{
+ vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+ hm->has_trace_option = has_trace_option;
+ hm->has_pot_option = has_pot_option;
+ hm->has_ppc_option = has_ppc_option;
+ if (!hm->has_trace_option)
+ {
+ vxlan_gpe_trace_profile_cleanup ();
+ }
+
+ return 0;
+}
+
+/* Override the next node used by the IPv4 iOAM decap path. */
+void
+vxlan_gpe_set_next_override (uword next)
+{
+ vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+ hm->decap_v4_next_override = next;
+ return;
+}
+
+/* CLI handler for "set vxlan-gpe-ioam rewrite": parse the option
+ * keywords (trace / pot / ppc encap|decap|none) and store them as the
+ * global iOAM option flags via vxlan_gpe_ioam_enable. */
+static clib_error_t *
+vxlan_gpe_set_ioam_flags_command_fn (vlib_main_t * vm,
+ unformat_input_t
+ * input, vlib_cli_command_t * cmd)
+{
+ int has_trace_option = 0;
+ int has_pot_option = 0;
+ int has_ppc_option = 0;
+ clib_error_t *rv = 0;
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "trace"))
+ has_trace_option = 1;
+ else if (unformat (input, "pot"))
+ has_pot_option = 1;
+ else if (unformat (input, "ppc encap"))
+ has_ppc_option = PPC_ENCAP;
+ else if (unformat (input, "ppc decap"))
+ has_ppc_option = PPC_DECAP;
+ else if (unformat (input, "ppc none"))
+ has_ppc_option = PPC_NONE;
+ else
+ break;
+ }
+
+
+ rv =
+ vxlan_gpe_ioam_enable (has_trace_option, has_pot_option, has_ppc_option);
+ return rv;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (vxlan_gpe_set_ioam_flags_cmd, static) =
+{
+.path = "set vxlan-gpe-ioam rewrite",
+.short_help = "set vxlan-gpe-ioam [trace] [pot] [ppc <encap|decap>]",
+.function = vxlan_gpe_set_ioam_flags_command_fn,};
+/* *INDENT-ON* */
+
+
+/* Disable transit iOAM for one destination, then re-program the output
+ * feature for every destination that remains tracked (the per-interface
+ * feature may still be needed by another destination sharing the same
+ * egress interface), and finally clear the feature on interfaces left
+ * marked for removal.  Always returns 0. */
+int vxlan_gpe_ioam_disable_for_dest
+ (vlib_main_t * vm, ip46_address_t dst_addr, u32 outer_fib_index,
+ u8 ipv4_set)
+{
+ vxlan_gpe_ioam_dest_tunnels_t *t;
+ vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+
+ vxlan_gpe_enable_disable_ioam_for_dest (hm->vlib_main,
+ dst_addr, outer_fib_index, ipv4_set,
+ 0);
+ if (pool_elts (hm->dst_tunnels) == 0)
+ {
+ vxlan_gpe_clear_output_feature_on_select_intfs ();
+ return 0;
+ }
+
+ pool_foreach (t, hm->dst_tunnels, (
+ {
+ vxlan_gpe_enable_disable_ioam_for_dest
+ (hm->vlib_main,
+ t->dst_addr,
+ t->outer_fib_index,
+ (t->fp_proto ==
+ FIB_PROTOCOL_IP4), 1 /* is_add */ );
+ }
+ ));
+ vxlan_gpe_clear_output_feature_on_select_intfs ();
+ return (0);
+
+}
+
+/* CLI handler for "set vxlan-gpe-ioam-transit": enable (default) or
+ * disable ("disable") transit iOAM for traffic towards dst-ip, in the
+ * FIB given by outer-fib-index (default 0).  The IPv4 parse of dst-ip
+ * is attempted before the IPv6 parse. */
+static clib_error_t *vxlan_gpe_set_ioam_transit_rewrite_command_fn
+ (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+ ip46_address_t dst_addr;
+ u8 dst_addr_set = 0;
+ u8 ipv4_set = 0;
+ u8 ipv6_set = 0;
+ u8 disable = 0;
+ clib_error_t *rv = 0;
+ u32 outer_fib_index = 0;
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "dst-ip %U", unformat_ip4_address, &dst_addr.ip4))
+ {
+ dst_addr_set = 1;
+ ipv4_set = 1;
+ }
+ else
+ if (unformat
+ (input, "dst-ip %U", unformat_ip6_address, &dst_addr.ip6))
+ {
+ dst_addr_set = 1;
+ ipv6_set = 1;
+ }
+ else if (unformat (input, "outer-fib-index %d", &outer_fib_index))
+ {
+ }
+
+ else if (unformat (input, "disable"))
+ disable = 1;
+ else
+ break;
+ }
+
+ if (dst_addr_set == 0)
+ return clib_error_return (0, "tunnel destination address not specified");
+ if (ipv4_set && ipv6_set)
+ return clib_error_return (0, "both IPv4 and IPv6 addresses specified");
+ if (!disable)
+ {
+ vxlan_gpe_enable_disable_ioam_for_dest (hm->vlib_main,
+ dst_addr, outer_fib_index,
+ ipv4_set, 1);
+ }
+ else
+ {
+ vxlan_gpe_ioam_disable_for_dest
+ (vm, dst_addr, outer_fib_index, ipv4_set);
+ }
+ return rv;
+}
+
+ /* *INDENT-OFF* */
+VLIB_CLI_COMMAND (vxlan_gpe_set_ioam_transit_rewrite_cmd, static) = {
+ .path = "set vxlan-gpe-ioam-transit",
+ .short_help = "set vxlan-gpe-ioam-transit dst-ip <dst_ip> [outer-fib-index <outer_fib_index>] [disable]",
+ .function = vxlan_gpe_set_ioam_transit_rewrite_command_fn,
+};
+/* *INDENT-ON* */
+
+/* CLI handler for "clear vxlan-gpe-ioam rewrite": zero all global iOAM
+ * option flags (and tear down the trace profile). */
+clib_error_t *clear_vxlan_gpe_ioam_rewrite_command_fn
+ (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ return (vxlan_gpe_ioam_disable (0, 0, 0));
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (vxlan_gpe_clear_ioam_flags_cmd, static) =
+{
+.path = "clear vxlan-gpe-ioam rewrite",
+.short_help = "clear vxlan-gpe-ioam rewrite",
+.function = clear_vxlan_gpe_ioam_rewrite_command_fn,
+};
+/* *INDENT-ON* */
+
+
+/**
+ * Function definition to backwalk a FIB node: a tracked destination's
+ * route changed, so re-program the transit output feature everywhere.
+ */
+static fib_node_back_walk_rc_t
+vxlan_gpe_ioam_back_walk (fib_node_t * node, fib_node_back_walk_ctx_t * ctx)
+{
+ vxlan_gpe_refresh_output_feature_on_all_dest ();
+ return (FIB_NODE_BACK_WALK_CONTINUE);
+}
+
+/**
+ * Function definition to get a FIB node from its index.  This module
+ * has a single FIB node embedded in its singleton main struct, so the
+ * index is ignored.
+ */
+static fib_node_t *
+vxlan_gpe_ioam_fib_node_get (fib_node_index_t index)
+{
+ vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+ return (&hm->node);
+}
+
+/**
+ * Function definition to inform the FIB node that its last lock has gone.
+ * Must never happen for the embedded singleton node.
+ */
+static void
+vxlan_gpe_ioam_last_lock_gone (fib_node_t * node)
+{
+ ASSERT (0);
+}
+
+
+/*
+ * Virtual function table registered by the VXLAN-GPE iOAM module
+ * for participation in the FIB object graph.
+ * (Comment fixed: it previously said "MPLS GRE tunnels", a stale
+ * copy-paste.)
+ */
+const static fib_node_vft_t vxlan_gpe_ioam_vft = {
+ .fnv_get = vxlan_gpe_ioam_fib_node_get,
+ .fnv_last_lock = vxlan_gpe_ioam_last_lock_gone,
+ .fnv_back_walk = vxlan_gpe_ioam_back_walk,
+};
+
+/* Register this module's FIB node type so tracked destinations can be
+ * made children of their FIB entries. */
+void
+vxlan_gpe_ioam_interface_init (void)
+{
+ vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+ hm->fib_entry_type = fib_node_register_new_type (&vxlan_gpe_ioam_vft);
+ return;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h
new file mode 100644
index 00000000000..0711b87abbe
--- /dev/null
+++ b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_vxlan_gpe_ioam_h__
+#define __included_vxlan_gpe_ioam_h__
+
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <vnet/vxlan-gpe/vxlan_gpe_packet.h>
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam_packet.h>
+#include <vnet/ip/ip.h>
+
+
+/* Records one software interface on which VxLAN-GPE iOAM is active */
+typedef struct vxlan_gpe_sw_interface_
+{
+  u32 sw_if_index;
+} vxlan_gpe_ioam_sw_interface_t;
+
+/* One tunnel destination with transit iOAM enabled; tracked as a FIB
+ * child so route changes trigger a back-walk refresh */
+typedef struct vxlan_gpe_dest_tunnels_
+{
+  /* Tunnel destination address (v4 or v6) */
+  ip46_address_t dst_addr;
+  /* FIB protocol of dst_addr (presumably fib_protocol_t -- confirm) */
+  u32 fp_proto;
+  /* Our index in the tracked FIB entry's child list */
+  u32 sibling_index;
+  /* FIB entry being tracked for dst_addr */
+  fib_node_index_t fib_entry_index;
+  /* FIB table in which the destination is resolved */
+  u32 outer_fib_index;
+} vxlan_gpe_ioam_dest_tunnels_t;
+
+/* Singleton state for the VxLAN-GPE iOAM plugin */
+typedef struct vxlan_gpe_ioam_main_
+{
+  /**
+   * Linkage into the FIB object graph
+   */
+  fib_node_t node;
+
+  /* time scale transform. Joy.  unix_time_0/vlib_time_0 are paired at
+   * init so wall-clock timestamps can be derived from vlib time. */
+  u32 unix_time_0;
+  f64 vlib_time_0;
+
+
+  /* Trace option */
+  u8 has_trace_option;
+
+  /* Pot option */
+  u8 has_pot_option;
+
+/* Where per-packet counting is performed: none, encap or decap side */
+#define PPC_NONE 0
+#define PPC_ENCAP 1
+#define PPC_DECAP 2
+  u8 has_ppc_option;
+
+/* Timestamp precision selectors (indices into the trace code's
+ * trace_tsp_mul multiplier table) */
+#define TSP_SECONDS 0
+#define TSP_MILLISECONDS 1
+#define TSP_MICROSECONDS 2
+#define TSP_NANOSECONDS 3
+
+  /* Array of function pointers to ADD and POP VxLAN-GPE iOAM option
+   * handling routines, indexed by 8-bit option type */
+  u8 options_size[256];
+  int (*add_options[256]) (u8 * rewrite_string, u8 * rewrite_size);
+  int (*pop_options[256]) (ip4_header_t * ip, vxlan_gpe_ioam_option_t * opt);
+
+  /* Array of function pointers to iOAM option handling routines
+   * (per-hop data-path handler and packet-trace formatter) */
+  int (*options[256]) (vlib_buffer_t * b, vxlan_gpe_ioam_option_t * opt,
+		       u8 is_ipv4, u8 use_adj);
+  u8 *(*trace[256]) (u8 * s, vxlan_gpe_ioam_option_t * opt);
+
+  /* API message ID base */
+  u16 msg_id_base;
+
+  /* Override to export for iOAM */
+  uword decap_v4_next_override;
+  uword decap_v6_next_override;
+
+  /* sequence of node graph for encap */
+  uword encap_v4_next_node;
+  uword encap_v6_next_node;
+
+  /* Software interfaces. */
+  vxlan_gpe_ioam_sw_interface_t *sw_interfaces;
+
+  /* hash ip4/ip6 -> list of destinations for doing transit iOAM operation */
+  vxlan_gpe_ioam_dest_tunnels_t *dst_tunnels;
+  uword *dst_by_ip4;
+  uword *dst_by_ip6;
+
+  /** per sw_if_index, to maintain bitmap */
+  u8 *bool_ref_by_sw_if_index;
+  /* FIB node type allocated in vxlan_gpe_ioam_interface_init () */
+  fib_node_type_t fib_entry_type;
+
+  /** State convenience vlib_main_t */
+  vlib_main_t *vlib_main;
+  /** State convenience vnet_main_t */
+  vnet_main_t *vnet_main;
+
+
+} vxlan_gpe_ioam_main_t;
+
+/*
+ * Primary h-b-h handler trace support
+ */
+typedef struct
+{
+  /* next node index chosen for the traced packet */
+  u32 next_index;
+  /* number of option bytes captured in option_data */
+  u32 trace_len;
+  /* verbatim copy of the iOAM option chain (clamped to 256 bytes) */
+  u8 option_data[256];
+} ioam_trace_t;
+
+
+extern vlib_node_registration_t vxlan_gpe_encap_ioam_v4_node;
+extern vlib_node_registration_t vxlan_gpe_decap_ioam_v4_node;
+extern vlib_node_registration_t vxlan_gpe_transit_ioam_v4_node;
+
+clib_error_t *vxlan_gpe_ioam_enable (int has_trace_option, int has_pot_option,
+ int has_ppc_option);
+
+clib_error_t *vxlan_gpe_ioam_disable (int has_trace_option,
+ int has_pot_option, int has_ppc_option);
+
+clib_error_t *vxlan_gpe_ioam_set (vxlan_gpe_tunnel_t * t,
+ int has_trace_option,
+ int has_pot_option,
+ int has_ppc_option, u8 ipv6_set);
+clib_error_t *vxlan_gpe_ioam_clear (vxlan_gpe_tunnel_t * t,
+ int has_trace_option, int has_pot_option,
+ int has_ppc_option, u8 ipv6_set);
+
+int vxlan_gpe_ioam_add_register_option (u8 option,
+ u8 size,
+ int rewrite_options (u8 *
+ rewrite_string,
+ u8 *
+ rewrite_size));
+
+int vxlan_gpe_add_unregister_option (u8 option);
+
+int vxlan_gpe_ioam_register_option (u8 option,
+ int options (vlib_buffer_t * b,
+ vxlan_gpe_ioam_option_t *
+ opt, u8 is_ipv4, u8 use_adj),
+ u8 * trace (u8 * s,
+ vxlan_gpe_ioam_option_t *
+ opt));
+int vxlan_gpe_ioam_unregister_option (u8 option);
+
+int vxlan_gpe_trace_profile_setup (void);
+
+int vxlan_gpe_trace_profile_cleanup (void);
+extern void vxlan_gpe_ioam_interface_init (void);
+int
+vxlan_gpe_enable_disable_ioam_for_dest (vlib_main_t * vm,
+ ip46_address_t dst_addr,
+ u32 outer_fib_index,
+ u8 is_ipv4, u8 is_add);
+int vxlan_gpe_ioam_disable_for_dest
+ (vlib_main_t * vm, ip46_address_t dst_addr, u32 outer_fib_index,
+ u8 ipv4_set);
+
+typedef enum
+{
+ VXLAN_GPE_DECAP_IOAM_V4_NEXT_POP,
+ VXLAN_GPE_DECAP_IOAM_V4_NEXT_DROP,
+ VXLAN_GPE_DECAP_IOAM_V4_N_NEXT
+} vxlan_gpe_decap_ioam_v4_next_t;
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_packet.h b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_packet.h
new file mode 100644
index 00000000000..a7ef859ec58
--- /dev/null
+++ b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_packet.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_vxlan_gpe_ioam_packet_h__
+#define __included_vxlan_gpe_ioam_packet_h__
+
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <vnet/vxlan-gpe/vxlan_gpe_packet.h>
+#include <vnet/ip/ip.h>
+
+
+
+#define VXLAN_GPE_OPTION_TYPE_IOAM_TRACE 59
+#define VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT 60
+
+/**
+ * @brief VXLAN GPE Extension (iOAM) Header definition
+ */
+typedef struct
+{
+  /** Option type (see VXLAN_GPE_OPTION_TYPE_* above) */
+  u8 type;
+  /** Total length of the iOAM option chain in octets (used as the
+   *  walk limit by the encap/decap option scanners) */
+  u8 length;
+  /** Reserved */
+  u8 reserved;
+  /** see vxlan_gpe_protocol_t */
+  u8 protocol;
+} vxlan_gpe_ioam_hdr_t;
+
+/*
+ * @brief VxLAN GPE iOAM Option definition
+ * Each option header is followed by `length` octets of option data.
+ */
+typedef struct
+{
+  /* Option Type */
+  u8 type;
+  /* Length in octets of the option data field */
+  u8 length;
+} vxlan_gpe_ioam_option_t;
+
+
+#endif
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_trace.c b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_trace.c
new file mode 100644
index 00000000000..e37b1642d96
--- /dev/null
+++ b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_trace.c
@@ -0,0 +1,552 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vppinfra/error.h>
+
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <vnet/vxlan-gpe/vxlan_gpe_packet.h>
+
+#include <vppinfra/hash.h>
+#include <vppinfra/error.h>
+#include <vppinfra/elog.h>
+
+#include <ioam/lib-trace/trace_util.h>
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h>
+
+/* Timestamp precision multipliers for seconds, milliseconds, microseconds
+ * and nanoseconds respectively.
+ */
+static f64 trace_tsp_mul[4] = { 1, 1e3, 1e6, 1e9 };
+
+typedef union
+{
+ u64 as_u64;
+ u32 as_u32[2];
+} time_u64_t;
+
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED(struct {
+ vxlan_gpe_ioam_option_t hdr;
+ u8 ioam_trace_type;
+ u8 data_list_elts_left;
+ u32 elts[0]; /* Variable type. So keep it generic */
+}) vxlan_gpe_ioam_trace_option_t;
+/* *INDENT-ON* */
+
+
+#define foreach_vxlan_gpe_ioam_trace_stats \
+ _(SUCCESS, "Pkts updated with TRACE records") \
+ _(FAILED, "Errors in TRACE due to lack of TRACE records")
+
+static char *vxlan_gpe_ioam_trace_stats_strings[] = {
+#define _(sym,string) string,
+ foreach_vxlan_gpe_ioam_trace_stats
+#undef _
+};
+
+typedef enum
+{
+#define _(sym,str) VXLAN_GPE_IOAM_TRACE_##sym,
+ foreach_vxlan_gpe_ioam_trace_stats
+#undef _
+ VXLAN_GPE_IOAM_TRACE_N_STATS,
+} vxlan_gpe_ioam_trace_stats_t;
+
+
+typedef struct
+{
+ /* stats */
+ u64 counters[ARRAY_LEN (vxlan_gpe_ioam_trace_stats_strings)];
+
+ /* convenience */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+} vxlan_gpe_ioam_trace_main_t;
+
+vxlan_gpe_ioam_trace_main_t vxlan_gpe_ioam_trace_main;
+
+/*
+ * Registers the encap-time rewrite handler (and option size) for one
+ * iOAM option type.  Returns 0 on success, -1 if a handler is already
+ * registered for that option.
+ */
+int
+vxlan_gpe_ioam_add_register_option (u8 option,
+				    u8 size,
+				    int rewrite_options (u8 * rewrite_string,
+							 u8 * rewrite_size))
+{
+  vxlan_gpe_ioam_main_t *im = &vxlan_gpe_ioam_main;
+
+  ASSERT (option < ARRAY_LEN (im->add_options));
+
+  /* First registration wins; refuse to overwrite. */
+  if (im->add_options[option] != NULL)
+    return -1;
+
+  im->add_options[option] = rewrite_options;
+  im->options_size[option] = size;
+  return 0;
+}
+
+/*
+ * Removes a previously registered rewrite handler and clears its size.
+ * Returns 0 on success, -1 if nothing was registered for the option.
+ */
+int
+vxlan_gpe_add_unregister_option (u8 option)
+{
+  vxlan_gpe_ioam_main_t *im = &vxlan_gpe_ioam_main;
+
+  ASSERT (option < ARRAY_LEN (im->add_options));
+
+  if (im->add_options[option] == NULL)
+    return -1;
+
+  im->add_options[option] = NULL;
+  im->options_size[option] = 0;
+  return 0;
+}
+
+
+/*
+ * Registers the data-path handler plus packet-trace formatter for one
+ * iOAM option type.  Returns 0 on success, -1 if a handler pair is
+ * already registered for that option.
+ */
+int
+vxlan_gpe_ioam_register_option (u8 option,
+				int options (vlib_buffer_t * b,
+					     vxlan_gpe_ioam_option_t * opt,
+					     u8 is_ipv4, u8 use_adj),
+				u8 * trace (u8 * s,
+					    vxlan_gpe_ioam_option_t * opt))
+{
+  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+
+  ASSERT (option < ARRAY_LEN (hm->options));
+
+  /* Each option type may carry at most one handler pair. */
+  if (hm->options[option] != NULL)
+    return -1;
+
+  hm->options[option] = options;
+  hm->trace[option] = trace;
+  return 0;
+}
+
+/*
+ * Removes a data-path/trace handler pair.  Returns 0 on success, -1 if
+ * nothing was registered for the option.
+ */
+int
+vxlan_gpe_ioam_unregister_option (u8 option)
+{
+  vxlan_gpe_ioam_main_t *im = &vxlan_gpe_ioam_main;
+
+  ASSERT (option < ARRAY_LEN (im->options));
+
+  if (im->options[option] == NULL)
+    return -1;
+
+  im->options[option] = NULL;
+  im->trace[option] = NULL;
+  return 0;
+}
+
+
+/* Bumps one of the trace plugin's global stats counters.
+ * NOTE(review): counters are plain u64s, not per-thread -- confirm
+ * the threading model tolerates concurrent increments. */
+always_inline void
+vxlan_gpe_ioam_trace_stats_increment_counter (u32 counter_index,
+					      u64 increment)
+{
+  vxlan_gpe_ioam_trace_main_t *hm = &vxlan_gpe_ioam_trace_main;
+
+  hm->counters[counter_index] += increment;
+}
+
+
+/*
+ * Formats one trace-list element.  Words appear in a fixed order, one
+ * per enabled trace-type bit: ttl/node-id, ingress/egress interfaces,
+ * timestamp, application data.
+ */
+static u8 *
+format_ioam_data_list_element (u8 * s, va_list * args)
+{
+  u32 *elt = va_arg (*args, u32 *);
+  u8 *trace_type_p = va_arg (*args, u8 *);
+  u8 trace_type = *trace_type_p;
+
+
+  if (trace_type & BIT_TTL_NODEID)
+    {
+      u32 ttl_node_id_host_byte_order = clib_net_to_host_u32 (*elt);
+      s = format (s, "ttl 0x%x node id 0x%x ",
+		  ttl_node_id_host_byte_order >> 24,
+		  ttl_node_id_host_byte_order & 0x00FFFFFF);
+
+      elt++;
+    }
+
+  /* BUGFIX: condition previously tested BIT_ING_INTERFACE twice
+   * ("a && a" copy/paste); a single test is sufficient since ingress
+   * and egress share this one 32-bit word. */
+  if (trace_type & BIT_ING_INTERFACE)
+    {
+      u32 ingress_host_byte_order = clib_net_to_host_u32 (*elt);
+      s = format (s, "ingress 0x%x egress 0x%x ",
+		  ingress_host_byte_order >> 16,
+		  ingress_host_byte_order & 0xFFFF);
+      elt++;
+    }
+
+  if (trace_type & BIT_TIMESTAMP)
+    {
+      u32 ts_in_host_byte_order = clib_net_to_host_u32 (*elt);
+      s = format (s, "ts 0x%x \n", ts_in_host_byte_order);
+      elt++;
+    }
+
+  if (trace_type & BIT_APPDATA)
+    {
+      u32 appdata_in_host_byte_order = clib_net_to_host_u32 (*elt);
+      s = format (s, "app 0x%x ", appdata_in_host_byte_order);
+      elt++;
+    }
+
+  return s;
+}
+
+
+
+/*
+ * Encap-time rewrite handler: builds the iOAM trace option header in
+ * rewrite_string per the active trace profile and reports the bytes
+ * written through rewrite_size.  Returns 0 on success, -1 when no
+ * profile is configured or rewrite_string is NULL.
+ */
+int
+vxlan_gpe_ioam_trace_rewrite_handler (u8 * rewrite_string, u8 * rewrite_size)
+{
+  vxlan_gpe_ioam_trace_option_t *trace_option = NULL;
+  u8 trace_data_size = 0;
+  u8 trace_option_elts = 0;
+  trace_profile *profile = NULL;
+
+
+  profile = trace_profile_find ();
+
+  if (PREDICT_FALSE (!profile))
+    {
+      return (-1);
+    }
+
+  if (PREDICT_FALSE (!rewrite_string))
+    return -1;
+
+  trace_option_elts = profile->num_elts;
+  /* Per-hop record size in bytes, derived from the trace-type bitmap */
+  trace_data_size = fetch_trace_data_size (profile->trace_type);
+  trace_option = (vxlan_gpe_ioam_trace_option_t *) rewrite_string;
+  trace_option->hdr.type = VXLAN_GPE_OPTION_TYPE_IOAM_TRACE;
+  /* hdr.length counts the bytes after type/length: the two fixed bytes
+   * below plus the element data list */
+  trace_option->hdr.length = 2 /*ioam_trace_type,data_list_elts_left */ +
+    trace_option_elts * trace_data_size;
+  trace_option->ioam_trace_type = profile->trace_type & TRACE_TYPE_MASK;
+  trace_option->data_list_elts_left = trace_option_elts;
+  /* Total bytes consumed in the rewrite buffer, including the header */
+  *rewrite_size =
+    sizeof (vxlan_gpe_ioam_trace_option_t) +
+    (trace_option_elts * trace_data_size);
+
+  return 0;
+}
+
+
+/*
+ * Data-path handler for the iOAM trace option: consumes one slot of
+ * the pre-allocated trace list (decrementing data_list_elts_left) and
+ * fills it with this node's records.  Record layout, in order, per the
+ * ioam_trace_type bits: ttl/node-id, rx/tx interface, timestamp, app
+ * data.  Returns -1 if no trace profile is configured, 0 otherwise
+ * (a full trace list only bumps the FAILED counter).
+ */
+int
+vxlan_gpe_ioam_trace_data_list_handler (vlib_buffer_t * b,
+					vxlan_gpe_ioam_option_t * opt,
+					u8 is_ipv4, u8 use_adj)
+{
+  u8 elt_index = 0;
+  vxlan_gpe_ioam_trace_option_t *trace =
+    (vxlan_gpe_ioam_trace_option_t *) opt;
+  time_u64_t time_u64;
+  u32 *elt;
+  int rv = 0;
+  trace_profile *profile = NULL;
+  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+
+
+  profile = trace_profile_find ();
+
+  if (PREDICT_FALSE (!profile))
+    {
+      return (-1);
+    }
+
+
+  time_u64.as_u64 = 0;
+
+  if (PREDICT_TRUE (trace->data_list_elts_left))
+    {
+      trace->data_list_elts_left--;
+      /* fetch_trace_data_size returns in bytes. Convert it to 4-bytes
+       * to skip to this node's location.
+       */
+      elt_index =
+	trace->data_list_elts_left *
+	fetch_trace_data_size (trace->ioam_trace_type) / 4;
+      elt = &trace->elts[elt_index];
+      if (is_ipv4)
+	{
+	  if (trace->ioam_trace_type & BIT_TTL_NODEID)
+	    {
+	      ip4_header_t *ip0 = vlib_buffer_get_current (b);
+	      /* The transit case is the only case where the TTL decrement happens
+	       * before iOAM processing. For now, use the use_adj flag as an overload.
+	       * We can probably use a separate flag instead of overloading the use_adj flag.
+	       */
+	      *elt = clib_host_to_net_u32 (((ip0->ttl - 1 + use_adj) << 24) |
+					   profile->node_id);
+	      elt++;
+	    }
+
+	  if (trace->ioam_trace_type & BIT_ING_INTERFACE)
+	    {
+	      u16 tx_if = 0;
+	      u32 adj_index = vnet_buffer (b)->ip.adj_index[VLIB_TX];
+	      ip4_main_t *im4 = &ip4_main;
+	      ip_lookup_main_t *lm = &im4->lookup_main;
+	      /* tx interface is only known when a valid adjacency exists */
+	      if (use_adj)
+		{
+		  ip_adjacency_t *adj = ip_get_adjacency (lm, adj_index);
+		  tx_if = adj->rewrite_header.sw_if_index & 0xFFFF;
+		}
+
+	      /* High 16 bits: rx sw_if_index; low 16 bits: tx sw_if_index */
+	      *elt =
+		(vnet_buffer (b)->sw_if_index[VLIB_RX] & 0xFFFF) << 16 |
+		tx_if;
+	      *elt = clib_host_to_net_u32 (*elt);
+	      elt++;
+	    }
+	}
+      else
+	{
+	  if (trace->ioam_trace_type & BIT_TTL_NODEID)
+	    {
+	      /* IPv6 path: hop_limit is recorded as-is (no decrement) */
+	      ip6_header_t *ip0 = vlib_buffer_get_current (b);
+	      *elt = clib_host_to_net_u32 ((ip0->hop_limit << 24) |
+					   profile->node_id);
+	      elt++;
+	    }
+	  if (trace->ioam_trace_type & BIT_ING_INTERFACE)
+	    {
+	      u16 tx_if = 0;
+	      u32 adj_index = vnet_buffer (b)->ip.adj_index[VLIB_TX];
+	      ip6_main_t *im6 = &ip6_main;
+	      ip_lookup_main_t *lm = &im6->lookup_main;
+	      if (use_adj)
+		{
+		  ip_adjacency_t *adj = ip_get_adjacency (lm, adj_index);
+		  tx_if = adj->rewrite_header.sw_if_index & 0xFFFF;
+		}
+
+	      *elt =
+		(vnet_buffer (b)->sw_if_index[VLIB_RX] & 0xFFFF) << 16 |
+		tx_if;
+	      *elt = clib_host_to_net_u32 (*elt);
+	      elt++;
+	    }
+	}
+
+      if (trace->ioam_trace_type & BIT_TIMESTAMP)
+	{
+	  /* Send least significant 32 bits */
+	  f64 time_f64 =
+	    (f64) (((f64) hm->unix_time_0) +
+		   (vlib_time_now (hm->vlib_main) - hm->vlib_time_0));
+
+	  /* Scaled to the profile's precision (s/ms/us/ns).
+	   * NOTE(review): as_u32[0] is the least-significant word on
+	   * little-endian hosts only -- confirm for big-endian targets. */
+	  time_u64.as_u64 = time_f64 * trace_tsp_mul[profile->trace_tsp];
+	  *elt = clib_host_to_net_u32 (time_u64.as_u32[0]);
+	  elt++;
+	}
+
+      if (trace->ioam_trace_type & BIT_APPDATA)
+	{
+	  /* $$$ set elt0->app_data */
+	  *elt = clib_host_to_net_u32 (profile->app_data);
+	  elt++;
+	}
+      vxlan_gpe_ioam_trace_stats_increment_counter
+	(VXLAN_GPE_IOAM_TRACE_SUCCESS, 1);
+    }
+  else
+    {
+      vxlan_gpe_ioam_trace_stats_increment_counter
+	(VXLAN_GPE_IOAM_TRACE_FAILED, 1);
+    }
+  return (rv);
+}
+
+/*
+ * Packet-trace formatter for the iOAM trace option: prints the trace
+ * type, remaining slots and every element via
+ * format_ioam_data_list_element.
+ */
+u8 *
+vxlan_gpe_ioam_trace_data_list_trace_handler (u8 * s,
+					      vxlan_gpe_ioam_option_t * opt)
+{
+  vxlan_gpe_ioam_trace_option_t *trace;
+  u8 trace_data_size_in_words = 0;
+  u32 *elt;
+  int elt_index = 0;
+
+  trace = (vxlan_gpe_ioam_trace_option_t *) opt;
+  s =
+    format (s, " Trace Type 0x%x , %d elts left\n", trace->ioam_trace_type,
+	    trace->data_list_elts_left);
+  /* Element stride in 32-bit words, from the trace-type bitmap */
+  trace_data_size_in_words =
+    fetch_trace_data_size (trace->ioam_trace_type) / 4;
+  elt = &trace->elts[0];
+  while ((u8 *) elt < ((u8 *) (&trace->elts[0]) + trace->hdr.length - 2
+		       /* -2 accounts for ioam_trace_type,elts_left */ ))
+    {
+      s = format (s, " [%d] %U\n", elt_index,
+		  format_ioam_data_list_element,
+		  elt, &trace->ioam_trace_type);
+      elt_index++;
+      elt += trace_data_size_in_words;
+    }
+  return (s);
+}
+
+
+/* CLI handler: prints one "<description> - <count>" line per trace
+ * statistics counter. */
+static clib_error_t *
+vxlan_gpe_show_ioam_trace_cmd_fn (vlib_main_t * vm,
+				  unformat_input_t * input,
+				  vlib_cli_command_t * cmd)
+{
+  vxlan_gpe_ioam_trace_main_t *tm = &vxlan_gpe_ioam_trace_main;
+  u8 *out = 0;
+  int i;
+
+  for (i = 0; i < VXLAN_GPE_IOAM_TRACE_N_STATS; i++)
+    out = format (out, " %s - %lu\n",
+		  vxlan_gpe_ioam_trace_stats_strings[i], tm->counters[i]);
+
+  vlib_cli_output (vm, "%v", out);
+  vec_free (out);
+  return 0;
+}
+
+
+/* *INDENT-OFF* */
+/* CLI: "show ioam vxlan-gpe trace" -- dumps the SUCCESS/FAILED
+ * counters maintained by the trace data-path handler. */
+VLIB_CLI_COMMAND (vxlan_gpe_show_ioam_trace_cmd, static) = {
+  .path = "show ioam vxlan-gpe trace",
+  .short_help = "iOAM trace statistics",
+  .function = vxlan_gpe_show_ioam_trace_cmd_fn,
+};
+/* *INDENT-ON* */
+
+
+/*
+ * Plugin init: waits for ip/ip6/vxlan-gpe init, then registers the
+ * trace option's data-path+format handlers and its rewrite handler.
+ */
+static clib_error_t *
+vxlan_gpe_ioam_trace_init (vlib_main_t * vm)
+{
+  vxlan_gpe_ioam_trace_main_t *hm = &vxlan_gpe_ioam_trace_main;
+  clib_error_t *error;
+
+  /* Hard ordering: these subsystems must be initialized first. */
+  if ((error = vlib_call_init_function (vm, ip_main_init)))
+    return (error);
+
+  if ((error = vlib_call_init_function (vm, ip6_lookup_init)))
+    return error;
+
+  if ((error = vlib_call_init_function (vm, vxlan_gpe_init)))
+    return (error);
+
+  hm->vlib_main = vm;
+  hm->vnet_main = vnet_get_main ();
+  memset (hm->counters, 0, sizeof (hm->counters));
+
+  /* Per-hop handler + trace formatter for the trace option type */
+  if (vxlan_gpe_ioam_register_option
+      (VXLAN_GPE_OPTION_TYPE_IOAM_TRACE,
+       vxlan_gpe_ioam_trace_data_list_handler,
+       vxlan_gpe_ioam_trace_data_list_trace_handler) < 0)
+    return (clib_error_create
+	    ("registration of VXLAN_GPE_OPTION_TYPE_IOAM_TRACE failed"));
+
+
+  /* Encap-time rewrite (header build) handler for the same option */
+  if (vxlan_gpe_ioam_add_register_option
+      (VXLAN_GPE_OPTION_TYPE_IOAM_TRACE,
+       sizeof (vxlan_gpe_ioam_trace_option_t),
+       vxlan_gpe_ioam_trace_rewrite_handler) < 0)
+    return (clib_error_create
+	    ("registration of VXLAN_GPE_OPTION_TYPE_IOAM_TRACE for rewrite failed"));
+
+
+  return (0);
+}
+
+VLIB_INIT_FUNCTION (vxlan_gpe_ioam_trace_init);
+
+int
+vxlan_gpe_trace_profile_cleanup (void)
+{
+  /* Forget the computed rewrite size for the trace option; a later
+   * vxlan_gpe_trace_profile_setup () recomputes it. */
+  vxlan_gpe_ioam_main.options_size[VXLAN_GPE_OPTION_TYPE_IOAM_TRACE] = 0;
+  return 0;
+}
+
+/*
+ * Computes the on-wire size of the trace option for the active
+ * profile.  Writes it to *result and returns 0; returns -1 when no
+ * profile exists, or VNET_API_ERROR_INVALID_VALUE when the profile is
+ * unusable (zero-size records, or data exceeding 254 bytes -- the most
+ * the 8-bit hdr.length can carry after its two fixed bytes).
+ */
+static int
+vxlan_gpe_ioam_trace_get_sizeof_handler (u32 * result)
+{
+  u16 size = 0;
+  u8 trace_data_size = 0;
+  trace_profile *profile = NULL;
+
+  *result = 0;
+
+  profile = trace_profile_find ();
+
+  if (PREDICT_FALSE (!profile))
+    {
+      return (-1);
+    }
+
+  trace_data_size = fetch_trace_data_size (profile->trace_type);
+  if (PREDICT_FALSE (trace_data_size == 0))
+    return VNET_API_ERROR_INVALID_VALUE;
+
+  if (PREDICT_FALSE (profile->num_elts * trace_data_size > 254))
+    return VNET_API_ERROR_INVALID_VALUE;
+
+  size +=
+    sizeof (vxlan_gpe_ioam_trace_option_t) +
+    profile->num_elts * trace_data_size;
+  *result = size;
+
+  return 0;
+}
+
+
+/*
+ * Computes and caches the trace option's rewrite size for the active
+ * profile.  Returns 0 on success, -1 when no profile is configured or
+ * the size computation fails.
+ */
+int
+vxlan_gpe_trace_profile_setup (void)
+{
+  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+  trace_profile *profile = trace_profile_find ();
+  u32 trace_option_size = 0;
+
+  if (PREDICT_FALSE (profile == NULL))
+    return -1;
+
+  if (vxlan_gpe_ioam_trace_get_sizeof_handler (&trace_option_size) < 0)
+    return -1;
+
+  hm->options_size[VXLAN_GPE_OPTION_TYPE_IOAM_TRACE] = trace_option_size;
+  return 0;
+}
+
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_util.h b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_util.h
new file mode 100644
index 00000000000..c0ad8d9d03a
--- /dev/null
+++ b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_util.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_vxlan_gpe_ioam_util_h__
+#define __included_vxlan_gpe_ioam_util_h__
+
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <vnet/vxlan-gpe/vxlan_gpe_packet.h>
+#include <vnet/ip/ip.h>
+
+
+/* Per-packet trace record for the iOAM v4 nodes: the tunnel index plus
+ * a verbatim capture of the packet's iOAM option bytes */
+typedef struct
+{
+  u32 tunnel_index;
+  ioam_trace_t fmt_trace;
+} vxlan_gpe_ioam_v4_trace_t;
+
+
+/*
+ * Formats a vxlan_gpe_ioam_v4_trace_t: header summary, then each
+ * captured option via its registered trace formatter.
+ */
+static u8 *
+format_vxlan_gpe_ioam_v4_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  vxlan_gpe_ioam_v4_trace_t *t1 = va_arg (*args, vxlan_gpe_ioam_v4_trace_t *);
+  ioam_trace_t *t = &(t1->fmt_trace);
+  vxlan_gpe_ioam_option_t *fmt_trace0;
+  vxlan_gpe_ioam_option_t *opt0, *limit0;
+  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+
+  u8 type0;
+
+  fmt_trace0 = (vxlan_gpe_ioam_option_t *) t->option_data;
+
+  s = format (s, "VXLAN-GPE-IOAM: next_index %d len %d traced %d",
+	      t->next_index, fmt_trace0->length, t->trace_len);
+
+  opt0 = (vxlan_gpe_ioam_option_t *) (fmt_trace0 + 1);
+  /* BUGFIX: trace_len is a byte count, so the cast must bind AFTER the
+   * byte addition; the old form advanced trace_len option-sized units. */
+  limit0 = (vxlan_gpe_ioam_option_t *) (((u8 *) fmt_trace0) + t->trace_len);
+
+  while (opt0 < limit0)
+    {
+      type0 = opt0->type;
+      switch (type0)
+	{
+	case 0:		/* Pad, just stop */
+	  /* BUGFIX: the cast previously bound before "+ 1", skipping
+	   * sizeof (vxlan_gpe_ioam_option_t) bytes per pad octet. */
+	  opt0 = (vxlan_gpe_ioam_option_t *) (((u8 *) opt0) + 1);
+	  break;
+
+	default:
+	  if (hm->trace[type0])
+	    {
+	      s = (*hm->trace[type0]) (s, opt0);
+	    }
+	  else
+	    {
+	      s =
+		format (s, "\n unrecognized option %d length %d", type0,
+			opt0->length);
+	    }
+	  opt0 =
+	    (vxlan_gpe_ioam_option_t *) (((u8 *) opt0) + opt0->length +
+					 sizeof (vxlan_gpe_ioam_option_t));
+	  break;
+	}
+    }
+
+  s = format (s, "VXLAN-GPE-IOAM: tunnel %d", t1->tunnel_index);
+  return s;
+}
+
+
+/*
+ * Walks the iOAM option chain of one IPv4-encapsulated packet and runs
+ * each registered per-option handler; on any failure the packet is
+ * redirected to drop_node_val.  Optionally captures the raw option
+ * bytes into the packet trace.
+ */
+always_inline void
+vxlan_gpe_encap_decap_ioam_v4_one_inline (vlib_main_t * vm,
+					  vlib_node_runtime_t * node,
+					  vlib_buffer_t * b0,
+					  u32 * next0, u32 drop_node_val,
+					  u8 use_adj)
+{
+  ip4_header_t *ip0;
+  udp_header_t *udp_hdr0;
+  vxlan_gpe_header_t *gpe_hdr0;
+  vxlan_gpe_ioam_hdr_t *gpe_ioam0;
+  vxlan_gpe_ioam_option_t *opt0;
+  vxlan_gpe_ioam_option_t *limit0;
+  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+
+  /* Locate the iOAM header: outer IPv4 -> UDP -> VxLAN-GPE -> iOAM */
+  ip0 = vlib_buffer_get_current (b0);
+  udp_hdr0 = (udp_header_t *) (ip0 + 1);
+  gpe_hdr0 = (vxlan_gpe_header_t *) (udp_hdr0 + 1);
+  gpe_ioam0 = (vxlan_gpe_ioam_hdr_t *) (gpe_hdr0 + 1);
+  opt0 = (vxlan_gpe_ioam_option_t *) (gpe_ioam0 + 1);
+  limit0 = (vxlan_gpe_ioam_option_t *) ((u8 *) gpe_ioam0 + gpe_ioam0->length);
+
+  /*
+   * Basic validity checks: the option chain cannot be longer than the
+   * whole outer IPv4 payload.
+   */
+  if (gpe_ioam0->length > clib_net_to_host_u16 (ip0->length))
+    {
+      *next0 = drop_node_val;
+      return;
+    }
+
+  /* Scan the set of h-b-h options, process ones that we understand */
+  while (opt0 < limit0)
+    {
+      u8 type0;
+      type0 = opt0->type;
+      switch (type0)
+	{
+	case 0:		/* Pad1 */
+	  /* BUGFIX: Pad1 is a single octet; the cast previously bound
+	   * before "+ 1", skipping a whole option struct instead. */
+	  opt0 = (vxlan_gpe_ioam_option_t *) (((u8 *) opt0) + 1);
+	  continue;
+	case 1:		/* PadN */
+	  break;
+	default:
+	  if (hm->options[type0])
+	    {
+	      if ((*hm->options[type0]) (b0, opt0, 1 /* is_ipv4 */ ,
+					 use_adj) < 0)
+		{
+		  *next0 = drop_node_val;
+		  return;
+		}
+	    }
+	  break;
+	}
+      /* NOTE(review): this advance uses sizeof (vxlan_gpe_ioam_hdr_t)
+       * while the trace formatter advances by
+       * sizeof (vxlan_gpe_ioam_option_t); confirm which one matches
+       * the on-wire option encoding. */
+      opt0 =
+	(vxlan_gpe_ioam_option_t *) (((u8 *) opt0) + opt0->length +
+				     sizeof (vxlan_gpe_ioam_hdr_t));
+    }
+
+
+  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+    {
+      vxlan_gpe_ioam_v4_trace_t *t =
+	vlib_add_trace (vm, node, b0, sizeof (*t));
+      u32 trace_len = gpe_ioam0->length;
+      t->fmt_trace.next_index = *next0;
+      /* Capture the ioam option verbatim, clamped to the trace buffer */
+      trace_len = trace_len < ARRAY_LEN (t->fmt_trace.option_data) ?
+	trace_len : ARRAY_LEN (t->fmt_trace.option_data);
+      t->fmt_trace.trace_len = trace_len;
+      clib_memcpy (&(t->fmt_trace.option_data), gpe_ioam0, trace_len);
+    }
+  return;
+}
+
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_msg_enum.h b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_msg_enum.h
new file mode 100644
index 00000000000..cc0a10a3fec
--- /dev/null
+++ b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_msg_enum.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef included_vxlan_gpe_msg_enum_h
+#define included_vxlan_gpe_msg_enum_h
+
+#include <vppinfra/byte_order.h>
+
+/* Expand the message ids from the .api-derived header into an enum;
+ * VL_MSG_FIRST_AVAILABLE ends up equal to the message count. */
+#define vl_msg_id(n,h) n,
+typedef enum {
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_all_api_h.h>
+  /* We'll want to know how many messages IDs we need... */
+  VL_MSG_FIRST_AVAILABLE,
+} vl_msg_id_t;
+#undef vl_msg_id
+
+#endif /* included_vxlan_gpe_msg_enum_h */
diff --git a/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_test.c b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_test.c
new file mode 100644
index 00000000000..47253eb67ab
--- /dev/null
+++ b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_test.c
@@ -0,0 +1,600 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ *------------------------------------------------------------------
+ * vxlan_gpe_test.c - test harness for vxlan_gpe plugin
+ *------------------------------------------------------------------
+ */
+
+#include <vat/vat.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+#include <vlibsocket/api.h>
+#include <vppinfra/error.h>
+
+/* Declare message IDs */
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_msg_enum.h>
+
+/* define message structures */
+#define vl_typedefs
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_all_api_h.h>
+#undef vl_typedefs
+
+/* declare message handlers for each api */
+
+#define vl_endianfun /* define message structures */
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...)
+#define vl_printfun
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_all_api_h.h>
+#undef vl_printfun
+
+/* Get the API version number. */
+#define vl_api_version(n,v) static u32 api_version=(v);
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_all_api_h.h>
+#undef vl_api_version
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam_packet.h>
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h>
+
+/* Per-process state for the VXLAN-GPE iOAM VAT test plugin */
+typedef struct
+{
+ /* API message ID base */
+ u16 msg_id_base;
+ /* Back-pointer to the VAT main structure */
+ vat_main_t *vat_main;
+} vxlan_gpe_test_main_t;
+
+vxlan_gpe_test_main_t vxlan_gpe_test_main;
+
+/* Replies that carry only a retval; they all share one handler shape */
+#define foreach_standard_reply_retval_handler \
+_(vxlan_gpe_ioam_enable_reply) \
+_(vxlan_gpe_ioam_disable_reply) \
+_(vxlan_gpe_ioam_vni_enable_reply) \
+_(vxlan_gpe_ioam_vni_disable_reply) \
+_(vxlan_gpe_ioam_transit_enable_reply) \
+_(vxlan_gpe_ioam_transit_disable_reply)
+
+/*
+ * Generate one boilerplate handler per reply: record the (host-order)
+ * retval either as an async error count, or as the synchronous result
+ * that unblocks the W wait loop via result_ready.
+ */
+#define _(n) \
+ static void vl_api_##n##_t_handler \
+ (vl_api_##n##_t * mp) \
+ { \
+ vat_main_t * vam = vxlan_gpe_test_main.vat_main; \
+ i32 retval = ntohl(mp->retval); \
+ if (vam->async_mode) { \
+ vam->async_errors += (retval < 0); \
+ } else { \
+ vam->retval = retval; \
+ vam->result_ready = 1; \
+ } \
+ }
+foreach_standard_reply_retval_handler;
+#undef _
+
+/*
+ * Table of message reply handlers, must include boilerplate handlers
+ * we just generated.  Maps each VL_API_* reply ID to its handler so
+ * vat_api_hookup can register them all.
+ */
+#define foreach_vpe_api_reply_msg \
+_(VXLAN_GPE_IOAM_ENABLE_REPLY, vxlan_gpe_ioam_enable_reply) \
+_(VXLAN_GPE_IOAM_DISABLE_REPLY, vxlan_gpe_ioam_disable_reply) \
+_(VXLAN_GPE_IOAM_VNI_ENABLE_REPLY, vxlan_gpe_ioam_vni_enable_reply) \
+_(VXLAN_GPE_IOAM_VNI_DISABLE_REPLY, vxlan_gpe_ioam_vni_disable_reply) \
+_(VXLAN_GPE_IOAM_TRANSIT_ENABLE_REPLY, vxlan_gpe_ioam_transit_enable_reply) \
+_(VXLAN_GPE_IOAM_TRANSIT_DISABLE_REPLY, vxlan_gpe_ioam_transit_disable_reply) \
+
+
+/* M: construct, but don't yet send a message.
+ * Captures 'vam', 'sm' and 'mp' from the calling function's scope;
+ * zeroes the message and stamps the plugin-relative message ID. */
+
+#define M(T,t) \
+do { \
+ vam->result_ready = 0; \
+ mp = vl_msg_api_alloc(sizeof(*mp)); \
+ memset (mp, 0, sizeof (*mp)); \
+ mp->_vl_msg_id = ntohs (VL_API_##T + sm->msg_id_base); \
+ mp->client_index = vam->my_client_index; \
+} while(0);
+
+/* M2: like M, but allocates n extra trailing bytes for variable data */
+#define M2(T,t,n) \
+do { \
+ vam->result_ready = 0; \
+ mp = vl_msg_api_alloc(sizeof(*mp)+(n)); \
+ memset (mp, 0, sizeof (*mp)); \
+ mp->_vl_msg_id = ntohs (VL_API_##T + sm->msg_id_base); \
+ mp->client_index = vam->my_client_index; \
+} while(0);
+
+/* S: send a message */
+#define S (vl_msg_api_send_shmem (vam->vl_input_queue, (u8 *)&mp))
+
+/* W: wait for results, with timeout.  Captures 'timeout' and 'vam';
+ * RETURNS from the enclosing function with the reply retval, or -99
+ * if no reply arrives within ~1 second. */
+#define W \
+do { \
+ timeout = vat_time_now (vam) + 1.0; \
+ \
+ while (vat_time_now (vam) < timeout) { \
+ if (vam->result_ready == 1) { \
+ return (vam->retval); \
+ } \
+ } \
+ return -99; \
+} while(0);
+
+
+/*
+ * api_vxlan_gpe_ioam_enable: enable iOAM for VXLAN-GPE.
+ * Parses optional "trace", "pow" and "ppc encap|decap|none" keywords,
+ * then sends VXLAN_GPE_IOAM_ENABLE and waits for the reply (the W
+ * macro returns the reply retval, or -99 on timeout).
+ */
+static int
+api_vxlan_gpe_ioam_enable (vat_main_t * vam)
+{
+ vxlan_gpe_test_main_t *sm = &vxlan_gpe_test_main;
+
+ unformat_input_t *input = vam->input;
+ vl_api_vxlan_gpe_ioam_enable_t *mp;
+ f64 timeout; /* consumed by the W macro */
+ u32 id = 0;
+ int has_trace_option = 0;
+ int has_pow_option = 0;
+ int has_ppc_option = 0; /* holds a PPC_* profile value, not a boolean */
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "trace"))
+ has_trace_option = 1;
+ else if (unformat (input, "pow"))
+ has_pow_option = 1;
+ else if (unformat (input, "ppc encap"))
+ has_ppc_option = PPC_ENCAP;
+ else if (unformat (input, "ppc decap"))
+ has_ppc_option = PPC_DECAP;
+ else if (unformat (input, "ppc none"))
+ has_ppc_option = PPC_NONE;
+ else
+ break;
+ }
+ M (VXLAN_GPE_IOAM_ENABLE, vxlan_gpe_ioam_enable);
+ mp->id = htons (id);
+ mp->trace_ppc = has_ppc_option;
+ mp->pow_enable = has_pow_option;
+ mp->trace_enable = has_trace_option;
+
+
+ S;
+ W;
+
+ return (0); /* not reached; W returns for us */
+}
+
+
+/*
+ * api_vxlan_gpe_ioam_disable: globally disable iOAM for VXLAN-GPE.
+ * Takes no arguments; sends VXLAN_GPE_IOAM_DISABLE and waits for the
+ * reply via W (which returns the retval or -99 on timeout).
+ */
+static int
+api_vxlan_gpe_ioam_disable (vat_main_t * vam)
+{
+ vxlan_gpe_test_main_t *sm = &vxlan_gpe_test_main;
+ vl_api_vxlan_gpe_ioam_disable_t *mp;
+ f64 timeout; /* consumed by the W macro */
+
+ M (VXLAN_GPE_IOAM_DISABLE, vxlan_gpe_ioam_disable);
+ S;
+ W;
+ return 0; /* not reached; W returns for us */
+}
+
+/*
+ * api_vxlan_gpe_ioam_vni_enable: enable iOAM on the VXLAN-GPE tunnel
+ * identified by local/remote VTEP addresses and VNI.  Both addresses
+ * must be given and must be the same family (v4 xor v6).  Sends
+ * VXLAN_GPE_IOAM_VNI_ENABLE and waits for the reply via W.
+ * Returns the reply retval, or -99 on parse/validation error or timeout.
+ */
+static int
+api_vxlan_gpe_ioam_vni_enable (vat_main_t * vam)
+{
+ vxlan_gpe_test_main_t *sm = &vxlan_gpe_test_main;
+
+ unformat_input_t *line_input = vam->input;
+ vl_api_vxlan_gpe_ioam_vni_enable_t *mp;
+ ip4_address_t local4, remote4;
+ ip6_address_t local6, remote6;
+ u8 ipv4_set = 0, ipv6_set = 0;
+ u8 local_set = 0;
+ u8 remote_set = 0;
+ u32 vni;
+ u8 vni_set = 0;
+ f64 timeout; /* consumed by the W macro */
+
+
+ /* v4 parses are tried before v6; whichever matches sets the family */
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "local %U", unformat_ip4_address, &local4))
+ {
+ local_set = 1;
+ ipv4_set = 1;
+ }
+ else if (unformat (line_input, "remote %U",
+ unformat_ip4_address, &remote4))
+ {
+ remote_set = 1;
+ ipv4_set = 1;
+ }
+ else if (unformat (line_input, "local %U",
+ unformat_ip6_address, &local6))
+ {
+ local_set = 1;
+ ipv6_set = 1;
+ }
+ else if (unformat (line_input, "remote %U",
+ unformat_ip6_address, &remote6))
+ {
+ remote_set = 1;
+ ipv6_set = 1;
+ }
+
+ else if (unformat (line_input, "vni %d", &vni))
+ vni_set = 1;
+ else
+ {
+ errmsg ("parse error '%U'\n", format_unformat_error, line_input);
+ return -99;
+ }
+ }
+
+ if (local_set == 0)
+ {
+ errmsg ("tunnel local address not specified\n");
+ return -99;
+ }
+ if (remote_set == 0)
+ {
+ errmsg ("tunnel remote address not specified\n");
+ return -99;
+ }
+ if (ipv4_set && ipv6_set)
+ {
+ errmsg ("both IPv4 and IPv6 addresses specified");
+ return -99;
+ }
+
+ if (vni_set == 0)
+ {
+ errmsg ("vni not specified\n");
+ return -99;
+ }
+
+ M (VXLAN_GPE_IOAM_VNI_ENABLE, vxlan_gpe_ioam_vni_enable);
+
+
+ if (ipv6_set)
+ {
+ clib_memcpy (&mp->local, &local6, sizeof (local6));
+ clib_memcpy (&mp->remote, &remote6, sizeof (remote6));
+ }
+ else
+ {
+ clib_memcpy (&mp->local, &local4, sizeof (local4));
+ clib_memcpy (&mp->remote, &remote4, sizeof (remote4));
+ }
+
+ mp->vni = ntohl (vni); /* host->net (ntohl and htonl are the same op) */
+ mp->is_ipv6 = ipv6_set;
+
+ S;
+ W;
+
+ return (0); /* not reached; W returns for us */
+}
+
+/*
+ * api_vxlan_gpe_ioam_vni_disable: disable iOAM on the VXLAN-GPE tunnel
+ * identified by local/remote VTEP addresses and VNI.  Argument parsing
+ * and validation mirror api_vxlan_gpe_ioam_vni_enable.  Sends
+ * VXLAN_GPE_IOAM_VNI_DISABLE and waits for the reply via W.
+ * Returns the reply retval, or -99 on parse/validation error or timeout.
+ */
+static int
+api_vxlan_gpe_ioam_vni_disable (vat_main_t * vam)
+{
+ vxlan_gpe_test_main_t *sm = &vxlan_gpe_test_main;
+
+ unformat_input_t *line_input = vam->input;
+ vl_api_vxlan_gpe_ioam_vni_disable_t *mp;
+ ip4_address_t local4, remote4;
+ ip6_address_t local6, remote6;
+ u8 ipv4_set = 0, ipv6_set = 0;
+ u8 local_set = 0;
+ u8 remote_set = 0;
+ u32 vni;
+ u8 vni_set = 0;
+ f64 timeout; /* consumed by the W macro */
+
+
+ /* v4 parses are tried before v6; whichever matches sets the family */
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "local %U", unformat_ip4_address, &local4))
+ {
+ local_set = 1;
+ ipv4_set = 1;
+ }
+ else if (unformat (line_input, "remote %U",
+ unformat_ip4_address, &remote4))
+ {
+ remote_set = 1;
+ ipv4_set = 1;
+ }
+ else if (unformat (line_input, "local %U",
+ unformat_ip6_address, &local6))
+ {
+ local_set = 1;
+ ipv6_set = 1;
+ }
+ else if (unformat (line_input, "remote %U",
+ unformat_ip6_address, &remote6))
+ {
+ remote_set = 1;
+ ipv6_set = 1;
+ }
+
+ else if (unformat (line_input, "vni %d", &vni))
+ vni_set = 1;
+ else
+ {
+ errmsg ("parse error '%U'\n", format_unformat_error, line_input);
+ return -99;
+ }
+ }
+
+ if (local_set == 0)
+ {
+ errmsg ("tunnel local address not specified\n");
+ return -99;
+ }
+ if (remote_set == 0)
+ {
+ errmsg ("tunnel remote address not specified\n");
+ return -99;
+ }
+ if (ipv4_set && ipv6_set)
+ {
+ errmsg ("both IPv4 and IPv6 addresses specified");
+ return -99;
+ }
+
+ if (vni_set == 0)
+ {
+ errmsg ("vni not specified\n");
+ return -99;
+ }
+
+ M (VXLAN_GPE_IOAM_VNI_DISABLE, vxlan_gpe_ioam_vni_disable);
+
+
+ if (ipv6_set)
+ {
+ clib_memcpy (&mp->local, &local6, sizeof (local6));
+ clib_memcpy (&mp->remote, &remote6, sizeof (remote6));
+ }
+ else
+ {
+ clib_memcpy (&mp->local, &local4, sizeof (local4));
+ clib_memcpy (&mp->remote, &remote4, sizeof (remote4));
+ }
+
+ mp->vni = ntohl (vni); /* host->net (ntohl and htonl are the same op) */
+ mp->is_ipv6 = ipv6_set;
+
+ S;
+ W;
+
+ return 0; /* not reached; W returns for us */
+}
+
+/*
+ * api_vxlan_gpe_ioam_transit_enable: enable iOAM transit processing
+ * for flows destined to dst-ip in the given outer FIB (default 0).
+ * IPv4 only; an IPv6 destination is rejected with -1.  Sends
+ * VXLAN_GPE_IOAM_TRANSIT_ENABLE and waits for the reply via W.
+ * Returns the reply retval, or -99 on parse error or timeout.
+ */
+static int
+api_vxlan_gpe_ioam_transit_enable (vat_main_t * vam)
+{
+ vxlan_gpe_test_main_t *sm = &vxlan_gpe_test_main;
+
+ unformat_input_t *line_input = vam->input;
+ vl_api_vxlan_gpe_ioam_transit_enable_t *mp;
+ ip4_address_t local4;
+ ip6_address_t local6;
+ u8 ipv4_set = 0, ipv6_set = 0;
+ u8 local_set = 0;
+ u32 outer_fib_index = 0; /* default FIB when not supplied */
+ f64 timeout; /* consumed by the W macro */
+
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "dst-ip %U", unformat_ip4_address, &local4))
+ {
+ local_set = 1;
+ ipv4_set = 1;
+ }
+ else if (unformat (line_input, "dst-ip %U",
+ unformat_ip6_address, &local6))
+ {
+ local_set = 1;
+ ipv6_set = 1;
+ }
+
+ else if (unformat (line_input, "outer-fib-index %d", &outer_fib_index))
+ ;
+ else
+ {
+ errmsg ("parse error '%U'\n", format_unformat_error, line_input);
+ return -99;
+ }
+ }
+
+ if (local_set == 0)
+ {
+ errmsg ("destination address not specified\n");
+ return -99;
+ }
+ if (ipv4_set && ipv6_set)
+ {
+ errmsg ("both IPv4 and IPv6 addresses specified");
+ return -99;
+ }
+
+
+ M (VXLAN_GPE_IOAM_TRANSIT_ENABLE, vxlan_gpe_ioam_transit_enable);
+
+
+ if (ipv6_set)
+ {
+ errmsg ("IPv6 currently unsupported");
+ return -1;
+ }
+ else
+ {
+ clib_memcpy (&mp->dst_addr, &local4, sizeof (local4));
+ }
+
+ mp->outer_fib_index = htonl (outer_fib_index);
+ mp->is_ipv6 = ipv6_set;
+
+ S;
+ W;
+
+ return (0); /* not reached; W returns for us */
+}
+
+/*
+ * api_vxlan_gpe_ioam_transit_disable: disable iOAM transit processing
+ * for flows destined to dst-ip in the given outer FIB (default 0).
+ * IPv4 only; an IPv6 destination is rejected with -1.  Sends
+ * VXLAN_GPE_IOAM_TRANSIT_DISABLE and waits for the reply via W.
+ * Returns the reply retval, or -99 on parse error or timeout.
+ */
+static int
+api_vxlan_gpe_ioam_transit_disable (vat_main_t * vam)
+{
+ vxlan_gpe_test_main_t *sm = &vxlan_gpe_test_main;
+
+ unformat_input_t *line_input = vam->input;
+ vl_api_vxlan_gpe_ioam_transit_disable_t *mp;
+ ip4_address_t local4;
+ ip6_address_t local6;
+ u8 ipv4_set = 0, ipv6_set = 0;
+ u8 local_set = 0;
+ /* Default to FIB 0 when "outer-fib-index" is omitted; this was
+ * previously read uninitialized (sibling transit_enable uses 0 too) */
+ u32 outer_fib_index = 0;
+ f64 timeout; /* consumed by the W macro */
+
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "dst-ip %U", unformat_ip4_address, &local4))
+ {
+ local_set = 1;
+ ipv4_set = 1;
+ }
+ else if (unformat (line_input, "dst-ip %U",
+ unformat_ip6_address, &local6))
+ {
+ local_set = 1;
+ ipv6_set = 1;
+ }
+
+ else if (unformat (line_input, "outer-fib-index %d", &outer_fib_index))
+ ;
+ else
+ {
+ errmsg ("parse error '%U'\n", format_unformat_error, line_input);
+ return -99;
+ }
+ }
+
+ if (local_set == 0)
+ {
+ errmsg ("destination address not specified\n");
+ return -99;
+ }
+ if (ipv4_set && ipv6_set)
+ {
+ errmsg ("both IPv4 and IPv6 addresses specified");
+ return -99;
+ }
+
+
+ M (VXLAN_GPE_IOAM_TRANSIT_DISABLE, vxlan_gpe_ioam_transit_disable);
+
+
+ if (ipv6_set)
+ {
+ return -1;
+ }
+ else
+ {
+ clib_memcpy (&mp->dst_addr, &local4, sizeof (local4));
+ }
+
+ mp->outer_fib_index = htonl (outer_fib_index);
+ mp->is_ipv6 = ipv6_set;
+
+ S;
+ W;
+
+
+ return (0); /* not reached; W returns for us */
+}
+
+/*
+ * List of messages that the api test plugin sends,
+ * and that the data plane plugin processes
+ */
+#define foreach_vpe_api_msg \
+_(vxlan_gpe_ioam_enable, ""\
+ "[trace] [pow] [ppc <encap|ppc decap>]") \
+_(vxlan_gpe_ioam_disable, "") \
+_(vxlan_gpe_ioam_vni_enable, ""\
+ "local <local_vtep_ip> remote <remote_vtep_ip> vni <vnid>") \
+_(vxlan_gpe_ioam_vni_disable, ""\
+ "local <local_vtep_ip> remote <remote_vtep_ip> vni <vnid>") \
+_(vxlan_gpe_ioam_transit_enable, ""\
+ "dst-ip <dst_ip> [outer-fib-index <outer_fib_index>]") \
+_(vxlan_gpe_ioam_transit_disable, ""\
+ "dst-ip <dst_ip> [outer-fib-index <outer_fib_index>]") \
+
+
+/*
+ * vat_api_hookup: register reply handlers for every message in
+ * foreach_vpe_api_reply_msg, and register each api_* command function
+ * and its help string with the VAT command tables.
+ */
+void
+vat_api_hookup (vat_main_t * vam)
+{
+ vxlan_gpe_test_main_t *sm = &vxlan_gpe_test_main;
+ /* Hook up handlers for replies from the data plane plug-in */
+#define _(N,n) \
+ vl_msg_api_set_handlers((VL_API_##N + sm->msg_id_base), \
+ #n, \
+ vl_api_##n##_t_handler, \
+ vl_noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_vpe_api_reply_msg;
+#undef _
+
+ /* API messages we can send */
+#define _(n,h) hash_set_mem (vam->function_by_name, #n, api_##n);
+ foreach_vpe_api_msg;
+#undef _
+
+ /* Help strings */
+#define _(n,h) hash_set_mem (vam->help_by_name, #n, h);
+ foreach_vpe_api_msg;
+#undef _
+}
+
+/*
+ * vat_plugin_register: VAT entry point for this plugin.  Looks up the
+ * plugin's first message ID from the versioned API signature string and,
+ * if the data-plane plugin is loaded, hooks up all message handlers.
+ */
+clib_error_t *
+vat_plugin_register (vat_main_t * vam)
+{
+ vxlan_gpe_test_main_t *sm = &vxlan_gpe_test_main;
+ u8 *api_signature;
+
+ sm->vat_main = vam;
+
+ /* Ask the API layer for this plugin's base message ID */
+ api_signature = format (0, "ioam_vxlan_gpe_%08x%c", api_version, 0);
+ sm->msg_id_base = vl_client_get_first_plugin_msg_id ((char *) api_signature);
+ vec_free (api_signature);
+
+ /* ~0 means the data-plane plugin is absent; register nothing */
+ if (sm->msg_id_base != (u16) ~ 0)
+ vat_api_hookup (vam);
+
+ return 0;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */