Diffstat (limited to 'src/plugins')
 src/plugins/gre/CMakeLists.txt |  35
 src/plugins/gre/FEATURE.yaml   |  13
 src/plugins/gre/error.def      |  23
 src/plugins/gre/gre.api        | 110
 src/plugins/gre/gre.c          | 846
 src/plugins/gre/gre.h          | 443
 src/plugins/gre/gre_api.c      | 218
 src/plugins/gre/interface.c    | 832
 src/plugins/gre/node.c         | 576
 src/plugins/gre/pg.c           |  84
 src/plugins/gre/plugin.c       |  26
 src/plugins/nsh/nsh.c          |   2
 12 files changed, 3207 insertions(+), 1 deletion(-)
diff --git a/src/plugins/gre/CMakeLists.txt b/src/plugins/gre/CMakeLists.txt
new file mode 100644
index 00000000000..60fe540b968
--- /dev/null
+++ b/src/plugins/gre/CMakeLists.txt
@@ -0,0 +1,35 @@
+# Copyright (c) 2023 Cisco and/or its affiliates
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+add_vpp_plugin(gre
+ SOURCES
+ gre.c
+ node.c
+ gre_api.c
+ interface.c
+ pg.c
+ plugin.c
+
+ MULTIARCH_SOURCES
+ node.c
+ gre.c
+
+ INSTALL_HEADERS
+ gre.h
+ error.def
+
+ API_FILES
+ gre.api
+
+)
+
diff --git a/src/plugins/gre/FEATURE.yaml b/src/plugins/gre/FEATURE.yaml
new file mode 100644
index 00000000000..4b35b870dc3
--- /dev/null
+++ b/src/plugins/gre/FEATURE.yaml
@@ -0,0 +1,13 @@
+---
+name: Generic Routing Encapsulation
+maintainer: Neale Ranns <nranns@cisco.com>
+features:
+ - L3 tunnels, all combinations of IPv4 and IPv6
+ - Encap/Decap flags to control the copying of DSCP, ECN, DF from overlay to
+ underlay and vice-versa.
+ - L2 tunnels
+missing:
+ - GRE keys
+description: "An implementation of Generic Routing Encapsulation (GRE)"
+state: production
+properties: [API, CLI, MULTITHREAD]
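
Since the feature lists both API and CLI properties, the quickest smoke test is the debug CLI (the CLI handler lives in interface.c, beyond the portion of that file shown here); an illustrative invocation, with example addresses, is:

  create gre tunnel src 10.0.0.1 dst 10.0.0.2
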
diff --git a/src/plugins/gre/error.def b/src/plugins/gre/error.def
new file mode 100644
index 00000000000..161ecc1d874
--- /dev/null
+++ b/src/plugins/gre/error.def
@@ -0,0 +1,23 @@
+/*
+ * gre_error.def: gre errors
+ *
+ * Copyright (c) 2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+gre_error (NONE, "no error")
+gre_error (UNKNOWN_PROTOCOL, "unknown protocol")
+gre_error (UNSUPPORTED_VERSION, "unsupported version")
+gre_error (PKTS_DECAP, "GRE input packets decapsulated")
+gre_error (PKTS_ENCAP, "GRE output packets encapsulated")
+gre_error (NO_SUCH_TUNNEL, "GRE input packets dropped due to missing tunnel")
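
error.def is an X-macro table: each consumer defines gre_error() before including it. Within this patch it is expanded twice, once in gre.h to build the error enum and once in gre.c to build the counter strings, roughly as follows:

  /* gre.h */
  typedef enum
  {
  #define gre_error(n, s) GRE_ERROR_##n,
  #include <gre/error.def>
  #undef gre_error
    GRE_N_ERROR,
  } gre_error_t;

  /* gre.c */
  static char *gre_error_strings[] = {
  #define gre_error(n, s) s,
  #include "error.def"
  #undef gre_error
  };
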
diff --git a/src/plugins/gre/gre.api b/src/plugins/gre/gre.api
new file mode 100644
index 00000000000..9c69ba4007d
--- /dev/null
+++ b/src/plugins/gre/gre.api
@@ -0,0 +1,110 @@
+/* Hey Emacs use -*- mode: C -*- */
+/*
+ * Copyright (c) 2015-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+option version = "2.1.1";
+
+import "vnet/interface_types.api";
+import "vnet/tunnel/tunnel_types.api";
+import "vnet/ip/ip_types.api";
+
+/** \brief A GRE tunnel type
+*/
+enum gre_tunnel_type : u8
+{
+ GRE_API_TUNNEL_TYPE_L3 = 0,
+ /* L2 Transparent Ethernet Bridge */
+ GRE_API_TUNNEL_TYPE_TEB,
+ /* Encapsulated Remote Switched Port ANalyzer */
+ GRE_API_TUNNEL_TYPE_ERSPAN,
+};
+
+/** \brief A composite type uniquely defining a GRE tunnel.
+ @param type - tunnel type (see enum definition), 0: L3, 1: TEB, 2: ERSPAN
+ @param mode - P2P or P2MP
+ @param flags - to control encap/decap behaviour
+ @param session_id - session for ERSPAN tunnel, range 0-1023
+ @param instance - optional unique custom device instance, else ~0.
+ @param outer_table_id - Encap FIB table ID
+ @param sw_if_index - ignored on create/delete, present in details.
+ @param src - Source IP address
+ @param dst - Destination IP address, can be multicast
+*/
+typedef gre_tunnel
+{
+ vl_api_gre_tunnel_type_t type;
+ vl_api_tunnel_mode_t mode;
+ vl_api_tunnel_encap_decap_flags_t flags;
+ u16 session_id;
+ u32 instance;
+ u32 outer_table_id;
+ vl_api_interface_index_t sw_if_index;
+ vl_api_address_t src;
+ vl_api_address_t dst;
+};
+
+/** \brief Add or delete a single GRE tunnel.
+ @param client_index - opaque cookie to identify the sender.
+ @param context - sender context, to match reply w/ request.
+ @param is_add - add if true, delete if false.
+ @param tunnel - tunnel definition to add or delete.
+*/
+define gre_tunnel_add_del
+{
+ u32 client_index;
+ u32 context;
+ bool is_add;
+ vl_api_gre_tunnel_t tunnel;
+};
+
+/** \brief Reply to a GRE tunnel add / delete request.
+ @param context - sender context, to match reply w/ request.
+ @param retval - return code for the request.
+ @param sw_if_index - the interface corresponding to the affected tunnel.
+*/
+define gre_tunnel_add_del_reply
+{
+ u32 context;
+ i32 retval;
+ vl_api_interface_index_t sw_if_index;
+};
+
+/** \brief Dump details of all or just a single GRE tunnel.
+ @param client_index - opaque cookie to identify the sender.
+ @param context - sender context, to match reply w/ request.
+ @param sw_if_index - filter for tunnel of this interface index, ~0 for all.
+*/
+define gre_tunnel_dump
+{
+ u32 client_index;
+ u32 context;
+ vl_api_interface_index_t sw_if_index;
+};
+
+/** \brief Details response for one of the requested GRE tunnels.
+ @param context - sender context, to match reply w/ request.
+ @param tunnel - definition of the dumped tunnel.
+*/
+define gre_tunnel_details
+{
+ u32 context;
+ vl_api_gre_tunnel_t tunnel;
+};
+
+/*
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
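
Clients send every multi-byte field of these messages in network byte order (the handlers in gre_api.c below convert back with ntohl/ntohs). A minimal sketch of populating the add request for an IPv4 point-to-point L3 tunnel follows; the enum and address-union names come from the imported ip_types.api and tunnel_types.api, and the message allocation/transport plumbing is elided:

  /* sketch only: assumes the generated gre.api_types.h is available */
  static void
  gre_fill_add_request (vl_api_gre_tunnel_add_del_t *mp)
  {
    const u8 src[4] = { 192, 168, 1, 1 }; /* illustrative addresses */
    const u8 dst[4] = { 192, 168, 1, 2 };

    mp->is_add = 1;
    mp->tunnel.type = GRE_API_TUNNEL_TYPE_L3;
    mp->tunnel.mode = TUNNEL_API_MODE_P2P; /* from tunnel_types.api */
    mp->tunnel.instance = clib_host_to_net_u32 (~0); /* let VPP pick gre<N> */
    mp->tunnel.outer_table_id = clib_host_to_net_u32 (0); /* default FIB */
    mp->tunnel.src.af = ADDRESS_IP4; /* from ip_types.api */
    mp->tunnel.dst.af = ADDRESS_IP4;
    clib_memcpy (mp->tunnel.src.un.ip4, src, 4);
    clib_memcpy (mp->tunnel.dst.un.ip4, dst, 4);
  }
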
diff --git a/src/plugins/gre/gre.c b/src/plugins/gre/gre.c
new file mode 100644
index 00000000000..a15717475f8
--- /dev/null
+++ b/src/plugins/gre/gre.c
@@ -0,0 +1,846 @@
+/*
+ * gre.c: gre
+ *
+ * Copyright (c) 2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <gre/gre.h>
+#include <vnet/adj/adj_midchain.h>
+#include <vnet/tunnel/tunnel_dp.h>
+#include <vpp/app/version.h>
+#include <vnet/plugin/plugin.h>
+
+extern gre_main_t gre_main;
+
+#ifndef CLIB_MARCH_VARIANT
+gre_main_t gre_main;
+
+typedef struct
+{
+ union
+ {
+ ip4_and_gre_header_t ip4_and_gre;
+ u64 as_u64[3];
+ };
+} ip4_and_gre_union_t;
+
+typedef struct
+{
+ union
+ {
+ ip6_and_gre_header_t ip6_and_gre;
+ u64 as_u64[3];
+ };
+} ip6_and_gre_union_t;
+#endif /* CLIB_MARCH_VARIANT */
+
+/* Packet trace structure */
+typedef struct
+{
+ /* Tunnel-id / index in tunnel vector */
+ u32 tunnel_id;
+
+ /* pkt length */
+ u32 length;
+
+ /* tunnel ip addresses */
+ ip46_address_t src;
+ ip46_address_t dst;
+} gre_tx_trace_t;
+
+extern u8 *format_gre_tx_trace (u8 *s, va_list *args);
+
+#ifndef CLIB_MARCH_VARIANT
+u8 *
+format_gre_tx_trace (u8 *s, va_list *args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ gre_tx_trace_t *t = va_arg (*args, gre_tx_trace_t *);
+
+ s = format (s, "GRE: tunnel %d len %d src %U dst %U", t->tunnel_id,
+ t->length, format_ip46_address, &t->src, IP46_TYPE_ANY,
+ format_ip46_address, &t->dst, IP46_TYPE_ANY);
+ return s;
+}
+
+u8 *
+format_gre_protocol (u8 *s, va_list *args)
+{
+ gre_protocol_t p = va_arg (*args, u32);
+ gre_main_t *gm = &gre_main;
+ gre_protocol_info_t *pi = gre_get_protocol_info (gm, p);
+
+ if (pi)
+ s = format (s, "%s", pi->name);
+ else
+ s = format (s, "0x%04x", p);
+
+ return s;
+}
+
+u8 *
+format_gre_header_with_length (u8 *s, va_list *args)
+{
+ gre_main_t *gm = &gre_main;
+ gre_header_t *h = va_arg (*args, gre_header_t *);
+ u32 max_header_bytes = va_arg (*args, u32);
+ gre_protocol_t p = clib_net_to_host_u16 (h->protocol);
+ u32 indent, header_bytes;
+
+ header_bytes = sizeof (h[0]);
+ if (max_header_bytes != 0 && header_bytes > max_header_bytes)
+ return format (s, "gre header truncated");
+
+ indent = format_get_indent (s);
+
+ s = format (s, "GRE %U", format_gre_protocol, p);
+
+ if (max_header_bytes != 0 && header_bytes < max_header_bytes)
+ {
+ gre_protocol_info_t *pi = gre_get_protocol_info (gm, p);
+ vlib_node_t *node = vlib_get_node (gm->vlib_main, pi->node_index);
+ if (node->format_buffer)
+ s =
+ format (s, "\n%U%U", format_white_space, indent, node->format_buffer,
+ (void *) (h + 1), max_header_bytes - header_bytes);
+ }
+
+ return s;
+}
+
+u8 *
+format_gre_header (u8 *s, va_list *args)
+{
+ gre_header_t *h = va_arg (*args, gre_header_t *);
+ return format (s, "%U", format_gre_header_with_length, h, 0);
+}
+
+/* Returns gre protocol as an int in host byte order. */
+uword
+unformat_gre_protocol_host_byte_order (unformat_input_t *input, va_list *args)
+{
+ u16 *result = va_arg (*args, u16 *);
+ gre_main_t *gm = &gre_main;
+ int i;
+
+ /* Named type. */
+ if (unformat_user (input, unformat_vlib_number_by_name,
+ gm->protocol_info_by_name, &i))
+ {
+ gre_protocol_info_t *pi = vec_elt_at_index (gm->protocol_infos, i);
+ *result = pi->protocol;
+ return 1;
+ }
+
+ return 0;
+}
+
+uword
+unformat_gre_protocol_net_byte_order (unformat_input_t *input, va_list *args)
+{
+ u16 *result = va_arg (*args, u16 *);
+ if (!unformat_user (input, unformat_gre_protocol_host_byte_order, result))
+ return 0;
+ *result = clib_host_to_net_u16 ((u16) *result);
+ return 1;
+}
+
+uword
+unformat_gre_header (unformat_input_t *input, va_list *args)
+{
+ u8 **result = va_arg (*args, u8 **);
+ gre_header_t _h, *h = &_h;
+ u16 p;
+
+ if (!unformat (input, "%U", unformat_gre_protocol_host_byte_order, &p))
+ return 0;
+
+ h->protocol = clib_host_to_net_u16 (p);
+
+ /* Add header to result. */
+ {
+ void *p;
+ u32 n_bytes = sizeof (h[0]);
+
+ vec_add2 (*result, p, n_bytes);
+ clib_memcpy (p, h, n_bytes);
+ }
+
+ return 1;
+}
+
+static int
+gre_proto_from_vnet_link (vnet_link_t link)
+{
+ switch (link)
+ {
+ case VNET_LINK_IP4:
+ return (GRE_PROTOCOL_ip4);
+ case VNET_LINK_IP6:
+ return (GRE_PROTOCOL_ip6);
+ case VNET_LINK_MPLS:
+ return (GRE_PROTOCOL_mpls_unicast);
+ case VNET_LINK_ETHERNET:
+ return (GRE_PROTOCOL_teb);
+ case VNET_LINK_ARP:
+ return (GRE_PROTOCOL_arp);
+ case VNET_LINK_NSH:
+ ASSERT (0);
+ break;
+ }
+ ASSERT (0);
+ return (GRE_PROTOCOL_ip4);
+}
+
+static u8 *
+gre_build_rewrite (vnet_main_t *vnm, u32 sw_if_index, vnet_link_t link_type,
+ const void *dst_address)
+{
+ gre_main_t *gm = &gre_main;
+ const ip46_address_t *dst;
+ ip4_and_gre_header_t *h4;
+ ip6_and_gre_header_t *h6;
+ gre_header_t *gre;
+ u8 *rewrite = NULL;
+ gre_tunnel_t *t;
+ u32 ti;
+ u8 is_ipv6;
+
+ dst = dst_address;
+ ti = gm->tunnel_index_by_sw_if_index[sw_if_index];
+
+ if (~0 == ti)
+ /* not one of ours */
+ return (0);
+
+ t = pool_elt_at_index (gm->tunnels, ti);
+
+ is_ipv6 = t->tunnel_dst.fp_proto == FIB_PROTOCOL_IP6 ? 1 : 0;
+
+ if (!is_ipv6)
+ {
+ vec_validate (rewrite, sizeof (*h4) - 1);
+ h4 = (ip4_and_gre_header_t *) rewrite;
+ gre = &h4->gre;
+ h4->ip4.ip_version_and_header_length = 0x45;
+ h4->ip4.ttl = 254;
+ h4->ip4.protocol = IP_PROTOCOL_GRE;
+ /* fixup ip4 header length and checksum after-the-fact */
+ h4->ip4.src_address.as_u32 = t->tunnel_src.ip4.as_u32;
+ h4->ip4.dst_address.as_u32 = dst->ip4.as_u32;
+ h4->ip4.checksum = ip4_header_checksum (&h4->ip4);
+ }
+ else
+ {
+ vec_validate (rewrite, sizeof (*h6) - 1);
+ h6 = (ip6_and_gre_header_t *) rewrite;
+ gre = &h6->gre;
+ h6->ip6.ip_version_traffic_class_and_flow_label =
+ clib_host_to_net_u32 (6 << 28);
+ h6->ip6.hop_limit = 255;
+ h6->ip6.protocol = IP_PROTOCOL_GRE;
+ /* fixup ip6 header length and checksum after-the-fact */
+ h6->ip6.src_address.as_u64[0] = t->tunnel_src.ip6.as_u64[0];
+ h6->ip6.src_address.as_u64[1] = t->tunnel_src.ip6.as_u64[1];
+ h6->ip6.dst_address.as_u64[0] = dst->ip6.as_u64[0];
+ h6->ip6.dst_address.as_u64[1] = dst->ip6.as_u64[1];
+ }
+
+ if (PREDICT_FALSE (t->type == GRE_TUNNEL_TYPE_ERSPAN))
+ {
+ gre->protocol = clib_host_to_net_u16 (GRE_PROTOCOL_erspan);
+ gre->flags_and_version = clib_host_to_net_u16 (GRE_FLAGS_SEQUENCE);
+ }
+ else
+ gre->protocol =
+ clib_host_to_net_u16 (gre_proto_from_vnet_link (link_type));
+
+ return (rewrite);
+}
+
+static void
+gre44_fixup (vlib_main_t *vm, const ip_adjacency_t *adj, vlib_buffer_t *b0,
+ const void *data)
+{
+ tunnel_encap_decap_flags_t flags;
+ ip4_and_gre_header_t *ip0;
+
+ ip0 = vlib_buffer_get_current (b0);
+ flags = pointer_to_uword (data);
+
+ /* Fixup the checksum and len fields in the GRE tunnel encap
+ * that was applied at the midchain node */
+ ip0->ip4.length =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
+ tunnel_encap_fixup_4o4 (flags, (ip4_header_t *) (ip0 + 1), &ip0->ip4);
+ ip0->ip4.checksum = ip4_header_checksum (&ip0->ip4);
+}
+
+static void
+gre64_fixup (vlib_main_t *vm, const ip_adjacency_t *adj, vlib_buffer_t *b0,
+ const void *data)
+{
+ tunnel_encap_decap_flags_t flags;
+ ip4_and_gre_header_t *ip0;
+
+ ip0 = vlib_buffer_get_current (b0);
+ flags = pointer_to_uword (data);
+
+ /* Fixup the checksum and len fields in the GRE tunnel encap
+ * that was applied at the midchain node */
+ ip0->ip4.length =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
+ tunnel_encap_fixup_6o4 (flags, (ip6_header_t *) (ip0 + 1), &ip0->ip4);
+ ip0->ip4.checksum = ip4_header_checksum (&ip0->ip4);
+}
+
+static void
+grex4_fixup (vlib_main_t *vm, const ip_adjacency_t *adj, vlib_buffer_t *b0,
+ const void *data)
+{
+ ip4_header_t *ip0;
+
+ ip0 = vlib_buffer_get_current (b0);
+
+ /* Fixup the checksum and len fields in the GRE tunnel encap
+ * that was applied at the midchain node */
+ ip0->length = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
+ ip0->checksum = ip4_header_checksum (ip0);
+}
+
+static void
+gre46_fixup (vlib_main_t *vm, const ip_adjacency_t *adj, vlib_buffer_t *b0,
+ const void *data)
+{
+ tunnel_encap_decap_flags_t flags;
+ ip6_and_gre_header_t *ip0;
+
+ ip0 = vlib_buffer_get_current (b0);
+ flags = pointer_to_uword (data);
+
+ /* Fixup the payload length field in the GRE tunnel encap that was applied
+ * at the midchain node */
+ ip0->ip6.payload_length = clib_host_to_net_u16 (
+ vlib_buffer_length_in_chain (vm, b0) - sizeof (ip0->ip6));
+ tunnel_encap_fixup_4o6 (flags, b0, (ip4_header_t *) (ip0 + 1), &ip0->ip6);
+}
+
+static void
+gre66_fixup (vlib_main_t *vm, const ip_adjacency_t *adj, vlib_buffer_t *b0,
+ const void *data)
+{
+ tunnel_encap_decap_flags_t flags;
+ ip6_and_gre_header_t *ip0;
+
+ ip0 = vlib_buffer_get_current (b0);
+ flags = pointer_to_uword (data);
+
+ /* Fixup the payload length field in the GRE tunnel encap that was applied
+ * at the midchain node */
+ ip0->ip6.payload_length = clib_host_to_net_u16 (
+ vlib_buffer_length_in_chain (vm, b0) - sizeof (ip0->ip6));
+ tunnel_encap_fixup_6o6 (flags, (ip6_header_t *) (ip0 + 1), &ip0->ip6);
+}
+
+static void
+grex6_fixup (vlib_main_t *vm, const ip_adjacency_t *adj, vlib_buffer_t *b0,
+ const void *data)
+{
+ ip6_and_gre_header_t *ip0;
+
+ ip0 = vlib_buffer_get_current (b0);
+
+ /* Fixup the payload length field in the GRE tunnel encap that was applied
+ * at the midchain node */
+ ip0->ip6.payload_length = clib_host_to_net_u16 (
+ vlib_buffer_length_in_chain (vm, b0) - sizeof (ip0->ip6));
+}
+
+/**
+ * return the appropriate fixup function given the overlay (link-type) and
+ * underlay (fproto) combination
+ */
+static adj_midchain_fixup_t
+gre_get_fixup (fib_protocol_t fproto, vnet_link_t lt)
+{
+ if (fproto == FIB_PROTOCOL_IP6 && lt == VNET_LINK_IP6)
+ return (gre66_fixup);
+ if (fproto == FIB_PROTOCOL_IP6 && lt == VNET_LINK_IP4)
+ return (gre46_fixup);
+ if (fproto == FIB_PROTOCOL_IP4 && lt == VNET_LINK_IP6)
+ return (gre64_fixup);
+ if (fproto == FIB_PROTOCOL_IP4 && lt == VNET_LINK_IP4)
+ return (gre44_fixup);
+ if (fproto == FIB_PROTOCOL_IP6)
+ return (grex6_fixup);
+ if (fproto == FIB_PROTOCOL_IP4)
+ return (grex4_fixup);
+
+ ASSERT (0);
+ return (gre44_fixup);
+}
+
+void
+gre_update_adj (vnet_main_t *vnm, u32 sw_if_index, adj_index_t ai)
+{
+ gre_main_t *gm = &gre_main;
+ gre_tunnel_t *t;
+ adj_flags_t af;
+ u32 ti;
+
+ ti = gm->tunnel_index_by_sw_if_index[sw_if_index];
+ t = pool_elt_at_index (gm->tunnels, ti);
+ af = ADJ_FLAG_NONE;
+
+ /*
+ * the user has not requested that the load-balancing be based on
+ * a flow hash of the inner packet, so use the stacking to choose
+ * a path.
+ */
+ if (!(t->flags & TUNNEL_ENCAP_DECAP_FLAG_ENCAP_INNER_HASH))
+ af |= ADJ_FLAG_MIDCHAIN_IP_STACK;
+
+ adj_nbr_midchain_update_rewrite (
+ ai, gre_get_fixup (t->tunnel_dst.fp_proto, adj_get_link_type (ai)),
+ uword_to_pointer (t->flags, void *), af,
+ gre_build_rewrite (vnm, sw_if_index, adj_get_link_type (ai),
+ &t->tunnel_dst.fp_addr));
+
+ gre_tunnel_stack (ai);
+}
+
+adj_walk_rc_t
+mgre_mk_complete_walk (adj_index_t ai, void *data)
+{
+ mgre_walk_ctx_t *ctx = data;
+ adj_flags_t af;
+
+ af = ADJ_FLAG_NONE;
+
+ /*
+ * the user has not requested that the load-balancing be based on
+ * a flow hash of the inner packet, so use the stacking to choose
+ * a path.
+ */
+ if (!(ctx->t->flags & TUNNEL_ENCAP_DECAP_FLAG_ENCAP_INNER_HASH))
+ af |= ADJ_FLAG_MIDCHAIN_IP_STACK;
+
+ adj_nbr_midchain_update_rewrite (
+ ai, gre_get_fixup (ctx->t->tunnel_dst.fp_proto, adj_get_link_type (ai)),
+ uword_to_pointer (ctx->t->flags, void *), af,
+ gre_build_rewrite (vnet_get_main (), ctx->t->sw_if_index,
+ adj_get_link_type (ai),
+ &teib_entry_get_nh (ctx->ne)->fp_addr));
+
+ teib_entry_adj_stack (ctx->ne, ai);
+
+ return (ADJ_WALK_RC_CONTINUE);
+}
+
+adj_walk_rc_t
+mgre_mk_incomplete_walk (adj_index_t ai, void *data)
+{
+ gre_tunnel_t *t = data;
+
+ adj_nbr_midchain_update_rewrite (
+ ai, gre_get_fixup (t->tunnel_dst.fp_proto, adj_get_link_type (ai)), NULL,
+ ADJ_FLAG_NONE, NULL);
+
+ adj_midchain_delegate_unstack (ai);
+
+ return (ADJ_WALK_RC_CONTINUE);
+}
+
+void
+mgre_update_adj (vnet_main_t *vnm, u32 sw_if_index, adj_index_t ai)
+{
+ gre_main_t *gm = &gre_main;
+ ip_adjacency_t *adj;
+ teib_entry_t *ne;
+ gre_tunnel_t *t;
+ u32 ti;
+
+ adj = adj_get (ai);
+ ti = gm->tunnel_index_by_sw_if_index[sw_if_index];
+ t = pool_elt_at_index (gm->tunnels, ti);
+
+ ne = teib_entry_find_46 (sw_if_index, adj->ia_nh_proto,
+ &adj->sub_type.nbr.next_hop);
+
+ if (NULL == ne)
+ {
+ // no TEIB entry to provide the next-hop
+ adj_nbr_midchain_update_rewrite (
+ ai, gre_get_fixup (t->tunnel_dst.fp_proto, adj_get_link_type (ai)),
+ uword_to_pointer (t->flags, void *), ADJ_FLAG_NONE, NULL);
+ return;
+ }
+
+ mgre_walk_ctx_t ctx = { .t = t, .ne = ne };
+ adj_nbr_walk_nh (sw_if_index, adj->ia_nh_proto, &adj->sub_type.nbr.next_hop,
+ mgre_mk_complete_walk, &ctx);
+}
+#endif /* CLIB_MARCH_VARIANT */
+
+typedef enum
+{
+ GRE_ENCAP_NEXT_L2_MIDCHAIN,
+ GRE_ENCAP_N_NEXT,
+} gre_encap_next_t;
+
+/**
+ * @brief TX function. Only called for L2 payload including TEB or ERSPAN.
+ * L3 traffic uses the adj-midchains.
+ */
+static_always_inline u32
+gre_encap_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame, gre_tunnel_type_t type)
+{
+ gre_main_t *gm = &gre_main;
+ u32 *from, n_left_from;
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
+ u32 sw_if_index[2] = { ~0, ~0 };
+ const gre_tunnel_t *gt[2] = { 0 };
+ adj_index_t adj_index[2] = { ADJ_INDEX_INVALID, ADJ_INDEX_INVALID };
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ vlib_get_buffers (vm, from, bufs, n_left_from);
+
+ while (n_left_from >= 2)
+ {
+
+ if (PREDICT_FALSE (sw_if_index[0] !=
+ vnet_buffer (b[0])->sw_if_index[VLIB_TX]))
+ {
+ const vnet_hw_interface_t *hi;
+ sw_if_index[0] = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
+ hi = vnet_get_sup_hw_interface (gm->vnet_main, sw_if_index[0]);
+ gt[0] = &gm->tunnels[hi->dev_instance];
+ adj_index[0] = gt[0]->l2_adj_index;
+ }
+ if (PREDICT_FALSE (sw_if_index[1] !=
+ vnet_buffer (b[1])->sw_if_index[VLIB_TX]))
+ {
+ const vnet_hw_interface_t *hi;
+ sw_if_index[1] = vnet_buffer (b[1])->sw_if_index[VLIB_TX];
+ hi = vnet_get_sup_hw_interface (gm->vnet_main, sw_if_index[1]);
+ gt[1] = &gm->tunnels[hi->dev_instance];
+ adj_index[1] = gt[1]->l2_adj_index;
+ }
+
+ vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = adj_index[0];
+ vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = adj_index[1];
+
+ if (type == GRE_TUNNEL_TYPE_ERSPAN)
+ {
+ /* Encap GRE seq# and ERSPAN type II header */
+ erspan_t2_t *h0;
+ u32 seq_num;
+ u64 hdr;
+ vlib_buffer_advance (b[0], -sizeof (erspan_t2_t));
+ h0 = vlib_buffer_get_current (b[0]);
+ seq_num = clib_atomic_fetch_add (&gt[0]->gre_sn->seq_num, 1);
+ hdr = clib_host_to_net_u64 (ERSPAN_HDR2);
+ h0->seq_num = clib_host_to_net_u32 (seq_num);
+ h0->t2_u64 = hdr;
+ h0->t2.cos_en_t_session |= clib_host_to_net_u16 (gt[0]->session_id);
+ }
+ if (type == GRE_TUNNEL_TYPE_ERSPAN)
+ {
+ /* Encap GRE seq# and ERSPAN type II header */
+ erspan_t2_t *h0;
+ u32 seq_num;
+ u64 hdr;
+ vlib_buffer_advance (b[1], -sizeof (erspan_t2_t));
+ h0 = vlib_buffer_get_current (b[1]);
+ seq_num = clib_atomic_fetch_add (&gt[1]->gre_sn->seq_num, 1);
+ hdr = clib_host_to_net_u64 (ERSPAN_HDR2);
+ h0->seq_num = clib_host_to_net_u32 (seq_num);
+ h0->t2_u64 = hdr;
+ h0->t2.cos_en_t_session |= clib_host_to_net_u16 (gt[1]->session_id);
+ }
+
+ if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ gre_tx_trace_t *tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
+ tr->tunnel_id = gt[0] - gm->tunnels;
+ tr->src = gt[0]->tunnel_src;
+ tr->dst = gt[0]->tunnel_dst.fp_addr;
+ tr->length = vlib_buffer_length_in_chain (vm, b[0]);
+ }
+ if (PREDICT_FALSE (b[1]->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ gre_tx_trace_t *tr = vlib_add_trace (vm, node, b[1], sizeof (*tr));
+ tr->tunnel_id = gt[1] - gm->tunnels;
+ tr->src = gt[1]->tunnel_src;
+ tr->dst = gt[1]->tunnel_dst.fp_addr;
+ tr->length = vlib_buffer_length_in_chain (vm, b[1]);
+ }
+
+ b += 2;
+ n_left_from -= 2;
+ }
+
+ while (n_left_from >= 1)
+ {
+
+ if (PREDICT_FALSE (sw_if_index[0] !=
+ vnet_buffer (b[0])->sw_if_index[VLIB_TX]))
+ {
+ const vnet_hw_interface_t *hi;
+ sw_if_index[0] = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
+ hi = vnet_get_sup_hw_interface (gm->vnet_main, sw_if_index[0]);
+ gt[0] = &gm->tunnels[hi->dev_instance];
+ adj_index[0] = gt[0]->l2_adj_index;
+ }
+
+ vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = adj_index[0];
+
+ if (type == GRE_TUNNEL_TYPE_ERSPAN)
+ {
+ /* Encap GRE seq# and ERSPAN type II header */
+ erspan_t2_t *h0;
+ u32 seq_num;
+ u64 hdr;
+ ASSERT (gt[0]->type == GRE_TUNNEL_TYPE_ERSPAN);
+ vlib_buffer_advance (b[0], -sizeof (erspan_t2_t));
+ h0 = vlib_buffer_get_current (b[0]);
+ seq_num = clib_atomic_fetch_add (&gt[0]->gre_sn->seq_num, 1);
+ hdr = clib_host_to_net_u64 (ERSPAN_HDR2);
+ h0->seq_num = clib_host_to_net_u32 (seq_num);
+ h0->t2_u64 = hdr;
+ h0->t2.cos_en_t_session |= clib_host_to_net_u16 (gt[0]->session_id);
+ }
+
+ if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ gre_tx_trace_t *tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
+ tr->tunnel_id = gt[0] - gm->tunnels;
+ tr->src = gt[0]->tunnel_src;
+ tr->dst = gt[0]->tunnel_dst.fp_addr;
+ tr->length = vlib_buffer_length_in_chain (vm, b[0]);
+ }
+
+ b += 1;
+ n_left_from -= 1;
+ }
+
+ vlib_buffer_enqueue_to_single_next (
+ vm, node, from, GRE_ENCAP_NEXT_L2_MIDCHAIN, frame->n_vectors);
+
+ vlib_node_increment_counter (vm, node->node_index, GRE_ERROR_PKTS_ENCAP,
+ frame->n_vectors);
+
+ return frame->n_vectors;
+}
+
+static char *gre_error_strings[] = {
+#define gre_error(n, s) s,
+#include "error.def"
+#undef gre_error
+};
+
+VLIB_NODE_FN (gre_teb_encap_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
+{
+ return (gre_encap_inline (vm, node, frame, GRE_TUNNEL_TYPE_TEB));
+}
+
+VLIB_NODE_FN (gre_erspan_encap_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
+{
+ return (gre_encap_inline (vm, node, frame, GRE_TUNNEL_TYPE_ERSPAN));
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (gre_teb_encap_node) =
+{
+ .name = "gre-teb-encap",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gre_tx_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = GRE_N_ERROR,
+ .error_strings = gre_error_strings,
+ .n_next_nodes = GRE_ENCAP_N_NEXT,
+ .next_nodes = {
+ [GRE_ENCAP_NEXT_L2_MIDCHAIN] = "adj-l2-midchain",
+ },
+};
+VLIB_REGISTER_NODE (gre_erspan_encap_node) =
+{
+ .name = "gre-erspan-encap",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gre_tx_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = GRE_N_ERROR,
+ .error_strings = gre_error_strings,
+ .n_next_nodes = GRE_ENCAP_N_NEXT,
+ .next_nodes = {
+ [GRE_ENCAP_NEXT_L2_MIDCHAIN] = "adj-l2-midchain",
+ },
+};
+/* *INDENT-ON* */
+
+#ifndef CLIB_MARCH_VARIANT
+static u8 *
+format_gre_tunnel_name (u8 *s, va_list *args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ gre_main_t *gm = &gre_main;
+ gre_tunnel_t *t;
+
+ if (dev_instance >= vec_len (gm->tunnels))
+ return format (s, "<improperly-referenced>");
+
+ t = pool_elt_at_index (gm->tunnels, dev_instance);
+ return format (s, "gre%d", t->user_instance);
+}
+
+static u8 *
+format_gre_device (u8 *s, va_list *args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ CLIB_UNUSED (int verbose) = va_arg (*args, int);
+
+ s = format (s, "GRE tunnel: id %d\n", dev_instance);
+ return s;
+}
+
+static int
+gre_tunnel_desc (u32 sw_if_index, ip46_address_t *src, ip46_address_t *dst,
+ u8 *is_l2)
+{
+ gre_main_t *gm = &gre_main;
+ gre_tunnel_t *t;
+ u32 ti;
+
+ ti = gm->tunnel_index_by_sw_if_index[sw_if_index];
+
+ if (~0 == ti)
+ /* not one of ours */
+ return -1;
+
+ t = pool_elt_at_index (gm->tunnels, ti);
+
+ *src = t->tunnel_src;
+ *dst = t->tunnel_dst.fp_addr;
+ *is_l2 = t->type == GRE_TUNNEL_TYPE_TEB;
+
+ return (0);
+}
+
+/* *INDENT-OFF* */
+VNET_DEVICE_CLASS (gre_device_class) = {
+ .name = "GRE tunnel device",
+ .format_device_name = format_gre_tunnel_name,
+ .format_device = format_gre_device,
+ .format_tx_trace = format_gre_tx_trace,
+ .admin_up_down_function = gre_interface_admin_up_down,
+ .ip_tun_desc = gre_tunnel_desc,
+#ifdef SOON
+ .clear counter = 0;
+#endif
+}
+;
+
+VNET_HW_INTERFACE_CLASS (gre_hw_interface_class) = {
+ .name = "GRE",
+ .format_header = format_gre_header_with_length,
+ .unformat_header = unformat_gre_header,
+ .build_rewrite = gre_build_rewrite,
+ .update_adjacency = gre_update_adj,
+ .flags = VNET_HW_INTERFACE_CLASS_FLAG_P2P,
+};
+
+VNET_HW_INTERFACE_CLASS (mgre_hw_interface_class) = {
+ .name = "mGRE",
+ .format_header = format_gre_header_with_length,
+ .unformat_header = unformat_gre_header,
+ .build_rewrite = gre_build_rewrite,
+ .update_adjacency = mgre_update_adj,
+ .flags = VNET_HW_INTERFACE_CLASS_FLAG_NBMA,
+};
+/* *INDENT-ON* */
+#endif /* CLIB_MARCH_VARIANT */
+
+static void
+add_protocol (gre_main_t *gm, gre_protocol_t protocol, char *protocol_name)
+{
+ gre_protocol_info_t *pi;
+ u32 i;
+
+ vec_add2 (gm->protocol_infos, pi, 1);
+ i = pi - gm->protocol_infos;
+
+ pi->name = protocol_name;
+ pi->protocol = protocol;
+ pi->next_index = pi->node_index = ~0;
+
+ hash_set (gm->protocol_info_by_protocol, protocol, i);
+ hash_set_mem (gm->protocol_info_by_name, pi->name, i);
+}
+
+static clib_error_t *
+gre_init (vlib_main_t *vm)
+{
+ gre_main_t *gm = &gre_main;
+ clib_error_t *error;
+ ip_main_t *im = &ip_main;
+ ip_protocol_info_t *pi;
+
+ clib_memset (gm, 0, sizeof (gm[0]));
+ gm->vlib_main = vm;
+ gm->vnet_main = vnet_get_main ();
+
+ if ((error = vlib_call_init_function (vm, ip_main_init)))
+ return error;
+
+ if ((error = vlib_call_init_function (vm, ip4_lookup_init)))
+ return error;
+
+ if ((error = vlib_call_init_function (vm, ip6_lookup_init)))
+ return error;
+
+ /* Set up the ip packet generator */
+ pi = ip_get_protocol_info (im, IP_PROTOCOL_GRE);
+ pi->format_header = format_gre_header;
+ pi->unformat_pg_edit = unformat_pg_gre_header;
+
+ gm->protocol_info_by_name = hash_create_string (0, sizeof (uword));
+ gm->protocol_info_by_protocol = hash_create (0, sizeof (uword));
+ gm->tunnel_by_key4 =
+ hash_create_mem (0, sizeof (gre_tunnel_key4_t), sizeof (uword));
+ gm->tunnel_by_key6 =
+ hash_create_mem (0, sizeof (gre_tunnel_key6_t), sizeof (uword));
+ gm->seq_num_by_key =
+ hash_create_mem (0, sizeof (gre_sn_key_t), sizeof (uword));
+
+#define _(n, s) add_protocol (gm, GRE_PROTOCOL_##s, #s);
+ foreach_gre_protocol
+#undef _
+ return vlib_call_init_function (vm, gre_input_init);
+}
+
+VLIB_INIT_FUNCTION (gre_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
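
For orientation on the encap path above: with an IPv4 underlay carrying an IPv4 payload, gre_build_rewrite() returns a 24-byte rewrite, i.e. one ip4_and_gre_header_t laid out (per the code above) as:

  ip4: version/IHL 0x45, ttl 254, protocol 47 (GRE),
       src = tunnel_src, dst = tunnel_dst, checksum precomputed;
       the length and checksum are then corrected per packet in gre44_fixup ()
  gre: flags_and_version 0 (the rewrite vector is zero-initialised),
       protocol 0x0800 (GRE_PROTOCOL_ip4)
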
diff --git a/src/plugins/gre/gre.h b/src/plugins/gre/gre.h
new file mode 100644
index 00000000000..9e17efcf7ef
--- /dev/null
+++ b/src/plugins/gre/gre.h
@@ -0,0 +1,443 @@
+/*
+ * gre.h: types/functions for gre.
+ *
+ * Copyright (c) 2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_gre_h
+#define included_gre_h
+
+#include <vnet/vnet.h>
+#include <vnet/gre/packet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ip/format.h>
+#include <vnet/adj/adj_types.h>
+#include <vnet/tunnel/tunnel.h>
+#include <vnet/teib/teib.h>
+
+extern vnet_hw_interface_class_t gre_hw_interface_class;
+extern vnet_hw_interface_class_t mgre_hw_interface_class;
+
+typedef enum
+{
+#define gre_error(n,s) GRE_ERROR_##n,
+#include <gre/error.def>
+#undef gre_error
+ GRE_N_ERROR,
+} gre_error_t;
+
+/**
+ * L3: GRE (i.e. this tunnel is in L3 mode)
+ * TEB: Transparent Ethernet Bridging - the tunnel is in L2 mode
+ * ERSPAN: type 2 - the tunnel is for port mirror SPAN output. Each tunnel is
+ * associated with a session ID and expected to be used for encap
+ * and output of mirrored packets from an L2 network only. There is
+ * no support for receiving ERSPAN packets from a GRE ERSPAN tunnel
+ */
+#define foreach_gre_tunnel_type \
+ _(L3, "L3") \
+ _(TEB, "TEB") \
+ _(ERSPAN, "ERSPAN") \
+
+/**
+ * @brief The GRE tunnel type
+ */
+typedef enum gre_tunnel_type_t_
+{
+#define _(n, s) GRE_TUNNEL_TYPE_##n,
+ foreach_gre_tunnel_type
+#undef _
+} __clib_packed gre_tunnel_type_t;
+
+extern u8 *format_gre_tunnel_type (u8 * s, va_list * args);
+
+
+/**
+ * A GRE payload protocol registration
+ */
+typedef struct
+{
+ /** Name (a c string). */
+ char *name;
+
+ /** GRE protocol type in host byte order. */
+ gre_protocol_t protocol;
+
+ /** GRE tunnel type */
+ gre_tunnel_type_t tunnel_type;
+
+ /** Node which handles this type. */
+ u32 node_index;
+
+ /** Next index for this type. */
+ u32 next_index;
+} gre_protocol_info_t;
+
+/**
+ * Elements of the GRE key that are common to v4 and v6 addresses
+ */
+typedef struct gre_tunnel_key_common_t_
+{
+ union
+ {
+ struct
+ {
+ u32 fib_index;
+ u16 session_id;
+ gre_tunnel_type_t type;
+ tunnel_mode_t mode;
+ };
+ u64 as_u64;
+ };
+} gre_tunnel_key_common_t;
+
+STATIC_ASSERT_SIZEOF (gre_tunnel_key_common_t, sizeof (u64));
+
+/**
+ * @brief Key for a IPv4 GRE Tunnel
+ */
+typedef struct gre_tunnel_key4_t_
+{
+ /**
+ * Source and destination IP addresses
+ */
+ union
+ {
+ struct
+ {
+ ip4_address_t gtk_src;
+ ip4_address_t gtk_dst;
+ };
+ u64 gtk_as_u64;
+ };
+
+ /** address independent attributes */
+ gre_tunnel_key_common_t gtk_common;
+} __attribute__ ((packed)) gre_tunnel_key4_t;
+
+STATIC_ASSERT_SIZEOF (gre_tunnel_key4_t, 2 * sizeof (u64));
+
+/**
+ * @brief Key for a IPv6 GRE Tunnel
+ * We use a different type so that the V4 key hash is as small as possible
+ */
+typedef struct gre_tunnel_key6_t_
+{
+ /**
+ * Source and destination IP addresses
+ */
+ ip6_address_t gtk_src;
+ ip6_address_t gtk_dst;
+
+ /** address independent attributes */
+ gre_tunnel_key_common_t gtk_common;
+} __attribute__ ((packed)) gre_tunnel_key6_t;
+
+STATIC_ASSERT_SIZEOF (gre_tunnel_key6_t, 5 * sizeof (u64));
+
+/**
+ * Union of the two possible key types
+ */
+typedef union gre_tunnel_key_t_
+{
+ gre_tunnel_key4_t gtk_v4;
+ gre_tunnel_key6_t gtk_v6;
+} gre_tunnel_key_t;
+
+/**
+ * The session ID is only a 10 bit value
+ */
+#define GTK_SESSION_ID_MAX (0x3ff)
+
+/**
+ * Used for GRE header seq number generation for ERSPAN encap
+ */
+typedef struct
+{
+ u32 seq_num;
+ u32 ref_count;
+} gre_sn_t;
+
+/**
+ * Hash key for GRE header seq number generation for ERSPAN encap
+ */
+typedef struct
+{
+ ip46_address_t src;
+ ip46_address_t dst;
+ u32 fib_index;
+} gre_sn_key_t;
+
+/**
+ * @brief A representation of a GRE tunnel
+ */
+typedef struct
+{
+ /**
+ * Required for pool_get_aligned
+ */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+
+ /**
+ * The tunnel's source/local address
+ */
+ ip46_address_t tunnel_src;
+ /**
+ * The tunnel's destination/remote address
+ */
+ fib_prefix_t tunnel_dst;
+ /**
+ * The FIB in which the src/dst addresses are present
+ */
+ u32 outer_fib_index;
+ u32 hw_if_index;
+ u32 sw_if_index;
+ gre_tunnel_type_t type;
+ tunnel_mode_t mode;
+ tunnel_encap_decap_flags_t flags;
+
+ /**
+ * an L2 tunnel always requires an L2 midchain. Cached here for the DP.
+ */
+ adj_index_t l2_adj_index;
+
+ /**
+ * ERSPAN type 2 session ID, least significant 10 bits of u16
+ */
+ u16 session_id;
+
+ /**
+ * GRE header sequence number (SN) used for ERSPAN type 2 header, must be
+ * bumped atomically to be thread safe. As multiple GRE tunnels are created
+ * for the same fib-idx/DIP/SIP with different ERSPAN session numbers, they all
+ * share the same SN which is kept per FIB/DIP/SIP, as specified by RFC2890.
+ */
+ gre_sn_t *gre_sn;
+
+
+ u32 dev_instance; /* Real device instance in tunnel vector */
+ u32 user_instance; /* Instance name being shown to user */
+} gre_tunnel_t;
+
+typedef struct
+{
+ u8 next_index;
+ u8 tunnel_type;
+} next_info_t;
+
+/**
+ * @brief GRE related global data
+ */
+typedef struct
+{
+ /**
+ * pool of tunnel instances
+ */
+ gre_tunnel_t *tunnels;
+
+ /**
+ * GRE payload protocol registrations
+ */
+ gre_protocol_info_t *protocol_infos;
+
+ /**
+ * Hash tables mapping name/protocol to protocol info index.
+ */
+ uword *protocol_info_by_name, *protocol_info_by_protocol;
+
+ /**
+ * Hash mapping to tunnels with ipv4 src/dst addr
+ */
+ uword *tunnel_by_key4;
+
+ /**
+ * Hash mapping to tunnels with ipv6 src/dst addr
+ */
+ uword *tunnel_by_key6;
+
+ /**
+ * Hash mapping tunnel src/dst addr and fib-idx to sequence number
+ */
+ uword *seq_num_by_key;
+
+ /**
+ * Mapping from sw_if_index to tunnel index
+ */
+ u32 *tunnel_index_by_sw_if_index;
+
+ /* Sparse vector mapping gre protocol in network byte order
+ to next index. */
+ next_info_t *next_by_protocol;
+
+ /* convenience */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+
+ /* Record used instances */
+ uword *instance_used;
+
+ u16 msg_id_base;
+} gre_main_t;
+
+/**
+ * @brief IPv4 and GRE header.
+ */
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ ip4_header_t ip4;
+ gre_header_t gre;
+}) ip4_and_gre_header_t;
+/* *INDENT-ON* */
+
+/**
+ * @brief IPv6 and GRE header.
+ */
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ ip6_header_t ip6;
+ gre_header_t gre;
+}) ip6_and_gre_header_t;
+/* *INDENT-ON* */
+
+always_inline gre_protocol_info_t *
+gre_get_protocol_info (gre_main_t * em, gre_protocol_t protocol)
+{
+ uword *p = hash_get (em->protocol_info_by_protocol, protocol);
+ return p ? vec_elt_at_index (em->protocol_infos, p[0]) : 0;
+}
+
+extern gre_main_t gre_main;
+
+extern clib_error_t *gre_interface_admin_up_down (vnet_main_t * vnm,
+ u32 hw_if_index, u32 flags);
+
+extern void gre_tunnel_stack (adj_index_t ai);
+extern void gre_update_adj (vnet_main_t * vnm,
+ u32 sw_if_index, adj_index_t ai);
+
+typedef struct mgre_walk_ctx_t_
+{
+ const gre_tunnel_t *t;
+ const teib_entry_t *ne;
+} mgre_walk_ctx_t;
+
+adj_walk_rc_t mgre_mk_complete_walk (adj_index_t ai, void *data);
+adj_walk_rc_t mgre_mk_incomplete_walk (adj_index_t ai, void *data);
+
+format_function_t format_gre_protocol;
+format_function_t format_gre_header;
+format_function_t format_gre_header_with_length;
+
+extern vlib_node_registration_t gre4_input_node;
+extern vlib_node_registration_t gre6_input_node;
+extern vlib_node_registration_t gre_erspan_encap_node;
+extern vlib_node_registration_t gre_teb_encap_node;
+extern vnet_device_class_t gre_device_class;
+
+/* Parse gre protocol as 0xXXXX or protocol name.
+ In either host or network byte order. */
+unformat_function_t unformat_gre_protocol_host_byte_order;
+unformat_function_t unformat_gre_protocol_net_byte_order;
+
+/* Parse gre header. */
+unformat_function_t unformat_gre_header;
+unformat_function_t unformat_pg_gre_header;
+
+void
+gre_register_input_protocol (vlib_main_t * vm, gre_protocol_t protocol,
+ u32 node_index, gre_tunnel_type_t tunnel_type);
+
+/* manually added to the interface output node in gre.c */
+#define GRE_OUTPUT_NEXT_LOOKUP 1
+
+typedef struct
+{
+ u8 is_add;
+ gre_tunnel_type_t type;
+ tunnel_mode_t mode;
+ u8 is_ipv6;
+ u32 instance;
+ ip46_address_t src, dst;
+ u32 outer_table_id;
+ u16 session_id;
+ tunnel_encap_decap_flags_t flags;
+} vnet_gre_tunnel_add_del_args_t;
+
+extern int vnet_gre_tunnel_add_del (vnet_gre_tunnel_add_del_args_t * a,
+ u32 * sw_if_indexp);
+
+static inline void
+gre_mk_key4 (ip4_address_t src,
+ ip4_address_t dst,
+ u32 fib_index,
+ gre_tunnel_type_t ttype,
+ tunnel_mode_t tmode, u16 session_id, gre_tunnel_key4_t * key)
+{
+ key->gtk_src = src;
+ key->gtk_dst = dst;
+ key->gtk_common.type = ttype;
+ key->gtk_common.mode = tmode;
+ key->gtk_common.fib_index = fib_index;
+ key->gtk_common.session_id = session_id;
+}
+
+static inline int
+gre_match_key4 (const gre_tunnel_key4_t * key1,
+ const gre_tunnel_key4_t * key2)
+{
+ return ((key1->gtk_as_u64 == key2->gtk_as_u64) &&
+ (key1->gtk_common.as_u64 == key2->gtk_common.as_u64));
+}
+
+static inline void
+gre_mk_key6 (const ip6_address_t * src,
+ const ip6_address_t * dst,
+ u32 fib_index,
+ gre_tunnel_type_t ttype,
+ tunnel_mode_t tmode, u16 session_id, gre_tunnel_key6_t * key)
+{
+ key->gtk_src = *src;
+ key->gtk_dst = *dst;
+ key->gtk_common.type = ttype;
+ key->gtk_common.mode = tmode;
+ key->gtk_common.fib_index = fib_index;
+ key->gtk_common.session_id = session_id;
+}
+
+static inline int
+gre_match_key6 (const gre_tunnel_key6_t * key1,
+ const gre_tunnel_key6_t * key2)
+{
+ return (ip6_address_is_equal (&key1->gtk_src, &key2->gtk_src) &&
+ ip6_address_is_equal (&key1->gtk_dst, &key2->gtk_dst) &&
+ (key1->gtk_common.as_u64 == key2->gtk_common.as_u64));
+}
+
+static inline void
+gre_mk_sn_key (const gre_tunnel_t * gt, gre_sn_key_t * key)
+{
+ key->src = gt->tunnel_src;
+ key->dst = gt->tunnel_dst.fp_addr;
+ key->fib_index = gt->outer_fib_index;
+}
+
+#endif /* included_gre_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
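
For completeness, a minimal in-tree sketch of driving the control plane through the vnet_gre_tunnel_add_del_args_t declared above; the API and CLI handlers in gre_api.c and interface.c reduce to essentially this (addresses and FIB are illustrative):

  #include <gre/gre.h>

  static int
  gre_add_example_tunnel (void)
  {
    u32 sw_if_index = ~0;
    vnet_gre_tunnel_add_del_args_t args = {
      .is_add = 1,
      .type = GRE_TUNNEL_TYPE_L3,
      .mode = TUNNEL_MODE_P2P,
      .is_ipv6 = 0,
      .instance = ~0,      /* auto-assign the user-visible instance */
      .outer_table_id = 0, /* underlay in the default FIB */
    };

    /* 10.10.10.1 -> 10.10.10.2 over IPv4 */
    args.src.ip4.as_u32 = clib_host_to_net_u32 (0x0a0a0a01);
    args.dst.ip4.as_u32 = clib_host_to_net_u32 (0x0a0a0a02);

    return vnet_gre_tunnel_add_del (&args, &sw_if_index);
  }
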
diff --git a/src/plugins/gre/gre_api.c b/src/plugins/gre/gre_api.c
new file mode 100644
index 00000000000..f8e3ea654fc
--- /dev/null
+++ b/src/plugins/gre/gre_api.c
@@ -0,0 +1,218 @@
+/*
+ *------------------------------------------------------------------
+ * gre_api.c - gre api
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vlibmemory/api.h>
+
+#include <vnet/interface.h>
+#include <vnet/api_errno.h>
+
+#include <gre/gre.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/tunnel/tunnel_types_api.h>
+#include <vnet/ip/ip_types_api.h>
+
+#include <gre/gre.api_enum.h>
+#include <gre/gre.api_types.h>
+
+#define REPLY_MSG_ID_BASE gre_main.msg_id_base
+#include <vlibapi/api_helper_macros.h>
+
+static int
+gre_tunnel_type_decode (vl_api_gre_tunnel_type_t in, gre_tunnel_type_t *out)
+{
+ switch (in)
+ {
+#define _(n, v) \
+ case GRE_API_TUNNEL_TYPE_##n: \
+ *out = GRE_TUNNEL_TYPE_##n; \
+ return (0);
+ foreach_gre_tunnel_type
+#undef _
+ }
+
+ return (VNET_API_ERROR_INVALID_VALUE);
+}
+
+static vl_api_gre_tunnel_type_t
+gre_tunnel_type_encode (gre_tunnel_type_t in)
+{
+ vl_api_gre_tunnel_type_t out = GRE_API_TUNNEL_TYPE_L3;
+
+ switch (in)
+ {
+#define _(n, v) \
+ case GRE_TUNNEL_TYPE_##n: \
+ out = GRE_API_TUNNEL_TYPE_##n; \
+ break;
+ foreach_gre_tunnel_type
+#undef _
+ }
+
+ return (out);
+}
+
+static void
+vl_api_gre_tunnel_add_del_t_handler (vl_api_gre_tunnel_add_del_t *mp)
+{
+ vnet_gre_tunnel_add_del_args_t _a = {}, *a = &_a;
+ vl_api_gre_tunnel_add_del_reply_t *rmp;
+ tunnel_encap_decap_flags_t flags;
+ u32 sw_if_index = ~0;
+ ip46_type_t itype[2];
+ int rv = 0;
+
+ itype[0] = ip_address_decode (&mp->tunnel.src, &a->src);
+ itype[1] = ip_address_decode (&mp->tunnel.dst, &a->dst);
+
+ if (itype[0] != itype[1])
+ {
+ rv = VNET_API_ERROR_INVALID_PROTOCOL;
+ goto out;
+ }
+
+ if (ip46_address_is_equal (&a->src, &a->dst))
+ {
+ rv = VNET_API_ERROR_SAME_SRC_DST;
+ goto out;
+ }
+
+ rv = gre_tunnel_type_decode (mp->tunnel.type, &a->type);
+
+ if (rv)
+ goto out;
+
+ rv = tunnel_mode_decode (mp->tunnel.mode, &a->mode);
+
+ if (rv)
+ goto out;
+
+ rv = tunnel_encap_decap_flags_decode (mp->tunnel.flags, &flags);
+
+ if (rv)
+ goto out;
+
+ a->is_add = mp->is_add;
+ a->is_ipv6 = (itype[0] == IP46_TYPE_IP6);
+ a->instance = ntohl (mp->tunnel.instance);
+ a->session_id = ntohs (mp->tunnel.session_id);
+ a->outer_table_id = ntohl (mp->tunnel.outer_table_id);
+ a->flags = flags;
+
+ rv = vnet_gre_tunnel_add_del (a, &sw_if_index);
+
+out:
+ /* *INDENT-OFF* */
+ REPLY_MACRO2 (VL_API_GRE_TUNNEL_ADD_DEL_REPLY,
+ ({ rmp->sw_if_index = ntohl (sw_if_index); }));
+ /* *INDENT-ON* */
+}
+
+static void
+send_gre_tunnel_details (gre_tunnel_t *t, vl_api_gre_tunnel_dump_t *mp)
+{
+ vl_api_gre_tunnel_details_t *rmp;
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO_DETAILS2 (
+ VL_API_GRE_TUNNEL_DETAILS, ({
+ ip_address_encode (&t->tunnel_src, IP46_TYPE_ANY, &rmp->tunnel.src);
+ ip_address_encode (&t->tunnel_dst.fp_addr, IP46_TYPE_ANY,
+ &rmp->tunnel.dst);
+
+ rmp->tunnel.outer_table_id = htonl (
+ fib_table_get_table_id (t->outer_fib_index, t->tunnel_dst.fp_proto));
+
+ rmp->tunnel.type = gre_tunnel_type_encode (t->type);
+ rmp->tunnel.mode = tunnel_mode_encode (t->mode);
+ rmp->tunnel.flags = tunnel_encap_decap_flags_encode (t->flags);
+ rmp->tunnel.instance = htonl (t->user_instance);
+ rmp->tunnel.sw_if_index = htonl (t->sw_if_index);
+ rmp->tunnel.session_id = htons (t->session_id);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+vl_api_gre_tunnel_dump_t_handler (vl_api_gre_tunnel_dump_t *mp)
+{
+ vl_api_registration_t *reg;
+ gre_main_t *gm = &gre_main;
+ gre_tunnel_t *t;
+ u32 sw_if_index;
+
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
+ return;
+
+ sw_if_index = ntohl (mp->sw_if_index);
+
+ if (~0 == sw_if_index)
+ {
+ /* *INDENT-OFF* */
+ pool_foreach (t, gm->tunnels)
+ {
+ send_gre_tunnel_details (t, mp);
+ }
+ /* *INDENT-ON* */
+ }
+
+ else
+ {
+ if ((sw_if_index >= vec_len (gm->tunnel_index_by_sw_if_index)) ||
+ (~0 == gm->tunnel_index_by_sw_if_index[sw_if_index]))
+ {
+ return;
+ }
+ t = &gm->tunnels[gm->tunnel_index_by_sw_if_index[sw_if_index]];
+ send_gre_tunnel_details (t, mp);
+ }
+}
+
+/*
+ * gre_api_hookup
+ * Add vpe's API message handlers to the table.
+ * vlib has already mapped shared memory and
+ * added the client registration handlers.
+ * See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process()
+ */
+/* API definitions */
+#include <vnet/format_fns.h>
+#include <gre/gre.api.c>
+
+static clib_error_t *
+gre_api_hookup (vlib_main_t *vm)
+{
+ /*
+ * Set up the (msg_name, crc, message-id) table
+ */
+ gre_main.msg_id_base = setup_message_id_table ();
+
+ return 0;
+}
+
+VLIB_API_INIT_FUNCTION (gre_api_hookup);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/gre/interface.c b/src/plugins/gre/interface.c
new file mode 100644
index 00000000000..8d93ebd0cf5
--- /dev/null
+++ b/src/plugins/gre/interface.c
@@ -0,0 +1,832 @@
+/*
+ * gre_interface.c: gre interfaces
+ *
+ * Copyright (c) 2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <gre/gre.h>
+#include <vnet/ip/format.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/adj/adj_midchain.h>
+#include <vnet/adj/adj_nbr.h>
+#include <vnet/mpls/mpls.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/teib/teib.h>
+
+u8 *
+format_gre_tunnel_type (u8 *s, va_list *args)
+{
+ gre_tunnel_type_t type = va_arg (*args, int);
+
+ switch (type)
+ {
+#define _(n, v) \
+ case GRE_TUNNEL_TYPE_##n: \
+ s = format (s, "%s", v); \
+ break;
+ foreach_gre_tunnel_type
+#undef _
+ }
+
+ return (s);
+}
+
+static u8 *
+format_gre_tunnel (u8 *s, va_list *args)
+{
+ gre_tunnel_t *t = va_arg (*args, gre_tunnel_t *);
+
+ s = format (s, "[%d] instance %d src %U dst %U fib-idx %d sw-if-idx %d ",
+ t->dev_instance, t->user_instance, format_ip46_address,
+ &t->tunnel_src, IP46_TYPE_ANY, format_ip46_address,
+ &t->tunnel_dst.fp_addr, IP46_TYPE_ANY, t->outer_fib_index,
+ t->sw_if_index);
+
+ s = format (s, "payload %U ", format_gre_tunnel_type, t->type);
+ s = format (s, "%U ", format_tunnel_mode, t->mode);
+
+ if (t->type == GRE_TUNNEL_TYPE_ERSPAN)
+ s = format (s, "session %d ", t->session_id);
+
+ if (t->type != GRE_TUNNEL_TYPE_L3)
+ s = format (s, "l2-adj-idx %d ", t->l2_adj_index);
+
+ return s;
+}
+
+static gre_tunnel_t *
+gre_tunnel_db_find (const vnet_gre_tunnel_add_del_args_t *a,
+ u32 outer_fib_index, gre_tunnel_key_t *key)
+{
+ gre_main_t *gm = &gre_main;
+ uword *p;
+
+ if (!a->is_ipv6)
+ {
+ gre_mk_key4 (a->src.ip4, a->dst.ip4, outer_fib_index, a->type, a->mode,
+ a->session_id, &key->gtk_v4);
+ p = hash_get_mem (gm->tunnel_by_key4, &key->gtk_v4);
+ }
+ else
+ {
+ gre_mk_key6 (&a->src.ip6, &a->dst.ip6, outer_fib_index, a->type, a->mode,
+ a->session_id, &key->gtk_v6);
+ p = hash_get_mem (gm->tunnel_by_key6, &key->gtk_v6);
+ }
+
+ if (NULL == p)
+ return (NULL);
+
+ return (pool_elt_at_index (gm->tunnels, p[0]));
+}
+
+static void
+gre_tunnel_db_add (gre_tunnel_t *t, gre_tunnel_key_t *key)
+{
+ gre_main_t *gm = &gre_main;
+
+ if (t->tunnel_dst.fp_proto == FIB_PROTOCOL_IP6)
+ {
+ hash_set_mem_alloc (&gm->tunnel_by_key6, &key->gtk_v6, t->dev_instance);
+ }
+ else
+ {
+ hash_set_mem_alloc (&gm->tunnel_by_key4, &key->gtk_v4, t->dev_instance);
+ }
+}
+
+static void
+gre_tunnel_db_remove (gre_tunnel_t *t, gre_tunnel_key_t *key)
+{
+ gre_main_t *gm = &gre_main;
+
+ if (t->tunnel_dst.fp_proto == FIB_PROTOCOL_IP6)
+ {
+ hash_unset_mem_free (&gm->tunnel_by_key6, &key->gtk_v6);
+ }
+ else
+ {
+ hash_unset_mem_free (&gm->tunnel_by_key4, &key->gtk_v4);
+ }
+}
+
+/**
+ * gre_tunnel_stack
+ *
+ * 'stack' (resolve the recursion for) the tunnel's midchain adjacency
+ */
+void
+gre_tunnel_stack (adj_index_t ai)
+{
+ gre_main_t *gm = &gre_main;
+ ip_adjacency_t *adj;
+ gre_tunnel_t *gt;
+ u32 sw_if_index;
+
+ adj = adj_get (ai);
+ sw_if_index = adj->rewrite_header.sw_if_index;
+
+ if ((vec_len (gm->tunnel_index_by_sw_if_index) <= sw_if_index) ||
+ (~0 == gm->tunnel_index_by_sw_if_index[sw_if_index]))
+ return;
+
+ gt = pool_elt_at_index (gm->tunnels,
+ gm->tunnel_index_by_sw_if_index[sw_if_index]);
+
+ if ((vnet_hw_interface_get_flags (vnet_get_main (), gt->hw_if_index) &
+ VNET_HW_INTERFACE_FLAG_LINK_UP) == 0)
+ {
+ adj_midchain_delegate_unstack (ai);
+ }
+ else
+ {
+ adj_midchain_delegate_stack (ai, gt->outer_fib_index, &gt->tunnel_dst);
+ }
+}
+
+/**
+ * mgre_tunnel_stack
+ *
+ * 'stack' (resolve the recursion for) the tunnel's midchain adjacency
+ */
+static void
+mgre_tunnel_stack (adj_index_t ai)
+{
+ gre_main_t *gm = &gre_main;
+ const ip_adjacency_t *adj;
+ const gre_tunnel_t *gt;
+ u32 sw_if_index;
+
+ adj = adj_get (ai);
+ sw_if_index = adj->rewrite_header.sw_if_index;
+
+ if ((vec_len (gm->tunnel_index_by_sw_if_index) <= sw_if_index) ||
+ (~0 == gm->tunnel_index_by_sw_if_index[sw_if_index]))
+ return;
+
+ gt = pool_elt_at_index (gm->tunnels,
+ gm->tunnel_index_by_sw_if_index[sw_if_index]);
+
+ if ((vnet_hw_interface_get_flags (vnet_get_main (), gt->hw_if_index) &
+ VNET_HW_INTERFACE_FLAG_LINK_UP) == 0)
+ {
+ adj_midchain_delegate_unstack (ai);
+ }
+ else
+ {
+ const teib_entry_t *ne;
+
+ ne = teib_entry_find_46 (sw_if_index, adj->ia_nh_proto,
+ &adj->sub_type.nbr.next_hop);
+ if (NULL != ne)
+ teib_entry_adj_stack (ne, ai);
+ }
+}
+
+/**
+ * @brief Call back when restacking all adjacencies on a GRE interface
+ */
+static adj_walk_rc_t
+gre_adj_walk_cb (adj_index_t ai, void *ctx)
+{
+ gre_tunnel_stack (ai);
+
+ return (ADJ_WALK_RC_CONTINUE);
+}
+static adj_walk_rc_t
+mgre_adj_walk_cb (adj_index_t ai, void *ctx)
+{
+ mgre_tunnel_stack (ai);
+
+ return (ADJ_WALK_RC_CONTINUE);
+}
+
+static void
+gre_tunnel_restack (gre_tunnel_t *gt)
+{
+ fib_protocol_t proto;
+
+ /*
+ * walk all the adjacencies on the GRE interface and restack them
+ */
+ FOR_EACH_FIB_IP_PROTOCOL (proto)
+ {
+ switch (gt->mode)
+ {
+ case TUNNEL_MODE_P2P:
+ adj_nbr_walk (gt->sw_if_index, proto, gre_adj_walk_cb, NULL);
+ break;
+ case TUNNEL_MODE_MP:
+ adj_nbr_walk (gt->sw_if_index, proto, mgre_adj_walk_cb, NULL);
+ break;
+ }
+ }
+}
+
+static void
+gre_teib_mk_key (const gre_tunnel_t *t, const teib_entry_t *ne,
+ gre_tunnel_key_t *key)
+{
+ const fib_prefix_t *nh;
+
+ nh = teib_entry_get_nh (ne);
+
+ /* construct the key using mode P2P so it can be found in the DP */
+ if (FIB_PROTOCOL_IP4 == nh->fp_proto)
+ gre_mk_key4 (t->tunnel_src.ip4, nh->fp_addr.ip4,
+ teib_entry_get_fib_index (ne), t->type, TUNNEL_MODE_P2P, 0,
+ &key->gtk_v4);
+ else
+ gre_mk_key6 (&t->tunnel_src.ip6, &nh->fp_addr.ip6,
+ teib_entry_get_fib_index (ne), t->type, TUNNEL_MODE_P2P, 0,
+ &key->gtk_v6);
+}
+
+/**
+ * A TEIB entry has been added
+ */
+static void
+gre_teib_entry_added (const teib_entry_t *ne)
+{
+ gre_main_t *gm = &gre_main;
+ const ip_address_t *nh;
+ gre_tunnel_key_t key;
+ gre_tunnel_t *t;
+ u32 sw_if_index;
+ u32 t_idx;
+
+ sw_if_index = teib_entry_get_sw_if_index (ne);
+ if (vec_len (gm->tunnel_index_by_sw_if_index) < sw_if_index)
+ return;
+
+ t_idx = gm->tunnel_index_by_sw_if_index[sw_if_index];
+
+ if (INDEX_INVALID == t_idx)
+ return;
+
+ /* entry has been added on an interface for which there is a GRE tunnel */
+ t = pool_elt_at_index (gm->tunnels, t_idx);
+
+ if (t->mode != TUNNEL_MODE_MP)
+ return;
+
+ /* the next-hop (underlay) of the NHRP entry will form part of the key for
+ * ingress lookup to match packets to this interface */
+ gre_teib_mk_key (t, ne, &key);
+ gre_tunnel_db_add (t, &key);
+
+ /* update the rewrites for each of the adjacencies for this peer (overlay)
+ * using the next-hop (underlay) */
+ mgre_walk_ctx_t ctx = { .t = t, .ne = ne };
+ nh = teib_entry_get_peer (ne);
+ adj_nbr_walk_nh (
+ teib_entry_get_sw_if_index (ne),
+ (AF_IP4 == ip_addr_version (nh) ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6),
+ &ip_addr_46 (nh), mgre_mk_complete_walk, &ctx);
+}
+
+static void
+gre_teib_entry_deleted (const teib_entry_t *ne)
+{
+ gre_main_t *gm = &gre_main;
+ const ip_address_t *nh;
+ gre_tunnel_key_t key;
+ gre_tunnel_t *t;
+ u32 sw_if_index;
+ u32 t_idx;
+
+ sw_if_index = teib_entry_get_sw_if_index (ne);
+ if (vec_len (gm->tunnel_index_by_sw_if_index) < sw_if_index)
+ return;
+
+ t_idx = gm->tunnel_index_by_sw_if_index[sw_if_index];
+
+ if (INDEX_INVALID == t_idx)
+ return;
+
+ t = pool_elt_at_index (gm->tunnels, t_idx);
+
+ /* remove the next-hop as an ingress lookup key */
+ gre_teib_mk_key (t, ne, &key);
+ gre_tunnel_db_remove (t, &key);
+
+ nh = teib_entry_get_peer (ne);
+
+ /* make all the adjacencies incomplete */
+ adj_nbr_walk_nh (
+ teib_entry_get_sw_if_index (ne),
+ (AF_IP4 == ip_addr_version (nh) ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6),
+ &ip_addr_46 (nh), mgre_mk_incomplete_walk, t);
+}
+
+static walk_rc_t
+gre_tunnel_delete_teib_walk (index_t nei, void *ctx)
+{
+ gre_tunnel_t *t = ctx;
+ gre_tunnel_key_t key;
+
+ gre_teib_mk_key (t, teib_entry_get (nei), &key);
+ gre_tunnel_db_remove (t, &key);
+
+ return (WALK_CONTINUE);
+}
+
+static walk_rc_t
+gre_tunnel_add_teib_walk (index_t nei, void *ctx)
+{
+ gre_tunnel_t *t = ctx;
+ gre_tunnel_key_t key = {};
+
+ gre_teib_mk_key (t, teib_entry_get (nei), &key);
+ gre_tunnel_db_add (t, &key);
+
+ return (WALK_CONTINUE);
+}
+
+static int
+vnet_gre_tunnel_add (vnet_gre_tunnel_add_del_args_t *a, u32 outer_fib_index,
+ u32 *sw_if_indexp)
+{
+ gre_main_t *gm = &gre_main;
+ vnet_main_t *vnm = gm->vnet_main;
+ gre_tunnel_t *t;
+ vnet_hw_interface_t *hi;
+ u32 hw_if_index, sw_if_index;
+ u8 is_ipv6 = a->is_ipv6;
+ gre_tunnel_key_t key;
+
+ t = gre_tunnel_db_find (a, outer_fib_index, &key);
+ if (NULL != t)
+ return VNET_API_ERROR_IF_ALREADY_EXISTS;
+
+ pool_get_aligned (gm->tunnels, t, CLIB_CACHE_LINE_BYTES);
+ clib_memset (t, 0, sizeof (*t));
+
+ /* Reconcile the real dev_instance and a possible requested instance */
+ u32 t_idx = t - gm->tunnels; /* tunnel index (or instance) */
+ u32 u_idx = a->instance; /* user specified instance */
+ if (u_idx == ~0)
+ u_idx = t_idx;
+ if (hash_get (gm->instance_used, u_idx))
+ {
+ pool_put (gm->tunnels, t);
+ return VNET_API_ERROR_INSTANCE_IN_USE;
+ }
+ hash_set (gm->instance_used, u_idx, 1);
+
+ t->dev_instance = t_idx; /* actual */
+ t->user_instance = u_idx; /* name */
+
+ t->type = a->type;
+ t->mode = a->mode;
+ t->flags = a->flags;
+ if (t->type == GRE_TUNNEL_TYPE_ERSPAN)
+ t->session_id = a->session_id;
+
+ if (t->type == GRE_TUNNEL_TYPE_L3)
+ {
+ if (t->mode == TUNNEL_MODE_P2P)
+ hw_if_index =
+ vnet_register_interface (vnm, gre_device_class.index, t_idx,
+ gre_hw_interface_class.index, t_idx);
+ else
+ hw_if_index =
+ vnet_register_interface (vnm, gre_device_class.index, t_idx,
+ mgre_hw_interface_class.index, t_idx);
+ }
+ else
+ {
+ vnet_eth_interface_registration_t eir = {};
+
+ /* Default MAC address (d00b:eed0:0000 + t_idx) */
+ u8 address[6] = {
+ 0xd0, 0x0b, 0xee, 0xd0, (u8) (t_idx >> 8), (u8) t_idx
+ };
+
+ eir.dev_class_index = gre_device_class.index;
+ eir.dev_instance = t_idx;
+ eir.address = address;
+ hw_if_index = vnet_eth_register_interface (vnm, &eir);
+ }
+
+ /* Set GRE tunnel interface output node (not used for L3 payload) */
+ if (GRE_TUNNEL_TYPE_ERSPAN == t->type)
+ vnet_set_interface_output_node (vnm, hw_if_index,
+ gre_erspan_encap_node.index);
+ else
+ vnet_set_interface_output_node (vnm, hw_if_index,
+ gre_teb_encap_node.index);
+
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+ sw_if_index = hi->sw_if_index;
+
+ t->hw_if_index = hw_if_index;
+ t->outer_fib_index = outer_fib_index;
+ t->sw_if_index = sw_if_index;
+ t->l2_adj_index = ADJ_INDEX_INVALID;
+
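+  /* map this sw_if_index back to the tunnel pool index; used by the RX
+   * path and the admin up/down handler */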
+ vec_validate_init_empty (gm->tunnel_index_by_sw_if_index, sw_if_index, ~0);
+ gm->tunnel_index_by_sw_if_index[sw_if_index] = t_idx;
+
+ if (!is_ipv6)
+ {
+ hi->frame_overhead = sizeof (gre_header_t) + sizeof (ip4_header_t);
+ hi->min_frame_size = hi->frame_overhead + 64;
+ }
+ else
+ {
+ hi->frame_overhead = sizeof (gre_header_t) + sizeof (ip6_header_t);
+ hi->min_frame_size = hi->frame_overhead + 64;
+ }
+
+  /* Default GRE interface MTU. */
+ vnet_sw_interface_set_mtu (vnm, sw_if_index, 9000);
+
+ /*
+ * source the FIB entry for the tunnel's destination
+ * and become a child thereof. The tunnel will then get poked
+ * when the forwarding for the entry updates, and the tunnel can
+ * re-stack accordingly
+ */
+
+ clib_memcpy (&t->tunnel_src, &a->src, sizeof (t->tunnel_src));
+ t->tunnel_dst.fp_len = !is_ipv6 ? 32 : 128;
+ t->tunnel_dst.fp_proto = !is_ipv6 ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6;
+ t->tunnel_dst.fp_addr = a->dst;
+
+ gre_tunnel_db_add (t, &key);
+
+ if (t->mode == TUNNEL_MODE_MP)
+ teib_walk_itf (t->sw_if_index, gre_tunnel_add_teib_walk, t);
+
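+  /* ERSPAN tunnels sharing the same sequence-number key share a single,
+   * reference-counted sequence-number state */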
+ if (t->type == GRE_TUNNEL_TYPE_ERSPAN)
+ {
+ gre_sn_key_t skey;
+ gre_sn_t *gre_sn;
+
+ gre_mk_sn_key (t, &skey);
+ gre_sn = (gre_sn_t *) hash_get_mem (gm->seq_num_by_key, &skey);
+ if (gre_sn != NULL)
+ {
+ gre_sn->ref_count++;
+ t->gre_sn = gre_sn;
+ }
+ else
+ {
+ gre_sn = clib_mem_alloc (sizeof (gre_sn_t));
+ gre_sn->seq_num = 0;
+ gre_sn->ref_count = 1;
+ t->gre_sn = gre_sn;
+ hash_set_mem_alloc (&gm->seq_num_by_key, &skey, (uword) gre_sn);
+ }
+ }
+
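+  /* L2 payload (TEB/ERSPAN) tunnels use a dedicated ethernet adjacency
+   * for the encap rewrite */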
+ if (t->type != GRE_TUNNEL_TYPE_L3)
+ {
+ t->l2_adj_index = adj_nbr_add_or_lock (
+ t->tunnel_dst.fp_proto, VNET_LINK_ETHERNET, &zero_addr, sw_if_index);
+ vnet_set_interface_l3_output_node (gm->vlib_main, sw_if_index,
+ (u8 *) "tunnel-output-no-count");
+ gre_update_adj (vnm, t->sw_if_index, t->l2_adj_index);
+ }
+ else
+ {
+ vnet_set_interface_l3_output_node (gm->vlib_main, sw_if_index,
+ (u8 *) "tunnel-output");
+ }
+ if (sw_if_indexp)
+ *sw_if_indexp = sw_if_index;
+
+ /* register gre46-input nodes */
+ ip4_register_protocol (IP_PROTOCOL_GRE, gre4_input_node.index);
+ ip6_register_protocol (IP_PROTOCOL_GRE, gre6_input_node.index);
+
+ return 0;
+}
+
+static int
+vnet_gre_tunnel_delete (vnet_gre_tunnel_add_del_args_t *a, u32 outer_fib_index,
+ u32 *sw_if_indexp)
+{
+ gre_main_t *gm = &gre_main;
+ vnet_main_t *vnm = gm->vnet_main;
+ gre_tunnel_t *t;
+ gre_tunnel_key_t key;
+ u32 sw_if_index;
+
+ t = gre_tunnel_db_find (a, outer_fib_index, &key);
+ if (NULL == t)
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+
+ if (t->mode == TUNNEL_MODE_MP)
+ teib_walk_itf (t->sw_if_index, gre_tunnel_delete_teib_walk, t);
+
+ sw_if_index = t->sw_if_index;
+ vnet_sw_interface_set_flags (vnm, sw_if_index, 0 /* down */);
+
+ /* make sure tunnel is removed from l2 bd or xconnect */
+ set_int_l2_mode (gm->vlib_main, vnm, MODE_L3, sw_if_index, 0,
+ L2_BD_PORT_TYPE_NORMAL, 0, 0);
+ gm->tunnel_index_by_sw_if_index[sw_if_index] = ~0;
+
+ if (t->type == GRE_TUNNEL_TYPE_L3)
+ vnet_delete_hw_interface (vnm, t->hw_if_index);
+ else
+ ethernet_delete_interface (vnm, t->hw_if_index);
+
+ if (t->l2_adj_index != ADJ_INDEX_INVALID)
+ {
+ adj_midchain_delegate_unstack (t->l2_adj_index);
+ adj_unlock (t->l2_adj_index);
+ }
+
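+  /* release the shared ERSPAN sequence-number state when the last tunnel
+   * using it is deleted */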
+ ASSERT ((t->type != GRE_TUNNEL_TYPE_ERSPAN) || (t->gre_sn != NULL));
+ if ((t->type == GRE_TUNNEL_TYPE_ERSPAN) && (t->gre_sn->ref_count-- == 1))
+ {
+ gre_sn_key_t skey;
+ gre_mk_sn_key (t, &skey);
+ hash_unset_mem_free (&gm->seq_num_by_key, &skey);
+ clib_mem_free (t->gre_sn);
+ }
+
+ vnet_reset_interface_l3_output_node (gm->vlib_main, sw_if_index);
+ hash_unset (gm->instance_used, t->user_instance);
+ gre_tunnel_db_remove (t, &key);
+ pool_put (gm->tunnels, t);
+
+ if (sw_if_indexp)
+ *sw_if_indexp = sw_if_index;
+
+ return 0;
+}
+
+int
+vnet_gre_tunnel_add_del (vnet_gre_tunnel_add_del_args_t *a, u32 *sw_if_indexp)
+{
+ u32 outer_fib_index;
+
+ outer_fib_index = fib_table_find (
+ (a->is_ipv6 ? FIB_PROTOCOL_IP6 : FIB_PROTOCOL_IP4), a->outer_table_id);
+
+ if (~0 == outer_fib_index)
+ return VNET_API_ERROR_NO_SUCH_FIB;
+
+ if (a->session_id > GTK_SESSION_ID_MAX)
+ return VNET_API_ERROR_INVALID_SESSION_ID;
+
+ if (a->mode == TUNNEL_MODE_MP && !ip46_address_is_zero (&a->dst))
+ return (VNET_API_ERROR_INVALID_DST_ADDRESS);
+
+ if (a->is_add)
+ return (vnet_gre_tunnel_add (a, outer_fib_index, sw_if_indexp));
+ else
+ return (vnet_gre_tunnel_delete (a, outer_fib_index, sw_if_indexp));
+}
+
+clib_error_t *
+gre_interface_admin_up_down (vnet_main_t *vnm, u32 hw_if_index, u32 flags)
+{
+ gre_main_t *gm = &gre_main;
+ vnet_hw_interface_t *hi;
+ gre_tunnel_t *t;
+ u32 ti;
+
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+
+ if (NULL == gm->tunnel_index_by_sw_if_index ||
+ hi->sw_if_index >= vec_len (gm->tunnel_index_by_sw_if_index))
+ return (NULL);
+
+ ti = gm->tunnel_index_by_sw_if_index[hi->sw_if_index];
+
+ if (~0 == ti)
+ /* not one of ours */
+ return (NULL);
+
+ t = pool_elt_at_index (gm->tunnels, ti);
+
+ if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
+ vnet_hw_interface_set_flags (vnm, hw_if_index,
+ VNET_HW_INTERFACE_FLAG_LINK_UP);
+ else
+ vnet_hw_interface_set_flags (vnm, hw_if_index, 0 /* down */);
+
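+  /* re-stack the tunnel's adjacencies to reflect the new admin state */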
+ gre_tunnel_restack (t);
+
+ return /* no error */ 0;
+}
+
+static clib_error_t *
+create_gre_tunnel_command_fn (vlib_main_t *vm, unformat_input_t *input,
+ vlib_cli_command_t *cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ vnet_gre_tunnel_add_del_args_t _a, *a = &_a;
+ ip46_address_t src = ip46_address_initializer,
+ dst = ip46_address_initializer;
+ u32 instance = ~0;
+ u32 outer_table_id = 0;
+ gre_tunnel_type_t t_type = GRE_TUNNEL_TYPE_L3;
+ tunnel_mode_t t_mode = TUNNEL_MODE_P2P;
+ tunnel_encap_decap_flags_t flags = TUNNEL_ENCAP_DECAP_FLAG_NONE;
+ u32 session_id = 0;
+ int rv;
+ u8 is_add = 1;
+ u32 sw_if_index;
+ clib_error_t *error = NULL;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "del"))
+ is_add = 0;
+ else if (unformat (line_input, "instance %d", &instance))
+ ;
+ else if (unformat (line_input, "src %U", unformat_ip46_address, &src))
+ ;
+ else if (unformat (line_input, "dst %U", unformat_ip46_address, &dst))
+ ;
+ else if (unformat (line_input, "outer-table-id %d", &outer_table_id))
+ ;
+ else if (unformat (line_input, "multipoint"))
+ t_mode = TUNNEL_MODE_MP;
+ else if (unformat (line_input, "teb"))
+ t_type = GRE_TUNNEL_TYPE_TEB;
+ else if (unformat (line_input, "erspan %d", &session_id))
+ t_type = GRE_TUNNEL_TYPE_ERSPAN;
+ else if (unformat (line_input, "flags %U",
+ unformat_tunnel_encap_decap_flags, &flags))
+ ;
+ else
+ {
+ error = clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, line_input);
+ goto done;
+ }
+ }
+
+ if (ip46_address_is_equal (&src, &dst))
+ {
+ error = clib_error_return (0, "src and dst are identical");
+ goto done;
+ }
+
+ if (t_mode != TUNNEL_MODE_MP && ip46_address_is_zero (&dst))
+ {
+ error = clib_error_return (0, "destination address not specified");
+ goto done;
+ }
+
+ if (ip46_address_is_zero (&src))
+ {
+ error = clib_error_return (0, "source address not specified");
+ goto done;
+ }
+
+ if (ip46_address_is_ip4 (&src) != ip46_address_is_ip4 (&dst))
+ {
+ error = clib_error_return (0, "src and dst address must be the same AF");
+ goto done;
+ }
+
+ clib_memset (a, 0, sizeof (*a));
+ a->is_add = is_add;
+ a->outer_table_id = outer_table_id;
+ a->type = t_type;
+ a->mode = t_mode;
+ a->session_id = session_id;
+ a->is_ipv6 = !ip46_address_is_ip4 (&src);
+ a->instance = instance;
+ a->flags = flags;
+ clib_memcpy (&a->src, &src, sizeof (a->src));
+ clib_memcpy (&a->dst, &dst, sizeof (a->dst));
+
+ rv = vnet_gre_tunnel_add_del (a, &sw_if_index);
+
+ switch (rv)
+ {
+ case 0:
+ vlib_cli_output (vm, "%U\n", format_vnet_sw_if_index_name,
+ vnet_get_main (), sw_if_index);
+ break;
+ case VNET_API_ERROR_IF_ALREADY_EXISTS:
+ error = clib_error_return (0, "GRE tunnel already exists...");
+ goto done;
+ case VNET_API_ERROR_NO_SUCH_FIB:
+ error = clib_error_return (0, "outer table ID %d doesn't exist\n",
+ outer_table_id);
+ goto done;
+ case VNET_API_ERROR_NO_SUCH_ENTRY:
+ error = clib_error_return (0, "GRE tunnel doesn't exist");
+ goto done;
+ case VNET_API_ERROR_INVALID_SESSION_ID:
+ error =
+ clib_error_return (0, "session ID %d out of range\n", session_id);
+ goto done;
+ case VNET_API_ERROR_INSTANCE_IN_USE:
+ error = clib_error_return (0, "Instance is in use");
+ goto done;
+ default:
+ error = clib_error_return (0, "vnet_gre_tunnel_add_del returned %d", rv);
+ goto done;
+ }
+
+done:
+ unformat_free (line_input);
+
+ return error;
+}
+
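+/*
+ * Example (illustrative addresses):
+ *   create gre tunnel src 192.0.2.1 dst 192.0.2.2 outer-table-id 0 teb
+ */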
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (create_gre_tunnel_command, static) = {
+ .path = "create gre tunnel",
+ .short_help = "create gre tunnel src <addr> dst <addr> [instance <n>] "
+ "[outer-fib-id <fib>] [teb | erspan <session-id>] [del] "
+ "[multipoint]",
+ .function = create_gre_tunnel_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+show_gre_tunnel_command_fn (vlib_main_t *vm, unformat_input_t *input,
+ vlib_cli_command_t *cmd)
+{
+ gre_main_t *gm = &gre_main;
+ gre_tunnel_t *t;
+ u32 ti = ~0;
+
+ if (pool_elts (gm->tunnels) == 0)
+ vlib_cli_output (vm, "No GRE tunnels configured...");
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "%d", &ti))
+ ;
+ else
+ break;
+ }
+
+ if (~0 == ti)
+ {
+ /* *INDENT-OFF* */
+ pool_foreach (t, gm->tunnels)
+ {
+ vlib_cli_output (vm, "%U", format_gre_tunnel, t);
+ }
+ /* *INDENT-ON* */
+ }
+ else
+ {
+ t = pool_elt_at_index (gm->tunnels, ti);
+
+ vlib_cli_output (vm, "%U", format_gre_tunnel, t);
+ }
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_gre_tunnel_command, static) = {
+ .path = "show gre tunnel",
+ .function = show_gre_tunnel_command_fn,
+};
+/* *INDENT-ON* */
+
+static const teib_vft_t gre_teib_vft = {
+ .nv_added = gre_teib_entry_added,
+ .nv_deleted = gre_teib_entry_deleted,
+};
+
+/* register the GRE TEIB callbacks at plugin init time */
+clib_error_t *
+gre_interface_init (vlib_main_t *vm)
+{
+ teib_register (&gre_teib_vft);
+
+ return (NULL);
+}
+
+VLIB_INIT_FUNCTION (gre_interface_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/gre/node.c b/src/plugins/gre/node.c
new file mode 100644
index 00000000000..7ee22c3cb11
--- /dev/null
+++ b/src/plugins/gre/node.c
@@ -0,0 +1,576 @@
+/*
+ * node.c: gre packet processing
+ *
+ * Copyright (c) 2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <gre/gre.h>
+#include <vnet/mpls/mpls.h>
+#include <vppinfra/sparse_vec.h>
+
+#define foreach_gre_input_next \
+ _ (PUNT, "error-punt") \
+ _ (DROP, "error-drop") \
+ _ (ETHERNET_INPUT, "ethernet-input") \
+ _ (IP4_INPUT, "ip4-input") \
+ _ (IP6_INPUT, "ip6-input") \
+ _ (MPLS_INPUT, "mpls-input")
+
+typedef enum
+{
+#define _(s, n) GRE_INPUT_NEXT_##s,
+ foreach_gre_input_next
+#undef _
+ GRE_INPUT_N_NEXT,
+} gre_input_next_t;
+
+typedef struct
+{
+ u32 tunnel_id;
+ u32 length;
+ ip46_address_t src;
+ ip46_address_t dst;
+} gre_rx_trace_t;
+
+extern u8 *format_gre_rx_trace (u8 *s, va_list *args);
+
+#ifndef CLIB_MARCH_VARIANT
+u8 *
+format_gre_rx_trace (u8 *s, va_list *args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ gre_rx_trace_t *t = va_arg (*args, gre_rx_trace_t *);
+
+ s = format (s, "GRE: tunnel %d len %d src %U dst %U", t->tunnel_id,
+ clib_net_to_host_u16 (t->length), format_ip46_address, &t->src,
+ IP46_TYPE_ANY, format_ip46_address, &t->dst, IP46_TYPE_ANY);
+ return s;
+}
+#endif /* CLIB_MARCH_VARIANT */
+
+typedef struct
+{
+ /* Sparse vector mapping gre protocol in network byte order
+ to next index. */
+ u16 *next_by_protocol;
+} gre_input_runtime_t;
+
+always_inline void
+gre_trace (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_buffer_t *b,
+ u32 tun_sw_if_index, const ip6_header_t *ip6,
+ const ip4_header_t *ip4, int is_ipv6)
+{
+ gre_rx_trace_t *tr = vlib_add_trace (vm, node, b, sizeof (*tr));
+ tr->tunnel_id = tun_sw_if_index;
+ if (is_ipv6)
+ {
+ tr->length = ip6->payload_length;
+ tr->src.ip6.as_u64[0] = ip6->src_address.as_u64[0];
+ tr->src.ip6.as_u64[1] = ip6->src_address.as_u64[1];
+ tr->dst.ip6.as_u64[0] = ip6->dst_address.as_u64[0];
+ tr->dst.ip6.as_u64[1] = ip6->dst_address.as_u64[1];
+ }
+ else
+ {
+ tr->length = ip4->length;
+ tr->src.as_u64[0] = tr->src.as_u64[1] = 0;
+ tr->dst.as_u64[0] = tr->dst.as_u64[1] = 0;
+ tr->src.ip4.as_u32 = ip4->src_address.as_u32;
+ tr->dst.ip4.as_u32 = ip4->dst_address.as_u32;
+ }
+}
+
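+/*
+ * look up the P2P tunnel for the given key; on a hit, cache the key and the
+ * tunnel's sw_if_index for subsequent packets, otherwise drop the buffer
+ * with the NO_SUCH_TUNNEL error
+ */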
+always_inline void
+gre_tunnel_get (const gre_main_t *gm, vlib_node_runtime_t *node,
+ vlib_buffer_t *b, u16 *next, const gre_tunnel_key_t *key,
+ gre_tunnel_key_t *cached_key, u32 *tun_sw_if_index,
+ u32 *cached_tun_sw_if_index, int is_ipv6)
+{
+ const uword *p;
+ p = is_ipv6 ? hash_get_mem (gm->tunnel_by_key6, &key->gtk_v6) :
+ hash_get_mem (gm->tunnel_by_key4, &key->gtk_v4);
+ if (PREDICT_FALSE (!p))
+ {
+ *next = GRE_INPUT_NEXT_DROP;
+ b->error = node->errors[GRE_ERROR_NO_SUCH_TUNNEL];
+ *tun_sw_if_index = ~0;
+ }
+ else
+ {
+ const gre_tunnel_t *tun;
+ tun = pool_elt_at_index (gm->tunnels, *p);
+ *cached_tun_sw_if_index = *tun_sw_if_index = tun->sw_if_index;
+ if (is_ipv6)
+ cached_key->gtk_v6 = key->gtk_v6;
+ else
+ cached_key->gtk_v4 = key->gtk_v4;
+ }
+}
+
+always_inline uword
+gre_input (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame,
+ const int is_ipv6)
+{
+ gre_main_t *gm = &gre_main;
+ u32 *from, n_left_from;
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
+ u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
+ u16 cached_protocol = ~0;
+ u32 cached_next_index = SPARSE_VEC_INVALID_INDEX;
+ u32 cached_tun_sw_if_index = ~0;
+ gre_tunnel_key_t cached_key;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ vlib_get_buffers (vm, from, bufs, n_left_from);
+
+ if (is_ipv6)
+ clib_memset (&cached_key.gtk_v6, 0xff, sizeof (cached_key.gtk_v6));
+ else
+ clib_memset (&cached_key.gtk_v4, 0xff, sizeof (cached_key.gtk_v4));
+
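+  /* dual-loop: the protocol->next mapping, the tunnel key and the tunnel's
+   * sw_if_index are cached across packets to avoid repeated lookups */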
+ while (n_left_from >= 2)
+ {
+ const ip6_header_t *ip6[2];
+ const ip4_header_t *ip4[2];
+ const gre_header_t *gre[2];
+ u32 nidx[2];
+ next_info_t ni[2];
+ u8 type[2];
+ u16 version[2];
+ u32 len[2];
+ gre_tunnel_key_t key[2];
+ u8 matched[2];
+ u32 tun_sw_if_index[2];
+
+ if (PREDICT_TRUE (n_left_from >= 6))
+ {
+ vlib_prefetch_buffer_data (b[2], LOAD);
+ vlib_prefetch_buffer_data (b[3], LOAD);
+ vlib_prefetch_buffer_header (b[4], STORE);
+ vlib_prefetch_buffer_header (b[5], STORE);
+ }
+
+ if (is_ipv6)
+ {
+ /* ip6_local hands us the ip header, not the gre header */
+ ip6[0] = vlib_buffer_get_current (b[0]);
+ ip6[1] = vlib_buffer_get_current (b[1]);
+ gre[0] = (void *) (ip6[0] + 1);
+ gre[1] = (void *) (ip6[1] + 1);
+ vlib_buffer_advance (b[0], sizeof (*ip6[0]) + sizeof (*gre[0]));
+ vlib_buffer_advance (b[1], sizeof (*ip6[0]) + sizeof (*gre[0]));
+ }
+ else
+ {
+ /* ip4_local hands us the ip header, not the gre header */
+ ip4[0] = vlib_buffer_get_current (b[0]);
+ ip4[1] = vlib_buffer_get_current (b[1]);
+ gre[0] = (void *) (ip4[0] + 1);
+ gre[1] = (void *) (ip4[1] + 1);
+ vlib_buffer_advance (b[0], sizeof (*ip4[0]) + sizeof (*gre[0]));
+ vlib_buffer_advance (b[1], sizeof (*ip4[0]) + sizeof (*gre[0]));
+ }
+
+ if (PREDICT_TRUE (cached_protocol == gre[0]->protocol))
+ {
+ nidx[0] = cached_next_index;
+ }
+ else
+ {
+ cached_next_index = nidx[0] =
+ sparse_vec_index (gm->next_by_protocol, gre[0]->protocol);
+ cached_protocol = gre[0]->protocol;
+ }
+ if (PREDICT_TRUE (cached_protocol == gre[1]->protocol))
+ {
+ nidx[1] = cached_next_index;
+ }
+ else
+ {
+ cached_next_index = nidx[1] =
+ sparse_vec_index (gm->next_by_protocol, gre[1]->protocol);
+ cached_protocol = gre[1]->protocol;
+ }
+
+ ni[0] = vec_elt (gm->next_by_protocol, nidx[0]);
+ ni[1] = vec_elt (gm->next_by_protocol, nidx[1]);
+ next[0] = ni[0].next_index;
+ next[1] = ni[1].next_index;
+ type[0] = ni[0].tunnel_type;
+ type[1] = ni[1].tunnel_type;
+
+ b[0]->error = nidx[0] == SPARSE_VEC_INVALID_INDEX ?
+ node->errors[GRE_ERROR_UNKNOWN_PROTOCOL] :
+ node->errors[GRE_ERROR_NONE];
+ b[1]->error = nidx[1] == SPARSE_VEC_INVALID_INDEX ?
+ node->errors[GRE_ERROR_UNKNOWN_PROTOCOL] :
+ node->errors[GRE_ERROR_NONE];
+
+ version[0] = clib_net_to_host_u16 (gre[0]->flags_and_version);
+ version[1] = clib_net_to_host_u16 (gre[1]->flags_and_version);
+ version[0] &= GRE_VERSION_MASK;
+ version[1] &= GRE_VERSION_MASK;
+
+ b[0]->error =
+ version[0] ? node->errors[GRE_ERROR_UNSUPPORTED_VERSION] : b[0]->error;
+ next[0] = version[0] ? GRE_INPUT_NEXT_DROP : next[0];
+ b[1]->error =
+ version[1] ? node->errors[GRE_ERROR_UNSUPPORTED_VERSION] : b[1]->error;
+ next[1] = version[1] ? GRE_INPUT_NEXT_DROP : next[1];
+
+ len[0] = vlib_buffer_length_in_chain (vm, b[0]);
+ len[1] = vlib_buffer_length_in_chain (vm, b[1]);
+
+ /* always search for P2P types in the DP */
+ if (is_ipv6)
+ {
+ gre_mk_key6 (&ip6[0]->dst_address, &ip6[0]->src_address,
+ vnet_buffer (b[0])->ip.fib_index, type[0],
+ TUNNEL_MODE_P2P, 0, &key[0].gtk_v6);
+ gre_mk_key6 (&ip6[1]->dst_address, &ip6[1]->src_address,
+ vnet_buffer (b[1])->ip.fib_index, type[1],
+ TUNNEL_MODE_P2P, 0, &key[1].gtk_v6);
+ matched[0] = gre_match_key6 (&cached_key.gtk_v6, &key[0].gtk_v6);
+ matched[1] = gre_match_key6 (&cached_key.gtk_v6, &key[1].gtk_v6);
+ }
+ else
+ {
+ gre_mk_key4 (ip4[0]->dst_address, ip4[0]->src_address,
+ vnet_buffer (b[0])->ip.fib_index, type[0],
+ TUNNEL_MODE_P2P, 0, &key[0].gtk_v4);
+ gre_mk_key4 (ip4[1]->dst_address, ip4[1]->src_address,
+ vnet_buffer (b[1])->ip.fib_index, type[1],
+ TUNNEL_MODE_P2P, 0, &key[1].gtk_v4);
+ matched[0] = gre_match_key4 (&cached_key.gtk_v4, &key[0].gtk_v4);
+ matched[1] = gre_match_key4 (&cached_key.gtk_v4, &key[1].gtk_v4);
+ }
+
+ tun_sw_if_index[0] = cached_tun_sw_if_index;
+ tun_sw_if_index[1] = cached_tun_sw_if_index;
+ if (PREDICT_FALSE (!matched[0]))
+ gre_tunnel_get (gm, node, b[0], &next[0], &key[0], &cached_key,
+ &tun_sw_if_index[0], &cached_tun_sw_if_index, is_ipv6);
+ if (PREDICT_FALSE (!matched[1]))
+ gre_tunnel_get (gm, node, b[1], &next[1], &key[1], &cached_key,
+ &tun_sw_if_index[1], &cached_tun_sw_if_index, is_ipv6);
+
+ if (PREDICT_TRUE (next[0] > GRE_INPUT_NEXT_DROP))
+ {
+ vlib_increment_combined_counter (
+ &gm->vnet_main->interface_main
+ .combined_sw_if_counters[VNET_INTERFACE_COUNTER_RX],
+ vm->thread_index, tun_sw_if_index[0], 1 /* packets */,
+ len[0] /* bytes */);
+ vnet_buffer (b[0])->sw_if_index[VLIB_RX] = tun_sw_if_index[0];
+ }
+ if (PREDICT_TRUE (next[1] > GRE_INPUT_NEXT_DROP))
+ {
+ vlib_increment_combined_counter (
+ &gm->vnet_main->interface_main
+ .combined_sw_if_counters[VNET_INTERFACE_COUNTER_RX],
+ vm->thread_index, tun_sw_if_index[1], 1 /* packets */,
+ len[1] /* bytes */);
+ vnet_buffer (b[1])->sw_if_index[VLIB_RX] = tun_sw_if_index[1];
+ }
+
+ vnet_buffer (b[0])->sw_if_index[VLIB_TX] = (u32) ~0;
+ vnet_buffer (b[1])->sw_if_index[VLIB_TX] = (u32) ~0;
+
+ if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
+ gre_trace (vm, node, b[0], tun_sw_if_index[0], ip6[0], ip4[0],
+ is_ipv6);
+ if (PREDICT_FALSE (b[1]->flags & VLIB_BUFFER_IS_TRACED))
+ gre_trace (vm, node, b[1], tun_sw_if_index[1], ip6[1], ip4[1],
+ is_ipv6);
+
+ b += 2;
+ next += 2;
+ n_left_from -= 2;
+ }
+
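+  /* single-loop for any remaining packets; same logic as the dual-loop */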
+ while (n_left_from >= 1)
+ {
+ const ip6_header_t *ip6[1];
+ const ip4_header_t *ip4[1];
+ const gre_header_t *gre[1];
+ u32 nidx[1];
+ next_info_t ni[1];
+ u8 type[1];
+ u16 version[1];
+ u32 len[1];
+ gre_tunnel_key_t key[1];
+ u8 matched[1];
+ u32 tun_sw_if_index[1];
+
+ if (PREDICT_TRUE (n_left_from >= 3))
+ {
+ vlib_prefetch_buffer_data (b[1], LOAD);
+ vlib_prefetch_buffer_header (b[2], STORE);
+ }
+
+ if (is_ipv6)
+ {
+ /* ip6_local hands us the ip header, not the gre header */
+ ip6[0] = vlib_buffer_get_current (b[0]);
+ gre[0] = (void *) (ip6[0] + 1);
+ vlib_buffer_advance (b[0], sizeof (*ip6[0]) + sizeof (*gre[0]));
+ }
+ else
+ {
+ /* ip4_local hands us the ip header, not the gre header */
+ ip4[0] = vlib_buffer_get_current (b[0]);
+ gre[0] = (void *) (ip4[0] + 1);
+ vlib_buffer_advance (b[0], sizeof (*ip4[0]) + sizeof (*gre[0]));
+ }
+
+ if (PREDICT_TRUE (cached_protocol == gre[0]->protocol))
+ {
+ nidx[0] = cached_next_index;
+ }
+ else
+ {
+ cached_next_index = nidx[0] =
+ sparse_vec_index (gm->next_by_protocol, gre[0]->protocol);
+ cached_protocol = gre[0]->protocol;
+ }
+
+ ni[0] = vec_elt (gm->next_by_protocol, nidx[0]);
+ next[0] = ni[0].next_index;
+ type[0] = ni[0].tunnel_type;
+
+ b[0]->error = nidx[0] == SPARSE_VEC_INVALID_INDEX ?
+ node->errors[GRE_ERROR_UNKNOWN_PROTOCOL] :
+ node->errors[GRE_ERROR_NONE];
+
+ version[0] = clib_net_to_host_u16 (gre[0]->flags_and_version);
+ version[0] &= GRE_VERSION_MASK;
+
+ b[0]->error =
+ version[0] ? node->errors[GRE_ERROR_UNSUPPORTED_VERSION] : b[0]->error;
+ next[0] = version[0] ? GRE_INPUT_NEXT_DROP : next[0];
+
+ len[0] = vlib_buffer_length_in_chain (vm, b[0]);
+
+ if (is_ipv6)
+ {
+ gre_mk_key6 (&ip6[0]->dst_address, &ip6[0]->src_address,
+ vnet_buffer (b[0])->ip.fib_index, type[0],
+ TUNNEL_MODE_P2P, 0, &key[0].gtk_v6);
+ matched[0] = gre_match_key6 (&cached_key.gtk_v6, &key[0].gtk_v6);
+ }
+ else
+ {
+ gre_mk_key4 (ip4[0]->dst_address, ip4[0]->src_address,
+ vnet_buffer (b[0])->ip.fib_index, type[0],
+ TUNNEL_MODE_P2P, 0, &key[0].gtk_v4);
+ matched[0] = gre_match_key4 (&cached_key.gtk_v4, &key[0].gtk_v4);
+ }
+
+ tun_sw_if_index[0] = cached_tun_sw_if_index;
+ if (PREDICT_FALSE (!matched[0]))
+ gre_tunnel_get (gm, node, b[0], &next[0], &key[0], &cached_key,
+ &tun_sw_if_index[0], &cached_tun_sw_if_index, is_ipv6);
+
+ if (PREDICT_TRUE (next[0] > GRE_INPUT_NEXT_DROP))
+ {
+ vlib_increment_combined_counter (
+ &gm->vnet_main->interface_main
+ .combined_sw_if_counters[VNET_INTERFACE_COUNTER_RX],
+ vm->thread_index, tun_sw_if_index[0], 1 /* packets */,
+ len[0] /* bytes */);
+ vnet_buffer (b[0])->sw_if_index[VLIB_RX] = tun_sw_if_index[0];
+ }
+
+ vnet_buffer (b[0])->sw_if_index[VLIB_TX] = (u32) ~0;
+
+ if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
+ gre_trace (vm, node, b[0], tun_sw_if_index[0], ip6[0], ip4[0],
+ is_ipv6);
+
+ b += 1;
+ next += 1;
+ n_left_from -= 1;
+ }
+
+ vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
+
+ vlib_node_increment_counter (
+ vm, is_ipv6 ? gre6_input_node.index : gre4_input_node.index,
+    GRE_ERROR_PKTS_DECAP, frame->n_vectors);
+
+ return frame->n_vectors;
+}
+
+VLIB_NODE_FN (gre4_input_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
+{
+  return gre_input (vm, node, from_frame, /* is_ipv6 */ 0);
+}
+
+VLIB_NODE_FN (gre6_input_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
+{
+  return gre_input (vm, node, from_frame, /* is_ipv6 */ 1);
+}
+
+static char *gre_error_strings[] = {
+#define gre_error(n, s) s,
+#include "error.def"
+#undef gre_error
+};
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (gre4_input_node) = {
+ .name = "gre4-input",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+
+ .n_errors = GRE_N_ERROR,
+ .error_strings = gre_error_strings,
+
+ .n_next_nodes = GRE_INPUT_N_NEXT,
+ .next_nodes = {
+#define _(s, n) [GRE_INPUT_NEXT_##s] = n,
+ foreach_gre_input_next
+#undef _
+ },
+
+ .format_buffer = format_gre_header_with_length,
+ .format_trace = format_gre_rx_trace,
+ .unformat_buffer = unformat_gre_header,
+};
+
+VLIB_REGISTER_NODE (gre6_input_node) = {
+ .name = "gre6-input",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+
+ .runtime_data_bytes = sizeof (gre_input_runtime_t),
+
+ .n_errors = GRE_N_ERROR,
+ .error_strings = gre_error_strings,
+
+ .n_next_nodes = GRE_INPUT_N_NEXT,
+ .next_nodes = {
+#define _(s, n) [GRE_INPUT_NEXT_##s] = n,
+ foreach_gre_input_next
+#undef _
+ },
+
+ .format_buffer = format_gre_header_with_length,
+ .format_trace = format_gre_rx_trace,
+ .unformat_buffer = unformat_gre_header,
+};
+/* *INDENT-ON* */
+
+#ifndef CLIB_MARCH_VARIANT
+void
+gre_register_input_protocol (vlib_main_t *vm, gre_protocol_t protocol,
+ u32 node_index, gre_tunnel_type_t tunnel_type)
+{
+ gre_main_t *em = &gre_main;
+ gre_protocol_info_t *pi;
+ next_info_t *n;
+ u32 i;
+
+ {
+ clib_error_t *error = vlib_call_init_function (vm, gre_input_init);
+ if (error)
+ clib_error_report (error);
+ }
+
+ pi = gre_get_protocol_info (em, protocol);
+ pi->node_index = node_index;
+ pi->tunnel_type = tunnel_type;
+ pi->next_index = vlib_node_add_next (vm, gre4_input_node.index, node_index);
+ i = vlib_node_add_next (vm, gre6_input_node.index, node_index);
+ ASSERT (i == pi->next_index);
+
+ /* Setup gre protocol -> next index sparse vector mapping. */
+ n = sparse_vec_validate (em->next_by_protocol,
+ clib_host_to_net_u16 (protocol));
+ n->next_index = pi->next_index;
+ n->tunnel_type = tunnel_type;
+}
+
+static void
+gre_setup_node (vlib_main_t *vm, u32 node_index)
+{
+ vlib_node_t *n = vlib_get_node (vm, node_index);
+ pg_node_t *pn = pg_get_node (node_index);
+
+ n->format_buffer = format_gre_header_with_length;
+ n->unformat_buffer = unformat_gre_header;
+ pn->unformat_edit = unformat_pg_gre_header;
+}
+
+static clib_error_t *
+gre_input_init (vlib_main_t *vm)
+{
+ gre_main_t *gm = &gre_main;
+ vlib_node_t *ethernet_input, *ip4_input, *ip6_input, *mpls_unicast_input;
+
+ {
+ clib_error_t *error;
+ error = vlib_call_init_function (vm, gre_init);
+ if (error)
+ clib_error_report (error);
+ }
+
+ gre_setup_node (vm, gre4_input_node.index);
+ gre_setup_node (vm, gre6_input_node.index);
+
+ gm->next_by_protocol =
+ sparse_vec_new (/* elt bytes */ sizeof (gm->next_by_protocol[0]),
+ /* bits in index */ BITS (((gre_header_t *) 0)->protocol));
+
+ /* These could be moved to the supported protocol input node defn's */
+ ethernet_input = vlib_get_node_by_name (vm, (u8 *) "ethernet-input");
+ ASSERT (ethernet_input);
+ ip4_input = vlib_get_node_by_name (vm, (u8 *) "ip4-input");
+ ASSERT (ip4_input);
+ ip6_input = vlib_get_node_by_name (vm, (u8 *) "ip6-input");
+ ASSERT (ip6_input);
+ mpls_unicast_input = vlib_get_node_by_name (vm, (u8 *) "mpls-input");
+ ASSERT (mpls_unicast_input);
+
+ gre_register_input_protocol (vm, GRE_PROTOCOL_teb, ethernet_input->index,
+ GRE_TUNNEL_TYPE_TEB);
+
+ gre_register_input_protocol (vm, GRE_PROTOCOL_ip4, ip4_input->index,
+ GRE_TUNNEL_TYPE_L3);
+
+ gre_register_input_protocol (vm, GRE_PROTOCOL_ip6, ip6_input->index,
+ GRE_TUNNEL_TYPE_L3);
+
+ gre_register_input_protocol (vm, GRE_PROTOCOL_mpls_unicast,
+ mpls_unicast_input->index, GRE_TUNNEL_TYPE_L3);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (gre_input_init);
+
+#endif /* CLIB_MARCH_VARIANT */
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/gre/pg.c b/src/plugins/gre/pg.c
new file mode 100644
index 00000000000..91c9e487899
--- /dev/null
+++ b/src/plugins/gre/pg.c
@@ -0,0 +1,84 @@
+/*
+ * pg.c: packet generator gre interface
+ *
+ * Copyright (c) 2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <gre/gre.h>
+
+typedef struct
+{
+ pg_edit_t flags_and_version;
+ pg_edit_t protocol;
+} pg_gre_header_t;
+
+static inline void
+pg_gre_header_init (pg_gre_header_t *e)
+{
+ pg_edit_init (&e->flags_and_version, gre_header_t, flags_and_version);
+ pg_edit_init (&e->protocol, gre_header_t, protocol);
+}
+
+uword
+unformat_pg_gre_header (unformat_input_t *input, va_list *args)
+{
+ pg_stream_t *s = va_arg (*args, pg_stream_t *);
+ pg_gre_header_t *h;
+ u32 group_index, error;
+
+ h = pg_create_edit_group (s, sizeof (h[0]), sizeof (gre_header_t),
+ &group_index);
+ pg_gre_header_init (h);
+
+ pg_edit_set_fixed (&h->flags_and_version, 0);
+
+ error = 1;
+ if (!unformat (input, "%U", unformat_pg_edit,
+ unformat_gre_protocol_net_byte_order, &h->protocol))
+ goto done;
+
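+  /* if a fixed protocol was given, let that payload's pg node parse the
+   * rest of the packet specification */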
+ {
+ gre_main_t *pm = &gre_main;
+ gre_protocol_info_t *pi = 0;
+ pg_node_t *pg_node = 0;
+
+ if (h->protocol.type == PG_EDIT_FIXED)
+ {
+ u16 t = *(u16 *) h->protocol.values[PG_EDIT_LO];
+ pi = gre_get_protocol_info (pm, clib_net_to_host_u16 (t));
+ if (pi && pi->node_index != ~0)
+ pg_node = pg_get_node (pi->node_index);
+ }
+
+ if (pg_node && pg_node->unformat_edit &&
+ unformat_user (input, pg_node->unformat_edit, s))
+ ;
+ }
+
+ error = 0;
+done:
+ if (error)
+ pg_free_edit_group (s);
+ return error == 0;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/gre/plugin.c b/src/plugins/gre/plugin.c
new file mode 100644
index 00000000000..b92ec0b6dcd
--- /dev/null
+++ b/src/plugins/gre/plugin.c
@@ -0,0 +1,26 @@
+/*
+ * plugin.c: gre
+ *
+ * Copyright (c) 2023 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/plugin/plugin.h>
+#include <vpp/app/version.h>
+
+// register a plugin
+VLIB_PLUGIN_REGISTER () = {
+ .version = VPP_BUILD_VER,
+ .description = "Generic Routing Encapsulation (GRE) plugin",
+};
diff --git a/src/plugins/nsh/nsh.c b/src/plugins/nsh/nsh.c
index 391fa8dbac5..3c804e9934c 100644
--- a/src/plugins/nsh/nsh.c
+++ b/src/plugins/nsh/nsh.c
@@ -18,7 +18,7 @@
#include <vnet/vnet.h>
#include <vnet/plugin/plugin.h>
#include <nsh/nsh.h>
-#include <vnet/gre/gre.h>
+#include <gre/gre.h>
#include <vxlan/vxlan.h>
#include <vnet/vxlan-gpe/vxlan_gpe.h>
#include <vnet/l2/l2_classify.h>