From a23197980e40d4d9414bcfaf59005a1dc2a89251 Mon Sep 17 00:00:00 2001 From: sreejith Date: Wed, 29 Mar 2017 01:15:02 -0400 Subject: Added vpp intial source code from master branch 17.01.1 Change-Id: I81bdace6f330825a1746a853766779dfb24765fd Signed-off-by: sreejith --- vpp/vnet/vnet/gre/error.def | 23 ++ vpp/vnet/vnet/gre/gre.c | 455 +++++++++++++++++++++++++++++++ vpp/vnet/vnet/gre/gre.h | 235 ++++++++++++++++ vpp/vnet/vnet/gre/interface.c | 606 ++++++++++++++++++++++++++++++++++++++++++ vpp/vnet/vnet/gre/node.c | 531 ++++++++++++++++++++++++++++++++++++ vpp/vnet/vnet/gre/packet.h | 55 ++++ vpp/vnet/vnet/gre/pg.c | 77 ++++++ 7 files changed, 1982 insertions(+) create mode 100644 vpp/vnet/vnet/gre/error.def create mode 100644 vpp/vnet/vnet/gre/gre.c create mode 100644 vpp/vnet/vnet/gre/gre.h create mode 100644 vpp/vnet/vnet/gre/interface.c create mode 100644 vpp/vnet/vnet/gre/node.c create mode 100644 vpp/vnet/vnet/gre/packet.h create mode 100644 vpp/vnet/vnet/gre/pg.c (limited to 'vpp/vnet/vnet/gre') diff --git a/vpp/vnet/vnet/gre/error.def b/vpp/vnet/vnet/gre/error.def new file mode 100644 index 00000000..161ecc1d --- /dev/null +++ b/vpp/vnet/vnet/gre/error.def @@ -0,0 +1,23 @@ +/* + * gre_error.def: gre errors + * + * Copyright (c) 2012 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +gre_error (NONE, "no error") +gre_error (UNKNOWN_PROTOCOL, "unknown protocol") +gre_error (UNSUPPORTED_VERSION, "unsupported version") +gre_error (PKTS_DECAP, "GRE input packets decapsulated") +gre_error (PKTS_ENCAP, "GRE output packets encapsulated") +gre_error (NO_SUCH_TUNNEL, "GRE input packets dropped due to missing tunnel") diff --git a/vpp/vnet/vnet/gre/gre.c b/vpp/vnet/vnet/gre/gre.c new file mode 100644 index 00000000..0faed13e --- /dev/null +++ b/vpp/vnet/vnet/gre/gre.c @@ -0,0 +1,455 @@ +/* + * gre.c: gre + * + * Copyright (c) 2012 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include + +gre_main_t gre_main; + +typedef struct { + union { + ip4_and_gre_header_t ip4_and_gre; + u64 as_u64[3]; + }; +} ip4_and_gre_union_t; + + +/* Packet trace structure */ +typedef struct { + /* Tunnel-id / index in tunnel vector */ + u32 tunnel_id; + + /* pkt length */ + u32 length; + + /* tunnel ip4 addresses */ + ip4_address_t src; + ip4_address_t dst; +} gre_tx_trace_t; + +u8 * format_gre_tx_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + gre_tx_trace_t * t = va_arg (*args, gre_tx_trace_t *); + + s = format (s, "GRE: tunnel %d len %d src %U dst %U", + t->tunnel_id, clib_net_to_host_u16 (t->length), + format_ip4_address, &t->src.as_u8, + format_ip4_address, &t->dst.as_u8); + return s; +} + +u8 * format_gre_protocol (u8 * s, va_list * args) +{ + gre_protocol_t p = va_arg (*args, u32); + gre_main_t * gm = &gre_main; + gre_protocol_info_t * pi = gre_get_protocol_info (gm, p); + + if (pi) + s = format (s, "%s", pi->name); + else + s = format (s, "0x%04x", p); + + return s; +} + +u8 * format_gre_header_with_length (u8 * s, va_list * args) +{ + gre_main_t * gm = &gre_main; + gre_header_t * h = va_arg (*args, gre_header_t *); + u32 max_header_bytes = va_arg (*args, u32); + gre_protocol_t p = clib_net_to_host_u16 (h->protocol); + uword indent, header_bytes; + + header_bytes = sizeof (h[0]); + if (max_header_bytes != 0 && header_bytes > max_header_bytes) + return format (s, "gre header truncated"); + + indent = format_get_indent (s); + + s = format (s, "GRE %U", format_gre_protocol, p); + + if (max_header_bytes != 0 && header_bytes > max_header_bytes) + { + gre_protocol_info_t * pi = gre_get_protocol_info (gm, p); + vlib_node_t * node = vlib_get_node (gm->vlib_main, pi->node_index); + if (node->format_buffer) + s = format (s, "\n%U%U", + format_white_space, indent, + node->format_buffer, (void *) (h + 1), + max_header_bytes - header_bytes); + } + + return s; +} + +u8 * format_gre_header (u8 * s, va_list * args) +{ + gre_header_t * h = va_arg (*args, gre_header_t *); + return format (s, "%U", format_gre_header_with_length, h, 0); +} + +/* Returns gre protocol as an int in host byte order. */ +uword +unformat_gre_protocol_host_byte_order (unformat_input_t * input, + va_list * args) +{ + u16 * result = va_arg (*args, u16 *); + gre_main_t * gm = &gre_main; + int i; + + /* Named type. */ + if (unformat_user (input, unformat_vlib_number_by_name, + gm->protocol_info_by_name, &i)) + { + gre_protocol_info_t * pi = vec_elt_at_index (gm->protocol_infos, i); + *result = pi->protocol; + return 1; + } + + return 0; +} + +uword +unformat_gre_protocol_net_byte_order (unformat_input_t * input, + va_list * args) +{ + u16 * result = va_arg (*args, u16 *); + if (! unformat_user (input, unformat_gre_protocol_host_byte_order, result)) + return 0; + *result = clib_host_to_net_u16 ((u16) *result); + return 1; +} + +uword +unformat_gre_header (unformat_input_t * input, va_list * args) +{ + u8 ** result = va_arg (*args, u8 **); + gre_header_t _h, * h = &_h; + u16 p; + + if (! unformat (input, "%U", + unformat_gre_protocol_host_byte_order, &p)) + return 0; + + h->protocol = clib_host_to_net_u16 (p); + + /* Add header to result. 
*/ + { + void * p; + u32 n_bytes = sizeof (h[0]); + + vec_add2 (*result, p, n_bytes); + clib_memcpy (p, h, n_bytes); + } + + return 1; +} + +static int +gre_proto_from_vnet_link (vnet_link_t link) +{ + switch (link) + { + case VNET_LINK_IP4: + return (GRE_PROTOCOL_ip4); + case VNET_LINK_IP6: + return (GRE_PROTOCOL_ip6); + case VNET_LINK_MPLS: + return (GRE_PROTOCOL_mpls_unicast); + case VNET_LINK_ETHERNET: + return (GRE_PROTOCOL_teb); + case VNET_LINK_ARP: + return (GRE_PROTOCOL_arp); + } + ASSERT(0); + return (GRE_PROTOCOL_ip4); +} + +static u8* +gre_build_rewrite (vnet_main_t * vnm, + u32 sw_if_index, + vnet_link_t link_type, + const void *dst_address) +{ + gre_main_t * gm = &gre_main; + ip4_and_gre_header_t * h; + u8* rewrite = NULL; + gre_tunnel_t *t; + u32 ti; + + ti = gm->tunnel_index_by_sw_if_index[sw_if_index]; + + if (~0 == ti) + /* not one of ours */ + return (0); + + t = pool_elt_at_index(gm->tunnels, ti); + + vec_validate(rewrite, sizeof(*h)-1); + h = (ip4_and_gre_header_t*)rewrite; + h->gre.protocol = clib_host_to_net_u16(gre_proto_from_vnet_link(link_type)); + + h->ip4.ip_version_and_header_length = 0x45; + h->ip4.ttl = 254; + h->ip4.protocol = IP_PROTOCOL_GRE; + /* fixup ip4 header length and checksum after-the-fact */ + h->ip4.src_address.as_u32 = t->tunnel_src.as_u32; + h->ip4.dst_address.as_u32 = t->tunnel_dst.as_u32; + h->ip4.checksum = ip4_header_checksum (&h->ip4); + + return (rewrite); +} + +void +gre_fixup (vlib_main_t *vm, + ip_adjacency_t *adj, + vlib_buffer_t *b0) +{ + ip4_header_t * ip0; + + ip0 = vlib_buffer_get_current (b0); + + /* Fixup the checksum and len fields in the GRE tunnel encap + * that was applied at the midchain node */ + ip0->length = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)); + ip0->checksum = ip4_header_checksum (ip0); +} + +void +gre_update_adj (vnet_main_t * vnm, + u32 sw_if_index, + adj_index_t ai) +{ + adj_nbr_midchain_update_rewrite (ai, gre_fixup, + ADJ_MIDCHAIN_FLAG_NONE, + gre_build_rewrite(vnm, sw_if_index, + adj_get_link_type(ai), + NULL)); + + gre_tunnel_stack(ai); +} + +/** + * @brief TX function. Only called L2. 
L3 traffic uses the adj-midchains + */ +static uword +gre_interface_tx_inline (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + gre_main_t * gm = &gre_main; + u32 next_index; + u32 * from, * to_next, n_left_from, n_left_to_next; + vnet_interface_output_runtime_t * rd = (void *) node->runtime_data; + const gre_tunnel_t *gt = pool_elt_at_index (gm->tunnels, rd->dev_instance); + + /* Vector of buffer / pkt indices we're supposed to process */ + from = vlib_frame_vector_args (frame); + + /* Number of buffers / pkts */ + n_left_from = frame->n_vectors; + + /* Speculatively send the first buffer to the last disposition we used */ + next_index = node->cached_next_index; + + while (n_left_from > 0) + { + /* set up to enqueue to our disposition with index = next_index */ + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + /* + * FIXME DUAL LOOP + */ + + while (n_left_from > 0 && n_left_to_next > 0) + { + vlib_buffer_t * b0; + u32 bi0; + + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer(vm, bi0); + + vnet_buffer(b0)->ip.adj_index[VLIB_TX] = gt->l2_adj_index; + + if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) + { + gre_tx_trace_t *tr = vlib_add_trace (vm, node, + b0, sizeof (*tr)); + tr->tunnel_id = gt - gm->tunnels; + tr->length = vlib_buffer_length_in_chain (vm, b0); + tr->src.as_u32 = gt->tunnel_src.as_u32; + tr->dst.as_u32 = gt->tunnel_src.as_u32; + } + + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, + bi0, gt->l2_tx_arc); + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + vlib_node_increment_counter (vm, gre_input_node.index, + GRE_ERROR_PKTS_ENCAP, frame->n_vectors); + + return frame->n_vectors; +} + +static uword +gre_interface_tx (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + return (gre_interface_tx_inline (vm, node, frame)); +} + +static uword +gre_teb_interface_tx (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + return (gre_interface_tx_inline (vm, node, frame)); +} + +static u8 * format_gre_tunnel_name (u8 * s, va_list * args) +{ + u32 dev_instance = va_arg (*args, u32); + return format (s, "gre%d", dev_instance); +} + +static u8 * format_gre_tunnel_teb_name (u8 * s, va_list * args) +{ + u32 dev_instance = va_arg (*args, u32); + return format (s, "teb-gre%d", dev_instance); +} + +static u8 * format_gre_device (u8 * s, va_list * args) +{ + u32 dev_instance = va_arg (*args, u32); + CLIB_UNUSED (int verbose) = va_arg (*args, int); + + s = format (s, "GRE tunnel: id %d\n", dev_instance); + return s; +} + +VNET_DEVICE_CLASS (gre_device_class) = { + .name = "GRE tunnel device", + .format_device_name = format_gre_tunnel_name, + .format_device = format_gre_device, + .format_tx_trace = format_gre_tx_trace, + .tx_function = gre_interface_tx, + .admin_up_down_function = gre_interface_admin_up_down, +#ifdef SOON + .clear counter = 0; +#endif +}; + +VLIB_DEVICE_TX_FUNCTION_MULTIARCH (gre_device_class, + gre_interface_tx) + +VNET_DEVICE_CLASS (gre_device_teb_class) = { + .name = "GRE TEB tunnel device", + .format_device_name = format_gre_tunnel_teb_name, + .format_device = format_gre_device, + .format_tx_trace = format_gre_tx_trace, + .tx_function = gre_teb_interface_tx, + .admin_up_down_function = gre_interface_admin_up_down, +#ifdef SOON + .clear counter = 0; +#endif +}; + +VLIB_DEVICE_TX_FUNCTION_MULTIARCH (gre_device_teb_class, + 
gre_teb_interface_tx) + +VNET_HW_INTERFACE_CLASS (gre_hw_interface_class) = { + .name = "GRE", + .format_header = format_gre_header_with_length, + .unformat_header = unformat_gre_header, + .build_rewrite = gre_build_rewrite, + .update_adjacency = gre_update_adj, + .flags = VNET_HW_INTERFACE_CLASS_FLAG_P2P, +}; + +static void add_protocol (gre_main_t * gm, + gre_protocol_t protocol, + char * protocol_name) +{ + gre_protocol_info_t * pi; + u32 i; + + vec_add2 (gm->protocol_infos, pi, 1); + i = pi - gm->protocol_infos; + + pi->name = protocol_name; + pi->protocol = protocol; + pi->next_index = pi->node_index = ~0; + + hash_set (gm->protocol_info_by_protocol, protocol, i); + hash_set_mem (gm->protocol_info_by_name, pi->name, i); +} + +static clib_error_t * gre_init (vlib_main_t * vm) +{ + gre_main_t * gm = &gre_main; + clib_error_t * error; + ip_main_t * im = &ip_main; + ip_protocol_info_t * pi; + + memset (gm, 0, sizeof (gm[0])); + gm->vlib_main = vm; + gm->vnet_main = vnet_get_main(); + + if ((error = vlib_call_init_function (vm, ip_main_init))) + return error; + + if ((error = vlib_call_init_function (vm, ip4_lookup_init))) + return error; + + /* Set up the ip packet generator */ + pi = ip_get_protocol_info (im, IP_PROTOCOL_GRE); + pi->format_header = format_gre_header; + pi->unformat_pg_edit = unformat_pg_gre_header; + + gm->protocol_info_by_name = hash_create_string (0, sizeof (uword)); + gm->protocol_info_by_protocol = hash_create (0, sizeof (uword)); + gm->tunnel_by_key = hash_create (0, sizeof (uword)); + +#define _(n,s) add_protocol (gm, GRE_PROTOCOL_##s, #s); + foreach_gre_protocol +#undef _ + + return vlib_call_init_function (vm, gre_input_init); +} + +VLIB_INIT_FUNCTION (gre_init); + +gre_main_t * gre_get_main (vlib_main_t * vm) +{ + vlib_call_init_function (vm, gre_init); + return &gre_main; +} + diff --git a/vpp/vnet/vnet/gre/gre.h b/vpp/vnet/vnet/gre/gre.h new file mode 100644 index 00000000..b6544b9b --- /dev/null +++ b/vpp/vnet/vnet/gre/gre.h @@ -0,0 +1,235 @@ +/* + * gre.h: types/functions for gre. + * + * Copyright (c) 2012 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef included_gre_h +#define included_gre_h + +#include +#include +#include +#include +#include +#include +#include +#include + +extern vnet_hw_interface_class_t gre_hw_interface_class; + +typedef enum { +#define gre_error(n,s) GRE_ERROR_##n, +#include +#undef gre_error + GRE_N_ERROR, +} gre_error_t; + +/** + * A GRE payload protocol registration + */ +typedef struct { + /** Name (a c string). */ + char * name; + + /** GRE protocol type in host byte order. */ + gre_protocol_t protocol; + + /** Node which handles this type. */ + u32 node_index; + + /** Next index for this type. */ + u32 next_index; +} gre_protocol_info_t; + +/** + * @brief The GRE tunnel type + */ +typedef enum gre_tunnel_tyoe_t_ +{ + /** + * L3 GRE (i.e. 
this tunnel is in L3 mode) + */ + GRE_TUNNEL_TYPE_L3, + /** + * Transparent Ethernet Bridging - the tunnel is in L2 mode + */ + GRE_TUNNEL_TYPE_TEB, +} gre_tunnel_type_t; + +#define GRE_TUNNEL_TYPE_NAMES { \ + [GRE_TUNNEL_TYPE_L3] = "L3", \ + [GRE_TUNNEL_TYPE_TEB] = "TEB", \ +} + +#define GRE_TUNNEL_N_TYPES ((gre_tunnel_type_t)GRE_TUNNEL_TYPE_TEB+1) + +/** + * @brief A representation of a GRE tunnel + */ +typedef struct { + /** + * Linkage into the FIB object graph + */ + fib_node_t node; + + /** + * The tunnel's source/local address + */ + ip4_address_t tunnel_src; + /** + * The tunnel's destination/remote address + */ + ip4_address_t tunnel_dst; + /** + * The FIB in which the src.dst address are present + */ + u32 outer_fib_index; + u32 hw_if_index; + u32 sw_if_index; + gre_tunnel_type_t type; + + /** + * The FIB entry sourced by the tunnel for its destination prefix + */ + fib_node_index_t fib_entry_index; + + /** + * The tunnel is a child of the FIB entry for its desintion. This is + * so it receives updates when the forwarding information for that entry + * changes. + * The tunnels sibling index on the FIB entry's dependency list. + */ + u32 sibling_index; + + /** + * on a L2 tunnel this is the VLIB arc from the L2-tx to the l2-midchain + */ + u32 l2_tx_arc; + + /** + * an L2 tunnel always rquires an L2 midchain. cache here for DP. + */ + adj_index_t l2_adj_index; +} gre_tunnel_t; + +/** + * @brief GRE related global data + */ +typedef struct { + /** + * pool of tunnel instances + */ + gre_tunnel_t *tunnels; + + /** + * GRE payload protocol registrations + */ + gre_protocol_info_t * protocol_infos; + + /** + * Hash tables mapping name/protocol to protocol info index. + */ + uword * protocol_info_by_name, * protocol_info_by_protocol; + /** + * Hash mapping src/dst addr pair to tunnel + */ + uword * tunnel_by_key; + + /** + * Free vlib hw_if_indices. + * A free list per-tunnel type since the interfaces ctreated are fo different + * types and we cannot change the type. + */ + u32 * free_gre_tunnel_hw_if_indices[GRE_TUNNEL_N_TYPES]; + + /** + * Mapping from sw_if_index to tunnel index + */ + u32 * tunnel_index_by_sw_if_index; + + /* convenience */ + vlib_main_t * vlib_main; + vnet_main_t * vnet_main; +} gre_main_t; + +/** + * @brief IPv4 and GRE header. + */ +typedef CLIB_PACKED (struct { + ip4_header_t ip4; + gre_header_t gre; +}) ip4_and_gre_header_t; + +always_inline gre_protocol_info_t * +gre_get_protocol_info (gre_main_t * em, gre_protocol_t protocol) +{ + uword * p = hash_get (em->protocol_info_by_protocol, protocol); + return p ? vec_elt_at_index (em->protocol_infos, p[0]) : 0; +} + +gre_main_t gre_main; + +/* Register given node index to take input for given gre type. */ +void +gre_register_input_type (vlib_main_t * vm, + gre_protocol_t protocol, + u32 node_index); + +extern clib_error_t * gre_interface_admin_up_down (vnet_main_t * vnm, + u32 hw_if_index, + u32 flags); + +extern void gre_tunnel_stack (adj_index_t ai); +extern void gre_update_adj (vnet_main_t * vnm, + u32 sw_if_index, + adj_index_t ai); + +format_function_t format_gre_protocol; +format_function_t format_gre_header; +format_function_t format_gre_header_with_length; + +extern vlib_node_registration_t gre_input_node; +extern vnet_device_class_t gre_device_class; +extern vnet_device_class_t gre_device_teb_class; + +/* Parse gre protocol as 0xXXXX or protocol name. + In either host or network byte order. 
*/ +unformat_function_t unformat_gre_protocol_host_byte_order; +unformat_function_t unformat_gre_protocol_net_byte_order; + +/* Parse gre header. */ +unformat_function_t unformat_gre_header; +unformat_function_t unformat_pg_gre_header; + +void +gre_register_input_protocol (vlib_main_t * vm, + gre_protocol_t protocol, + u32 node_index); + +/* manually added to the interface output node in gre.c */ +#define GRE_OUTPUT_NEXT_LOOKUP 1 + +typedef struct { + u8 is_add; + + ip4_address_t src, dst; + u32 outer_fib_id; + u8 teb; +} vnet_gre_add_del_tunnel_args_t; + +int vnet_gre_add_del_tunnel + (vnet_gre_add_del_tunnel_args_t *a, u32 * sw_if_indexp); + +#endif /* included_gre_h */ diff --git a/vpp/vnet/vnet/gre/interface.c b/vpp/vnet/vnet/gre/interface.c new file mode 100644 index 00000000..d624587d --- /dev/null +++ b/vpp/vnet/vnet/gre/interface.c @@ -0,0 +1,606 @@ +/* + * gre_interface.c: gre interfaces + * + * Copyright (c) 2012 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +static const char *gre_tunnel_type_names[] = GRE_TUNNEL_TYPE_NAMES; + +static inline u64 +gre_mk_key (const ip4_address_t *src, + const ip4_address_t *dst, + u32 out_fib_index) +{ + // FIXME. 
the fib index should be part of the key + return ((u64)src->as_u32 << 32 | (u64)dst->as_u32); +} + +static u8 * +format_gre_tunnel_type (u8 * s, va_list * args) +{ + gre_tunnel_type_t type = va_arg (*args, gre_tunnel_type_t); + + return (format(s, "%s", gre_tunnel_type_names[type])); +} + +static u8 * +format_gre_tunnel (u8 * s, va_list * args) +{ + gre_tunnel_t * t = va_arg (*args, gre_tunnel_t *); + gre_main_t * gm = &gre_main; + + s = format (s, + "[%d] %U (src) %U (dst) payload %U outer_fib_index %d", + t - gm->tunnels, + format_ip4_address, &t->tunnel_src, + format_ip4_address, &t->tunnel_dst, + format_gre_tunnel_type, t->type, + t->outer_fib_index); + + return s; +} + +static gre_tunnel_t * +gre_tunnel_db_find (const ip4_address_t *src, + const ip4_address_t *dst, + u32 out_fib_index) +{ + gre_main_t * gm = &gre_main; + uword * p; + u64 key; + + key = gre_mk_key(src, dst, out_fib_index); + + p = hash_get (gm->tunnel_by_key, key); + + if (NULL == p) + return (NULL); + + return (pool_elt_at_index (gm->tunnels, p[0])); +} + +static void +gre_tunnel_db_add (const gre_tunnel_t *t) +{ + gre_main_t * gm = &gre_main; + u64 key; + + key = gre_mk_key(&t->tunnel_src, &t->tunnel_dst, t->outer_fib_index); + hash_set (gm->tunnel_by_key, key, t - gm->tunnels); +} + +static void +gre_tunnel_db_remove (const gre_tunnel_t *t) +{ + gre_main_t * gm = &gre_main; + u64 key; + + key = gre_mk_key(&t->tunnel_src, &t->tunnel_dst, t->outer_fib_index); + hash_unset (gm->tunnel_by_key, key); +} + +static gre_tunnel_t * +gre_tunnel_from_fib_node (fib_node_t *node) +{ +#if (CLIB_DEBUG > 0) + ASSERT(FIB_NODE_TYPE_GRE_TUNNEL == node->fn_type); +#endif + return ((gre_tunnel_t*) (((char*)node) - + STRUCT_OFFSET_OF(gre_tunnel_t, node))); +} + +/** + * gre_tunnel_stack + * + * 'stack' (resolve the recursion for) the tunnel's midchain adjacency + */ +void +gre_tunnel_stack (adj_index_t ai) +{ + gre_main_t * gm = &gre_main; + ip_adjacency_t *adj; + gre_tunnel_t *gt; + u32 sw_if_index; + + adj = adj_get(ai); + sw_if_index = adj->rewrite_header.sw_if_index; + + if ((vec_len(gm->tunnel_index_by_sw_if_index) < sw_if_index) || + (~0 == gm->tunnel_index_by_sw_if_index[sw_if_index])) + return; + + gt = pool_elt_at_index(gm->tunnels, + gm->tunnel_index_by_sw_if_index[sw_if_index]); + + /* + * find the adjacency that is contributed by the FIB entry + * that this tunnel resovles via, and use it as the next adj + * in the midchain + */ + if (vnet_hw_interface_get_flags(vnet_get_main(), + gt->hw_if_index) & + VNET_HW_INTERFACE_FLAG_LINK_UP) + { + adj_nbr_midchain_stack( + ai, + fib_entry_contribute_ip_forwarding(gt->fib_entry_index)); + } + else + { + adj_nbr_midchain_unstack(ai); + } +} + +/** + * @brief Call back when restacking all adjacencies on a GRE interface + */ +static adj_walk_rc_t +gre_adj_walk_cb (adj_index_t ai, + void *ctx) +{ + gre_tunnel_stack(ai); + + return (ADJ_WALK_RC_CONTINUE); +} + +static void +gre_tunnel_restack (gre_tunnel_t *gt) +{ + fib_protocol_t proto; + + /* + * walk all the adjacencies on th GRE interface and restack them + */ + FOR_EACH_FIB_IP_PROTOCOL(proto) + { + adj_nbr_walk(gt->sw_if_index, + proto, + gre_adj_walk_cb, + NULL); + } +} + +/** + * Function definition to backwalk a FIB node + */ +static fib_node_back_walk_rc_t +gre_tunnel_back_walk (fib_node_t *node, + fib_node_back_walk_ctx_t *ctx) +{ + gre_tunnel_restack(gre_tunnel_from_fib_node(node)); + + return (FIB_NODE_BACK_WALK_CONTINUE); +} + +/** + * Function definition to get a FIB node from its index + */ +static fib_node_t* 
+gre_tunnel_fib_node_get (fib_node_index_t index) +{ + gre_tunnel_t * gt; + gre_main_t * gm; + + gm = &gre_main; + gt = pool_elt_at_index(gm->tunnels, index); + + return (>->node); +} + +/** + * Function definition to inform the FIB node that its last lock has gone. + */ +static void +gre_tunnel_last_lock_gone (fib_node_t *node) +{ + /* + * The MPLS GRE tunnel is a root of the graph. As such + * it never has children and thus is never locked. + */ + ASSERT(0); +} + +/* + * Virtual function table registered by MPLS GRE tunnels + * for participation in the FIB object graph. + */ +const static fib_node_vft_t gre_vft = { + .fnv_get = gre_tunnel_fib_node_get, + .fnv_last_lock = gre_tunnel_last_lock_gone, + .fnv_back_walk = gre_tunnel_back_walk, +}; + +static int +vnet_gre_tunnel_add (vnet_gre_add_del_tunnel_args_t *a, + u32 * sw_if_indexp) +{ + gre_main_t * gm = &gre_main; + vnet_main_t * vnm = gm->vnet_main; + ip4_main_t * im = &ip4_main; + gre_tunnel_t * t; + vnet_hw_interface_t * hi; + u32 hw_if_index, sw_if_index; + u32 outer_fib_index; + u8 address[6]; + clib_error_t *error; + + outer_fib_index = ip4_fib_index_from_table_id(a->outer_fib_id); + + if (~0 == outer_fib_index) + return VNET_API_ERROR_NO_SUCH_FIB; + + t = gre_tunnel_db_find(&a->src, &a->dst, a->outer_fib_id); + + if (NULL != t) + return VNET_API_ERROR_INVALID_VALUE; + + pool_get_aligned (gm->tunnels, t, CLIB_CACHE_LINE_BYTES); + memset (t, 0, sizeof (*t)); + fib_node_init(&t->node, FIB_NODE_TYPE_GRE_TUNNEL); + + if (a->teb) + t->type = GRE_TUNNEL_TYPE_TEB; + else + t->type = GRE_TUNNEL_TYPE_L3; + + if (vec_len (gm->free_gre_tunnel_hw_if_indices[t->type]) > 0) { + vnet_interface_main_t * im = &vnm->interface_main; + + hw_if_index = gm->free_gre_tunnel_hw_if_indices[t->type] + [vec_len (gm->free_gre_tunnel_hw_if_indices[t->type])-1]; + _vec_len (gm->free_gre_tunnel_hw_if_indices[t->type]) -= 1; + + hi = vnet_get_hw_interface (vnm, hw_if_index); + hi->dev_instance = t - gm->tunnels; + hi->hw_instance = hi->dev_instance; + + /* clear old stats of freed tunnel before reuse */ + sw_if_index = hi->sw_if_index; + vnet_interface_counter_lock(im); + vlib_zero_combined_counter + (&im->combined_sw_if_counters[VNET_INTERFACE_COUNTER_TX], sw_if_index); + vlib_zero_combined_counter + (&im->combined_sw_if_counters[VNET_INTERFACE_COUNTER_RX], sw_if_index); + vlib_zero_simple_counter + (&im->sw_if_counters[VNET_INTERFACE_COUNTER_DROP], sw_if_index); + vnet_interface_counter_unlock(im); + if (GRE_TUNNEL_TYPE_TEB == t->type) + { + t->l2_tx_arc = vlib_node_add_named_next(vlib_get_main(), + hi->tx_node_index, + "adj-l2-midchain"); + } + } else { + if (GRE_TUNNEL_TYPE_TEB == t->type) + { + /* Default MAC address (d00b:eed0:0000 + sw_if_index) */ + memset (address, 0, sizeof (address)); + address[0] = 0xd0; + address[1] = 0x0b; + address[2] = 0xee; + address[3] = 0xd0; + address[4] = t - gm->tunnels; + + error = ethernet_register_interface(vnm, + gre_device_teb_class.index, + t - gm->tunnels, address, + &hw_if_index, + 0); + + if (error) + { + clib_error_report (error); + return VNET_API_ERROR_INVALID_REGISTRATION; + } + hi = vnet_get_hw_interface (vnm, hw_if_index); + + t->l2_tx_arc = vlib_node_add_named_next(vlib_get_main(), + hi->tx_node_index, + "adj-l2-midchain"); + } else { + hw_if_index = vnet_register_interface(vnm, + gre_device_class.index, + t - gm->tunnels, + gre_hw_interface_class.index, + t - gm->tunnels); + } + hi = vnet_get_hw_interface (vnm, hw_if_index); + sw_if_index = hi->sw_if_index; + } + + t->hw_if_index = hw_if_index; + 
t->outer_fib_index = outer_fib_index; + t->sw_if_index = sw_if_index; + t->l2_adj_index = ADJ_INDEX_INVALID; + + vec_validate_init_empty (gm->tunnel_index_by_sw_if_index, sw_if_index, ~0); + gm->tunnel_index_by_sw_if_index[sw_if_index] = t - gm->tunnels; + + vec_validate (im->fib_index_by_sw_if_index, sw_if_index); + + hi->min_packet_bytes = 64 + sizeof (gre_header_t) + sizeof (ip4_header_t); + hi->per_packet_overhead_bytes = + /* preamble */ 8 + /* inter frame gap */ 12; + + /* Standard default gre MTU. */ + hi->max_l3_packet_bytes[VLIB_RX] = hi->max_l3_packet_bytes[VLIB_TX] = 9000; + + clib_memcpy (&t->tunnel_src, &a->src, sizeof (t->tunnel_src)); + clib_memcpy (&t->tunnel_dst, &a->dst, sizeof (t->tunnel_dst)); + + gre_tunnel_db_add(t); + + /* + * source the FIB entry for the tunnel's destination + * and become a child thereof. The tunnel will then get poked + * when the forwarding for the entry updates, and the tunnel can + * re-stack accordingly + */ + const fib_prefix_t tun_dst_pfx = { + .fp_len = 32, + .fp_proto = FIB_PROTOCOL_IP4, + .fp_addr = { + .ip4 = t->tunnel_dst, + } + }; + + t->fib_entry_index = + fib_table_entry_special_add(outer_fib_index, + &tun_dst_pfx, + FIB_SOURCE_RR, + FIB_ENTRY_FLAG_NONE, + ADJ_INDEX_INVALID); + t->sibling_index = + fib_entry_child_add(t->fib_entry_index, + FIB_NODE_TYPE_GRE_TUNNEL, + t - gm->tunnels); + + clib_memcpy (&t->tunnel_src, &a->src, sizeof (t->tunnel_src)); + clib_memcpy (&t->tunnel_dst, &a->dst, sizeof (t->tunnel_dst)); + + if (GRE_TUNNEL_TYPE_TEB == t->type) + { + t->l2_adj_index = adj_nbr_add_or_lock(FIB_PROTOCOL_IP4, + VNET_LINK_ETHERNET, + &zero_addr, + sw_if_index); + gre_update_adj(vnm, t->sw_if_index, t->l2_adj_index); + } + + if (sw_if_indexp) + *sw_if_indexp = sw_if_index; + + return 0; +} + +static int +vnet_gre_tunnel_delete (vnet_gre_add_del_tunnel_args_t *a, + u32 * sw_if_indexp) +{ + gre_main_t * gm = &gre_main; + vnet_main_t * vnm = gm->vnet_main; + gre_tunnel_t * t; + u32 sw_if_index; + + t = gre_tunnel_db_find(&a->src, &a->dst, a->outer_fib_id); + + if (NULL == t) + return VNET_API_ERROR_NO_SUCH_ENTRY; + + sw_if_index = t->sw_if_index; + vnet_sw_interface_set_flags (vnm, sw_if_index, 0 /* down */); + /* make sure tunnel is removed from l2 bd or xconnect */ + set_int_l2_mode(gm->vlib_main, vnm, MODE_L3, sw_if_index, 0, 0, 0, 0); + vec_add1 (gm->free_gre_tunnel_hw_if_indices[t->type], t->hw_if_index); + gm->tunnel_index_by_sw_if_index[sw_if_index] = ~0; + + if (GRE_TUNNEL_TYPE_TEB == t->type) + adj_unlock(t->l2_adj_index); + + if (t->l2_adj_index != ADJ_INDEX_INVALID) + adj_unlock(t->l2_adj_index); + + fib_entry_child_remove(t->fib_entry_index, + t->sibling_index); + fib_table_entry_delete_index(t->fib_entry_index, + FIB_SOURCE_RR); + + gre_tunnel_db_remove(t); + fib_node_deinit(&t->node); + pool_put (gm->tunnels, t); + + if (sw_if_indexp) + *sw_if_indexp = sw_if_index; + + return 0; +} + +int +vnet_gre_add_del_tunnel (vnet_gre_add_del_tunnel_args_t *a, + u32 * sw_if_indexp) +{ + if (a->is_add) + return (vnet_gre_tunnel_add(a, sw_if_indexp)); + else + return (vnet_gre_tunnel_delete(a, sw_if_indexp)); +} + +clib_error_t * +gre_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags) +{ + gre_main_t * gm = &gre_main; + vnet_hw_interface_t * hi; + gre_tunnel_t *t; + u32 ti; + + hi = vnet_get_hw_interface (vnm, hw_if_index); + + if (NULL == gm->tunnel_index_by_sw_if_index || + hi->sw_if_index >= vec_len(gm->tunnel_index_by_sw_if_index)) + return (NULL); + + ti = gm->tunnel_index_by_sw_if_index[hi->sw_if_index]; + 
+ if (~0 == ti) + /* not one of ours */ + return (NULL); + + t = pool_elt_at_index(gm->tunnels, ti); + + if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) + vnet_hw_interface_set_flags (vnm, hw_if_index, VNET_HW_INTERFACE_FLAG_LINK_UP); + else + vnet_hw_interface_set_flags (vnm, hw_if_index, 0 /* down */); + + gre_tunnel_restack(t); + + return /* no error */ 0; +} + +static clib_error_t * +create_gre_tunnel_command_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + unformat_input_t _line_input, * line_input = &_line_input; + vnet_gre_add_del_tunnel_args_t _a, * a = &_a; + ip4_address_t src, dst; + u32 outer_fib_id = 0; + u8 teb = 0; + int rv; + u32 num_m_args = 0; + u8 is_add = 1; + u32 sw_if_index; + + /* Get a line of input. */ + if (! unformat_user (input, unformat_line_input, line_input)) + return 0; + + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) { + if (unformat (line_input, "del")) + is_add = 0; + else if (unformat (line_input, "src %U", unformat_ip4_address, &src)) + num_m_args++; + else if (unformat (line_input, "dst %U", unformat_ip4_address, &dst)) + num_m_args++; + else if (unformat (line_input, "outer-fib-id %d", &outer_fib_id)) + ; + else if (unformat (line_input, "teb")) + teb = 1; + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + unformat_free (line_input); + + if (num_m_args < 2) + return clib_error_return (0, "mandatory argument(s) missing"); + + if (memcmp (&src, &dst, sizeof(src)) == 0) + return clib_error_return (0, "src and dst are identical"); + + memset (a, 0, sizeof (*a)); + a->outer_fib_id = outer_fib_id; + a->teb = teb; + clib_memcpy(&a->src, &src, sizeof(src)); + clib_memcpy(&a->dst, &dst, sizeof(dst)); + + if (is_add) + rv = vnet_gre_tunnel_add(a, &sw_if_index); + else + rv = vnet_gre_tunnel_delete(a, &sw_if_index); + + switch(rv) + { + case 0: + vlib_cli_output(vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main(), sw_if_index); + break; + case VNET_API_ERROR_INVALID_VALUE: + return clib_error_return (0, "GRE tunnel already exists..."); + case VNET_API_ERROR_NO_SUCH_FIB: + return clib_error_return (0, "outer fib ID %d doesn't exist\n", + outer_fib_id); + default: + return clib_error_return (0, "vnet_gre_add_del_tunnel returned %d", rv); + } + + return 0; +} + +VLIB_CLI_COMMAND (create_gre_tunnel_command, static) = { + .path = "create gre tunnel", + .short_help = "create gre tunnel src dst " + "[outer-fib-id ] [teb] [del]", + .function = create_gre_tunnel_command_fn, +}; + +static clib_error_t * +show_gre_tunnel_command_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + gre_main_t * gm = &gre_main; + gre_tunnel_t * t; + u32 ti = ~0; + + if (pool_elts (gm->tunnels) == 0) + vlib_cli_output (vm, "No GRE tunnels configured..."); + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "%d", &ti)) + ; + else + break; + } + + if (~0 == ti) + { + pool_foreach (t, gm->tunnels, + ({ + vlib_cli_output (vm, "%U", format_gre_tunnel, t); + })); + } + else + { + t = pool_elt_at_index(gm->tunnels, ti); + + vlib_cli_output (vm, "%U", format_gre_tunnel, t); + } + + return 0; +} + +VLIB_CLI_COMMAND (show_gre_tunnel_command, static) = { + .path = "show gre tunnel", + .function = show_gre_tunnel_command_fn, +}; + +/* force inclusion from application's main.c */ +clib_error_t *gre_interface_init (vlib_main_t *vm) +{ + fib_node_register_type(FIB_NODE_TYPE_GRE_TUNNEL, &gre_vft); + + return 0; +} 
+VLIB_INIT_FUNCTION(gre_interface_init); diff --git a/vpp/vnet/vnet/gre/node.c b/vpp/vnet/vnet/gre/node.c new file mode 100644 index 00000000..86f7a6ee --- /dev/null +++ b/vpp/vnet/vnet/gre/node.c @@ -0,0 +1,531 @@ +/* + * node.c: gre packet processing + * + * Copyright (c) 2012 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include + +#define foreach_gre_input_next \ +_(PUNT, "error-punt") \ +_(DROP, "error-drop") \ +_(ETHERNET_INPUT, "ethernet-input") \ +_(IP4_INPUT, "ip4-input") \ +_(IP6_INPUT, "ip6-input") \ +_(MPLS_INPUT, "mpls-input") + +typedef enum { +#define _(s,n) GRE_INPUT_NEXT_##s, + foreach_gre_input_next +#undef _ + GRE_INPUT_N_NEXT, +} gre_input_next_t; + +typedef struct { + u32 tunnel_id; + u32 length; + ip4_address_t src; + ip4_address_t dst; +} gre_rx_trace_t; + +u8 * format_gre_rx_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + gre_rx_trace_t * t = va_arg (*args, gre_rx_trace_t *); + + s = format (s, "GRE: tunnel %d len %d src %U dst %U", + t->tunnel_id, clib_net_to_host_u16(t->length), + format_ip4_address, &t->src.as_u8, + format_ip4_address, &t->dst.as_u8); + return s; +} + +typedef struct { + /* Sparse vector mapping gre protocol in network byte order + to next index. */ + u16 * next_by_protocol; +} gre_input_runtime_t; + +static uword +gre_input (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + gre_main_t * gm = &gre_main; + gre_input_runtime_t * rt = (void *) node->runtime_data; + __attribute__((unused)) u32 n_left_from, next_index, * from, * to_next; + u64 cached_tunnel_key = (u64) ~0; + u32 cached_tunnel_sw_if_index = 0, tunnel_sw_if_index = 0; + + u32 cpu_index = os_get_cpu_number(); + u32 len; + vnet_interface_main_t *im = &gm->vnet_main->interface_main; + + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + + next_index = node->cached_next_index; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, + to_next, n_left_to_next); + + while (n_left_from >= 4 && n_left_to_next >= 2) + { + u32 bi0, bi1; + vlib_buffer_t * b0, * b1; + gre_header_t * h0, * h1; + u16 version0, version1; + int verr0, verr1; + u32 i0, i1, next0, next1, protocol0, protocol1; + ip4_header_t *ip0, *ip1; + + /* Prefetch next iteration. 
*/ + { + vlib_buffer_t * p2, * p3; + + p2 = vlib_get_buffer (vm, from[2]); + p3 = vlib_get_buffer (vm, from[3]); + + vlib_prefetch_buffer_header (p2, LOAD); + vlib_prefetch_buffer_header (p3, LOAD); + + CLIB_PREFETCH (p2->data, sizeof (h0[0]), LOAD); + CLIB_PREFETCH (p3->data, sizeof (h1[0]), LOAD); + } + + bi0 = from[0]; + bi1 = from[1]; + to_next[0] = bi0; + to_next[1] = bi1; + from += 2; + to_next += 2; + n_left_to_next -= 2; + n_left_from -= 2; + + b0 = vlib_get_buffer (vm, bi0); + b1 = vlib_get_buffer (vm, bi1); + + /* ip4_local hands us the ip header, not the gre header */ + ip0 = vlib_buffer_get_current (b0); + ip1 = vlib_buffer_get_current (b1); + + /* Save src + dst ip4 address, e.g. for mpls-o-gre */ + vnet_buffer(b0)->gre.src = ip0->src_address.as_u32; + vnet_buffer(b0)->gre.dst = ip0->dst_address.as_u32; + vnet_buffer(b1)->gre.src = ip1->src_address.as_u32; + vnet_buffer(b1)->gre.dst = ip1->dst_address.as_u32; + + vlib_buffer_advance (b0, sizeof (*ip0)); + vlib_buffer_advance (b1, sizeof (*ip1)); + + h0 = vlib_buffer_get_current (b0); + h1 = vlib_buffer_get_current (b1); + + /* Index sparse array with network byte order. */ + protocol0 = h0->protocol; + protocol1 = h1->protocol; + sparse_vec_index2 (rt->next_by_protocol, protocol0, protocol1, + &i0, &i1); + next0 = vec_elt(rt->next_by_protocol, i0); + next1 = vec_elt(rt->next_by_protocol, i1); + + b0->error = node->errors[i0 == SPARSE_VEC_INVALID_INDEX ? GRE_ERROR_UNKNOWN_PROTOCOL : GRE_ERROR_NONE]; + b1->error = node->errors[i1 == SPARSE_VEC_INVALID_INDEX ? GRE_ERROR_UNKNOWN_PROTOCOL : GRE_ERROR_NONE]; + + version0 = clib_net_to_host_u16 (h0->flags_and_version); + verr0 = version0 & GRE_VERSION_MASK; + version1 = clib_net_to_host_u16 (h1->flags_and_version); + verr1 = version1 & GRE_VERSION_MASK; + + b0->error = verr0 ? node->errors[GRE_ERROR_UNSUPPORTED_VERSION] + : b0->error; + next0 = verr0 ? GRE_INPUT_NEXT_DROP : next0; + b1->error = verr1 ? node->errors[GRE_ERROR_UNSUPPORTED_VERSION] + : b1->error; + next1 = verr1 ? 
GRE_INPUT_NEXT_DROP : next1; + + + /* RPF check for ip4/ip6 input */ + if (PREDICT_TRUE(next0 == GRE_INPUT_NEXT_IP4_INPUT + || next0 == GRE_INPUT_NEXT_IP6_INPUT + || next0 == GRE_INPUT_NEXT_ETHERNET_INPUT + || next0 == GRE_INPUT_NEXT_MPLS_INPUT)) + { + u64 key = ((u64)(vnet_buffer(b0)->gre.dst) << 32) | + (u64)(vnet_buffer(b0)->gre.src); + + if (cached_tunnel_key != key) + { + vnet_hw_interface_t * hi; + gre_tunnel_t * t; + uword * p; + + p = hash_get (gm->tunnel_by_key, key); + if (!p) + { + next0 = GRE_INPUT_NEXT_DROP; + b0->error = node->errors[GRE_ERROR_NO_SUCH_TUNNEL]; + goto drop0; + } + t = pool_elt_at_index (gm->tunnels, p[0]); + hi = vnet_get_hw_interface (gm->vnet_main, + t->hw_if_index); + tunnel_sw_if_index = hi->sw_if_index; + + cached_tunnel_sw_if_index = tunnel_sw_if_index; + } + else + { + tunnel_sw_if_index = cached_tunnel_sw_if_index; + } + } + else + { + next0 = GRE_INPUT_NEXT_DROP; + goto drop0; + } + len = vlib_buffer_length_in_chain (vm, b0); + vlib_increment_combined_counter (im->combined_sw_if_counters + + VNET_INTERFACE_COUNTER_RX, + cpu_index, + tunnel_sw_if_index, + 1 /* packets */, + len /* bytes */); + + vnet_buffer(b0)->sw_if_index[VLIB_RX] = tunnel_sw_if_index; + +drop0: + if (PREDICT_TRUE(next1 == GRE_INPUT_NEXT_IP4_INPUT + || next1 == GRE_INPUT_NEXT_IP6_INPUT + || next1 == GRE_INPUT_NEXT_ETHERNET_INPUT + || next1 == GRE_INPUT_NEXT_MPLS_INPUT)) + { + u64 key = ((u64)(vnet_buffer(b1)->gre.dst) << 32) | + (u64)(vnet_buffer(b1)->gre.src); + + if (cached_tunnel_key != key) + { + vnet_hw_interface_t * hi; + gre_tunnel_t * t; + uword * p; + + p = hash_get (gm->tunnel_by_key, key); + if (!p) + { + next1 = GRE_INPUT_NEXT_DROP; + b1->error = node->errors[GRE_ERROR_NO_SUCH_TUNNEL]; + goto drop1; + } + t = pool_elt_at_index (gm->tunnels, p[0]); + hi = vnet_get_hw_interface (gm->vnet_main, + t->hw_if_index); + tunnel_sw_if_index = hi->sw_if_index; + + cached_tunnel_sw_if_index = tunnel_sw_if_index; + } + else + { + tunnel_sw_if_index = cached_tunnel_sw_if_index; + } + } + else + { + next1 = GRE_INPUT_NEXT_DROP; + goto drop1; + } + len = vlib_buffer_length_in_chain (vm, b1); + vlib_increment_combined_counter (im->combined_sw_if_counters + + VNET_INTERFACE_COUNTER_RX, + cpu_index, + tunnel_sw_if_index, + 1 /* packets */, + len /* bytes */); + + vnet_buffer(b1)->sw_if_index[VLIB_RX] = tunnel_sw_if_index; + +drop1: + if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) + { + gre_rx_trace_t *tr = vlib_add_trace (vm, node, + b0, sizeof (*tr)); + tr->tunnel_id = tunnel_sw_if_index; + tr->length = ip0->length; + tr->src.as_u32 = ip0->src_address.as_u32; + tr->dst.as_u32 = ip0->dst_address.as_u32; + } + + if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED)) + { + gre_rx_trace_t *tr = vlib_add_trace (vm, node, + b1, sizeof (*tr)); + tr->tunnel_id = tunnel_sw_if_index; + tr->length = ip1->length; + tr->src.as_u32 = ip1->src_address.as_u32; + tr->dst.as_u32 = ip1->dst_address.as_u32; + } + + vlib_buffer_advance (b0, sizeof (*h0)); + vlib_buffer_advance (b1, sizeof (*h1)); + + vlib_validate_buffer_enqueue_x2 (vm, node, next_index, + to_next, n_left_to_next, + bi0, bi1, next0, next1); + } + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0; + vlib_buffer_t * b0; + gre_header_t * h0; + ip4_header_t * ip0; + u16 version0; + int verr0; + u32 i0, next0; + + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + ip0 = vlib_buffer_get_current (b0); + + vnet_buffer(b0)->gre.src = 
ip0->src_address.as_u32; + vnet_buffer(b0)->gre.dst = ip0->dst_address.as_u32; + + vlib_buffer_advance (b0, sizeof (*ip0)); + + h0 = vlib_buffer_get_current (b0); + + i0 = sparse_vec_index (rt->next_by_protocol, h0->protocol); + next0 = vec_elt(rt->next_by_protocol, i0); + + b0->error = + node->errors[i0 == SPARSE_VEC_INVALID_INDEX + ? GRE_ERROR_UNKNOWN_PROTOCOL : GRE_ERROR_NONE]; + + version0 = clib_net_to_host_u16 (h0->flags_and_version); + verr0 = version0 & GRE_VERSION_MASK; + b0->error = verr0 ? node->errors[GRE_ERROR_UNSUPPORTED_VERSION] + : b0->error; + next0 = verr0 ? GRE_INPUT_NEXT_DROP : next0; + + + /* For IP payload we need to find source interface + so we can increase counters and help forward node to + pick right FIB */ + /* RPF check for ip4/ip6 input */ + if (PREDICT_TRUE(next0 == GRE_INPUT_NEXT_IP4_INPUT + || next0 == GRE_INPUT_NEXT_IP6_INPUT + || next0 == GRE_INPUT_NEXT_ETHERNET_INPUT + || next0 == GRE_INPUT_NEXT_MPLS_INPUT)) + { + u64 key = ((u64)(vnet_buffer(b0)->gre.dst) << 32) | + (u64)(vnet_buffer(b0)->gre.src); + + if (cached_tunnel_key != key) + { + vnet_hw_interface_t * hi; + gre_tunnel_t * t; + uword * p; + + p = hash_get (gm->tunnel_by_key, key); + if (!p) + { + next0 = GRE_INPUT_NEXT_DROP; + b0->error = node->errors[GRE_ERROR_NO_SUCH_TUNNEL]; + goto drop; + } + t = pool_elt_at_index (gm->tunnels, p[0]); + hi = vnet_get_hw_interface (gm->vnet_main, + t->hw_if_index); + tunnel_sw_if_index = hi->sw_if_index; + + cached_tunnel_sw_if_index = tunnel_sw_if_index; + } + else + { + tunnel_sw_if_index = cached_tunnel_sw_if_index; + } + } + else + { + next0 = GRE_INPUT_NEXT_DROP; + goto drop; + } + len = vlib_buffer_length_in_chain (vm, b0); + vlib_increment_combined_counter (im->combined_sw_if_counters + + VNET_INTERFACE_COUNTER_RX, + cpu_index, + tunnel_sw_if_index, + 1 /* packets */, + len /* bytes */); + + vnet_buffer(b0)->sw_if_index[VLIB_RX] = tunnel_sw_if_index; + +drop: + if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) + { + gre_rx_trace_t *tr = vlib_add_trace (vm, node, + b0, sizeof (*tr)); + tr->tunnel_id = tunnel_sw_if_index; + tr->length = ip0->length; + tr->src.as_u32 = ip0->src_address.as_u32; + tr->dst.as_u32 = ip0->dst_address.as_u32; + } + + vlib_buffer_advance (b0, sizeof (*h0)); + + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, + bi0, next0); + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + vlib_node_increment_counter (vm, gre_input_node.index, + GRE_ERROR_PKTS_DECAP, from_frame->n_vectors); + return from_frame->n_vectors; +} + +static char * gre_error_strings[] = { +#define gre_error(n,s) s, +#include "error.def" +#undef gre_error +}; + +VLIB_REGISTER_NODE (gre_input_node) = { + .function = gre_input, + .name = "gre-input", + /* Takes a vector of packets. 
*/ + .vector_size = sizeof (u32), + + .runtime_data_bytes = sizeof (gre_input_runtime_t), + + .n_errors = GRE_N_ERROR, + .error_strings = gre_error_strings, + + .n_next_nodes = GRE_INPUT_N_NEXT, + .next_nodes = { +#define _(s,n) [GRE_INPUT_NEXT_##s] = n, + foreach_gre_input_next +#undef _ + }, + + .format_buffer = format_gre_header_with_length, + .format_trace = format_gre_rx_trace, + .unformat_buffer = unformat_gre_header, +}; + +VLIB_NODE_FUNCTION_MULTIARCH (gre_input_node, gre_input) + +void +gre_register_input_protocol (vlib_main_t * vm, + gre_protocol_t protocol, + u32 node_index) +{ + gre_main_t * em = &gre_main; + gre_protocol_info_t * pi; + gre_input_runtime_t * rt; + u16 * n; + + { + clib_error_t * error = vlib_call_init_function (vm, gre_input_init); + if (error) + clib_error_report (error); + } + + pi = gre_get_protocol_info (em, protocol); + pi->node_index = node_index; + pi->next_index = vlib_node_add_next (vm, + gre_input_node.index, + node_index); + + /* Setup gre protocol -> next index sparse vector mapping. */ + rt = vlib_node_get_runtime_data (vm, gre_input_node.index); + n = sparse_vec_validate (rt->next_by_protocol, + clib_host_to_net_u16 (protocol)); + n[0] = pi->next_index; +} + +static void +gre_setup_node (vlib_main_t * vm, u32 node_index) +{ + vlib_node_t * n = vlib_get_node (vm, node_index); + pg_node_t * pn = pg_get_node (node_index); + + n->format_buffer = format_gre_header_with_length; + n->unformat_buffer = unformat_gre_header; + pn->unformat_edit = unformat_pg_gre_header; +} + +static clib_error_t * gre_input_init (vlib_main_t * vm) +{ + gre_input_runtime_t * rt; + vlib_node_t *ethernet_input, *ip4_input, *ip6_input, *mpls_unicast_input; + + { + clib_error_t * error; + error = vlib_call_init_function (vm, gre_init); + if (error) + clib_error_report (error); + } + + gre_setup_node (vm, gre_input_node.index); + + rt = vlib_node_get_runtime_data (vm, gre_input_node.index); + + rt->next_by_protocol = sparse_vec_new + (/* elt bytes */ sizeof (rt->next_by_protocol[0]), + /* bits in index */ BITS (((gre_header_t *) 0)->protocol)); + + /* These could be moved to the supported protocol input node defn's */ + ethernet_input = vlib_get_node_by_name (vm, (u8 *)"ethernet-input"); + ASSERT(ethernet_input); + ip4_input = vlib_get_node_by_name (vm, (u8 *)"ip4-input"); + ASSERT(ip4_input); + ip6_input = vlib_get_node_by_name (vm, (u8 *)"ip6-input"); + ASSERT(ip6_input); + mpls_unicast_input = vlib_get_node_by_name (vm, (u8 *)"mpls-input"); + ASSERT(mpls_unicast_input); + + gre_register_input_protocol (vm, GRE_PROTOCOL_teb, + ethernet_input->index); + + gre_register_input_protocol (vm, GRE_PROTOCOL_ip4, + ip4_input->index); + + gre_register_input_protocol (vm, GRE_PROTOCOL_ip6, + ip6_input->index); + + gre_register_input_protocol (vm, GRE_PROTOCOL_mpls_unicast, + mpls_unicast_input->index); + + ip4_register_protocol (IP_PROTOCOL_GRE, gre_input_node.index); + + return 0; +} + +VLIB_INIT_FUNCTION (gre_input_init); diff --git a/vpp/vnet/vnet/gre/packet.h b/vpp/vnet/vnet/gre/packet.h new file mode 100644 index 00000000..cc2ccda9 --- /dev/null +++ b/vpp/vnet/vnet/gre/packet.h @@ -0,0 +1,55 @@ +#ifndef included_vnet_gre_packet_h +#define included_vnet_gre_packet_h + +/* + * GRE packet format + * + * Copyright (c) 2012 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define foreach_gre_protocol \ +_ (0x0800, ip4) \ +_ (0x86DD, ip6) \ +_ (0x6558, teb) \ +_ (0x0806, arp) \ +_ (0x8847, mpls_unicast) \ +_ (0x894F, nsh) + +typedef enum { +#define _(n,f) GRE_PROTOCOL_##f = n, + foreach_gre_protocol +#undef _ +} gre_protocol_t; + +typedef struct { + /* flags and version */ + u16 flags_and_version; + /* unimplemented at the moment */ +#define GRE_FLAGS_CHECKSUM (1 << 15) + + /* deprecated, according to rfc2784 */ +#define GRE_FLAGS_ROUTING (1 << 14) +#define GRE_FLAGS_KEY (1 << 13) +#define GRE_FLAGS_SEQUENCE (1 << 12) +#define GRE_FLAGS_STRICT_SOURCE_ROUTE (1 << 11) + + /* version 1 is PPTP which we don't support */ +#define GRE_SUPPORTED_VERSION 0 +#define GRE_VERSION_MASK 0x7 + + /* 0x800 for ip4, etc. */ + u16 protocol; +} gre_header_t; + +#endif /* included_vnet_gre_packet_h */ diff --git a/vpp/vnet/vnet/gre/pg.c b/vpp/vnet/vnet/gre/pg.c new file mode 100644 index 00000000..cc065d3b --- /dev/null +++ b/vpp/vnet/vnet/gre/pg.c @@ -0,0 +1,77 @@ +/* + * hdlc_pg.c: packet generator gre interface + * + * Copyright (c) 2012 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include + +typedef struct { + pg_edit_t flags_and_version; + pg_edit_t protocol; +} pg_gre_header_t; + +static inline void +pg_gre_header_init (pg_gre_header_t * e) +{ + pg_edit_init (&e->flags_and_version, gre_header_t, flags_and_version); + pg_edit_init (&e->protocol, gre_header_t, protocol); +} + +uword +unformat_pg_gre_header (unformat_input_t * input, va_list * args) +{ + pg_stream_t * s = va_arg (*args, pg_stream_t *); + pg_gre_header_t * h; + u32 group_index, error; + + h = pg_create_edit_group (s, sizeof (h[0]), sizeof (gre_header_t), + &group_index); + pg_gre_header_init (h); + + pg_edit_set_fixed (&h->flags_and_version, 0); + + error = 1; + if (! unformat (input, "%U", + unformat_pg_edit, + unformat_gre_protocol_net_byte_order, &h->protocol)) + goto done; + + { + gre_main_t * pm = &gre_main; + gre_protocol_info_t * pi = 0; + pg_node_t * pg_node = 0; + + if (h->protocol.type == PG_EDIT_FIXED) + { + u16 t = *(u16 *) h->protocol.values[PG_EDIT_LO]; + pi = gre_get_protocol_info (pm, clib_net_to_host_u16 (t)); + if (pi && pi->node_index != ~0) + pg_node = pg_get_node (pi->node_index); + } + + if (pg_node && pg_node->unformat_edit + && unformat_user (input, pg_node->unformat_edit, s)) + ; + } + + error = 0; + done: + if (error) + pg_free_edit_group (s); + return error == 0; +} + -- cgit 1.2.3-korg
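Editor's note — usage sketch (not part of the patch): the tunnel create/delete API this change introduces (vnet_gre_add_del_tunnel_args_t and vnet_gre_add_del_tunnel in gre.h, wired to the "create gre tunnel src <addr> dst <addr> [outer-fib-id <id>] [teb] [del]" CLI in interface.c) can also be driven programmatically. The snippet below is an illustrative sketch only; it assumes it is compiled inside a VPP application that links against vnet, and the function name and address literals are made up for the example.

/* Illustrative sketch: create an L3 GRE tunnel via the API added by this
 * patch.  Assumes the vnet include path of a VPP build; addresses and the
 * helper name are example values, not part of the patch. */
#include <vnet/vnet.h>
#include <vnet/gre/gre.h>

static int
example_create_gre_tunnel (void)
{
  vnet_gre_add_del_tunnel_args_t a;
  u32 sw_if_index = ~0;
  int rv;

  memset (&a, 0, sizeof (a));
  a.is_add = 1;                 /* 0 would delete the tunnel instead */
  a.teb = 0;                    /* 1 selects Transparent Ethernet Bridging (L2) mode */
  a.outer_fib_id = 0;           /* id of the FIB carrying the outer IPv4 header */
  a.src.as_u32 = clib_host_to_net_u32 (0x0a000001);  /* 10.0.0.1 (example) */
  a.dst.as_u32 = clib_host_to_net_u32 (0x0a000002);  /* 10.0.0.2 (example) */

  rv = vnet_gre_add_del_tunnel (&a, &sw_if_index);
  /* rv is 0 on success (sw_if_index holds the new interface),
   * VNET_API_ERROR_INVALID_VALUE if the src/dst pair already exists,
   * VNET_API_ERROR_NO_SUCH_FIB if outer_fib_id is unknown. */
  return rv;
}

The equivalent CLI invocation registered in interface.c would be, for example, "create gre tunnel src 10.0.0.1 dst 10.0.0.2", with "teb" appended to create the tunnel in L2 mode.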