aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorNeale Ranns <nranns@cisco.com>2017-11-05 16:26:46 -0800
committerFlorin Coras <florin.coras@gmail.com>2017-11-07 16:13:42 +0000
commit810086d8fd08445919ae03bf36161037e53a712a (patch)
tree76a91d3ed49759ef3adae32066f9dcedd75df889
parent595992c5c3b5abbdb7e90e61acbee212f25ad59f (diff)
UDP Encapsulation.
A UDP-encap object that particiapates in the FIB graph and contributes DPO to teh output chain. It thereofre resembles a tunnel but without the interface. FIB paths (and henace routes) can then be created to egress through the UDP-encap. Said routes can have MPLS labels, hence this also allows MPLSoUPD. Encap is uni-directional. For decap, one still registers with the UDP port dispatcher. Change-Id: I23bd345523b20789a1de1b02022ea1148ca50797 Signed-off-by: Neale Ranns <nranns@cisco.com>
-rw-r--r--src/vnet.am7
-rw-r--r--src/vnet/fib/fib_api.h2
-rw-r--r--src/vnet/fib/fib_node.h2
-rw-r--r--src/vnet/fib/fib_path.c73
-rw-r--r--src/vnet/fib/fib_types.h8
-rw-r--r--src/vnet/ip/ip.api12
-rw-r--r--src/vnet/ip/ip_api.c13
-rw-r--r--src/vnet/ip/lookup.c9
-rw-r--r--src/vnet/mpls/mpls_api.c4
-rw-r--r--src/vnet/udp/udp.api67
-rw-r--r--src/vnet/udp/udp_api.c193
-rw-r--r--src/vnet/udp/udp_encap.c617
-rw-r--r--src/vnet/udp/udp_encap.h147
-rw-r--r--src/vnet/udp/udp_encap_node.c280
-rw-r--r--src/vnet/vnet_all_api_h.h1
-rw-r--r--test/test_udp.py235
-rw-r--r--test/vpp_ip_route.py8
-rw-r--r--test/vpp_papi_provider.py39
-rw-r--r--test/vpp_udp_encap.py73
19 files changed, 1783 insertions, 7 deletions
diff --git a/src/vnet.am b/src/vnet.am
index 19973947674..f5548307647 100644
--- a/src/vnet.am
+++ b/src/vnet.am
@@ -518,13 +518,18 @@ libvnet_la_SOURCES += \
vnet/udp/builtin_server.c \
vnet/udp/udp_format.c \
vnet/udp/udp_local.c \
- vnet/udp/udp_pg.c
+ vnet/udp/udp_pg.c \
+ vnet/udp/udp_encap_node.c \
+ vnet/udp/udp_encap.c \
+ vnet/udp/udp_api.c
nobase_include_HEADERS += \
vnet/udp/udp_error.def \
vnet/udp/udp.h \
vnet/udp/udp_packet.h
+API_FILES += vnet/udp/udp.api
+
########################################
# Tunnel protocol: gre
########################################
diff --git a/src/vnet/fib/fib_api.h b/src/vnet/fib/fib_api.h
index e5b94e14124..655d305372b 100644
--- a/src/vnet/fib/fib_api.h
+++ b/src/vnet/fib/fib_api.h
@@ -42,10 +42,12 @@ add_del_route_t_handler (u8 is_multipath,
u8 is_rpf_id,
u8 is_l2_bridged,
u8 is_source_lookup,
+ u8 is_udp_encap,
u32 fib_index,
const fib_prefix_t * prefix,
dpo_proto_t next_hop_proto,
const ip46_address_t * next_hop,
+ u32 next_hop_id,
u32 next_hop_sw_if_index,
u8 next_hop_fib_index,
u16 next_hop_weight,
diff --git a/src/vnet/fib/fib_node.h b/src/vnet/fib/fib_node.h
index 762e0cf01af..6d26bdd3671 100644
--- a/src/vnet/fib/fib_node.h
+++ b/src/vnet/fib/fib_node.h
@@ -44,6 +44,7 @@ typedef enum fib_node_type_t_ {
FIB_NODE_TYPE_MAP_E,
FIB_NODE_TYPE_VXLAN_GPE_TUNNEL,
FIB_NODE_TYPE_GENEVE_TUNNEL,
+ FIB_NODE_TYPE_UDP_ENCAP,
/**
* Marker. New types before this one. leave the test last.
*/
@@ -68,6 +69,7 @@ typedef enum fib_node_type_t_ {
[FIB_NODE_TYPE_VXLAN_TUNNEL] = "vxlan-tunnel", \
[FIB_NODE_TYPE_MAP_E] = "map-e", \
[FIB_NODE_TYPE_VXLAN_GPE_TUNNEL] = "vxlan-gpe-tunnel", \
+ [FIB_NODE_TYPE_UDP_ENCAP] = "udp-encap", \
}
/**
diff --git a/src/vnet/fib/fib_path.c b/src/vnet/fib/fib_path.c
index 926b2f3dfc6..4fccca80672 100644
--- a/src/vnet/fib/fib_path.c
+++ b/src/vnet/fib/fib_path.c
@@ -36,6 +36,7 @@
#include <vnet/fib/fib_internal.h>
#include <vnet/fib/fib_urpf_list.h>
#include <vnet/fib/mpls_fib.h>
+#include <vnet/udp/udp_encap.h>
/**
* Enurmeration of path types
@@ -74,6 +75,10 @@ typedef enum fib_path_type_t_ {
*/
FIB_PATH_TYPE_INTF_RX,
/**
+ * UDP encapsulation.
+ */
+ FIB_PATH_TYPE_UDP_ENCAP,
+ /**
* receive. it's for-us.
*/
FIB_PATH_TYPE_RECEIVE,
@@ -96,6 +101,7 @@ typedef enum fib_path_type_t_ {
[FIB_PATH_TYPE_EXCLUSIVE] = "exclusive", \
[FIB_PATH_TYPE_DEAG] = "deag", \
[FIB_PATH_TYPE_INTF_RX] = "intf-rx", \
+ [FIB_PATH_TYPE_UDP_ENCAP] = "udp-encap", \
[FIB_PATH_TYPE_RECEIVE] = "receive", \
}
@@ -285,6 +291,12 @@ typedef struct fib_path_t_ {
*/
u32 fp_interface;
} intf_rx;
+ struct {
+ /**
+ * The UDP Encap object this path resolves through
+ */
+ u32 fp_udp_encap_id;
+ } udp_encap;
};
STRUCT_MARK(path_hash_end);
@@ -479,6 +491,9 @@ format_fib_path (u8 * s, va_list * args)
path->fp_dpo.dpoi_index);
break;
+ case FIB_PATH_TYPE_UDP_ENCAP:
+ s = format (s, " UDP-encap ID:%d", path->udp_encap.fp_udp_encap_id);
+ break;
case FIB_PATH_TYPE_RECEIVE:
case FIB_PATH_TYPE_INTF_RX:
case FIB_PATH_TYPE_SPECIAL:
@@ -784,6 +799,9 @@ fib_path_unresolve (fib_path_t *path)
adj_unlock(path->fp_dpo.dpoi_index);
}
break;
+ case FIB_PATH_TYPE_UDP_ENCAP:
+ udp_encap_unlock_w_index(path->fp_dpo.dpoi_index);
+ break;
case FIB_PATH_TYPE_EXCLUSIVE:
dpo_reset(&path->exclusive.fp_ex_dpo);
break;
@@ -989,6 +1007,33 @@ FIXME comment
path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
}
break;
+ case FIB_PATH_TYPE_UDP_ENCAP:
+ {
+ dpo_id_t via_dpo = DPO_INVALID;
+
+ /*
+ * hope for the best - clear if restrictions apply.
+ */
+ path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
+
+ udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
+ path->fp_nh_proto,
+ &via_dpo);
+ /*
+ * If this path is contributing a drop, then it's not resolved
+ */
+ if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
+ {
+ path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
+ }
+
+ /*
+ * update the path's contributed DPO
+ */
+ dpo_copy(&path->fp_dpo, &via_dpo);
+ dpo_reset(&via_dpo);
+ break;
+ }
case FIB_PATH_TYPE_INTF_RX:
ASSERT(0);
case FIB_PATH_TYPE_DEAG:
@@ -1103,6 +1148,11 @@ fib_path_create (fib_node_index_t pl_index,
path->receive.fp_interface = rpath->frp_sw_if_index;
path->receive.fp_addr = rpath->frp_addr;
}
+ else if (rpath->frp_flags & FIB_ROUTE_PATH_UDP_ENCAP)
+ {
+ path->fp_type = FIB_PATH_TYPE_UDP_ENCAP;
+ path->udp_encap.fp_udp_encap_id = rpath->frp_udp_encap_id;
+ }
else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_INTF_RX)
{
path->fp_type = FIB_PATH_TYPE_INTF_RX;
@@ -1346,6 +1396,9 @@ fib_path_cmp_i (const fib_path_t *path1,
case FIB_PATH_TYPE_INTF_RX:
res = (path1->intf_rx.fp_interface - path2->intf_rx.fp_interface);
break;
+ case FIB_PATH_TYPE_UDP_ENCAP:
+ res = (path1->udp_encap.fp_udp_encap_id - path2->udp_encap.fp_udp_encap_id);
+ break;
case FIB_PATH_TYPE_SPECIAL:
case FIB_PATH_TYPE_RECEIVE:
case FIB_PATH_TYPE_EXCLUSIVE:
@@ -1460,6 +1513,9 @@ fib_path_cmp_w_route_path (fib_node_index_t path_index,
case FIB_PATH_TYPE_INTF_RX:
res = (path->intf_rx.fp_interface - rpath->frp_sw_if_index);
break;
+ case FIB_PATH_TYPE_UDP_ENCAP:
+ res = (path->udp_encap.fp_udp_encap_id - rpath->frp_udp_encap_id);
+ break;
case FIB_PATH_TYPE_DEAG:
res = (path->deag.fp_tbl_id - rpath->frp_fib_index);
if (0 == res)
@@ -1565,6 +1621,7 @@ fib_path_recursive_loop_detect (fib_node_index_t path_index,
case FIB_PATH_TYPE_DEAG:
case FIB_PATH_TYPE_RECEIVE:
case FIB_PATH_TYPE_INTF_RX:
+ case FIB_PATH_TYPE_UDP_ENCAP:
case FIB_PATH_TYPE_EXCLUSIVE:
/*
* these path types cannot be part of a loop, since they are the leaves
@@ -1724,6 +1781,12 @@ fib_path_resolve (fib_node_index_t path_index)
&path->receive.fp_addr,
&path->fp_dpo);
break;
+ case FIB_PATH_TYPE_UDP_ENCAP:
+ udp_encap_lock(path->udp_encap.fp_udp_encap_id);
+ udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
+ path->fp_nh_proto,
+ &path->fp_dpo);
+ break;
case FIB_PATH_TYPE_INTF_RX: {
/*
* Resolve via a receive DPO.
@@ -1766,6 +1829,7 @@ fib_path_get_resolving_interface (fib_node_index_t path_index)
}
break;
case FIB_PATH_TYPE_INTF_RX:
+ case FIB_PATH_TYPE_UDP_ENCAP:
case FIB_PATH_TYPE_SPECIAL:
case FIB_PATH_TYPE_DEAG:
case FIB_PATH_TYPE_EXCLUSIVE:
@@ -1872,6 +1936,7 @@ fib_path_contribute_urpf (fib_node_index_t path_index,
case FIB_PATH_TYPE_DEAG:
case FIB_PATH_TYPE_RECEIVE:
case FIB_PATH_TYPE_INTF_RX:
+ case FIB_PATH_TYPE_UDP_ENCAP:
/*
* these path types don't link to an adj
*/
@@ -1905,12 +1970,13 @@ fib_path_stack_mpls_disp (fib_node_index_t path_index,
&tmp));
dpo_reset(&tmp);
break;
- }
+ }
case FIB_PATH_TYPE_RECEIVE:
case FIB_PATH_TYPE_ATTACHED:
case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
case FIB_PATH_TYPE_RECURSIVE:
case FIB_PATH_TYPE_INTF_RX:
+ case FIB_PATH_TYPE_UDP_ENCAP:
case FIB_PATH_TYPE_EXCLUSIVE:
case FIB_PATH_TYPE_SPECIAL:
break;
@@ -2072,6 +2138,11 @@ fib_path_contribute_forwarding (fib_node_index_t path_index,
path->attached.fp_interface,
dpo);
break;
+ case FIB_PATH_TYPE_UDP_ENCAP:
+ udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
+ path->fp_nh_proto,
+ dpo);
+ break;
case FIB_PATH_TYPE_RECEIVE:
case FIB_PATH_TYPE_SPECIAL:
dpo_copy(dpo, &path->fp_dpo);
diff --git a/src/vnet/fib/fib_types.h b/src/vnet/fib/fib_types.h
index 7eadbb9bb70..ec7f1b374dd 100644
--- a/src/vnet/fib/fib_types.h
+++ b/src/vnet/fib/fib_types.h
@@ -307,6 +307,10 @@ typedef enum fib_route_path_flags_t_
* A deag path using the packet's source not destination address.
*/
FIB_ROUTE_PATH_SOURCE_LOOKUP = (1 << 8),
+ /**
+ * A path via a UDP encap object.
+ */
+ FIB_ROUTE_PATH_UDP_ENCAP = (1 << 9),
} fib_route_path_flags_t;
/**
@@ -375,6 +379,10 @@ typedef struct fib_route_path_t_ {
* The RPF-ID
*/
fib_rpf_id_t frp_rpf_id;
+ /**
+ * UDP encap ID
+ */
+ u32 frp_udp_encap_id;
};
/**
* The FIB index to lookup the nexthop
diff --git a/src/vnet/ip/ip.api b/src/vnet/ip/ip.api
index df3ae9646c4..85e4b8e04c3 100644
--- a/src/vnet/ip/ip.api
+++ b/src/vnet/ip/ip.api
@@ -368,13 +368,18 @@ autoreply define sw_interface_ip6_set_link_local_address
@param is_unreach - Drop the packet and rate limit send ICMP unreachable
@param is_prohibit - Drop the packet and rate limit send ICMP prohibited
@param is_ipv6 - 0 if an ip4 route, else ip6
- @param is_local -
+ @param is_local - The route will result in packets sent to VPP IP stack
+ @param is_udp_encap - The path describes a UDP-o-IP encapsulation.
@param is_classify -
@param is_multipath - Set to 1 if this is a multipath route, else 0
@param is_source_lookup - The the path is a deaggregate path (i.e. a lookup
in another table) is the lookup on the packet's
source address or destination.
- @param next_hop_weight -
+ @param next_hop_weight - Weight for Unequal cost multi-path
+ @param next_hop_preference - Paths that are up and have the best (lowest)
+ preference are used for forwarding.
+ @param next_hop_id - Used when the path resolves via an object that has a unique
+ identifier.
@param dst_address_length -
@param dst_address[16] -
@param next_hop_address[16] -
@@ -390,6 +395,7 @@ autoreply define ip_add_del_route
u32 table_id;
u32 classify_table_index;
u32 next_hop_table_id;
+ u32 next_hop_id;
u8 create_vrf_if_needed;
u8 is_add;
u8 is_drop;
@@ -403,8 +409,10 @@ autoreply define ip_add_del_route
u8 is_resolve_attached;
u8 is_l2_bridged;
u8 is_source_lookup;
+ u8 is_udp_encap;
u8 next_hop_weight;
u8 next_hop_preference;
+ u8 next_hop_proto;
u8 dst_address_length;
u8 dst_address[16];
u8 next_hop_address[16];
diff --git a/src/vnet/ip/ip_api.c b/src/vnet/ip/ip_api.c
index fad518f18a5..20e19205756 100644
--- a/src/vnet/ip/ip_api.c
+++ b/src/vnet/ip/ip_api.c
@@ -832,10 +832,12 @@ add_del_route_t_handler (u8 is_multipath,
u8 is_rpf_id,
u8 is_l2_bridged,
u8 is_source_lookup,
+ u8 is_udp_encap,
u32 fib_index,
const fib_prefix_t * prefix,
dpo_proto_t next_hop_proto,
const ip46_address_t * next_hop,
+ u32 next_hop_id,
u32 next_hop_sw_if_index,
u8 next_hop_fib_index,
u16 next_hop_weight,
@@ -883,6 +885,11 @@ add_del_route_t_handler (u8 is_multipath,
path_flags |= FIB_ROUTE_PATH_SOURCE_LOOKUP;
if (is_multicast)
entry_flags |= FIB_ENTRY_FLAG_MULTICAST;
+ if (is_udp_encap)
+ {
+ path_flags |= FIB_ROUTE_PATH_UDP_ENCAP;
+ path.frp_udp_encap_id = next_hop_id;
+ }
path.frp_flags = path_flags;
@@ -1112,8 +1119,10 @@ ip4_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp)
mp->is_resolve_attached, 0, 0,
mp->is_l2_bridged,
mp->is_source_lookup,
+ mp->is_udp_encap,
fib_index, &pfx, DPO_PROTO_IP4,
&nh,
+ ntohl (mp->next_hop_id),
ntohl (mp->next_hop_sw_if_index),
next_hop_fib_index,
mp->next_hop_weight,
@@ -1173,8 +1182,10 @@ ip6_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp)
mp->is_resolve_attached, 0, 0,
mp->is_l2_bridged,
mp->is_source_lookup,
+ mp->is_udp_encap,
fib_index, &pfx, DPO_PROTO_IP6,
- &nh, ntohl (mp->next_hop_sw_if_index),
+ &nh, ntohl (mp->next_hop_id),
+ ntohl (mp->next_hop_sw_if_index),
next_hop_fib_index,
mp->next_hop_weight,
mp->next_hop_preference,
diff --git a/src/vnet/ip/lookup.c b/src/vnet/ip/lookup.c
index 3d5dc36c9a0..a376e51d789 100644
--- a/src/vnet/ip/lookup.c
+++ b/src/vnet/ip/lookup.c
@@ -368,11 +368,11 @@ vnet_ip_route_cmd (vlib_main_t * vm,
unformat_input_t _line_input, *line_input = &_line_input;
fib_route_path_t *rpaths = NULL, rpath;
dpo_id_t dpo = DPO_INVALID, *dpos = NULL;
+ u32 table_id, is_del, udp_encap_id;
fib_prefix_t *prefixs = NULL, pfx;
mpls_label_t out_label, via_label;
clib_error_t *error = NULL;
u32 weight, preference;
- u32 table_id, is_del;
vnet_main_t *vnm;
u32 fib_index;
f64 count;
@@ -527,6 +527,13 @@ vnet_ip_route_cmd (vlib_main_t * vm,
rpath.frp_proto = DPO_PROTO_IP6;
vec_add1 (rpaths, rpath);
}
+ else if (unformat (line_input, "via udp-encap %d", &udp_encap_id))
+ {
+ rpath.frp_udp_encap_id = udp_encap_id;
+ rpath.frp_flags |= FIB_ROUTE_PATH_UDP_ENCAP;
+ rpath.frp_proto = fib_proto_to_dpo (pfx.fp_proto);
+ vec_add1 (rpaths, rpath);
+ }
else if (unformat (line_input,
"lookup in table %d", &rpath.frp_fib_index))
{
diff --git a/src/vnet/mpls/mpls_api.c b/src/vnet/mpls/mpls_api.c
index c47e94dd34a..a55daa2b0c4 100644
--- a/src/vnet/mpls/mpls_api.c
+++ b/src/vnet/mpls/mpls_api.c
@@ -234,9 +234,11 @@ mpls_route_add_del_t_handler (vnet_main_t * vnm,
mp->mr_is_rpf_id,
0, // l2_bridged
0, // is source_lookup
+ 0, // is_udp_encap
fib_index, &pfx,
mp->mr_next_hop_proto,
- &nh, ntohl (mp->mr_next_hop_sw_if_index),
+ &nh, ~0, // next_hop_id
+ ntohl (mp->mr_next_hop_sw_if_index),
next_hop_fib_index,
mp->mr_next_hop_weight,
mp->mr_next_hop_preference,
diff --git a/src/vnet/udp/udp.api b/src/vnet/udp/udp.api
new file mode 100644
index 00000000000..25ee96489a2
--- /dev/null
+++ b/src/vnet/udp/udp.api
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** \file
+
+ This file defines vpp UDP control-plane API messages which are generally
+ called through a shared memory interface.
+*/
+
+vl_api_version 1.0.0
+
+/** \brief Add / del a UDP encap object
+    An encap object is keyed by its ID; add once, delete once.
+    @param client_index - opaque cookie to identify the sender
+    @param context - sender context, to match reply w/ request
+    @param is_ip6 - encap destination is a V4 or V6 address
+    @param table_id - table ID associated with the encap destination
+*/
+autoreply define udp_encap_add_del
+{
+ u32 client_index;
+ u32 context;
+ u32 id;
+ u32 table_id;
+ u8 is_ip6;
+ u8 is_add;
+ u16 src_port;
+ u16 dst_port;
+ u8 src_ip[16];
+ u8 dst_ip[16];
+};
+
+define udp_encap_dump
+{
+ u32 client_index;
+ u32 context;
+};
+
+define udp_encap_details
+{
+ u32 context;
+ u32 id;
+ u32 table_id;
+ u8 is_ip6;
+ u16 src_port;
+ u16 dst_port;
+ u8 src_ip[16];
+ u8 dst_ip[16];
+};
+
+/*
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/udp/udp_api.c b/src/vnet/udp/udp_api.c
new file mode 100644
index 00000000000..e65235a5396
--- /dev/null
+++ b/src/vnet/udp/udp_api.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vlibmemory/api.h>
+
+#include <vnet/udp/udp_encap.h>
+#include <vnet/fib/fib_table.h>
+
+#include <vnet/vnet_msg_enum.h>
+
+#define vl_typedefs /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <vnet/vnet_all_api_h.h>
+#undef vl_printfun
+
+#include <vlibapi/api_helper_macros.h>
+
+
+#define foreach_udp_api_msg \
+ _(UDP_ENCAP_ADD_DEL, udp_encap_add_del) \
+_(UDP_ENCAP_DUMP, udp_encap_dump)
+
+static void
+send_udp_encap_details (const udp_encap_t * ue,
+ unix_shared_memory_queue_t * q, u32 context)
+{
+ vl_api_udp_encap_details_t *mp;
+ fib_table_t *fib_table;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_UDP_ENCAP_DETAILS);
+ mp->context = context;
+
+ mp->is_ip6 = (ue->ue_ip_proto == FIB_PROTOCOL_IP6);
+
+ if (FIB_PROTOCOL_IP4 == ue->ue_ip_proto)
+ {
+ clib_memcpy (mp->src_ip, &ue->ue_hdrs.ip4.ue_ip4.src_address, 4);
+ clib_memcpy (mp->dst_ip, &ue->ue_hdrs.ip4.ue_ip4.dst_address, 4);
+ mp->src_port = htons (ue->ue_hdrs.ip4.ue_udp.src_port);
+ mp->dst_port = htons (ue->ue_hdrs.ip4.ue_udp.dst_port);
+ }
+ else
+ {
+ clib_memcpy (mp->src_ip, &ue->ue_hdrs.ip6.ue_ip6.src_address, 16);
+ clib_memcpy (mp->dst_ip, &ue->ue_hdrs.ip6.ue_ip6.dst_address, 16);
+ mp->src_port = htons (ue->ue_hdrs.ip6.ue_udp.src_port);
+ mp->dst_port = htons (ue->ue_hdrs.ip6.ue_udp.dst_port);
+ }
+
+ fib_table = fib_table_get (ue->ue_fib_index, ue->ue_ip_proto);
+ mp->table_id = htonl (fib_table->ft_table_id);
+ mp->id = htonl (ue->ue_id);
+
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+}
+
+static void
+vl_api_udp_encap_dump_t_handler (vl_api_udp_encap_dump_t * mp,
+ vlib_main_t * vm)
+{
+ unix_shared_memory_queue_t *q;
+ udp_encap_t *ue;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ return;
+
+ /* *INDENT-OFF* */
+ pool_foreach(ue, udp_encap_pool,
+ ({
+ send_udp_encap_details(ue, q, mp->context);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+vl_api_udp_encap_add_del_t_handler (vl_api_udp_encap_add_del_t * mp,
+ vlib_main_t * vm)
+{
+ vl_api_udp_encap_add_del_reply_t *rmp;
+ ip46_address_t src_ip, dst_ip;
+ u32 fib_index, table_id, ue_id;
+ fib_protocol_t fproto;
+ int rv = 0;
+
+ ue_id = ntohl(mp->id);
+ table_id = ntohl(mp->table_id);
+ fproto = (mp->is_ip6 ? FIB_PROTOCOL_IP6 : FIB_PROTOCOL_IP4);
+
+ fib_index = fib_table_find(fproto, table_id);
+
+ if (~0 == fib_index)
+ {
+ rv = VNET_API_ERROR_NO_SUCH_TABLE;
+ goto done;
+ }
+
+ if (FIB_PROTOCOL_IP4 == fproto)
+ {
+ clib_memcpy(&src_ip.ip4, mp->src_ip, 4);
+ clib_memcpy(&dst_ip.ip4, mp->dst_ip, 4);
+ }
+ else
+ {
+ clib_memcpy(&src_ip.ip6, mp->src_ip, 16);
+ clib_memcpy(&dst_ip.ip6, mp->dst_ip, 16);
+ }
+
+ if (mp->is_add)
+ {
+ udp_encap_add_and_lock(ue_id, fproto, fib_index,
+ &src_ip, &dst_ip,
+ ntohs(mp->src_port),
+ ntohs(mp->dst_port),
+ UDP_ENCAP_FIXUP_NONE);
+ }
+ else
+ {
+ udp_encap_unlock(ue_id);
+ }
+
+ done:
+ REPLY_MACRO (VL_API_UDP_ENCAP_ADD_DEL_REPLY);
+}
+
+#define vl_msg_name_crc_list
+#include <vnet/udp/udp.api.h>
+#undef vl_msg_name_crc_list
+
+static void
+setup_message_id_table (api_main_t * am)
+{
+#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
+ foreach_vl_msg_name_crc_udp;
+#undef _
+}
+
+static clib_error_t *
+udp_api_hookup (vlib_main_t * vm)
+{
+ api_main_t *am = &api_main;
+
+#define _(N,n) \
+ vl_msg_api_set_handlers(VL_API_##N, #n, \
+ vl_api_##n##_t_handler, \
+ vl_noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_udp_api_msg;
+#undef _
+
+ /*
+ * Set up the (msg_name, crc, message-id) table
+ */
+ setup_message_id_table (am);
+
+ return 0;
+}
+
+VLIB_API_INIT_FUNCTION (udp_api_hookup);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/udp/udp_encap.c b/src/vnet/udp/udp_encap.c
new file mode 100644
index 00000000000..98b824ba866
--- /dev/null
+++ b/src/vnet/udp/udp_encap.c
@@ -0,0 +1,617 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/udp/udp_encap.h>
+#include <vnet/fib/fib_entry.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/dpo/drop_dpo.h>
+
+/**
+ * Registered DPO types for the IP header encapsulated, v4 or v6.
+ */
+dpo_type_t udp_encap_dpo_types[FIB_PROTOCOL_MAX];
+
+/**
+ * Hash DB to map from client ID to VPP index.
+ */
+uword *udp_encap_db;
+
+/**
+ * Pool of encaps
+ */
+udp_encap_t *udp_encap_pool;
+
+static udp_encap_t *
+udp_encap_get_w_id (u32 id)
+{
+ udp_encap_t *ue = NULL;
+ index_t uei;
+
+ uei = udp_encap_find (id);
+
+ if (INDEX_INVALID != uei)
+ {
+ ue = udp_encap_get (uei);
+ }
+
+ return (ue);
+}
+
+static void
+udp_encap_restack (udp_encap_t * ue)
+{
+ dpo_stack (udp_encap_dpo_types[ue->ue_ip_proto],
+ fib_proto_to_dpo (ue->ue_ip_proto),
+ &ue->ue_dpo,
+ fib_entry_contribute_ip_forwarding (ue->ue_fib_entry_index));
+}
+
+index_t
+udp_encap_add_and_lock (u32 id,
+ fib_protocol_t proto,
+ index_t fib_index,
+ const ip46_address_t * src_ip,
+ const ip46_address_t * dst_ip,
+ u16 src_port,
+ u16 dst_port, udp_encap_fixup_flags_t flags)
+{
+ udp_encap_t *ue;
+ index_t uei;
+
+ uei = udp_encap_find (id);
+
+ if (INDEX_INVALID == uei)
+ {
+ u8 pfx_len = 0;
+
+ pool_get (udp_encap_pool, ue);
+ uei = ue - udp_encap_pool;
+
+ hash_set (udp_encap_db, id, uei);
+
+ fib_node_init (&ue->ue_fib_node, FIB_NODE_TYPE_UDP_ENCAP);
+ fib_node_lock (&ue->ue_fib_node);
+ ue->ue_fib_index = fib_index;
+ ue->ue_flags = flags;
+ ue->ue_id = id;
+ ue->ue_ip_proto = proto;
+
+ switch (proto)
+ {
+ case FIB_PROTOCOL_IP4:
+ pfx_len = 32;
+ ue->ue_hdrs.ip4.ue_ip4.ip_version_and_header_length = 0x45;
+ ue->ue_hdrs.ip4.ue_ip4.ttl = 254;
+ ue->ue_hdrs.ip4.ue_ip4.protocol = IP_PROTOCOL_UDP;
+ ue->ue_hdrs.ip4.ue_ip4.src_address.as_u32 = src_ip->ip4.as_u32;
+ ue->ue_hdrs.ip4.ue_ip4.dst_address.as_u32 = dst_ip->ip4.as_u32;
+ ue->ue_hdrs.ip4.ue_ip4.checksum =
+ ip4_header_checksum (&ue->ue_hdrs.ip4.ue_ip4);
+ ue->ue_hdrs.ip4.ue_udp.src_port = clib_host_to_net_u16 (src_port);
+ ue->ue_hdrs.ip4.ue_udp.dst_port = clib_host_to_net_u16 (dst_port);
+
+ break;
+ case FIB_PROTOCOL_IP6:
+ pfx_len = 128;
+ ue->ue_hdrs.ip6.ue_ip6.ip_version_traffic_class_and_flow_label =
+ clib_host_to_net_u32 (6 << 28);
+ ue->ue_hdrs.ip6.ue_ip6.hop_limit = 255;
+ ue->ue_hdrs.ip6.ue_ip6.protocol = IP_PROTOCOL_UDP;
+ ue->ue_hdrs.ip6.ue_ip6.src_address.as_u64[0] =
+ src_ip->ip6.as_u64[0];
+ ue->ue_hdrs.ip6.ue_ip6.src_address.as_u64[1] =
+ src_ip->ip6.as_u64[1];
+ ue->ue_hdrs.ip6.ue_ip6.dst_address.as_u64[0] =
+ dst_ip->ip6.as_u64[0];
+ ue->ue_hdrs.ip6.ue_ip6.dst_address.as_u64[1] =
+ dst_ip->ip6.as_u64[1];
+ ue->ue_hdrs.ip6.ue_udp.src_port = clib_host_to_net_u16 (src_port);
+ ue->ue_hdrs.ip6.ue_udp.dst_port = clib_host_to_net_u16 (dst_port);
+
+ break;
+ default:
+ ASSERT (0);
+ }
+
+ /*
+ * track the destination address
+ */
+ fib_prefix_t dst_pfx = {
+ .fp_proto = proto,
+ .fp_len = pfx_len,
+ .fp_addr = *dst_ip,
+ };
+
+ ue->ue_fib_entry_index =
+ fib_table_entry_special_add (fib_index,
+ &dst_pfx,
+ FIB_SOURCE_RR, FIB_ENTRY_FLAG_NONE);
+ ue->ue_fib_sibling =
+ fib_entry_child_add (ue->ue_fib_entry_index,
+ FIB_NODE_TYPE_UDP_ENCAP, uei);
+
+ udp_encap_restack (ue);
+ }
+ else
+ {
+ /*
+ * existing entry. updates not supported yet
+ */
+ uei = INDEX_INVALID;
+ }
+ return (uei);
+}
+
+void
+udp_encap_contribute_forwarding (u32 id, dpo_proto_t proto, dpo_id_t * dpo)
+{
+ index_t uei;
+
+ uei = udp_encap_find (id);
+
+ if (INDEX_INVALID == uei)
+ {
+ dpo_copy (dpo, drop_dpo_get (proto));
+ }
+ else
+ {
+ udp_encap_t *ue;
+
+ ue = udp_encap_get (uei);
+
+ dpo_set (dpo, udp_encap_dpo_types[ue->ue_ip_proto], proto, uei);
+ }
+}
+
+index_t
+udp_encap_find (u32 id)
+{
+ uword *p;
+
+ p = hash_get (udp_encap_db, id);
+
+ if (NULL != p)
+ return p[0];
+
+ return INDEX_INVALID;
+}
+
+void
+udp_encap_lock (u32 id)
+{
+ udp_encap_t *ue;
+
+ ue = udp_encap_get_w_id (id);
+
+ if (NULL != ue)
+ {
+ fib_node_lock (&ue->ue_fib_node);
+ }
+}
+
+void
+udp_encap_unlock_w_index (index_t uei)
+{
+ udp_encap_t *ue;
+
+ if (INDEX_INVALID == uei)
+ {
+ return;
+ }
+
+ ue = udp_encap_get (uei);
+
+ if (NULL != ue)
+ {
+ fib_node_unlock (&ue->ue_fib_node);
+ }
+}
+
+void
+udp_encap_unlock (u32 id)
+{
+ udp_encap_t *ue;
+
+ ue = udp_encap_get_w_id (id);
+
+ if (NULL != ue)
+ {
+ fib_node_unlock (&ue->ue_fib_node);
+ }
+}
+
+static void
+udp_encap_dpo_lock (dpo_id_t * dpo)
+{
+ udp_encap_t *ue;
+
+ ue = udp_encap_get (dpo->dpoi_index);
+
+ fib_node_lock (&ue->ue_fib_node);
+}
+
+static void
+udp_encap_dpo_unlock (dpo_id_t * dpo)
+{
+ udp_encap_t *ue;
+
+ ue = udp_encap_get (dpo->dpoi_index);
+
+ fib_node_unlock (&ue->ue_fib_node);
+}
+
+static u8 *
+format_udp_encap_i (u8 * s, va_list * args)
+{
+ index_t uei = va_arg (*args, index_t);
+ u32 indent = va_arg (*args, u32);
+ u32 details = va_arg (*args, u32);
+ udp_encap_t *ue;
+
+ ue = udp_encap_get (uei);
+
+ // FIXME
+ s = format (s, "udp-ecap:[%d]: id:%d ip-fib-index:%d",
+ uei, ue->ue_id, ue->ue_fib_index);
+ if (FIB_PROTOCOL_IP4 == ue->ue_ip_proto)
+ {
+ s = format (s, "ip:[src:%U, dst:%U] udp:[src:%d, dst:%d]",
+ format_ip4_address,
+ &ue->ue_hdrs.ip4.ue_ip4.src_address,
+ format_ip4_address,
+ &ue->ue_hdrs.ip4.ue_ip4.dst_address,
+ clib_net_to_host_u16 (ue->ue_hdrs.ip4.ue_udp.src_port),
+ clib_net_to_host_u16 (ue->ue_hdrs.ip4.ue_udp.dst_port));
+ }
+ else
+ {
+ s = format (s, "ip:[src:%U, dst:%U] udp:[src:%d dst:%d]",
+ format_ip6_address,
+ &ue->ue_hdrs.ip6.ue_ip6.src_address,
+ format_ip6_address,
+ &ue->ue_hdrs.ip6.ue_ip6.dst_address,
+ clib_net_to_host_u16 (ue->ue_hdrs.ip6.ue_udp.src_port),
+ clib_net_to_host_u16 (ue->ue_hdrs.ip6.ue_udp.dst_port));
+ }
+ if (details)
+ {
+ s = format (s, " locks:%d", ue->ue_fib_node.fn_locks);
+ s = format (s, "\n%UStacked on:", format_white_space, indent + 1);
+ s = format (s, "\n%U%U",
+ format_white_space, indent + 2,
+ format_dpo_id, &ue->ue_dpo, indent + 3);
+ }
+ return (s);
+}
+
+static u8 *
+format_udp_encap_dpo (u8 * s, va_list * args)
+{
+ index_t uei = va_arg (*args, index_t);
+ u32 indent = va_arg (*args, u32);
+
+ return (format (s, "%U", format_udp_encap_i, uei, indent, 1));
+}
+
+u8 *
+format_udp_encap (u8 * s, va_list * args)
+{
+ u32 id = va_arg (*args, u32);
+ u32 details = va_arg (*args, u32);
+ index_t uei;
+
+ uei = udp_encap_find (id);
+
+ if (INDEX_INVALID == uei)
+ {
+ return (format (s, "Invalid udp-encap ID: %d", id));
+ }
+
+ return (format (s, "%U", format_udp_encap_i, uei, 0, details));
+}
+
+static udp_encap_t *
+udp_encap_from_fib_node (fib_node_t * node)
+{
+#if (CLIB_DEBUG > 0)
+ ASSERT (FIB_NODE_TYPE_UDP_ENCAP == node->fn_type);
+#endif
+ return ((udp_encap_t *) (((char *) node) -
+ STRUCT_OFFSET_OF (udp_encap_t, ue_fib_node)));
+}
+
+/**
+ * Function definition to backwalk a FIB node
+ */
+static fib_node_back_walk_rc_t
+udp_encap_fib_back_walk (fib_node_t * node, fib_node_back_walk_ctx_t * ctx)
+{
+ udp_encap_restack (udp_encap_from_fib_node (node));
+
+ return (FIB_NODE_BACK_WALK_CONTINUE);
+}
+
+/**
+ * Function definition to get a FIB node from its index
+ */
+static fib_node_t *
+udp_encap_fib_node_get (fib_node_index_t index)
+{
+ udp_encap_t *ue;
+
+ ue = pool_elt_at_index (udp_encap_pool, index);
+
+ return (&ue->ue_fib_node);
+}
+
+/**
+ * Function definition to inform the FIB node that its last lock has gone.
+ */
+static void
+udp_encap_fib_last_lock_gone (fib_node_t * node)
+{
+ udp_encap_t *ue;
+
+ ue = udp_encap_from_fib_node (node);
+
+ /**
+ * reset the stacked DPO to unlock it
+ */
+ dpo_reset (&ue->ue_dpo);
+ hash_unset (udp_encap_db, ue->ue_id);
+
+ fib_entry_child_remove (ue->ue_fib_entry_index, ue->ue_fib_sibling);
+ fib_table_entry_delete_index (ue->ue_fib_entry_index, FIB_SOURCE_RR);
+
+
+ pool_put (udp_encap_pool, ue);
+}
+
+const static char *const udp4_encap_ip4_nodes[] = {
+ "udp4-encap",
+ NULL,
+};
+
+const static char *const udp4_encap_ip6_nodes[] = {
+ "udp4-encap",
+ NULL,
+};
+
+const static char *const udp4_encap_mpls_nodes[] = {
+ "udp4-encap",
+ NULL,
+};
+
+const static char *const udp6_encap_ip4_nodes[] = {
+ "udp6-encap",
+ NULL,
+};
+
+const static char *const udp6_encap_ip6_nodes[] = {
+ "udp6-encap",
+ NULL,
+};
+
+const static char *const udp6_encap_mpls_nodes[] = {
+ "udp6-encap",
+ NULL,
+};
+
+const static char *const *const udp4_encap_nodes[DPO_PROTO_NUM] = {
+ [DPO_PROTO_IP4] = udp4_encap_ip4_nodes,
+ [DPO_PROTO_IP6] = udp4_encap_ip6_nodes,
+ [DPO_PROTO_MPLS] = udp4_encap_mpls_nodes,
+};
+
+const static char *const *const udp6_encap_nodes[DPO_PROTO_NUM] = {
+ [DPO_PROTO_IP4] = udp6_encap_ip4_nodes,
+ [DPO_PROTO_IP6] = udp6_encap_ip6_nodes,
+ [DPO_PROTO_MPLS] = udp6_encap_mpls_nodes,
+};
+
+/*
+ * Virtual function table registered by UDP encaps
+ * for participation in the FIB object graph.
+ */
+const static fib_node_vft_t udp_encap_fib_vft = {
+ .fnv_get = udp_encap_fib_node_get,
+ .fnv_last_lock = udp_encap_fib_last_lock_gone,
+ .fnv_back_walk = udp_encap_fib_back_walk,
+};
+
+const static dpo_vft_t udp_encap_dpo_vft = {
+ .dv_lock = udp_encap_dpo_lock,
+ .dv_unlock = udp_encap_dpo_unlock,
+ .dv_format = format_udp_encap_dpo,
+ //.dv_mem_show = replicate_mem_show,
+};
+
+clib_error_t *
+udp_encap_init (vlib_main_t * vm)
+{
+ udp_encap_db = hash_create (0, sizeof (index_t));
+
+ fib_node_register_type (FIB_NODE_TYPE_UDP_ENCAP, &udp_encap_fib_vft);
+
+ udp_encap_dpo_types[FIB_PROTOCOL_IP4] =
+ dpo_register_new_type (&udp_encap_dpo_vft, udp4_encap_nodes);
+ udp_encap_dpo_types[FIB_PROTOCOL_IP6] =
+ dpo_register_new_type (&udp_encap_dpo_vft, udp6_encap_nodes);
+
+ return (NULL);
+}
+
+VLIB_INIT_FUNCTION (udp_encap_init);
+
+clib_error_t *
+udp_encap_cli (vlib_main_t * vm,
+ unformat_input_t * main_input, vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ clib_error_t *error = NULL;
+ ip46_address_t src_ip, dst_ip;
+ u32 table_id, ue_id;
+ u32 src_port, dst_port;
+ udp_encap_fixup_flags_t flags;
+ fib_protocol_t fproto;
+ u8 is_del;
+
+ is_del = 0;
+ table_id = 0;
+ flags = UDP_ENCAP_FIXUP_NONE;
+ fproto = FIB_PROTOCOL_MAX;
+ dst_port = 0;
+ ue_id = ~0;
+
+ /* Get a line of input. */
+ if (!unformat_user (main_input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "id %d", &ue_id))
+ ;
+ else if (unformat (line_input, "add"))
+ is_del = 0;
+ else if (unformat (line_input, "del"))
+ is_del = 1;
+ else if (unformat (line_input, "%U %U",
+ unformat_ip4_address,
+ &src_ip.ip4, unformat_ip4_address, &dst_ip.ip4))
+ fproto = FIB_PROTOCOL_IP4;
+ else if (unformat (line_input, "%U %U",
+ unformat_ip6_address,
+ &src_ip.ip6, unformat_ip6_address, &dst_ip.ip6))
+ fproto = FIB_PROTOCOL_IP6;
+ else if (unformat (line_input, "%d %d", &src_port, &dst_port))
+ ;
+ else if (unformat (line_input, "%d", &dst_port))
+ ;
+ else if (unformat (line_input, "table-id %d", &table_id))
+ ;
+ else if (unformat (line_input, "src-port-is-entropy"))
+ flags |= UDP_ENCAP_FIXUP_UDP_SRC_PORT_ENTROPY;
+ else
+ {
+ error = unformat_parse_error (line_input);
+ goto done;
+ }
+ }
+
+ if (~0 == ue_id)
+ {
+ error =
+ clib_error_return (0, "An ID for the UDP encap instance is required");
+ goto done;
+ }
+
+ if (!is_del && fproto != FIB_PROTOCOL_MAX)
+ {
+ u32 fib_index;
+ index_t uei;
+
+ fib_index = fib_table_find (fproto, table_id);
+
+ if (~0 == fib_index)
+ {
+ error = clib_error_return (0, "Nonexistent table id %d", table_id);
+ goto done;
+ }
+
+ uei = udp_encap_add_and_lock (ue_id, fproto, fib_index,
+ &src_ip, &dst_ip,
+ src_port, dst_port, flags);
+
+ if (INDEX_INVALID == uei)
+ {
+ error =
+ clib_error_return (0, "update to existing encap not supported %d",
+ ue_id);
+ goto done;
+ }
+ }
+ else if (is_del)
+ {
+ udp_encap_unlock (ue_id);
+ }
+ else
+ {
+ error =
+ clib_error_return (0,
+ "Some IP addresses would be usefull, don't you think?",
+ ue_id);
+ }
+
+done:
+ unformat_free (line_input);
+ return error;
+}
+
+clib_error_t *
+udp_encap_show (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ u32 ue_id;
+
+ ue_id = ~0;
+
+ /* Get a line of input. */
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "%d", &ue_id))
+ ;
+ }
+
+ if (~0 == ue_id)
+ {
+ udp_encap_t *ue;
+
+ /* *INDENT-OFF* */
+ pool_foreach(ue, udp_encap_pool,
+ ({
+ vlib_cli_output(vm, "%U", format_udp_encap, ue->ue_id, 0);
+ }));
+ /* *INDENT-ON* */
+ }
+ else
+ {
+ vlib_cli_output (vm, "%U", format_udp_encap, ue_id, 1);
+ }
+
+ return NULL;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (udp_encap_add_command, static) = {
+ .path = "udp encap",
+ .short_help = "udp encap [add|del] <id ID> <src-ip> <dst-ip> [<src-port>] <dst-port> [src-port-is-entropy] [table-id <table>]",
+ .function = udp_encap_cli,
+ .is_mp_safe = 1,
+};
+VLIB_CLI_COMMAND (udp_encap_show_command, static) = {
+ .path = "show udp encap",
+ .short_help = "show udp encap [ID]",
+ .function = udp_encap_show,
+ .is_mp_safe = 1,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/udp/udp_encap.h b/src/vnet/udp/udp_encap.h
new file mode 100644
index 00000000000..b8f329dcbee
--- /dev/null
+++ b/src/vnet/udp/udp_encap.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __UDP_ENCAP_H__
+#define __UDP_ENCAP_H__
+
+#include <vnet/ip/ip.h>
+#include <vnet/udp/udp.h>
+#include <vnet/fib/fib_node.h>
+
+/**
+ * UDP encapsulation.
+ * A representation of the encapsulation of packets in UDP-over-IP.
+ * This is encapsulation only, there is no tunnel interface, hence
+ * it is uni-directional. For decap, register a handler with the UDP port
+ * dispatcher.
+ */
+
+/**
+ * Fixup behaviour. Actions performed on the encap in the data-plane
+ */
+typedef enum udp_encap_fixup_flags_t_
+{
+ UDP_ENCAP_FIXUP_NONE = 0,
+ /**
+ * UDP source port contains an entropy/hash value for load-balancing by downstream peers.
+ */
+ UDP_ENCAP_FIXUP_UDP_SRC_PORT_ENTROPY = (1 << 0),
+} udp_encap_fixup_flags_t;
+
+/**
+ * The UDP encap representation
+ */
+typedef struct udp_encap_t_
+{
+ /**
+ * The first cacheline contains the data used in the data-plane
+ */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+
+ /**
+ * The headers to paint, in packet painting order.
+ * The v4 and v6 variants are union members, so both start at
+ * offset 0; ue_ip_proto below selects which one is valid.
+ */
+ union
+ {
+ struct
+ {
+ ip4_header_t ue_ip4;
+ udp_header_t ue_udp;
+ } __attribute__ ((packed)) ip4;
+ struct
+ {
+ ip6_header_t ue_ip6;
+ udp_header_t ue_udp;
+ } __attribute__ ((packed)) ip6;
+ } __attribute__ ((packed)) ue_hdrs;
+
+ /**
+ * Flags controlling fixup behaviour
+ */
+ udp_encap_fixup_flags_t ue_flags;
+
+ /**
+ * The DPO used to forward to the next node in the VLIB graph
+ */
+ dpo_id_t ue_dpo;
+
+ /**
+ * the protocol of the IP header imposed
+ */
+ fib_protocol_t ue_ip_proto;
+
+ /**
+ * The second cacheline holds control-plane-only state
+ */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
+
+ /**
+ * linkage into the FIB graph
+ */
+ fib_node_t ue_fib_node;
+
+ /**
+ * The ID given by the user/client.
+ * This ID is used by the client for modifications.
+ */
+ u32 ue_id;
+
+ /**
+ * Tracking information for the IP destination
+ */
+ fib_node_index_t ue_fib_entry_index;
+ u32 ue_fib_sibling;
+
+ /**
+ * The FIB index in which the encap destination resides
+ */
+ index_t ue_fib_index;
+} udp_encap_t;
+
+extern index_t udp_encap_add_and_lock (u32 id,
+ fib_protocol_t proto,
+ index_t fib_index,
+ const ip46_address_t * src_ip,
+ const ip46_address_t * dst_ip,
+ u16 src_port,
+ u16 dst_port,
+ udp_encap_fixup_flags_t flags);
+
+extern index_t udp_encap_find (u32 id);
+extern void udp_encap_lock (u32 id);
+extern void udp_encap_unlock (u32 id);
+extern u8 *format_udp_encap (u8 * s, va_list * args);
+extern void udp_encap_unlock_w_index (index_t uei);
+extern void udp_encap_contribute_forwarding (u32 id,
+ dpo_proto_t proto,
+ dpo_id_t * dpo);
+
+/**
+ * Pool of encaps
+ */
+extern udp_encap_t *udp_encap_pool;
+
+/**
+ * Get the encap object at the given pool index.
+ * The index must be valid (pool_elt_at_index asserts in debug images).
+ */
+static inline udp_encap_t *
+udp_encap_get (index_t uei)
+{
+ return (pool_elt_at_index (udp_encap_pool, uei));
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
+
+#endif
diff --git a/src/vnet/udp/udp_encap_node.c b/src/vnet/udp/udp_encap_node.c
new file mode 100644
index 00000000000..09a76b530f6
--- /dev/null
+++ b/src/vnet/udp/udp_encap_node.c
@@ -0,0 +1,280 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/udp/udp_encap.h>
+
+/** Trace record for a v4 encap'd packet: the imposed UDP and IP4 headers */
+typedef struct udp4_encap_trace_t_
+{
+ udp_header_t udp;
+ ip4_header_t ip;
+} udp4_encap_trace_t;
+
+/** Trace record for a v6 encap'd packet: the imposed UDP and IP6 headers */
+typedef struct udp6_encap_trace_t_
+{
+ udp_header_t udp;
+ ip6_header_t ip;
+} udp6_encap_trace_t;
+
+/** Format a udp4-encap trace: imposed IP4 header then UDP header.
+ * NB: the va_arg order (vm, node, trace) is fixed by the vlib trace API. */
+static u8 *
+format_udp4_encap_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ udp4_encap_trace_t *t;
+
+ t = va_arg (*args, udp4_encap_trace_t *);
+
+ s = format (s, "%U\n %U",
+ format_ip4_header, &t->ip, sizeof (t->ip),
+ format_udp_header, &t->udp, sizeof (t->udp));
+ return (s);
+}
+
+/** Format a udp6-encap trace: imposed IP6 header then UDP header.
+ * NB: the va_arg order (vm, node, trace) is fixed by the vlib trace API. */
+static u8 *
+format_udp6_encap_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ udp6_encap_trace_t *t;
+
+ t = va_arg (*args, udp6_encap_trace_t *);
+
+ s = format (s, "%U\n %U",
+ format_ip6_header, &t->ip, sizeof (t->ip),
+ format_udp_header, &t->udp, sizeof (t->udp));
+ return (s);
+}
+
+/**
+ * Shared worker for the udp4-encap and udp6-encap nodes.
+ * On input, vnet_buffer()->ip.adj_index[VLIB_TX] carries the udp_encap
+ * pool index for each packet (presumably set by the encap's DPO during
+ * the FIB lookup - see udp_encap.c). The pre-computed IP+UDP header is
+ * painted onto the packet and the buffer is handed to the DPO the encap
+ * is stacked upon. is_encap_v6 selects the imposed header family.
+ */
+always_inline uword
+udp_encap_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame, int is_encap_v6)
+{
+ u32 *from = vlib_frame_vector_args (frame);
+ u32 n_left_from, n_left_to_next, *to_next, next_index;
+
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /* dual-loop: process two packets per iteration */
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ vlib_buffer_t *b0, *b1;
+ udp_encap_t *ue0, *ue1;
+ u32 bi0, next0, uei0;
+ u32 bi1, next1, uei1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, STORE);
+ vlib_prefetch_buffer_header (p3, STORE);
+ }
+
+ bi0 = to_next[0] = from[0];
+ bi1 = to_next[1] = from[1];
+
+ from += 2;
+ n_left_from -= 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ /* pool index of the encap object to impose */
+ uei0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
+ uei1 = vnet_buffer (b1)->ip.adj_index[VLIB_TX];
+
+ /* Rewrite packet header and update lengths. */
+ ue0 = udp_encap_get (uei0);
+ ue1 = udp_encap_get (uei1);
+
+ /* Paint */
+ if (is_encap_v6)
+ {
+ const u8 n_bytes =
+ sizeof (udp_header_t) + sizeof (ip6_header_t);
+ /* &ue_hdrs aliases &ue_hdrs.ip6: both union members
+ * live at offset 0 */
+ ip_udp_encap_two (vm, b0, b1, (u8 *) & ue0->ue_hdrs,
+ (u8 *) & ue1->ue_hdrs, n_bytes, 0);
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ udp6_encap_trace_t *tr =
+ vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->udp = ue0->ue_hdrs.ip6.ue_udp;
+ tr->ip = ue0->ue_hdrs.ip6.ue_ip6;
+ }
+ if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ udp6_encap_trace_t *tr =
+ vlib_add_trace (vm, node, b1, sizeof (*tr));
+ tr->udp = ue1->ue_hdrs.ip6.ue_udp;
+ tr->ip = ue1->ue_hdrs.ip6.ue_ip6;
+ }
+ }
+ else
+ {
+ const u8 n_bytes =
+ sizeof (udp_header_t) + sizeof (ip4_header_t);
+
+ ip_udp_encap_two (vm, b0, b1,
+ (u8 *) & ue0->ue_hdrs,
+ (u8 *) & ue1->ue_hdrs, n_bytes, 1);
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ udp4_encap_trace_t *tr =
+ vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->udp = ue0->ue_hdrs.ip4.ue_udp;
+ tr->ip = ue0->ue_hdrs.ip4.ue_ip4;
+ }
+ if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ udp4_encap_trace_t *tr =
+ vlib_add_trace (vm, node, b1, sizeof (*tr));
+ tr->udp = ue1->ue_hdrs.ip4.ue_udp;
+ tr->ip = ue1->ue_hdrs.ip4.ue_ip4;
+ }
+ }
+
+ /* hand the packet to the DPO the encap is stacked on */
+ next0 = ue0->ue_dpo.dpoi_next_node;
+ next1 = ue1->ue_dpo.dpoi_next_node;
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ue0->ue_dpo.dpoi_index;
+ vnet_buffer (b1)->ip.adj_index[VLIB_TX] = ue1->ue_dpo.dpoi_index;
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ /* single-loop: remaining packets, one at a time */
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0, next0, uei0;
+ vlib_buffer_t *b0;
+ udp_encap_t *ue0;
+
+ bi0 = to_next[0] = from[0];
+
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ uei0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
+
+ /* Rewrite packet header and update lengths. */
+ ue0 = udp_encap_get (uei0);
+
+ /* Paint */
+ if (is_encap_v6)
+ {
+ const u8 n_bytes =
+ sizeof (udp_header_t) + sizeof (ip6_header_t);
+ ip_udp_encap_one (vm, b0, (u8 *) & ue0->ue_hdrs.ip6, n_bytes,
+ 0);
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ udp6_encap_trace_t *tr =
+ vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->udp = ue0->ue_hdrs.ip6.ue_udp;
+ tr->ip = ue0->ue_hdrs.ip6.ue_ip6;
+ }
+ }
+ else
+ {
+ const u8 n_bytes =
+ sizeof (udp_header_t) + sizeof (ip4_header_t);
+
+ ip_udp_encap_one (vm, b0, (u8 *) & ue0->ue_hdrs.ip4, n_bytes,
+ 1);
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ udp4_encap_trace_t *tr =
+ vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->udp = ue0->ue_hdrs.ip4.ue_udp;
+ tr->ip = ue0->ue_hdrs.ip4.ue_ip4;
+ }
+ }
+
+ next0 = ue0->ue_dpo.dpoi_next_node;
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ue0->ue_dpo.dpoi_index;
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+/** Node function: impose an IPv4+UDP header */
+static uword
+udp4_encap (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return udp_encap_inline (vm, node, frame, 0);
+}
+
+/** Node function: impose an IPv6+UDP header */
+static uword
+udp6_encap (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return udp_encap_inline (vm, node, frame, 1);
+}
+
+/* *INDENT-OFF* */
+/* n_next_nodes is 0 for both nodes: the next index for each packet
+ * is taken at runtime from the DPO the encap object is stacked on */
+VLIB_REGISTER_NODE (udp4_encap_node) = {
+ .function = udp4_encap,
+ .name = "udp4-encap",
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_udp4_encap_trace,
+
+ .n_next_nodes = 0,
+};
+VLIB_NODE_FUNCTION_MULTIARCH (udp4_encap_node, udp4_encap);
+
+VLIB_REGISTER_NODE (udp6_encap_node) = {
+ .function = udp6_encap,
+ .name = "udp6-encap",
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_udp6_encap_trace,
+
+ .n_next_nodes = 0,
+};
+VLIB_NODE_FUNCTION_MULTIARCH (udp6_encap_node, udp6_encap);
+/* *INDENT-ON* */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/vnet_all_api_h.h b/src/vnet/vnet_all_api_h.h
index 936f0a5fac8..c71af079e11 100644
--- a/src/vnet/vnet_all_api_h.h
+++ b/src/vnet/vnet_all_api_h.h
@@ -61,6 +61,7 @@
#include <vnet/ethernet/p2p_ethernet.api.h>
#include <vnet/tcp/tcp.api.h>
#include <vnet/dns/dns.api.h>
+#include <vnet/udp/udp.api.h>
/*
* fd.io coding-style-patch-verification: ON
diff --git a/test/test_udp.py b/test/test_udp.py
new file mode 100644
index 00000000000..7853ac3b3f4
--- /dev/null
+++ b/test/test_udp.py
@@ -0,0 +1,235 @@
+#!/usr/bin/env python
+
+from framework import VppTestCase, VppTestRunner
+from vpp_udp_encap import *
+from vpp_ip_route import VppIpRoute, VppRoutePath, VppIpTable
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether, ARP
+from scapy.layers.inet import IP, UDP
+from scapy.layers.inet6 import IPv6
+from scapy.contrib.mpls import MPLS
+
+
+class TestUdpEncap(VppTestCase):
+ """ UDP Encap Test Case """
+
+ def setUp(self):
+ super(TestUdpEncap, self).setUp()
+
+ # create 4 pg interfaces
+ self.create_pg_interfaces(range(4))
+
+ # setup interfaces
+ # assign them different tables.
+ table_id = 0
+ self.tables = []
+
+ for i in self.pg_interfaces:
+ i.admin_up()
+
+ if table_id != 0:
+ tbl = VppIpTable(self, table_id)
+ tbl.add_vpp_config()
+ self.tables.append(tbl)
+ tbl = VppIpTable(self, table_id, is_ip6=1)
+ tbl.add_vpp_config()
+ self.tables.append(tbl)
+
+ i.set_table_ip4(table_id)
+ i.set_table_ip6(table_id)
+ i.config_ip4()
+ i.resolve_arp()
+ i.config_ip6()
+ i.resolve_ndp()
+ table_id += 1
+
+ def tearDown(self):
+ for i in self.pg_interfaces:
+ i.unconfig_ip4()
+ i.unconfig_ip6()
+ i.ip6_disable()
+ i.set_table_ip4(0)
+ i.set_table_ip6(0)
+ i.admin_down()
+ super(TestUdpEncap, self).tearDown()
+
+ def validate_outer4(self, rx, encap_obj):
+ """ Check the imposed outer IPv4+UDP header matches the encap """
+ self.assertEqual(rx[IP].src, encap_obj.src_ip_s)
+ self.assertEqual(rx[IP].dst, encap_obj.dst_ip_s)
+ self.assertEqual(rx[UDP].sport, encap_obj.src_port)
+ self.assertEqual(rx[UDP].dport, encap_obj.dst_port)
+
+ def validate_outer6(self, rx, encap_obj):
+ """ Check the imposed outer IPv6+UDP header matches the encap """
+ self.assertEqual(rx[IPv6].src, encap_obj.src_ip_s)
+ self.assertEqual(rx[IPv6].dst, encap_obj.dst_ip_s)
+ self.assertEqual(rx[UDP].sport, encap_obj.src_port)
+ self.assertEqual(rx[UDP].dport, encap_obj.dst_port)
+
+ def validate_inner4(self, rx, tx, ttl=None):
+ """ Check the inner IPv4 packet survived the encap unchanged """
+ self.assertEqual(rx.src, tx[IP].src)
+ self.assertEqual(rx.dst, tx[IP].dst)
+ if ttl:
+ self.assertEqual(rx.ttl, ttl)
+ else:
+ self.assertEqual(rx.ttl, tx[IP].ttl)
+
+ def validate_inner6(self, rx, tx):
+ """ Check the inner IPv6 packet survived the encap unchanged """
+ self.assertEqual(rx.src, tx[IPv6].src)
+ self.assertEqual(rx.dst, tx[IPv6].dst)
+ self.assertEqual(rx.hlim, tx[IPv6].hlim)
+
+ def send_and_expect(self, input, output, pkts):
+ """ Send pkts on 'input' and return the capture from 'output' """
+ self.vapi.cli("clear trace")
+ input.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ rx = output.get_capture(len(pkts))
+ return rx
+
+ def test_udp_encap(self):
+ """ UDP Encap test
+ """
+
+ #
+ # construct a UDP encap object through each of the peers
+ # v4 through the first two peers, v6 through the second two.
+ #
+ udp_encap_0 = VppUdpEncap(self, 0,
+ self.pg0.local_ip4,
+ self.pg0.remote_ip4,
+ 330, 440)
+ udp_encap_1 = VppUdpEncap(self, 1,
+ self.pg1.local_ip4,
+ self.pg1.remote_ip4,
+ 331, 441,
+ table_id=1)
+ udp_encap_2 = VppUdpEncap(self, 2,
+ self.pg2.local_ip6,
+ self.pg2.remote_ip6,
+ 332, 442,
+ table_id=2,
+ is_ip6=1)
+ udp_encap_3 = VppUdpEncap(self, 3,
+ self.pg3.local_ip6,
+ self.pg3.remote_ip6,
+ 333, 443,
+ table_id=3,
+ is_ip6=1)
+ udp_encap_0.add_vpp_config()
+ udp_encap_1.add_vpp_config()
+ udp_encap_2.add_vpp_config()
+ udp_encap_3.add_vpp_config()
+
+ #
+ # Routes via each UDP encap object - all combinations of v4 and v6.
+ #
+ route_4o4 = VppIpRoute(self, "1.1.0.1", 32,
+ [VppRoutePath("0.0.0.0",
+ 0xFFFFFFFF,
+ is_udp_encap=1,
+ next_hop_id=0)])
+ route_4o6 = VppIpRoute(self, "1.1.2.1", 32,
+ [VppRoutePath("0.0.0.0",
+ 0xFFFFFFFF,
+ is_udp_encap=1,
+ next_hop_id=2)])
+ route_6o4 = VppIpRoute(self, "2001::1", 128,
+ [VppRoutePath("0.0.0.0",
+ 0xFFFFFFFF,
+ is_udp_encap=1,
+ next_hop_id=1)],
+ is_ip6=1)
+ route_6o6 = VppIpRoute(self, "2001::3", 128,
+ [VppRoutePath("0.0.0.0",
+ 0xFFFFFFFF,
+ is_udp_encap=1,
+ next_hop_id=3)],
+ is_ip6=1)
+ route_4o4.add_vpp_config()
+ route_4o6.add_vpp_config()
+ route_6o6.add_vpp_config()
+ route_6o4.add_vpp_config()
+
+ #
+ # 4o4 encap
+ #
+ p_4o4 = (Ether(src=self.pg0.remote_mac,
+ dst=self.pg0.local_mac) /
+ IP(src="2.2.2.2", dst="1.1.0.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+ rx = self.send_and_expect(self.pg0, self.pg0, p_4o4*65)
+ for p in rx:
+ self.validate_outer4(p, udp_encap_0)
+ p = IP(p["UDP"].payload.load)
+ self.validate_inner4(p, p_4o4)
+
+ #
+ # 4o6 encap
+ #
+ p_4o6 = (Ether(src=self.pg0.remote_mac,
+ dst=self.pg0.local_mac) /
+ IP(src="2.2.2.2", dst="1.1.2.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+ rx = self.send_and_expect(self.pg0, self.pg2, p_4o6*65)
+ for p in rx:
+ self.validate_outer6(p, udp_encap_2)
+ p = IP(p["UDP"].payload.load)
+ self.validate_inner4(p, p_4o6)
+
+ #
+ # 6o4 encap
+ #
+ p_6o4 = (Ether(src=self.pg0.remote_mac,
+ dst=self.pg0.local_mac) /
+ IPv6(src="2001::100", dst="2001::1") /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+ rx = self.send_and_expect(self.pg0, self.pg1, p_6o4*65)
+ for p in rx:
+ self.validate_outer4(p, udp_encap_1)
+ p = IPv6(p["UDP"].payload.load)
+ self.validate_inner6(p, p_6o4)
+
+ #
+ # 6o6 encap
+ #
+ p_6o6 = (Ether(src=self.pg0.remote_mac,
+ dst=self.pg0.local_mac) /
+ IPv6(src="2001::100", dst="2001::3") /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+ rx = self.send_and_expect(self.pg0, self.pg3, p_6o6*65)
+ for p in rx:
+ self.validate_outer6(p, udp_encap_3)
+ p = IPv6(p["UDP"].payload.load)
+ self.validate_inner6(p, p_6o6)
+
+ #
+ # A route with an output label
+ # the TTL of the inner packet is decremented on LSP ingress
+ #
+ route_4oMPLSo4 = VppIpRoute(self, "1.1.2.22", 32,
+ [VppRoutePath("0.0.0.0",
+ 0xFFFFFFFF,
+ is_udp_encap=1,
+ next_hop_id=1,
+ labels=[66])])
+ route_4oMPLSo4.add_vpp_config()
+
+ p_4omo4 = (Ether(src=self.pg0.remote_mac,
+ dst=self.pg0.local_mac) /
+ IP(src="2.2.2.2", dst="1.1.2.22") /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+ rx = self.send_and_expect(self.pg0, self.pg1, p_4omo4*65)
+ for p in rx:
+ self.validate_outer4(p, udp_encap_1)
+ p = MPLS(p["UDP"].payload.load)
+ self.validate_inner4(p, p_4omo4, ttl=63)
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/vpp_ip_route.py b/test/vpp_ip_route.py
index 7501146e96a..34aa5428d18 100644
--- a/test/vpp_ip_route.py
+++ b/test/vpp_ip_route.py
@@ -108,6 +108,8 @@ class VppRoutePath(object):
is_resolve_host=0,
is_resolve_attached=0,
is_source_lookup=0,
+ is_udp_encap=0,
+ next_hop_id=0xffffffff,
proto=DpoProto.DPO_PROTO_IP4):
self.nh_itf = nh_sw_if_index
self.nh_table_id = nh_table_id
@@ -130,6 +132,8 @@ class VppRoutePath(object):
if rpf_id != 0:
self.is_rpf_id = 1
self.nh_itf = rpf_id
+ self.is_udp_encap = is_udp_encap
+ self.next_hop_id = next_hop_id
class VppMRoutePath(VppRoutePath):
@@ -194,12 +198,14 @@ class VppIpRoute(VppObject):
path.nh_labels),
next_hop_via_label=path.nh_via_label,
next_hop_table_id=path.nh_table_id,
+ next_hop_id=path.next_hop_id,
is_ipv6=self.is_ip6,
is_l2_bridged=1
if path.proto == DpoProto.DPO_PROTO_ETHERNET else 0,
is_resolve_host=path.is_resolve_host,
is_resolve_attached=path.is_resolve_attached,
is_source_lookup=path.is_source_lookup,
+ is_udp_encap=path.is_udp_encap,
is_multipath=1 if len(self.paths) > 1 else 0)
self._test.registry.register(self, self._test.logger)
@@ -226,7 +232,9 @@ class VppIpRoute(VppObject):
table_id=self.table_id,
next_hop_table_id=path.nh_table_id,
next_hop_via_label=path.nh_via_label,
+ next_hop_id=path.next_hop_id,
is_add=0,
+ is_udp_encap=path.is_udp_encap,
is_ipv6=self.is_ip6)
def query_vpp_config(self):
diff --git a/test/vpp_papi_provider.py b/test/vpp_papi_provider.py
index 468cf83d57b..31d7ac48d1f 100644
--- a/test/vpp_papi_provider.py
+++ b/test/vpp_papi_provider.py
@@ -735,6 +735,7 @@ class VppPapiProvider(object):
next_hop_n_out_labels=0,
next_hop_out_label_stack=[],
next_hop_via_label=MPLS_LABEL_INVALID,
+ next_hop_id=0xFFFFFFFF,
is_resolve_host=0,
is_resolve_attached=0,
classify_table_index=0xFFFFFFFF,
@@ -747,6 +748,7 @@ class VppPapiProvider(object):
is_classify=0,
is_multipath=0,
is_l2_bridged=0,
+ is_udp_encap=0,
is_source_lookup=0):
"""
@@ -790,9 +792,11 @@ class VppPapiProvider(object):
'is_resolve_attached': is_resolve_attached,
'is_l2_bridged': is_l2_bridged,
'is_source_lookup': is_source_lookup,
+ 'is_udp_encap': is_udp_encap,
'next_hop_weight': next_hop_weight,
'dst_address_length': dst_address_length,
'dst_address': dst_address,
+ 'next_hop_id': next_hop_id,
'next_hop_address': next_hop_address,
'next_hop_n_out_labels': next_hop_n_out_labels,
'next_hop_via_label': next_hop_via_label,
@@ -980,6 +984,41 @@ class VppPapiProvider(object):
'outer_fib_id': outer_fib_id}
)
+ def udp_encap_add_del(self,
+ id,
+ src_ip,
+ dst_ip,
+ src_port,
+ dst_port,
+ table_id=0,
+ is_add=1,
+ is_ip6=0):
+ """ Add/delete a UDP encap object
+ :param id: user provided ID for the encap object
+ :param src_ip: encap source IP address (packed bytes)
+ :param dst_ip: encap destination IP address (packed bytes)
+ :param src_port: encap UDP source port
+ :param dst_port: encap UDP destination port
+ :param table_id: FIB table of the encap destination (Default value = 0)
+ :param is_add: 1 to add, 0 to delete (Default value = 1)
+ :param is_ip6: 1 if the encap addresses are IPv6 (Default value = 0)
+ """
+
+ return self.api(
+ self.papi.udp_encap_add_del,
+ {'id': id,
+ 'is_add': is_add,
+ 'is_ip6': is_ip6,
+ 'src_ip': src_ip,
+ 'dst_ip': dst_ip,
+ 'src_port': src_port,
+ 'dst_port': dst_port,
+ 'table_id': table_id}
+ )
+
+ def udp_encap_dump(self):
+ """ Dump all UDP encap objects """
+ return self.api(self.papi.udp_encap_dump, {})
+
def mpls_fib_dump(self):
return self.api(self.papi.mpls_fib_dump, {})
diff --git a/test/vpp_udp_encap.py b/test/vpp_udp_encap.py
new file mode 100644
index 00000000000..56d23cc45dc
--- /dev/null
+++ b/test/vpp_udp_encap.py
@@ -0,0 +1,73 @@
+"""
+ UDP encap objects
+"""
+
+from vpp_object import *
+from socket import inet_pton, inet_ntop, AF_INET, AF_INET6
+
+
+def find_udp_encap(test, id):
+ encaps = test.vapi.udp_encap_dump()
+ for e in encaps:
+ if id == e.id:
+ return True
+ return False
+
+
+class VppUdpEncap(VppObject):
+ """A VPP UDP encap object: programs and removes one encap via the API."""
+
+ def __init__(self,
+ test,
+ id,
+ src_ip,
+ dst_ip,
+ src_port,
+ dst_port,
+ table_id=0,
+ is_ip6=0):
+ self._test = test
+ self.id = id
+ self.table_id = table_id
+ self.is_ip6 = is_ip6
+ # keep both the printable form (for validation against scapy
+ # captures) and the packed form (for the binary API)
+ self.src_ip_s = src_ip
+ self.dst_ip_s = dst_ip
+ if is_ip6:
+ self.src_ip = inet_pton(AF_INET6, src_ip)
+ self.dst_ip = inet_pton(AF_INET6, dst_ip)
+ else:
+ self.src_ip = inet_pton(AF_INET, src_ip)
+ self.dst_ip = inet_pton(AF_INET, dst_ip)
+ self.src_port = src_port
+ self.dst_port = dst_port
+
+ def add_vpp_config(self):
+ """Program the encap in VPP and register it for auto-cleanup."""
+ self._test.vapi.udp_encap_add_del(
+ self.id,
+ self.src_ip,
+ self.dst_ip,
+ self.src_port,
+ self.dst_port,
+ self.table_id,
+ is_ip6=self.is_ip6,
+ is_add=1)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ """Remove the encap from VPP."""
+ self._test.vapi.udp_encap_add_del(
+ self.id,
+ self.src_ip,
+ self.dst_ip,
+ self.src_port,
+ self.dst_port,
+ self.table_id,
+ is_ip6=self.is_ip6,
+ is_add=0)
+
+ def query_vpp_config(self):
+ """Return True if VPP still has this encap configured."""
+ return find_udp_encap(self._test, self.id)
+
+ def __str__(self):
+ return self.object_id()
+
+ def object_id(self):
+ return ("udp-encap-%d" % self.id)