diff options
author | Eyal Bari <ebari@cisco.com> | 2016-11-24 19:42:43 +0200 |
---|---|---|
committer | John Lo <loj@cisco.com> | 2016-11-30 16:31:32 +0000 |
commit | c5b136004543b9861a203af335d1ce61a976382d (patch) | |
tree | 5a4ad365f4086103afc65892a603a78be2ffbef1 | |
parent | aff7077d2031545a15efd12e7f65eac723799491 (diff) |
VXLAN multicast dst (remote) address support
Added support for multicast vxlan tunnels, which are used for bridge domain flooding instead of flooding all the unicast tunnels in the bridge domain.
features added:
* conditional flooding to only a subset of the BD members - based on the existence of a multicast tunnel member
* added a local multicast adjacency - multicast packets are handled the same as unicast, based on the src (unicast) address
* refactored some of the vxlan tunnel creation code to unify ip4/6 handling
Change-Id: I60cca4124265a8dd4f6b2d6ea8701e52e7c1baa4
Signed-off-by: Eyal Bari <ebari@cisco.com>
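The extended Python test API in test/vpp_papi_provider.py is the quickest way to exercise the new option. Below is a minimal sketch, not part of this patch: it assumes `vapi` is an already-connected VppPapiProvider instance and that sw_if_index 1 is an existing interface facing the multicast underlay; the addresses and VNI are illustrative.

```python
import socket

# addresses are sent over the API in network byte order, so pack them with inet_pton
src = socket.inet_pton(socket.AF_INET, '10.0.0.1')    # local (unicast) VTEP address
grp = socket.inet_pton(socket.AF_INET, '239.1.1.1')   # multicast group used as the remote dst

# create a multicast VXLAN tunnel; mcast_sw_if_index names the interface
# carrying the multicast traffic (the default 0xFFFFFFFF keeps the old
# unicast-only behaviour)
vapi.vxlan_add_del_tunnel(src_addr=src,
                          dst_addr=grp,
                          mcast_sw_if_index=1,
                          vni=10)
```

The equivalent debug CLI form added by this patch is `create vxlan tunnel src <local-vtep-addr> group <mcast-vtep-addr> <intf-name> vni <nn>`.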
-rw-r--r-- | test/vpp_papi_provider.py | 6 |
-rw-r--r-- | vnet/vnet/fib/fib_internal.h | 3 |
-rw-r--r-- | vnet/vnet/fib/fib_path_ext.h | 2 |
-rw-r--r-- | vnet/vnet/fib/fib_types.h | 7 |
-rw-r--r-- | vnet/vnet/interface.c | 10 |
-rw-r--r-- | vnet/vnet/interface.h | 11 |
-rw-r--r-- | vnet/vnet/interface_cli.c | 1 |
-rw-r--r-- | vnet/vnet/ip/ip4_packet.h | 13 |
-rw-r--r-- | vnet/vnet/ip/ip6_packet.h | 17 |
-rw-r--r-- | vnet/vnet/l2/l2_bd.c | 82 |
-rw-r--r-- | vnet/vnet/l2/l2_bd.h | 11 |
-rw-r--r-- | vnet/vnet/l2/l2_flood.c | 21 |
-rw-r--r-- | vnet/vnet/l2/l2_input.c | 4 |
-rw-r--r-- | vnet/vnet/lisp-gpe/lisp_gpe_sub_interface.c | 1 |
-rw-r--r-- | vnet/vnet/vxlan/vxlan.c | 219 |
-rw-r--r-- | vnet/vnet/vxlan/vxlan.h | 10 |
-rw-r--r-- | vpp-api-test/vat/api_format.c | 112 |
-rw-r--r-- | vpp/vpp-api/api.c | 34 |
-rw-r--r-- | vpp/vpp-api/custom_dump.c | 29 |
-rw-r--r-- | vpp/vpp-api/vpe.api | 2 |
20 files changed, 379 insertions, 216 deletions
diff --git a/test/vpp_papi_provider.py b/test/vpp_papi_provider.py index aec052c2..9db26d9f 100644 --- a/test/vpp_papi_provider.py +++ b/test/vpp_papi_provider.py @@ -185,6 +185,7 @@ class VppPapiProvider(object): self, src_addr, dst_addr, + mcast_sw_if_index=0xFFFFFFFF, is_add=1, is_ipv6=0, encap_vrf_id=0, @@ -198,12 +199,13 @@ class VppPapiProvider(object): :param is_ipv6: (Default value = 0) :param encap_vrf_id: (Default value = 0) :param decap_next_index: (Default value = 0xFFFFFFFF) + :param mcast_sw_if_index: (Default value = 0xFFFFFFFF) :param vni: (Default value = 0) """ return self.api(vpp_papi.vxlan_add_del_tunnel, - (is_add, is_ipv6, src_addr, dst_addr, encap_vrf_id, - decap_next_index, vni)) + (is_add, is_ipv6, src_addr, dst_addr, mcast_sw_if_index, + encap_vrf_id, decap_next_index, vni)) def bridge_domain_add_del(self, bd_id, flood=1, uu_flood=1, forward=1, learn=1, arp_term=0, is_add=1): diff --git a/vnet/vnet/fib/fib_internal.h b/vnet/vnet/fib/fib_internal.h index 26b349ee..a0238ac3 100644 --- a/vnet/vnet/fib/fib_internal.h +++ b/vnet/vnet/fib/fib_internal.h @@ -24,9 +24,6 @@ */ #undef FIB_DEBUG -extern void fib_prefix_from_ip46_addr (const ip46_address_t *addr, - fib_prefix_t *prf); - extern int fib_route_path_cmp(const fib_route_path_t *rpath1, const fib_route_path_t *rpath2); diff --git a/vnet/vnet/fib/fib_path_ext.h b/vnet/vnet/fib/fib_path_ext.h index 949b1e2b..6cb7f507 100644 --- a/vnet/vnet/fib/fib_path_ext.h +++ b/vnet/vnet/fib/fib_path_ext.h @@ -20,7 +20,7 @@ #include <vnet/fib/fib_types.h> /** - * A path extension is a per-entry addition to the forwarigind information + * A path extension is a per-entry addition to the forwarding information * when packets are sent for that entry over that path. * * For example: diff --git a/vnet/vnet/fib/fib_types.h b/vnet/vnet/fib/fib_types.h index 83123a51..92371e6b 100644 --- a/vnet/vnet/fib/fib_types.h +++ b/vnet/vnet/fib/fib_types.h @@ -206,6 +206,13 @@ extern int fib_prefix_is_cover(const fib_prefix_t *p1, */ extern int fib_prefix_is_host(const fib_prefix_t *p); + +/** + * \brief Host prefix from ip + */ +extern void fib_prefix_from_ip46_addr (const ip46_address_t *addr, + fib_prefix_t *pfx); + extern u8 * format_fib_prefix(u8 * s, va_list * args); extern u8 * format_fib_forw_chain_type(u8 * s, va_list * args); diff --git a/vnet/vnet/interface.c b/vnet/vnet/interface.c index 33827e2b..e552733e 100644 --- a/vnet/vnet/interface.c +++ b/vnet/vnet/interface.c @@ -712,11 +712,11 @@ vnet_register_interface (vnet_main_t * vnm, /* Make hardware interface point to software interface. */ { - vnet_sw_interface_t sw; - - memset (&sw, 0, sizeof (sw)); - sw.type = VNET_SW_INTERFACE_TYPE_HARDWARE; - sw.hw_if_index = hw_index; + vnet_sw_interface_t sw = { + .type = VNET_SW_INTERFACE_TYPE_HARDWARE, + .flood_class = VNET_FLOOD_CLASS_NORMAL, + .hw_if_index = hw_index + }; hw->sw_if_index = vnet_create_sw_interface_no_callbacks (vnm, &sw); } diff --git a/vnet/vnet/interface.h b/vnet/vnet/interface.h index 5ac7fbd5..412574d1 100644 --- a/vnet/vnet/interface.h +++ b/vnet/vnet/interface.h @@ -503,6 +503,15 @@ typedef struct } eth; } vnet_sub_interface_t; +typedef enum +{ + /* Always flood */ + VNET_FLOOD_CLASS_NORMAL, + VNET_FLOOD_CLASS_TUNNEL_MASTER, + /* Does not flood when tunnel master is in the same L2 BD */ + VNET_FLOOD_CLASS_TUNNEL_NORMAL +} vnet_flood_class_t; + /* Software-interface. This corresponds to a Ethernet VLAN, ATM vc, a tunnel, etc. Configuration (e.g. IP address) gets attached to software interface. 
*/ @@ -545,6 +554,8 @@ typedef struct /* VNET_SW_INTERFACE_TYPE_SUB. */ vnet_sub_interface_t sub; }; + + vnet_flood_class_t flood_class; } vnet_sw_interface_t; typedef enum diff --git a/vnet/vnet/interface_cli.c b/vnet/vnet/interface_cli.c index cd7a620b..1c15eb18 100644 --- a/vnet/vnet/interface_cli.c +++ b/vnet/vnet/interface_cli.c @@ -697,6 +697,7 @@ create_sub_interfaces (vlib_main_t * vm, *kp = sup_and_sub_key; template.type = VNET_SW_INTERFACE_TYPE_SUB; + template.flood_class = VNET_FLOOD_CLASS_NORMAL; template.sup_sw_if_index = hi->sw_if_index; template.sub.id = id; if (id_min < id_max) diff --git a/vnet/vnet/ip/ip4_packet.h b/vnet/vnet/ip/ip4_packet.h index 78db01a1..277b968f 100644 --- a/vnet/vnet/ip/ip4_packet.h +++ b/vnet/vnet/ip/ip4_packet.h @@ -271,6 +271,19 @@ ip4_multicast_address_set_for_group (ip4_address_t * a, ip_multicast_group_t g) } always_inline void +ip4_multicast_ethernet_address (u8 * ethernet_address, ip4_address_t * a) +{ + u8 *d = a->as_u8; + + ethernet_address[0] = 0x01; + ethernet_address[1] = 0x00; + ethernet_address[2] = 0x5e; + ethernet_address[3] = d[1] & 0x7f; + ethernet_address[4] = d[2]; + ethernet_address[5] = d[3]; +} + +always_inline void ip4_tcp_reply_x1 (ip4_header_t * ip0, tcp_header_t * tcp0) { u32 src0, dst0; diff --git a/vnet/vnet/ip/ip6_packet.h b/vnet/vnet/ip/ip6_packet.h index 29fa4a4e..456c011a 100644 --- a/vnet/vnet/ip/ip6_packet.h +++ b/vnet/vnet/ip/ip6_packet.h @@ -64,6 +64,7 @@ typedef CLIB_PACKED (union { ip4_address_t ip4; }; ip6_address_t ip6; + u8 as_u8[16]; u64 as_u64[2]; }) ip46_address_t; #define ip46_address_is_ip4(ip46) (((ip46)->pad[0] | (ip46)->pad[1] | (ip46)->pad[2]) == 0) @@ -74,6 +75,15 @@ typedef CLIB_PACKED (union { #define ip46_address_is_zero(ip46) (((ip46)->as_u64[0] == 0) && ((ip46)->as_u64[1] == 0)) always_inline void +ip46_from_addr_buf(u32 is_ipv6, u8 *buf, ip46_address_t *ip) +{ + if (is_ipv6) + ip->ip6 = *((ip6_address_t *) buf); + else + ip46_address_set_ip4(ip, (ip4_address_t *) buf); +} + +always_inline void ip6_addr_fib_init (ip6_address_fib_t * addr_fib, ip6_address_t * address, u32 fib_index) { @@ -123,6 +133,13 @@ always_inline uword ip6_address_is_multicast (ip6_address_t * a) { return a->as_u8[0] == 0xff; } +always_inline uword +ip46_address_is_multicast (ip46_address_t * a) +{ + return ip46_address_is_ip4(a) ? ip4_address_is_multicast(&a->ip4) : + ip6_address_is_multicast(&a->ip6); +} + always_inline void ip6_set_reserved_multicast_address (ip6_address_t * a, ip6_multicast_address_scope_t scope, diff --git a/vnet/vnet/l2/l2_bd.c b/vnet/vnet/l2/l2_bd.c index 0b5656e6..e2ef6797 100644 --- a/vnet/vnet/l2/l2_bd.c +++ b/vnet/vnet/l2/l2_bd.c @@ -54,6 +54,9 @@ bd_validate (l2_bridge_domain_t * bd_config) bd_config->feature_bitmap = ~L2INPUT_FEAT_ARP_TERM; bd_config->bvi_sw_if_index = ~0; bd_config->members = 0; + bd_config->flood_count = 0; + bd_config->tun_master_count = 0; + bd_config->tun_normal_count = 0; bd_config->mac_by_ip4 = 0; bd_config->mac_by_ip6 = hash_create_mem (0, sizeof (ip6_address_t), sizeof (uword)); @@ -114,32 +117,49 @@ bd_delete_bd_index (bd_main_t * bdm, u32 bd_id) return 0; } +static void +update_flood_count (l2_bridge_domain_t * bd_config) +{ + bd_config->flood_count = vec_len (bd_config->members) - + (bd_config->tun_master_count ? 
bd_config->tun_normal_count : 0); +} + void bd_add_member (l2_bridge_domain_t * bd_config, l2_flood_member_t * member) { + u32 ix; + vnet_sw_interface_t *sw_if = vnet_get_sw_interface + (vnet_get_main (), member->sw_if_index); + /* * Add one element to the vector - * + * vector is ordered [ bvi, normal/tun_masters..., tun_normals... ] * When flooding, the bvi interface (if present) must be the last member * processed due to how BVI processing can change the packet. To enable * this order, we make the bvi interface the first in the vector and * flooding walks the vector in reverse. */ - if ((member->flags == L2_FLOOD_MEMBER_NORMAL) || - (vec_len (bd_config->members) == 0)) + switch (sw_if->flood_class) { - vec_add1 (bd_config->members, *member); - - } - else - { - /* Move 0th element to the end */ - vec_add1 (bd_config->members, bd_config->members[0]); - bd_config->members[0] = *member; - } + case VNET_FLOOD_CLASS_TUNNEL_MASTER: + bd_config->tun_master_count++; + /* Fall through */ + default: + /* Fall through */ + case VNET_FLOOD_CLASS_NORMAL: + ix = (member->flags & L2_FLOOD_MEMBER_BVI) ? 0 : + vec_len (bd_config->members) - bd_config->tun_normal_count; + break; + case VNET_FLOOD_CLASS_TUNNEL_NORMAL: + ix = vec_len (bd_config->members); + bd_config->tun_normal_count++; + break; + } + + vec_insert_elts (bd_config->members, member, 1, ix); + update_flood_count (bd_config); } - #define BD_REMOVE_ERROR_OK 0 #define BD_REMOVE_ERROR_NOT_FOUND 1 @@ -151,9 +171,22 @@ bd_remove_member (l2_bridge_domain_t * bd_config, u32 sw_if_index) /* Find and delete the member */ vec_foreach_index (ix, bd_config->members) { - if (vec_elt (bd_config->members, ix).sw_if_index == sw_if_index) + l2_flood_member_t *m = vec_elt_at_index (bd_config->members, ix); + if (m->sw_if_index == sw_if_index) { + vnet_sw_interface_t *sw_if = vnet_get_sw_interface + (vnet_get_main (), sw_if_index); + + if (sw_if->flood_class != VNET_FLOOD_CLASS_NORMAL) + { + if (sw_if->flood_class == VNET_FLOOD_CLASS_TUNNEL_MASTER) + bd_config->tun_master_count--; + else if (sw_if->flood_class == VNET_FLOOD_CLASS_TUNNEL_NORMAL) + bd_config->tun_normal_count--; + } vec_del1 (bd_config->members, ix); + update_flood_count (bd_config); + return BD_REMOVE_ERROR_OK; } } @@ -854,28 +887,27 @@ bd_show (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) if (detail || intf) { /* Show all member interfaces */ - - l2_flood_member_t *member; - u32 header = 0; - - vec_foreach (member, bd_config->members) + int i; + vec_foreach_index (i, bd_config->members) { + l2_flood_member_t *member = + vec_elt_at_index (bd_config->members, i); u32 vtr_opr, dot1q, tag1, tag2; - if (!header) + if (i == 0) { - header = 1; - vlib_cli_output (vm, "\n%=30s%=7s%=5s%=5s%=30s", + vlib_cli_output (vm, "\n%=30s%=7s%=5s%=5s%=9s%=30s", "Interface", "Index", "SHG", "BVI", - "VLAN-Tag-Rewrite"); + "TxFlood", "VLAN-Tag-Rewrite"); } l2vtr_get (vm, vnm, member->sw_if_index, &vtr_opr, &dot1q, &tag1, &tag2); - vlib_cli_output (vm, "%=30U%=7d%=5d%=5s%=30U", + vlib_cli_output (vm, "%=30U%=7d%=5d%=5s%=9s%=30U", format_vnet_sw_if_index_name, vnm, member->sw_if_index, member->sw_if_index, member->shg, member->flags & L2_FLOOD_MEMBER_BVI ? "*" : - "-", format_vtr, vtr_opr, dot1q, tag1, tag2); + "-", i < bd_config->flood_count ? 
"*" : "-", + format_vtr, vtr_opr, dot1q, tag1, tag2); } } diff --git a/vnet/vnet/l2/l2_bd.h b/vnet/vnet/l2/l2_bd.h index 2d7853eb..b9ee8236 100644 --- a/vnet/vnet/l2/l2_bd.h +++ b/vnet/vnet/l2/l2_bd.h @@ -67,9 +67,18 @@ typedef struct /* bridge domain id, not to be confused with bd_index */ u32 bd_id; - /* Vector of members in the replication group */ + /* Vector of member ports */ l2_flood_member_t *members; + /* First flood_count member ports are flooded */ + u32 flood_count; + + /* Tunnel Master (Multicast vxlan) are always flooded */ + u32 tun_master_count; + + /* Tunnels (Unicast vxlan) are flooded if there are no masters */ + u32 tun_normal_count; + /* hash ip4/ip6 -> mac for arp/nd termination */ uword *mac_by_ip4; uword *mac_by_ip6; diff --git a/vnet/vnet/l2/l2_flood.c b/vnet/vnet/l2/l2_flood.c index 5e9881bf..ed9e5ac2 100644 --- a/vnet/vnet/l2/l2_flood.c +++ b/vnet/vnet/l2/l2_flood.c @@ -160,7 +160,7 @@ l2flood_process (vlib_main_t * vm, members = bd_config->members; /* Find first member that passes the reflection and SHG checks */ - current_member = vec_len (members) - 1; + current_member = bd_config->flood_count - 1; while ((current_member >= 0) && ((members[current_member].sw_if_index == *sw_if_index0) || (in_shg && members[current_member].shg == in_shg))) @@ -247,16 +247,7 @@ l2flood_process (vlib_main_t * vm, } /* Forward packet to the current member */ - - if (PREDICT_TRUE (members[current_member].flags == L2_FLOOD_MEMBER_NORMAL)) - { - /* Do normal L2 forwarding */ - vnet_buffer (b0)->sw_if_index[VLIB_TX] = - members[current_member].sw_if_index; - *next0 = L2FLOOD_NEXT_L2_OUTPUT; - - } - else + if (PREDICT_FALSE (members[current_member].flags & L2_FLOOD_MEMBER_BVI)) { /* Do BVI processing */ u32 rc; @@ -280,6 +271,14 @@ l2flood_process (vlib_main_t * vm, } } } + else + { + /* Do normal L2 forwarding */ + vnet_buffer (b0)->sw_if_index[VLIB_TX] = + members[current_member].sw_if_index; + *next0 = L2FLOOD_NEXT_L2_OUTPUT; + + } } diff --git a/vnet/vnet/l2/l2_input.c b/vnet/vnet/l2/l2_input.c index 5d4a3761..babca1b2 100644 --- a/vnet/vnet/l2/l2_input.c +++ b/vnet/vnet/l2/l2_input.c @@ -550,9 +550,7 @@ set_int_l2_mode (vlib_main_t * vm, vnet_main_t * vnet_main, u32 mode, u32 sw_if_ u32 slot; hi = vnet_get_sup_hw_interface (vnet_main, sw_if_index); - - vec_validate (mp->configs, sw_if_index); - config = vec_elt_at_index (mp->configs, sw_if_index); + config = l2input_intf_config (sw_if_index); if (config->bridge) { diff --git a/vnet/vnet/lisp-gpe/lisp_gpe_sub_interface.c b/vnet/vnet/lisp-gpe/lisp_gpe_sub_interface.c index b7802ff2..5b69bd15 100644 --- a/vnet/vnet/lisp-gpe/lisp_gpe_sub_interface.c +++ b/vnet/vnet/lisp-gpe/lisp_gpe_sub_interface.c @@ -133,6 +133,7 @@ lisp_gpe_sub_interface_find_or_create_and_lock (const ip_address_t * lrloc, vnet_sw_interface_t sub_itf_template = { .type = VNET_SW_INTERFACE_TYPE_SUB, + .flood_class = VNET_FLOOD_CLASS_NORMAL, .sup_sw_if_index = main_sw_if_index, .sub.id = lisp_gpe_sub_interface_id++, }; diff --git a/vnet/vnet/vxlan/vxlan.c b/vnet/vnet/vxlan/vxlan.c index d37e9d68..3c6d7bd9 100644 --- a/vnet/vnet/vxlan/vxlan.c +++ b/vnet/vnet/vxlan/vxlan.c @@ -16,6 +16,7 @@ #include <vnet/ip/format.h> #include <vnet/fib/fib_entry.h> #include <vnet/fib/fib_table.h> +#include <vnet/dpo/receive_dpo.h> /** * @file @@ -97,6 +98,19 @@ VNET_HW_INTERFACE_CLASS (vxlan_hw_class) = { .build_rewrite = default_build_rewrite, }; +static void +vxlan_tunnel_restack_dpo(vxlan_tunnel_t * t) +{ + dpo_id_t dpo = DPO_INVALID; + u32 encap_index = 
ip46_address_is_ip4(&t->dst) ? + vxlan4_encap_node.index : vxlan6_encap_node.index; + fib_forward_chain_type_t forw_type = ip46_address_is_ip4(&t->dst) ? + FIB_FORW_CHAIN_TYPE_UNICAST_IP4 : FIB_FORW_CHAIN_TYPE_UNICAST_IP6; + + fib_entry_contribute_forwarding (t->fib_entry_index, forw_type, &dpo); + dpo_stack_from_node (encap_index, &t->next_dpo, &dpo); + dpo_reset(&dpo); +} static vxlan_tunnel_t * vxlan_tunnel_from_fib_node (fib_node_t *node) @@ -116,22 +130,7 @@ static fib_node_back_walk_rc_t vxlan_tunnel_back_walk (fib_node_t *node, fib_node_back_walk_ctx_t *ctx) { - vxlan_tunnel_t *t = vxlan_tunnel_from_fib_node(node); - dpo_id_t dpo = DPO_INVALID; - - if (ip46_address_is_ip4(&t->dst)) { - fib_entry_contribute_forwarding - (t->fib_entry_index, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, &dpo); - dpo_stack_from_node - (vxlan4_encap_node.index, &t->next_dpo, &dpo); - } else { - fib_entry_contribute_forwarding - (t->fib_entry_index, FIB_FORW_CHAIN_TYPE_UNICAST_IP6, &dpo); - dpo_stack_from_node - (vxlan6_encap_node.index, &t->next_dpo, &dpo); - } - dpo_reset(&dpo); - + vxlan_tunnel_restack_dpo(vxlan_tunnel_from_fib_node(node)); return (FIB_NODE_BACK_WALK_CONTINUE); } @@ -175,19 +174,10 @@ const static fib_node_vft_t vxlan_vft = { #define foreach_copy_field \ _(vni) \ -_(encap_fib_index) - -#define foreach_copy_ipv4 { \ - _(src.ip4.as_u32) \ - _(dst.ip4.as_u32) \ -} - -#define foreach_copy_ipv6 { \ - _(src.ip6.as_u64[0]) \ - _(src.ip6.as_u64[1]) \ - _(dst.ip6.as_u64[0]) \ - _(dst.ip6.as_u64[1]) \ -} +_(mcast_sw_if_index) \ +_(encap_fib_index) \ +_(src) \ +_(dst) static int vxlan4_rewrite (vxlan_tunnel_t * t) { @@ -297,8 +287,6 @@ int vnet_vxlan_add_del_tunnel /* copy from arg structure */ #define _(x) t->x = a->x; foreach_copy_field; - if (!is_ip6) foreach_copy_ipv4 - else foreach_copy_ipv6 #undef _ /* copy the key */ @@ -372,66 +360,59 @@ int vnet_vxlan_add_del_tunnel vnet_sw_interface_set_flags (vnm, sw_if_index, VNET_SW_INTERFACE_FLAG_ADMIN_UP); - /* - * source the FIB entry for the tunnel's destination - * and become a child thereof. 
The tunnel will then get poked - * when the forwarding for the entry updates, and the tunnel can - * re-stack accordingly - */ fib_node_init(&t->node, FIB_NODE_TYPE_VXLAN_TUNNEL); - if (!is_ip6) - { - dpo_id_t dpo = DPO_INVALID; - const fib_prefix_t tun_dst_pfx = - { - .fp_len = 32, - .fp_proto = FIB_PROTOCOL_IP4, - .fp_addr = - { - .ip4 = t->dst.ip4, - } - }; - - t->fib_entry_index = fib_table_entry_special_add - (t->encap_fib_index, &tun_dst_pfx, - FIB_SOURCE_RR, FIB_ENTRY_FLAG_NONE, ADJ_INDEX_INVALID); - t->sibling_index = fib_entry_child_add - (t->fib_entry_index, FIB_NODE_TYPE_VXLAN_TUNNEL, t - vxm->tunnels); - fib_entry_contribute_forwarding - (t->fib_entry_index, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, &dpo); - dpo_stack_from_node (vxlan4_encap_node.index, &t->next_dpo, &dpo); - dpo_reset(&dpo); - - /* Set vxlan tunnel output node to ip4 version */ - hi->output_node_index = vxlan4_encap_node.index; - } - else - { - dpo_id_t dpo = DPO_INVALID; - const fib_prefix_t tun_dst_pfx = - { - .fp_len = 128, - .fp_proto = FIB_PROTOCOL_IP6, - .fp_addr = - { - .ip6 = t->dst.ip6, - } - }; - - t->fib_entry_index = fib_table_entry_special_add - (t->encap_fib_index, &tun_dst_pfx, - FIB_SOURCE_RR, FIB_ENTRY_FLAG_NONE, ADJ_INDEX_INVALID); - t->sibling_index = fib_entry_child_add - (t->fib_entry_index, FIB_NODE_TYPE_VXLAN_TUNNEL, t - vxm->tunnels); - fib_entry_contribute_forwarding - (t->fib_entry_index, FIB_FORW_CHAIN_TYPE_UNICAST_IP6, &dpo); - dpo_stack_from_node - (vxlan6_encap_node.index, &t->next_dpo, &dpo); - dpo_reset(&dpo); - - /* Set vxlan tunnel output node to ip6 version */ - hi->output_node_index = vxlan6_encap_node.index; - } + fib_prefix_t tun_dst_pfx; + u32 encap_index = !is_ip6 ? + vxlan4_encap_node.index : vxlan6_encap_node.index; + vnet_flood_class_t flood_class = VNET_FLOOD_CLASS_TUNNEL_NORMAL; + + fib_prefix_from_ip46_addr(&t->dst, &tun_dst_pfx); + if (ip46_address_is_multicast(&t->dst)) + { + fib_protocol_t fp; + u8 mcast_mac[6]; + if (!is_ip6) { + ip4_multicast_ethernet_address(mcast_mac, &t->dst.ip4); + fp = FIB_PROTOCOL_IP4; + } else { + ip6_multicast_ethernet_address(mcast_mac, t->dst.ip6.as_u32[0]); + fp = FIB_PROTOCOL_IP6; + } + t->mcast_adj_index = adj_rewrite_add_and_lock + (fp, fib_proto_to_link(fp), t->mcast_sw_if_index, mcast_mac); + + flood_class = VNET_FLOOD_CLASS_TUNNEL_MASTER; + + /* Stack mcast dst mac addr rewrite on encap */ + dpo_proto_t dproto = fib_proto_to_dpo(fp); + dpo_id_t dpo = DPO_INVALID; + + dpo_set (&dpo, DPO_ADJACENCY, dproto, t->mcast_adj_index); + dpo_stack_from_node (encap_index, &t->next_dpo, &dpo); + dpo_reset(&dpo); + + /* Add local mcast adj. */ + receive_dpo_add_or_lock(dproto, ~0, NULL, &dpo); + t->fib_entry_index = fib_table_entry_special_dpo_add + (t->encap_fib_index, &tun_dst_pfx, FIB_SOURCE_SPECIAL, FIB_ENTRY_FLAG_NONE, &dpo); + dpo_reset(&dpo); + } else { + /* + * source the FIB entry for the tunnel's destination + * and become a child thereof. 
The tunnel will then get poked + * when the forwarding for the entry updates, and the tunnel can + * re-stack accordingly + */ + t->fib_entry_index = fib_table_entry_special_add + (t->encap_fib_index, &tun_dst_pfx, FIB_SOURCE_RR, FIB_ENTRY_FLAG_NONE, ADJ_INDEX_INVALID); + t->sibling_index = fib_entry_child_add + (t->fib_entry_index, FIB_NODE_TYPE_VXLAN_TUNNEL, t - vxm->tunnels); + vxlan_tunnel_restack_dpo(t); + } + /* Set vxlan tunnel output node */ + hi->output_node_index = encap_index; + + vnet_get_sw_interface (vnet_get_main(), sw_if_index)->flood_class = flood_class; } else { @@ -448,8 +429,16 @@ int vnet_vxlan_add_del_tunnel vxm->tunnel_index_by_sw_if_index[t->sw_if_index] = ~0; - fib_entry_child_remove(t->fib_entry_index, t->sibling_index); - fib_table_entry_delete_index(t->fib_entry_index, FIB_SOURCE_RR); + if (ip46_address_is_multicast(&t->dst)) + { + adj_unlock(t->mcast_adj_index); + fib_table_entry_delete_index(t->fib_entry_index, FIB_SOURCE_SPECIAL); + } + else + { + fib_entry_child_remove(t->fib_entry_index, t->sibling_index); + fib_table_entry_delete_index(t->fib_entry_index, FIB_SOURCE_RR); + } fib_node_deinit(&t->node); if (!is_ip6) @@ -515,20 +504,26 @@ vxlan_add_del_tunnel_command_fn (vlib_main_t * vm, vlib_cli_command_t * cmd) { unformat_input_t _line_input, * line_input = &_line_input; - ip46_address_t src, dst; + ip46_address_t src , dst; u8 is_add = 1; u8 src_set = 0; u8 dst_set = 0; + u8 grp_set = 0; u8 ipv4_set = 0; u8 ipv6_set = 0; u32 encap_fib_index = 0; + u32 mcast_sw_if_index = ~0; u32 decap_next_index = ~0; u32 vni = 0; u32 tmp; int rv; vnet_vxlan_add_del_tunnel_args_t _a, * a = &_a; - u32 sw_if_index; - + u32 tunnel_sw_if_index; + + /* Cant "universally zero init" (={0}) due to GCC bug 53119 */ + memset(&src, 0, sizeof src); + memset(&dst, 0, sizeof dst); + /* Get a line of input. */ if (! 
unformat_user (input, unformat_line_input, line_input)) return 0; @@ -562,6 +557,22 @@ vxlan_add_del_tunnel_command_fn (vlib_main_t * vm, dst_set = 1; ipv6_set = 1; } + else if (unformat (line_input, "group %U %U", + unformat_ip4_address, &dst.ip4, + unformat_vnet_sw_interface, + vnet_get_main(), &mcast_sw_if_index)) + { + grp_set = dst_set = 1; + ipv4_set = 1; + } + else if (unformat (line_input, "group %U %U", + unformat_ip6_address, &dst.ip6, + unformat_vnet_sw_interface, + vnet_get_main(), &mcast_sw_if_index)) + { + grp_set = dst_set = 1; + ipv6_set = 1; + } else if (unformat (line_input, "encap-vrf-id %d", &tmp)) { if (ipv6_set) @@ -592,11 +603,16 @@ vxlan_add_del_tunnel_command_fn (vlib_main_t * vm, if (dst_set == 0) return clib_error_return (0, "tunnel dst address not specified"); + if (grp_set && !ip46_address_is_multicast(&dst)) + return clib_error_return (0, "tunnel group address not multicast"); + + if (grp_set && mcast_sw_if_index == ~0) + return clib_error_return (0, "tunnel nonexistent multicast device"); + if (ipv4_set && ipv6_set) return clib_error_return (0, "both IPv4 and IPv6 addresses specified"); - if ((ipv4_set && memcmp(&src.ip4, &dst.ip4, sizeof(src.ip4)) == 0) || - (ipv6_set && memcmp(&src.ip6, &dst.ip6, sizeof(src.ip6)) == 0)) + if (ip46_address_cmp(&src, &dst) == 0) return clib_error_return (0, "src and dst addresses are identical"); if (vni == 0) @@ -609,18 +625,16 @@ vxlan_add_del_tunnel_command_fn (vlib_main_t * vm, #define _(x) a->x = x; foreach_copy_field; - if (ipv4_set) foreach_copy_ipv4 - else foreach_copy_ipv6 #undef _ - rv = vnet_vxlan_add_del_tunnel (a, &sw_if_index); + rv = vnet_vxlan_add_del_tunnel (a, &tunnel_sw_if_index); switch(rv) { case 0: if (is_add) vlib_cli_output(vm, "%U\n", format_vnet_sw_if_index_name, - vnet_get_main(), sw_if_index); + vnet_get_main(), tunnel_sw_if_index); break; case VNET_API_ERROR_TUNNEL_EXIST: @@ -661,7 +675,8 @@ vxlan_add_del_tunnel_command_fn (vlib_main_t * vm, VLIB_CLI_COMMAND (create_vxlan_tunnel_command, static) = { .path = "create vxlan tunnel", .short_help = - "create vxlan tunnel src <local-vtep-addr> dst <remote-vtep-addr> vni <nn>" + "create vxlan tunnel src <local-vtep-addr>" + " {dst <remote-vtep-addr>|group <mcast-vtep-addr> <intf-name>} vni <nn>" " [encap-vrf-id <nn>]", .function = vxlan_add_del_tunnel_command_fn, }; diff --git a/vnet/vnet/vxlan/vxlan.h b/vnet/vnet/vxlan/vxlan.h index e37f09aa..28ce2a93 100644 --- a/vnet/vnet/vxlan/vxlan.h +++ b/vnet/vnet/vxlan/vxlan.h @@ -28,6 +28,7 @@ #include <vnet/ip/ip6_packet.h> #include <vnet/ip/udp.h> #include <vnet/dpo/dpo.h> +#include <vnet/adj/adj_types.h> typedef CLIB_PACKED (struct { ip4_header_t ip4; /* 20 bytes */ @@ -84,6 +85,8 @@ typedef struct { ip46_address_t src; ip46_address_t dst; + u32 mcast_sw_if_index; + /* The FIB index for src/dst addresses */ u32 encap_fib_index; @@ -96,8 +99,12 @@ typedef struct { */ fib_node_t node; - /* The FIB entry sourced by the tunnel for its destination prefix */ + /* + * The FIB entry for (depending on VXLAN tunnel is unicast or mcast) + * sending unicast VXLAN encap packets or receiving mcast VXLAN packets + */ fib_node_index_t fib_entry_index; + adj_index_t mcast_adj_index; /** * The tunnel is a child of the FIB entry for its desintion. 
This is @@ -161,6 +168,7 @@ typedef struct { * structure, this seems less of abreaking change */ u8 is_ip6; ip46_address_t src, dst; + u32 mcast_sw_if_index; u32 encap_fib_index; u32 decap_next_index; u32 vni; diff --git a/vpp-api-test/vat/api_format.c b/vpp-api-test/vat/api_format.c index 9b02f1e4..8522428e 100644 --- a/vpp-api-test/vat/api_format.c +++ b/vpp-api-test/vat/api_format.c @@ -10082,40 +10082,78 @@ api_vxlan_add_del_tunnel (vat_main_t * vam) unformat_input_t *line_input = vam->input; vl_api_vxlan_add_del_tunnel_t *mp; f64 timeout; - ip4_address_t src4, dst4; - ip6_address_t src6, dst6; + ip46_address_t src, dst; u8 is_add = 1; u8 ipv4_set = 0, ipv6_set = 0; u8 src_set = 0; u8 dst_set = 0; + u8 grp_set = 0; + u32 mcast_sw_if_index = ~0; u32 encap_vrf_id = 0; u32 decap_next_index = ~0; u32 vni = 0; + /* Can't "universally zero init" (={0}) due to GCC bug 53119 */ + memset (&src, 0, sizeof src); + memset (&dst, 0, sizeof dst); + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) { if (unformat (line_input, "del")) is_add = 0; - else if (unformat (line_input, "src %U", unformat_ip4_address, &src4)) + else + if (unformat (line_input, "src %U", unformat_ip4_address, &src.ip4)) { ipv4_set = 1; src_set = 1; } - else if (unformat (line_input, "dst %U", unformat_ip4_address, &dst4)) + else + if (unformat (line_input, "dst %U", unformat_ip4_address, &dst.ip4)) { ipv4_set = 1; dst_set = 1; } - else if (unformat (line_input, "src %U", unformat_ip6_address, &src6)) + else + if (unformat (line_input, "src %U", unformat_ip6_address, &src.ip6)) { ipv6_set = 1; src_set = 1; } - else if (unformat (line_input, "dst %U", unformat_ip6_address, &dst6)) + else + if (unformat (line_input, "dst %U", unformat_ip6_address, &dst.ip6)) { ipv6_set = 1; dst_set = 1; } + else if (unformat (line_input, "group %U %U", + unformat_ip4_address, &dst.ip4, + unformat_sw_if_index, vam, &mcast_sw_if_index)) + { + grp_set = dst_set = 1; + ipv4_set = 1; + } + else if (unformat (line_input, "group %U", + unformat_ip4_address, &dst.ip4)) + { + grp_set = dst_set = 1; + ipv4_set = 1; + } + else if (unformat (line_input, "group %U %U", + unformat_ip6_address, &dst.ip6, + unformat_sw_if_index, vam, &mcast_sw_if_index)) + { + grp_set = dst_set = 1; + ipv6_set = 1; + } + else if (unformat (line_input, "group %U", + unformat_ip6_address, &dst.ip6)) + { + grp_set = dst_set = 1; + ipv6_set = 1; + } + else + if (unformat (line_input, "mcast_sw_if_index %u", &mcast_sw_if_index)) + ; else if (unformat (line_input, "encap-vrf-id %d", &encap_vrf_id)) ; else if (unformat (line_input, "decap-next %U", @@ -10141,6 +10179,18 @@ api_vxlan_add_del_tunnel (vat_main_t * vam) return -99; } + if (grp_set && !ip46_address_is_multicast (&dst)) + { + errmsg ("tunnel group address not multicast\n"); + return -99; + } + if (grp_set && mcast_sw_if_index == ~0) + { + errmsg ("tunnel nonexistent multicast device\n"); + return -99; + } + + if (ipv4_set && ipv6_set) { errmsg ("both IPv4 and IPv6 addresses specified"); @@ -10157,16 +10207,17 @@ api_vxlan_add_del_tunnel (vat_main_t * vam) if (ipv6_set) { - clib_memcpy (&mp->src_address, &src6, sizeof (src6)); - clib_memcpy (&mp->dst_address, &dst6, sizeof (dst6)); + clib_memcpy (mp->src_address, &src.ip6, sizeof (src.ip6)); + clib_memcpy (mp->dst_address, &dst.ip6, sizeof (dst.ip6)); } else { - clib_memcpy (&mp->src_address, &src4, sizeof (src4)); - clib_memcpy (&mp->dst_address, &dst4, sizeof (dst4)); + clib_memcpy (mp->src_address, &src.ip4, sizeof (src.ip4)); + clib_memcpy 
(mp->dst_address, &dst.ip4, sizeof (dst.ip4)); } mp->encap_vrf_id = ntohl (encap_vrf_id); mp->decap_next_index = ntohl (decap_next_index); + mp->mcast_sw_if_index = ntohl (mcast_sw_if_index); mp->vni = ntohl (vni); mp->is_add = is_add; mp->is_ipv6 = ipv6_set; @@ -10181,15 +10232,18 @@ static void vl_api_vxlan_tunnel_details_t_handler (vl_api_vxlan_tunnel_details_t * mp) { vat_main_t *vam = &vat_main; + ip46_address_t src, dst; + + ip46_from_addr_buf (mp->is_ipv6, mp->src_address, &src); + ip46_from_addr_buf (mp->is_ipv6, mp->dst_address, &dst); - fformat (vam->ofp, "%11d%24U%24U%14d%18d%13d\n", + fformat (vam->ofp, "%11d%24U%24U%14d%18d%13d%19d\n", ntohl (mp->sw_if_index), - format_ip46_address, &(mp->src_address[0]), - IP46_TYPE_ANY, - format_ip46_address, &(mp->dst_address[0]), - IP46_TYPE_ANY, + format_ip46_address, &src, IP46_TYPE_ANY, + format_ip46_address, &dst, IP46_TYPE_ANY, ntohl (mp->encap_vrf_id), - ntohl (mp->decap_next_index), ntohl (mp->vni)); + ntohl (mp->decap_next_index), ntohl (mp->vni), + ntohl (mp->mcast_sw_if_index)); } static void vl_api_vxlan_tunnel_details_t_handler_json @@ -10197,8 +10251,6 @@ static void vl_api_vxlan_tunnel_details_t_handler_json { vat_main_t *vam = &vat_main; vat_json_node_t *node = NULL; - struct in_addr ip4; - struct in6_addr ip6; if (VAT_JSON_ARRAY != vam->json_tree.type) { @@ -10211,16 +10263,20 @@ static void vl_api_vxlan_tunnel_details_t_handler_json vat_json_object_add_uint (node, "sw_if_index", ntohl (mp->sw_if_index)); if (mp->is_ipv6) { - clib_memcpy (&ip6, &(mp->src_address[0]), sizeof (ip6)); + struct in6_addr ip6; + + clib_memcpy (&ip6, mp->src_address, sizeof (ip6)); vat_json_object_add_ip6 (node, "src_address", ip6); - clib_memcpy (&ip6, &(mp->dst_address[0]), sizeof (ip6)); + clib_memcpy (&ip6, mp->dst_address, sizeof (ip6)); vat_json_object_add_ip6 (node, "dst_address", ip6); } else { - clib_memcpy (&ip4, &(mp->src_address[0]), sizeof (ip4)); + struct in_addr ip4; + + clib_memcpy (&ip4, mp->src_address, sizeof (ip4)); vat_json_object_add_ip4 (node, "src_address", ip4); - clib_memcpy (&ip4, &(mp->dst_address[0]), sizeof (ip4)); + clib_memcpy (&ip4, mp->dst_address, sizeof (ip4)); vat_json_object_add_ip4 (node, "dst_address", ip4); } vat_json_object_add_uint (node, "encap_vrf_id", ntohl (mp->encap_vrf_id)); @@ -10228,6 +10284,8 @@ static void vl_api_vxlan_tunnel_details_t_handler_json ntohl (mp->decap_next_index)); vat_json_object_add_uint (node, "vni", ntohl (mp->vni)); vat_json_object_add_uint (node, "is_ipv6", mp->is_ipv6 ? 
1 : 0); + vat_json_object_add_uint (node, "mcast_sw_if_index", + ntohl (mp->mcast_sw_if_index)); } static int @@ -10255,9 +10313,10 @@ api_vxlan_tunnel_dump (vat_main_t * vam) if (!vam->json_output) { - fformat (vam->ofp, "%11s%24s%24s%14s%18s%13s\n", + fformat (vam->ofp, "%11s%24s%24s%14s%18s%13s%19s\n", "sw_if_index", "src_address", "dst_address", - "encap_vrf_id", "decap_next_index", "vni"); + "encap_vrf_id", "decap_next_index", "vni", + "mcast_sw_if_index"); } /* Get list of vxlan-tunnel interfaces */ @@ -16772,8 +16831,9 @@ _(l2tpv3_set_lookup_key, \ "lookup_v6_src | lookup_v6_dst | lookup_session_id") \ _(sw_if_l2tpv3_tunnel_dump, "") \ _(vxlan_add_del_tunnel, \ - "src <ip-addr> dst <ip-addr> vni <vni> [encap-vrf-id <nn>]\n" \ - " [decap-next l2|ip4|ip6] [del]") \ + "src <ip-addr> { dst <ip-addr> | group <mcast-ip-addr>\n" \ + "{ <intfc> | mcast_sw_if_index <nn> } }\n" \ + "vni <vni> [encap-vrf-id <nn>] [decap-next l2|ip4|ip6] [del]") \ _(vxlan_tunnel_dump, "[<intfc> | sw_if_index <nn>]") \ _(gre_add_del_tunnel, \ "src <ip4-addr> dst <ip4-addr> [outer-fib-id <nn>] [teb] [del]\n") \ diff --git a/vpp/vpp-api/api.c b/vpp/vpp-api/api.c index bbe1963a..8f0165a1 100644 --- a/vpp/vpp-api/api.c +++ b/vpp/vpp-api/api.c @@ -4679,31 +4679,22 @@ static void vl_api_vxlan_add_del_tunnel_t_handler goto out; } encap_fib_index = p[0]; - - /* Check src & dst are different */ - if ((mp->is_ipv6 && memcmp (mp->src_address, mp->dst_address, 16) == 0) || - (!mp->is_ipv6 && memcmp (mp->src_address, mp->dst_address, 4) == 0)) - { - rv = VNET_API_ERROR_SAME_SRC_DST; - goto out; - } memset (a, 0, sizeof (*a)); a->is_add = mp->is_add; a->is_ip6 = mp->is_ipv6; /* ip addresses sent in network byte order */ - if (a->is_ip6) - { - memcpy (&(a->src.ip6), mp->src_address, 16); - memcpy (&(a->dst.ip6), mp->dst_address, 16); - } - else + ip46_from_addr_buf (mp->is_ipv6, mp->dst_address, &a->dst); + ip46_from_addr_buf (mp->is_ipv6, mp->src_address, &a->src); + + /* Check src & dst are different */ + if (ip46_address_cmp (&a->dst, &a->src) == 0) { - memcpy (&(a->src.ip4), mp->src_address, 4); - memcpy (&(a->dst.ip4), mp->dst_address, 4); + rv = VNET_API_ERROR_SAME_SRC_DST; + goto out; } - + a->mcast_sw_if_index = ntohl (mp->mcast_sw_if_index); a->encap_fib_index = encap_fib_index; a->decap_next_index = ntohl (mp->decap_next_index); a->vni = ntohl (mp->vni); @@ -4731,16 +4722,17 @@ static void send_vxlan_tunnel_details rmp->_vl_msg_id = ntohs (VL_API_VXLAN_TUNNEL_DETAILS); if (is_ipv6) { - memcpy (rmp->src_address, &(t->src.ip6), 16); - memcpy (rmp->dst_address, &(t->dst.ip6), 16); + memcpy (rmp->src_address, t->src.ip6.as_u8, 16); + memcpy (rmp->dst_address, t->dst.ip6.as_u8, 16); rmp->encap_vrf_id = htonl (im6->fibs[t->encap_fib_index].ft_table_id); } else { - memcpy (rmp->src_address, &(t->src.ip4), 4); - memcpy (rmp->dst_address, &(t->dst.ip4), 4); + memcpy (rmp->src_address, t->src.ip4.as_u8, 4); + memcpy (rmp->dst_address, t->dst.ip4.as_u8, 4); rmp->encap_vrf_id = htonl (im4->fibs[t->encap_fib_index].ft_table_id); } + rmp->mcast_sw_if_index = htonl (t->mcast_sw_if_index); rmp->vni = htonl (t->vni); /* decap_next_index is deprecated, hard code to l2-input */ rmp->decap_next_index = htonl (VXLAN_INPUT_NEXT_L2_INPUT); diff --git a/vpp/vpp-api/custom_dump.c b/vpp/vpp-api/custom_dump.c index 6c181eec..2a9fd741 100644 --- a/vpp/vpp-api/custom_dump.c +++ b/vpp/vpp-api/custom_dump.c @@ -1410,23 +1410,22 @@ static void *vl_api_vxlan_add_del_tunnel_t_print (vl_api_vxlan_add_del_tunnel_t * mp, void *handle) { u8 *s; - s = 
format (0, "SCRIPT: vxlan_add_del_tunnel "); - if (mp->is_ipv6) - { - s = format (s, "src %U ", format_ip6_address, - (ip6_address_t *) mp->src_address); - s = format (s, "dst %U ", format_ip6_address, - (ip6_address_t *) mp->dst_address); - } - else - { - s = format (s, "src %U ", format_ip4_address, - (ip4_address_t *) mp->src_address); - s = format (s, "dst %U ", format_ip4_address, - (ip4_address_t *) mp->dst_address); - } + ip46_address_t src, dst; + + ip46_from_addr_buf (mp->is_ipv6, mp->dst_address, &dst); + ip46_from_addr_buf (mp->is_ipv6, mp->src_address, &src); + + u8 is_grp = ip46_address_is_multicast (&dst); + char *dst_name = is_grp ? "group" : "dst"; + + s = format (s, "src %U ", format_ip46_address, &src, IP46_TYPE_ANY); + s = format (s, "%s %U ", dst_name, format_ip46_address, + &dst, IP46_TYPE_ANY); + + if (is_grp) + s = format (s, "mcast_sw_if_index %d ", ntohl (mp->mcast_sw_if_index)); if (mp->encap_vrf_id) s = format (s, "encap-vrf-id %d ", ntohl (mp->encap_vrf_id)); diff --git a/vpp/vpp-api/vpe.api b/vpp/vpp-api/vpe.api index 0e2f2292..69648afc 100644 --- a/vpp/vpp-api/vpe.api +++ b/vpp/vpp-api/vpe.api @@ -2126,6 +2126,7 @@ define vxlan_add_del_tunnel u8 is_ipv6; u8 src_address[16]; u8 dst_address[16]; + u32 mcast_sw_if_index; u32 encap_vrf_id; u32 decap_next_index; u32 vni; @@ -2151,6 +2152,7 @@ define vxlan_tunnel_details u32 sw_if_index; u8 src_address[16]; u8 dst_address[16]; + u32 mcast_sw_if_index; u32 encap_vrf_id; u32 decap_next_index; u32 vni; |
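To round out the sketch above: per the delete path in this patch, removing a tunnel whose dst is a multicast group unlocks its mcast adjacency and deletes its FIB_SOURCE_SPECIAL entry, rather than detaching from a unicast FIB entry. A hedged sketch of the corresponding test-API call, reusing the illustrative `vapi`, `src` and `grp` objects from the earlier example:

```python
# delete the tunnel created above: same addresses and vni, is_add=0
vapi.vxlan_add_del_tunnel(src_addr=src,
                          dst_addr=grp,
                          mcast_sw_if_index=1,
                          vni=10,
                          is_add=0)
```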