diff options
author | Florin Coras <fcoras@cisco.com> | 2016-07-22 01:45:30 +0200 |
---|---|---|
committer | Florin Coras <fcoras@cisco.com> | 2016-07-31 15:52:42 +0200 |
commit | 1a1adc7121e698743a64141373966041efc5f2e7 (patch) | |
tree | 1ebc8b57943f706ae2ac3268e631e8c07d43b3f2 /vnet/vnet/lisp-gpe/interface.c | |
parent | d038dbf4aa42781a3962326eb49235b04a95f416 (diff) |
Initial L2 LISP support
This introduces support for layer 2 overlays with LISP. Similarly to L3,
all tenant packets to be encapsulated are captured by an interface, but
the mapping (layer binding) instead of being between an L3 VRF and a
LISP VNI, it is between an L2 bridge domain and a VNI. At a high
level, this results in two important properties:
1) the source and destinations of all packets flooded in the
bridge-domain are mapped via the LISP control plane and the replies are
converted into data-plane tunnels tracked via a LISP specific
source/dest L2 FIB
2) All packets reaching the interface and matching a source/dest L2 LISP
FIB entry are L3 (IP4/6) encapsulated.
This is solely a unicast feature, therefore at this time ARPs are not
handled in any special way.
Change-Id: I0b7badcd7c6d5166db07d4acd2cc4ae7fba3e18e
Signed-off-by: Florin Coras <fcoras@cisco.com>
Diffstat (limited to 'vnet/vnet/lisp-gpe/interface.c')
-rw-r--r-- | vnet/vnet/lisp-gpe/interface.c | 559 |
1 files changed, 476 insertions, 83 deletions
diff --git a/vnet/vnet/lisp-gpe/interface.c b/vnet/vnet/lisp-gpe/interface.c index 566fe7734ca..1f40ea86bbf 100644 --- a/vnet/vnet/lisp-gpe/interface.c +++ b/vnet/vnet/lisp-gpe/interface.c @@ -82,13 +82,15 @@ encap_one_inline (lisp_gpe_main_t * lgm, vlib_buffer_t * b0, { ip_udp_encap_one (lgm->vlib_main, b0, t0->rewrite, 36, 1); next0[0] = LISP_GPE_TX_NEXT_IP4_LOOKUP; - } else { ip_udp_encap_one (lgm->vlib_main, b0, t0->rewrite, 56, 0); next0[0] = LISP_GPE_TX_NEXT_IP6_LOOKUP; } + + /* Reset to look up tunnel partner in the configured FIB */ + vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index; } always_inline void @@ -144,6 +146,10 @@ encap_two_inline (lisp_gpe_main_t * lgm, vlib_buffer_t * b0, vlib_buffer_t * b1, ip_udp_encap_one (lgm->vlib_main, b1, t1->rewrite, 56, 0); next0[0] = next1[0] = LISP_GPE_TX_NEXT_IP6_LOOKUP; } + + /* Reset to look up tunnel partner in the configured FIB */ + vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index; + vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->encap_fib_index; } #define is_v4_packet(_h) ((*(u8*) _h) & 0xF0) == 0x40 @@ -231,10 +237,6 @@ lisp_gpe_interface_tx (vlib_main_t * vm, vlib_node_runtime_t * node, ip_addr_version(&t1->dst) == IP4 ? 1 : 0); } - /* Reset to look up tunnel partner in the configured FIB */ - vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index; - vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->encap_fib_index; - if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) { lisp_gpe_tx_trace_t *tr = vlib_add_trace (vm, node, b0, @@ -275,9 +277,6 @@ lisp_gpe_interface_tx (vlib_main_t * vm, vlib_node_runtime_t * node, encap_one_inline (lgm, b0, t0, &next0, ip_addr_version(&t0->dst) == IP4 ? 
1 : 0); - /* Reset to look up tunnel partner in the configured FIB */ - vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index; - if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) { lisp_gpe_tx_trace_t *tr = vlib_add_trace (vm, node, b0, @@ -474,24 +473,380 @@ lisp_gpe_iface_set_table (u32 sw_if_index, u32 table_id, u8 is_ip4) } } -int -vnet_lisp_gpe_add_del_iface (vnet_lisp_gpe_add_del_iface_args_t * a, - u32 * hw_if_indexp) +#define foreach_l2_lisp_gpe_tx_next \ + _(DROP, "error-drop") \ + _(IP4_LOOKUP, "ip4-lookup") \ + _(IP6_LOOKUP, "ip6-lookup") \ + _(LISP_CP_LOOKUP, "lisp-cp-lookup") + +typedef enum +{ +#define _(sym,str) L2_LISP_GPE_TX_NEXT_##sym, + foreach_l2_lisp_gpe_tx_next +#undef _ + L2_LISP_GPE_TX_N_NEXT, +} l2_lisp_gpe_tx_next_t; + +typedef struct +{ + u32 tunnel_index; +} l2_lisp_gpe_tx_trace_t; + +u8 * +format_l2_lisp_gpe_tx_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + l2_lisp_gpe_tx_trace_t * t = va_arg (*args, l2_lisp_gpe_tx_trace_t *); + + s = format (s, "L2-LISP-GPE-TX: tunnel %d", t->tunnel_index); + return s; +} + +always_inline void +l2_process_tunnel_action (vlib_buffer_t * b0, u8 action, u32 * next0) { + if (LISP_SEND_MAP_REQUEST == action) + { + next0[0] = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP; + vnet_buffer(b0)->lisp.overlay_afi = LISP_AFI_MAC; + } + else + { + next0[0] = L2_LISP_GPE_TX_NEXT_DROP; + } +} + +always_inline void +l2_process_one (lisp_gpe_main_t * lgm, vlib_buffer_t * b0, u32 ti0, u32 * next0) +{ + lisp_gpe_tunnel_t * t0; + + t0 = pool_elt_at_index(lgm->tunnels, ti0); + ASSERT(0 != t0); + + if (PREDICT_TRUE(LISP_NO_ACTION == t0->action)) + { + encap_one_inline (lgm, b0, t0, next0, + ip_addr_version(&t0->dst) == IP4 ? 
1 : 0); + } + else + { + l2_process_tunnel_action(b0, t0->action, next0); + } +} + +always_inline void +l2_process_two (lisp_gpe_main_t * lgm, vlib_buffer_t * b0, vlib_buffer_t * b1, + u32 ti0, u32 ti1, u32 * next0, u32 * next1) +{ + lisp_gpe_tunnel_t * t0, * t1; + + t0 = pool_elt_at_index(lgm->tunnels, ti0); + t1 = pool_elt_at_index(lgm->tunnels, ti1); + + ASSERT(0 != t0 && 0 != t1); + + if (PREDICT_TRUE(LISP_NO_ACTION == t0->action + && LISP_NO_ACTION == t1->action)) + { + encap_two_inline (lgm, b0, b1, t0, t1, next0, next1, + ip_addr_version(&t0->dst) == IP4 ? 1 : 0); + } + else + { + if (LISP_NO_ACTION == t0->action) + { + encap_one_inline (lgm, b0, t0, next0, + ip_addr_version(&t0->dst) == IP4 ? 1 : 0); + l2_process_tunnel_action (b1, t1->action, next1); + } + else if (LISP_NO_ACTION == t1->action) + { + encap_one_inline (lgm, b1, t1, next1, + ip_addr_version(&t1->dst) == IP4 ? 1 : 0); + l2_process_tunnel_action (b0, t0->action, next0); + } + else + { + l2_process_tunnel_action (b0, t0->action, next0); + l2_process_tunnel_action (b1, t1->action, next1); + } + } +} + +static uword +l2_lisp_gpe_interface_tx (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + u32 n_left_from, next_index, * from, * to_next; lisp_gpe_main_t * lgm = &lisp_gpe_main; - vnet_main_t * vnm = lgm->vnet_main; - vnet_hw_interface_t * hi; - u32 hw_if_index = ~0, lookup_next_index4, lookup_next_index6, flen; - uword * hip, * vni, * si; + + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + + next_index = node->cached_next_index; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, + to_next, n_left_to_next); + + while (n_left_from >= 4 && n_left_to_next >= 2) + { + u32 bi0, bi1; + vlib_buffer_t * b0, * b1; + u32 next0, next1, ti0, ti1; + lisp_gpe_tunnel_t * t0 = 0, * t1 = 0; + ethernet_header_t * e0, * e1; + + next0 = next1 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP; + + /* Prefetch next 
iteration. */ + { + vlib_buffer_t * p2, *p3; + + p2 = vlib_get_buffer (vm, from[2]); + p3 = vlib_get_buffer (vm, from[3]); + + vlib_prefetch_buffer_header(p2, LOAD); + vlib_prefetch_buffer_header(p3, LOAD); + + CLIB_PREFETCH(p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD); + CLIB_PREFETCH(p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD); + } + + bi0 = from[0]; + bi1 = from[1]; + to_next[0] = bi0; + to_next[1] = bi1; + from += 2; + to_next += 2; + n_left_to_next -= 2; + n_left_from -= 2; + + b0 = vlib_get_buffer (vm, bi0); + b1 = vlib_get_buffer (vm, bi1); + + e0 = vlib_buffer_get_current (b0); + e1 = vlib_buffer_get_current (b1); + + /* lookup dst + src mac */ + ti0 = lisp_l2_fib_lookup (lgm, vnet_buffer(b0)->l2.bd_index, + e0->src_address, e0->dst_address); + ti1 = lisp_l2_fib_lookup (lgm, vnet_buffer(b1)->l2.bd_index, + e1->src_address, e1->dst_address); + + if (PREDICT_TRUE((u32)~0 != ti0) && (u32)~0 != ti1) + { + /* process both tunnels */ + l2_process_two (lgm, b0, b1, ti0, ti1, &next0, &next1); + } + else + { + if ((u32)~0 != ti0) + { + /* process tunnel for b0 */ + l2_process_one (lgm, b0, ti0, &next0); + + /* no tunnel found for b1, send to control plane */ + next1 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP; + vnet_buffer(b1)->lisp.overlay_afi = LISP_AFI_MAC; + } + else if ((u32)~0 != ti1) + { + /* process tunnel for b1 */ + l2_process_one (lgm, b1, ti1, &next1); + + /* no tunnel found b0, send to control plane */ + next0 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP; + vnet_buffer(b0)->lisp.overlay_afi = LISP_AFI_MAC; + } + else + { + /* no tunnels found */ + next0 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP; + vnet_buffer(b0)->lisp.overlay_afi = LISP_AFI_MAC; + next1 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP; + vnet_buffer(b1)->lisp.overlay_afi = LISP_AFI_MAC; + } + } + + if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) + { + l2_lisp_gpe_tx_trace_t *tr = vlib_add_trace (vm, node, b0, + sizeof(*tr)); + tr->tunnel_index = t0 - lgm->tunnels; + } + if (PREDICT_FALSE(b1->flags & 
VLIB_BUFFER_IS_TRACED)) + { + l2_lisp_gpe_tx_trace_t *tr = vlib_add_trace (vm, node, b1, + sizeof(*tr)); + tr->tunnel_index = t1 - lgm->tunnels; + } + + vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, + n_left_to_next, bi0, bi1, next0, + next1); + } + + while (n_left_from > 0 && n_left_to_next > 0) + { + vlib_buffer_t * b0; + u32 bi0, ti0, next0 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP; + ethernet_header_t * e0; + + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + e0 = vlib_buffer_get_current (b0); + + /* lookup dst + src mac */ + ti0 = lisp_l2_fib_lookup (lgm, vnet_buffer(b0)->l2.bd_index, + e0->src_address, e0->dst_address); + + if (PREDICT_TRUE((u32)~0 != ti0)) + { + l2_process_one (lgm, b0, ti0, &next0); + } + else + { + /* no tunnel found send to control plane */ + next0 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP; + vnet_buffer(b0)->lisp.overlay_afi = LISP_AFI_MAC; + } + + if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) + { + l2_lisp_gpe_tx_trace_t *tr = vlib_add_trace (vm, node, b0, + sizeof(*tr)); + tr->tunnel_index = ti0 ? 
ti0 : ~0; + } + vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, + n_left_to_next, bi0, next0); + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + return from_frame->n_vectors; +} + +static u8 * +format_l2_lisp_gpe_name (u8 * s, va_list * args) +{ + u32 dev_instance = va_arg (*args, u32); + return format (s, "l2_lisp_gpe%d", dev_instance); +} + +VNET_DEVICE_CLASS (l2_lisp_gpe_device_class,static) = { + .name = "L2_LISP_GPE", + .format_device_name = format_l2_lisp_gpe_name, + .format_tx_trace = format_lisp_gpe_tx_trace, + .tx_function = l2_lisp_gpe_interface_tx, + .no_flatten_output_chains = 1, +}; + + +static vnet_hw_interface_t * +create_lisp_gpe_iface (lisp_gpe_main_t * lgm, u32 vni, u32 dp_table, + vnet_device_class_t * dev_class, + tunnel_lookup_t * tuns) +{ + u32 flen; + u32 hw_if_index = ~0; u8 * new_name; + vnet_hw_interface_t * hi; + vnet_main_t * vnm = lgm->vnet_main; - if (vnet_lisp_gpe_enable_disable_status() == 0) + /* create hw lisp_gpeX iface if needed, otherwise reuse existing */ + flen = vec_len(lgm->free_tunnel_hw_if_indices); + if (flen > 0) { - clib_warning ("LISP is disabled!"); - return VNET_API_ERROR_LISP_DISABLED; + hw_if_index = lgm->free_tunnel_hw_if_indices[flen - 1]; + _vec_len(lgm->free_tunnel_hw_if_indices) -= 1; + + hi = vnet_get_hw_interface (vnm, hw_if_index); + + /* rename interface */ + new_name = format (0, "%U", dev_class->format_device_name, + vni); + + vec_add1(new_name, 0); + vnet_rename_interface (vnm, hw_if_index, (char *) new_name); + vec_free(new_name); + + /* clear old stats of freed interface before reuse */ + vnet_interface_main_t * im = &vnm->interface_main; + vnet_interface_counter_lock (im); + vlib_zero_combined_counter ( + &im->combined_sw_if_counters[VNET_INTERFACE_COUNTER_TX], + hi->sw_if_index); + vlib_zero_combined_counter ( + &im->combined_sw_if_counters[VNET_INTERFACE_COUNTER_RX], + hi->sw_if_index); + vlib_zero_simple_counter ( + 
&im->sw_if_counters[VNET_INTERFACE_COUNTER_DROP], + hi->sw_if_index); + vnet_interface_counter_unlock (im); } + else + { + hw_if_index = vnet_register_interface (vnm, dev_class->index, vni, + lisp_gpe_hw_class.index, 0); + hi = vnet_get_hw_interface (vnm, hw_if_index); + } + + hash_set(tuns->hw_if_index_by_dp_table, dp_table, hw_if_index); - hip = hash_get(lgm->lisp_gpe_hw_if_index_by_table_id, a->table_id); + /* set tunnel termination: post decap, packets are tagged as having been + * originated by lisp-gpe interface */ + hash_set(tuns->sw_if_index_by_vni, vni, hi->sw_if_index); + hash_set(tuns->vni_by_sw_if_index, hi->sw_if_index, vni); + + return hi; +} + +static void +remove_lisp_gpe_iface (lisp_gpe_main_t * lgm, u32 hi_index, u32 dp_table, + tunnel_lookup_t * tuns) +{ + vnet_main_t * vnm = lgm->vnet_main; + vnet_hw_interface_t * hi; + uword * vnip; + + hi = vnet_get_hw_interface (vnm, hi_index); + + /* disable interface */ + vnet_sw_interface_set_flags (vnm, hi->sw_if_index, 0/* down */); + vnet_hw_interface_set_flags (vnm, hi->hw_if_index, 0/* down */); + hash_unset(tuns->hw_if_index_by_dp_table, dp_table); + vec_add1(lgm->free_tunnel_hw_if_indices, hi->hw_if_index); + + /* clean tunnel termination and vni to sw_if_index binding */ + vnip = hash_get(tuns->vni_by_sw_if_index, hi->sw_if_index); + hash_unset(tuns->sw_if_index_by_vni, vnip[0]); + hash_unset(tuns->vni_by_sw_if_index, hi->sw_if_index); +} + +static int +lisp_gpe_add_del_l3_iface (lisp_gpe_main_t * lgm, + vnet_lisp_gpe_add_del_iface_args_t * a) +{ + vnet_main_t * vnm = lgm->vnet_main; + tunnel_lookup_t * l3_ifaces = &lgm->l3_ifaces; + vnet_hw_interface_t * hi; + u32 lookup_next_index4, lookup_next_index6; + uword * hip, * si; + + hip = hash_get(l3_ifaces->hw_if_index_by_dp_table, a->table_id); if (a->is_add) { @@ -501,58 +856,16 @@ vnet_lisp_gpe_add_del_iface (vnet_lisp_gpe_add_del_iface_args_t * a, return -1; } - si = hash_get(lgm->tunnel_term_sw_if_index_by_vni, a->vni); + si = 
hash_get(l3_ifaces->sw_if_index_by_vni, a->vni); if (si) { clib_warning ("Interface for vni %d already exists", a->vni); return -1; } - /* create hw lisp_gpeX iface if needed, otherwise reuse existing */ - flen = vec_len(lgm->free_lisp_gpe_tunnel_hw_if_indices); - if (flen > 0) - { - hw_if_index = lgm->free_lisp_gpe_tunnel_hw_if_indices[flen - 1]; - _vec_len(lgm->free_lisp_gpe_tunnel_hw_if_indices) -= 1; - - hi = vnet_get_hw_interface (vnm, hw_if_index); - - /* rename interface */ - new_name = format (0, "%U", lisp_gpe_device_class.format_device_name, - a->vni); - vec_add1(new_name, 0); - vnet_rename_interface (vnm, hw_if_index, (char *) new_name); - vec_free(new_name); - - /* clear old stats of freed interface before reuse */ - vnet_interface_main_t * im = &vnm->interface_main; - vnet_interface_counter_lock (im); - vlib_zero_combined_counter ( - &im->combined_sw_if_counters[VNET_INTERFACE_COUNTER_TX], - hi->sw_if_index); - vlib_zero_combined_counter ( - &im->combined_sw_if_counters[VNET_INTERFACE_COUNTER_RX], - hi->sw_if_index); - vlib_zero_simple_counter ( - &im->sw_if_counters[VNET_INTERFACE_COUNTER_DROP], - hi->sw_if_index); - vnet_interface_counter_unlock (im); - } - else - { - hw_if_index = vnet_register_interface (vnm, - lisp_gpe_device_class.index, - a->vni, - lisp_gpe_hw_class.index, 0); - hi = vnet_get_hw_interface (vnm, hw_if_index); - } - - hash_set(lgm->lisp_gpe_hw_if_index_by_table_id, a->table_id, hw_if_index); - - /* set tunnel termination: post decap, packets are tagged as having been - * originated by lisp-gpe interface */ - hash_set(lgm->tunnel_term_sw_if_index_by_vni, a->vni, hi->sw_if_index); - hash_set(lgm->vni_by_tunnel_term_sw_if_index, hi->sw_if_index, a->vni); + /* create lisp iface and populate tunnel tables */ + hi = create_lisp_gpe_iface (lgm, a->vni, a->table_id, + &lisp_gpe_device_class, l3_ifaces); /* set ingress arc from lgpe_ipX_lookup */ lookup_next_index4 = vlib_node_add_next (lgm->vlib_main, @@ -593,18 +906,8 @@ 
vnet_lisp_gpe_add_del_iface (vnet_lisp_gpe_add_del_iface_args_t * a, clib_warning("The interface for vrf %d doesn't exist", a->table_id); return -1; } - hi = vnet_get_hw_interface (vnm, hip[0]); - - /* disable interface */ - vnet_sw_interface_set_flags (vnm, hi->sw_if_index, 0/* down */); - vnet_hw_interface_set_flags (vnm, hi->hw_if_index, 0/* down */); - hash_unset(lgm->lisp_gpe_hw_if_index_by_table_id, a->table_id); - vec_add1(lgm->free_lisp_gpe_tunnel_hw_if_indices, hi->hw_if_index); - /* clean tunnel termination and vni to sw_if_index binding */ - vni = hash_get(lgm->vni_by_tunnel_term_sw_if_index, hi->sw_if_index); - hash_unset(lgm->tunnel_term_sw_if_index_by_vni, vni[0]); - hash_unset(lgm->vni_by_tunnel_term_sw_if_index, hi->sw_if_index); + remove_lisp_gpe_iface (lgm, hip[0], a->table_id, &lgm->l3_ifaces); /* unset default routes */ add_del_lisp_gpe_default_route (a->table_id, /* is_v4 */1, 0); @@ -614,6 +917,86 @@ vnet_lisp_gpe_add_del_iface (vnet_lisp_gpe_add_del_iface_args_t * a, return 0; } +static int +lisp_gpe_add_del_l2_iface (lisp_gpe_main_t * lgm, + vnet_lisp_gpe_add_del_iface_args_t * a) +{ + vnet_main_t * vnm = lgm->vnet_main; + tunnel_lookup_t * l2_ifaces = &lgm->l2_ifaces; + vnet_hw_interface_t * hi; + uword * hip, * si; + u16 bd_index; + + bd_index = bd_find_or_add_bd_index(&bd_main, a->bd_id); + hip = hash_get(l2_ifaces->hw_if_index_by_dp_table, bd_index); + + if (a->is_add) + { + if (hip) + { + clib_warning("bridge domain %d already mapped to a vni", a->bd_id); + return -1; + } + + si = hash_get(l2_ifaces->sw_if_index_by_vni, a->vni); + if (si) + { + clib_warning ("Interface for vni %d already exists", a->vni); + return -1; + } + + /* create lisp iface and populate tunnel tables */ + hi = create_lisp_gpe_iface (lgm, a->vni, bd_index, + &l2_lisp_gpe_device_class, &lgm->l2_ifaces); + + /* add iface to l2 bridge domain */ + set_int_l2_mode (lgm->vlib_main, vnm, MODE_L2_BRIDGE, hi->sw_if_index, + bd_index, 0, 0, 0); + + /* set egress arcs */ 
+#define _(sym,str) vlib_node_add_named_next_with_slot (vnm->vlib_main, \ + hi->tx_node_index, str, L2_LISP_GPE_TX_NEXT_##sym); + foreach_l2_lisp_gpe_tx_next +#undef _ + + /* enable interface */ + vnet_sw_interface_set_flags (vnm, hi->sw_if_index, + VNET_SW_INTERFACE_FLAG_ADMIN_UP); + vnet_hw_interface_set_flags (vnm, hi->hw_if_index, + VNET_HW_INTERFACE_FLAG_LINK_UP); + } + else + { + if (hip == 0) + { + clib_warning("The interface for bridge domain %d doesn't exist", + a->bd_id); + return -1; + } + remove_lisp_gpe_iface (lgm, hip[0], bd_index, &lgm->l2_ifaces); + } + + return 0; +} + +int +vnet_lisp_gpe_add_del_iface (vnet_lisp_gpe_add_del_iface_args_t * a, + u32 * hw_if_indexp) +{ + lisp_gpe_main_t * lgm = &lisp_gpe_main; + + if (vnet_lisp_gpe_enable_disable_status() == 0) + { + clib_warning ("LISP is disabled!"); + return VNET_API_ERROR_LISP_DISABLED; + } + + if (!a->is_l2) + return lisp_gpe_add_del_l3_iface (lgm, a); + else + return lisp_gpe_add_del_l2_iface (lgm, a); +} + static clib_error_t * lisp_gpe_add_del_iface_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) @@ -622,8 +1005,8 @@ lisp_gpe_add_del_iface_command_fn (vlib_main_t * vm, unformat_input_t * input, u8 is_add = 1; clib_error_t * error = 0; int rv = 0; - u32 table_id, vni; - u8 vni_is_set = 0, vrf_is_set = 0; + u32 table_id, vni, bd_id; + u8 vni_is_set = 0, vrf_is_set = 0, bd_index_is_set = 0; vnet_lisp_gpe_add_del_iface_args_t _a, * a = &_a; @@ -645,6 +1028,10 @@ lisp_gpe_add_del_iface_command_fn (vlib_main_t * vm, unformat_input_t * input, { vni_is_set = 1; } + else if (unformat (line_input, "bd %d", &bd_id)) + { + bd_index_is_set = 1; + } else { return clib_error_return (0, "parse error: '%U'", @@ -652,14 +1039,20 @@ lisp_gpe_add_del_iface_command_fn (vlib_main_t * vm, unformat_input_t * input, } } - if (!vni_is_set || !vrf_is_set) - { - return clib_error_return (0, "Both vni and vrf must be set!"); - } + if (vrf_is_set && bd_index_is_set) + return 
clib_error_return(0, "Cannot set both vrf and brdige domain index!"); + + if (!vni_is_set) + return clib_error_return(0, "vni must be set!"); + + if (!vrf_is_set && !bd_index_is_set) + return clib_error_return(0, "vrf or bridge domain index must be set!"); a->is_add = is_add; - a->table_id = table_id; + a->dp_table = vrf_is_set ? table_id : bd_id; a->vni = vni; + a->is_l2 = bd_index_is_set; + rv = vnet_lisp_gpe_add_del_iface (a, 0); if (0 != rv) { |