From 521a8d7df423a0b5aaf259d49ca9230705bc25ee Mon Sep 17 00:00:00 2001
From: Neale Ranns
Date: Thu, 6 Dec 2018 13:46:49 +0000
Subject: FIB recursion loop checks traverse midchain adjacencies

If a tunnel's destination address is reachable through the tunnel itself
(see the example config below), then search for and detect the recursion
loop and don't stack the adjacency. Otherwise this results in a nasty
surprise.

DBGvpp# loop cre
DBGvpp# set int state loop0 up
DBGvpp# set int ip addr loop0 10.0.0.1/24
DBGvpp# create gre tunnel src 10.0.0.1 dst 1.1.1.1
DBGvpp# set int state gre0 up
DBGvpp# set int unnum gre0 use loop0
DBGvpp# ip route 1.1.1.1/32 via gre0

DBGvpp# sh ip fib 1.1.1.1
ipv4-VRF:0, fib_index:0, flow hash:[src dst sport dport proto ] locks:[src:plugin-hi:2, src:default-route:1, ]
1.1.1.1/32 fib:0 index:11 locks:4                <<< this is entry #11
  src:CLI refs:1 entry-flags:attached, src-flags:added,contributing,active,
    path-list:[14] locks:2 flags:shared,looped, uPRF-list:12 len:1 itfs:[2, ]
      path:[14] pl-index:14 ip4 weight=1 pref=0 attached-nexthop:  oper-flags:recursive-loop,resolved, cfg-flags:attached,
        1.1.1.1 gre0 (p2p)
          [@0]: ipv4 via 0.0.0.0 gre0: mtu:9000
                4500000000000000fe2fb0cc0a0000010101010100000800
                stacked-on entry:11:             <<<< and the midchain forwards via entry #11
                  [@2]: dpo-drop ip4
  src:recursive-resolution refs:1 src-flags:added, cover:-1

 forwarding:   unicast-ip4-chain
  [@0]: dpo-load-balance: [proto:ip4 index:13 buckets:1 uRPF:12 to:[0:0]]
    [0] [@6]: ipv4 via 0.0.0.0 gre0: mtu:9000
              4500000000000000fe2fb0cc0a0000010101010100000800
              stacked-on entry:11:
                [@2]: dpo-drop ip4

DBGvpp# sh adj 1
[@1] ipv4 via 0.0.0.0 gre0: mtu:9000
     4500000000000000fe2fb0cc0a0000010101010100000800
     stacked-on entry:11:
       [@2]: dpo-drop ip4
   flags:midchain-ip-stack midchain-looped       <<<<< this is a loop
   counts:[0:0]
   locks:4
 delegates:
 children:
   {path:14}

Change-Id: I39b82bd1ea439be4611c88b130d40289fa0c1b59
Signed-off-by: Neale Ranns
---
 src/vnet/ipip/ipip.c  | 65 ++++++++++++++-------------------------------------
 src/vnet/ipip/sixrd.c |  6 ++---
 2 files changed, 20 insertions(+), 51 deletions(-)

(limited to 'src/vnet/ipip')

diff --git a/src/vnet/ipip/ipip.c b/src/vnet/ipip/ipip.c
index 9c58e520623..a5e46c41d6c 100644
--- a/src/vnet/ipip/ipip.c
+++ b/src/vnet/ipip/ipip.c
@@ -186,46 +186,15 @@ ipip_tunnel_stack (adj_index_t ai)
        VNET_HW_INTERFACE_FLAG_LINK_UP) == 0)
     {
       adj_nbr_midchain_unstack (ai);
-      return;
     }
-
-  dpo_id_t tmp = DPO_INVALID;
-  fib_forward_chain_type_t fib_fwd =
-    t->transport ==
-    IPIP_TRANSPORT_IP6 ? FIB_FORW_CHAIN_TYPE_UNICAST_IP6 :
-    FIB_FORW_CHAIN_TYPE_UNICAST_IP4;
-
-  fib_entry_contribute_forwarding (t->p2p.fib_entry_index, fib_fwd, &tmp);
-  if (DPO_LOAD_BALANCE == tmp.dpoi_type)
+  else
     {
-      /*
-       * post IPIP rewrite we will load-balance. However, the IPIP encap
-       * is always the same for this adjacency/tunnel and hence the IP/IPIP
-       * src,dst hash is always the same result too. So we do that hash now and
-       * stack on the choice.
-       * If the choice is an incomplete adj then we will need a poke when
-       * it becomes complete. This happens since the adj update walk propagates
-       * as far a recursive paths.
-       */
-      const dpo_id_t *choice;
-      load_balance_t *lb;
-      int hash;
-
-      lb = load_balance_get (tmp.dpoi_index);
-
-      if (fib_fwd == FIB_FORW_CHAIN_TYPE_UNICAST_IP4)
-        hash = ip4_compute_flow_hash ((ip4_header_t *) adj_get_rewrite (ai),
-                                      lb->lb_hash_config);
-      else
-        hash = ip6_compute_flow_hash ((ip6_header_t *) adj_get_rewrite (ai),
-                                      lb->lb_hash_config);
-      choice =
-        load_balance_get_bucket_i (lb, hash & lb->lb_n_buckets_minus_1);
-      dpo_copy (&tmp, choice);
+      adj_nbr_midchain_stack_on_fib_entry
+        (ai,
+         t->p2p.fib_entry_index,
+         (t->transport == IPIP_TRANSPORT_IP6) ?
+         FIB_FORW_CHAIN_TYPE_UNICAST_IP6 : FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
     }
-
-  adj_nbr_midchain_stack (ai, &tmp);
-  dpo_reset (&tmp);
 }
 
 static adj_walk_rc_t
@@ -253,24 +222,24 @@ ipip_tunnel_restack (ipip_tunnel_t * gt)
 void
 ipip_update_adj (vnet_main_t * vnm, u32 sw_if_index, adj_index_t ai)
 {
-  ipip_tunnel_t *t;
   adj_midchain_fixup_t f;
+  ipip_tunnel_t *t;
+  adj_flags_t af;
 
   t = ipip_tunnel_db_find_by_sw_if_index (sw_if_index);
   if (!t)
     return;
 
   f = t->transport == IPIP_TRANSPORT_IP6 ? ipip6_fixup : ipip4_fixup;
-
-  adj_nbr_midchain_update_rewrite (ai, f, t,
-                                   (VNET_LINK_ETHERNET ==
-                                    adj_get_link_type (ai) ?
-                                    ADJ_FLAG_MIDCHAIN_NO_COUNT :
-                                    ADJ_FLAG_NONE), ipip_build_rewrite (vnm,
-                                                                        sw_if_index,
-                                                                        adj_get_link_type
-                                                                        (ai),
-                                                                        NULL));
+  af = ADJ_FLAG_MIDCHAIN_IP_STACK;
+  if (VNET_LINK_ETHERNET == adj_get_link_type (ai))
+    af |= ADJ_FLAG_MIDCHAIN_NO_COUNT;
+
+  adj_nbr_midchain_update_rewrite (ai, f, t, af,
+                                   ipip_build_rewrite (vnm,
+                                                       sw_if_index,
+                                                       adj_get_link_type
+                                                       (ai), NULL));
 
   ipip_tunnel_stack (ai);
 }
diff --git a/src/vnet/ipip/sixrd.c b/src/vnet/ipip/sixrd.c
index cc5bfa33d91..30c37c80fe8 100644
--- a/src/vnet/ipip/sixrd.c
+++ b/src/vnet/ipip/sixrd.c
@@ -152,9 +152,9 @@ ip6ip_tunnel_stack (adj_index_t ai, u32 fib_entry_index)
   if (vnet_hw_interface_get_flags (vnet_get_main (), t->hw_if_index) &
       VNET_HW_INTERFACE_FLAG_LINK_UP)
     {
-      adj_nbr_midchain_stack (ai,
-                              fib_entry_contribute_ip_forwarding
-                              (fib_entry_index));
+      adj_nbr_midchain_stack_on_fib_entry (ai,
+                                           fib_entry_index,
+                                           FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
     }
   else
     {
--
cgit 1.2.3-korg
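
The stacking pattern applied above generalises to any midchain tunnel type.
Below is a minimal sketch of that pattern for a hypothetical tunnel: the
adj_*/fib_* calls and flags are the ones used in the diff, while my_tunnel_t,
my_tunnel_find_by_sw_if_index(), my_tunnel_fixup() and my_tunnel_build_rewrite()
are illustrative placeholders, not real VPP APIs.

/*
 * Illustrative sketch only: a made-up tunnel type stacking its midchain
 * adjacency on the destination's FIB entry so the FIB's recursion-loop
 * checks can traverse it.  my_tunnel_* names are placeholders.
 */
#include <vnet/vnet.h>
#include <vnet/adj/adj_midchain.h>
#include <vnet/fib/fib_types.h>

typedef struct my_tunnel_t_
{
  u32 hw_if_index;
  fib_node_index_t fib_entry_index;	/* FIB entry for the tunnel dst */
  int is_ip6;
} my_tunnel_t;

/* placeholder helpers a real tunnel type would provide */
extern my_tunnel_t *my_tunnel_find_by_sw_if_index (u32 sw_if_index);
extern adj_midchain_fixup_t my_tunnel_fixup;
extern u8 *my_tunnel_build_rewrite (vnet_main_t * vnm, u32 sw_if_index,
				    vnet_link_t link_type);

static void
my_tunnel_stack (adj_index_t ai, const my_tunnel_t * t)
{
  if ((vnet_hw_interface_get_flags (vnet_get_main (), t->hw_if_index) &
       VNET_HW_INTERFACE_FLAG_LINK_UP) == 0)
    {
      /* link down: leave the midchain unstacked so traffic drops */
      adj_nbr_midchain_unstack (ai);
    }
  else
    {
      /*
       * Stack on the destination's FIB entry.  The FIB performs the
       * recursion-loop check; if the destination resolves back through
       * this tunnel, the adjacency is marked midchain-looped and stacked
       * on a drop rather than looping packets.
       */
      adj_nbr_midchain_stack_on_fib_entry
	(ai, t->fib_entry_index,
	 t->is_ip6 ? FIB_FORW_CHAIN_TYPE_UNICAST_IP6 :
	 FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
    }
}

static void
my_tunnel_update_adj (vnet_main_t * vnm, u32 sw_if_index, adj_index_t ai)
{
  my_tunnel_t *t = my_tunnel_find_by_sw_if_index (sw_if_index);

  if (!t)
    return;

  /*
   * ADJ_FLAG_MIDCHAIN_IP_STACK marks this midchain as stacked on an IP
   * FIB entry, which is what lets the FIB loop checks traverse it.
   */
  adj_nbr_midchain_update_rewrite (ai, my_tunnel_fixup, t,
				   ADJ_FLAG_MIDCHAIN_IP_STACK,
				   my_tunnel_build_rewrite (vnm, sw_if_index,
							    adj_get_link_type
							    (ai)));
  my_tunnel_stack (ai, t);
}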