 -rw-r--r--  src/vnet/adj/adj.c                      |  74
 -rw-r--r--  src/vnet/adj/adj.h                      |  63
 -rw-r--r--  src/vnet/adj/adj_midchain.c             | 124
 -rw-r--r--  src/vnet/adj/adj_midchain.h             |  34
 -rw-r--r--  src/vnet/fib/fib_path.c                 |  16
 -rw-r--r--  src/vnet/gre/gre.c                      |  11
 -rw-r--r--  src/vnet/gre/interface.c                |  40
 -rw-r--r--  src/vnet/ipip/ipip.c                    |  65
 -rw-r--r--  src/vnet/ipip/sixrd.c                   |   6
 -rw-r--r--  src/vnet/lisp-gpe/lisp_gpe_adjacency.c  |  54
 -rw-r--r--  test/test_gre.py                        |  61
 11 files changed, 399 insertions(+), 149 deletions(-)
diff --git a/src/vnet/adj/adj.c b/src/vnet/adj/adj.c
index 8740bb41465..b844073ecfb 100644
--- a/src/vnet/adj/adj.c
+++ b/src/vnet/adj/adj.c
@@ -45,6 +45,11 @@ const ip46_address_t ADJ_BCAST_ADDR = {
},
};
+/**
+ * Adj flag names
+ */
+static const char *adj_attr_names[] = ADJ_ATTR_NAMES;
+
always_inline void
adj_poison (ip_adjacency_t * adj)
{
@@ -95,6 +100,28 @@ adj_index_is_special (adj_index_t adj_index)
return (0);
}
+u8*
+format_adj_flags (u8 * s, va_list * args)
+{
+ adj_flags_t af;
+ adj_attr_t at;
+
+ af = va_arg (*args, int);
+
+ if (ADJ_FLAG_NONE == af)
+ {
+ return (format(s, "None"));
+ }
+ FOR_EACH_ADJ_ATTR(at)
+ {
+ if (af & (1 << at))
+ {
+ s = format(s, "%s ", adj_attr_names[at]);
+ }
+ }
+ return (s);
+}
+
/**
* @brief Pretty print helper function for formatting specific adjacencies.
* @param s - input string to format
@@ -113,10 +140,11 @@ format_ip_adjacency (u8 * s, va_list * args)
adj_index = va_arg (*args, u32);
fiaf = va_arg (*args, format_ip_adjacency_flags_t);
adj = adj_get(adj_index);
-
+
switch (adj->lookup_next_index)
{
case IP_LOOKUP_NEXT_REWRITE:
+ case IP_LOOKUP_NEXT_BCAST:
s = format (s, "%U", format_adj_nbr, adj_index, 0);
break;
case IP_LOOKUP_NEXT_ARP:
@@ -134,8 +162,12 @@ format_ip_adjacency (u8 * s, va_list * args)
case IP_LOOKUP_NEXT_MCAST_MIDCHAIN:
s = format (s, "%U", format_adj_mcast_midchain, adj_index, 0);
break;
- default:
- break;
+ case IP_LOOKUP_NEXT_DROP:
+ case IP_LOOKUP_NEXT_PUNT:
+ case IP_LOOKUP_NEXT_LOCAL:
+ case IP_LOOKUP_NEXT_ICMP_ERROR:
+ case IP_LOOKUP_N_NEXT:
+ break;
}
if (fiaf & FORMAT_IP_ADJACENCY_DETAIL)
@@ -143,6 +175,7 @@ format_ip_adjacency (u8 * s, va_list * args)
vlib_counter_t counts;
vlib_get_combined_counter(&adjacency_counters, adj_index, &counts);
+ s = format (s, "\n flags:%U", format_adj_flags, adj->ia_flags);
s = format (s, "\n counts:[%Ld:%Ld]", counts.packets, counts.bytes);
s = format (s, "\n locks:%d", adj->ia_node.fn_locks);
s = format(s, "\n delegates:\n ");
@@ -159,6 +192,39 @@ format_ip_adjacency (u8 * s, va_list * args)
return s;
}
+int
+adj_recursive_loop_detect (adj_index_t ai,
+ fib_node_index_t **entry_indicies)
+{
+ ip_adjacency_t * adj;
+
+ adj = adj_get(ai);
+
+ switch (adj->lookup_next_index)
+ {
+ case IP_LOOKUP_NEXT_REWRITE:
+ case IP_LOOKUP_NEXT_ARP:
+ case IP_LOOKUP_NEXT_GLEAN:
+ case IP_LOOKUP_NEXT_MCAST:
+ case IP_LOOKUP_NEXT_BCAST:
+ case IP_LOOKUP_NEXT_DROP:
+ case IP_LOOKUP_NEXT_PUNT:
+ case IP_LOOKUP_NEXT_LOCAL:
+ case IP_LOOKUP_NEXT_ICMP_ERROR:
+ case IP_LOOKUP_N_NEXT:
+ /*
+ * these adjacency types are terminal graph nodes, so there's no
+ * possibility of a loop down here.
+ */
+ break;
+ case IP_LOOKUP_NEXT_MIDCHAIN:
+ case IP_LOOKUP_NEXT_MCAST_MIDCHAIN:
+ return (adj_ndr_midchain_recursive_loop_detect(ai, entry_indicies));
+ }
+
+ return (0);
+}
+
/*
* adj_last_lock_gone
*
@@ -403,7 +469,7 @@ adj_get_link_type (adj_index_t ai)
adj = adj_get(ai);
- return (adj->ia_link);
+ return (adj->ia_link);
}
/**
diff --git a/src/vnet/adj/adj.h b/src/vnet/adj/adj.h
index 18a2e1ddbbb..fb3dc368db0 100644
--- a/src/vnet/adj/adj.h
+++ b/src/vnet/adj/adj.h
@@ -157,14 +157,12 @@ typedef void (*adj_midchain_fixup_t) (vlib_main_t * vm,
/**
* @brief Flags on an IP adjacency
*/
-typedef enum ip_adjacency_flags_t_
+typedef enum adj_attr_t_
{
- ADJ_FLAG_NONE = 0,
-
/**
* Currently a sync walk is active. Used to prevent re-entrant walking
*/
- ADJ_FLAG_SYNC_WALK_ACTIVE = (1 << 0),
+ ADJ_ATTR_SYNC_WALK_ACTIVE = 0,
/**
* Packets TX through the midchain do not increment the interface
@@ -173,10 +171,48 @@ typedef enum ip_adjacency_flags_t_
* the packet will have traversed the interface's TX node, and hence have
* been counted, before it traverses ths midchain
*/
- ADJ_FLAG_MIDCHAIN_NO_COUNT = (1 << 1),
+ ADJ_ATTR_MIDCHAIN_NO_COUNT,
+ /**
+ * When stacking midchains on a fib-entry extract the choice from the
+ * load-balance returned based on an IP hash of the adj's rewrite
+ */
+ ADJ_ATTR_MIDCHAIN_IP_STACK,
+ /**
+ * If the midchain were to stack on its FIB entry a loop would form.
+ */
+ ADJ_ATTR_MIDCHAIN_LOOPED,
+} adj_attr_t;
+
+#define ADJ_ATTR_NAMES { \
+ [ADJ_ATTR_SYNC_WALK_ACTIVE] = "walk-active", \
+ [ADJ_ATTR_MIDCHAIN_NO_COUNT] = "midchain-no-count", \
+ [ADJ_ATTR_MIDCHAIN_IP_STACK] = "midchain-ip-stack", \
+ [ADJ_ATTR_MIDCHAIN_LOOPED] = "midchain-looped", \
+}
+
+#define FOR_EACH_ADJ_ATTR(_attr) \
+ for (_attr = ADJ_ATTR_SYNC_WALK_ACTIVE; \
+ _attr <= ADJ_ATTR_MIDCHAIN_LOOPED; \
+ _attr++)
+
+/**
+ * @brief Flags on an IP adjacency
+ */
+typedef enum adj_flags_t_
+{
+ ADJ_FLAG_NONE = 0,
+ ADJ_FLAG_SYNC_WALK_ACTIVE = (1 << ADJ_ATTR_SYNC_WALK_ACTIVE),
+ ADJ_FLAG_MIDCHAIN_NO_COUNT = (1 << ADJ_ATTR_MIDCHAIN_NO_COUNT),
+ ADJ_FLAG_MIDCHAIN_IP_STACK = (1 << ADJ_ATTR_MIDCHAIN_IP_STACK),
+ ADJ_FLAG_MIDCHAIN_LOOPED = (1 << ADJ_ATTR_MIDCHAIN_LOOPED),
} __attribute__ ((packed)) adj_flags_t;
/**
+ * @brief Format adjacency flags
+ */
+extern u8* format_adj_flags(u8 * s, va_list * args);
+
+/**
* @brief IP unicast adjacency.
* @note cache aligned.
*
@@ -257,6 +293,11 @@ typedef struct ip_adjacency_t_
* Fixup data passed back to the client in the fixup function
*/
const void *fixup_data;
+ /**
+ * The FIB entry this midchain resolves through; required for recursive
+ * loop detection.
+ */
+ fib_node_index_t fei;
} midchain;
/**
* IP_LOOKUP_NEXT_GLEAN
@@ -355,6 +396,18 @@ extern const u8* adj_get_rewrite (adj_index_t ai);
extern void adj_feature_update (u32 sw_if_index, u8 arc_index, u8 is_enable);
/**
+ * @brief descend the FIB graph looking for loops
+ *
+ * @param ai
+ * The adj index to traverse
+ *
+ * @param entry_indicies
+ * A pointer to a vector of FIB entries already visited.
+ */
+extern int adj_recursive_loop_detect (adj_index_t ai,
+ fib_node_index_t **entry_indicies);
+
+/**
* @brief
* The global adjacnecy pool. Exposed for fast/inline data-plane access
*/
diff --git a/src/vnet/adj/adj_midchain.c b/src/vnet/adj/adj_midchain.c
index 268d9409abf..a4b29c8ce35 100644
--- a/src/vnet/adj/adj_midchain.c
+++ b/src/vnet/adj/adj_midchain.c
@@ -20,7 +20,9 @@
#include <vnet/adj/adj_midchain.h>
#include <vnet/ethernet/arp_packet.h>
#include <vnet/dpo/drop_dpo.h>
+#include <vnet/dpo/load_balance.h>
#include <vnet/fib/fib_walk.h>
+#include <vnet/fib/fib_entry.h>
/**
* The two midchain tx feature node indices
@@ -473,6 +475,7 @@ adj_midchain_setup (adj_index_t adj_index,
adj->sub_type.midchain.fixup_func = fixup;
adj->sub_type.midchain.fixup_data = data;
+ adj->sub_type.midchain.fei = FIB_NODE_INDEX_INVALID;
adj->ia_flags |= flags;
arc_index = adj_midchain_get_feature_arc_index_for_link_type (adj);
@@ -548,11 +551,24 @@ adj_nbr_midchain_update_rewrite (adj_index_t adj_index,
void
adj_nbr_midchain_unstack (adj_index_t adj_index)
{
+ fib_node_index_t *entry_indicies, tmp;
ip_adjacency_t *adj;
ASSERT(ADJ_INDEX_INVALID != adj_index);
+ adj = adj_get (adj_index);
- adj = adj_get(adj_index);
+ /*
+ * check to see if this unstacking breaks a recursion loop
+ */
+ entry_indicies = NULL;
+ tmp = adj->sub_type.midchain.fei;
+ adj->sub_type.midchain.fei = FIB_NODE_INDEX_INVALID;
+
+ if (FIB_NODE_INDEX_INVALID != tmp)
+ {
+ fib_entry_recursive_loop_detect(tmp, &entry_indicies);
+ vec_free(entry_indicies);
+ }
/*
* stack on the drop
@@ -564,6 +580,74 @@ adj_nbr_midchain_unstack (adj_index_t adj_index)
CLIB_MEMORY_BARRIER();
}
+void
+adj_nbr_midchain_stack_on_fib_entry (adj_index_t ai,
+ fib_node_index_t fei,
+ fib_forward_chain_type_t fct)
+{
+ fib_node_index_t *entry_indicies;
+ dpo_id_t tmp = DPO_INVALID;
+ ip_adjacency_t *adj;
+
+ adj = adj_get (ai);
+
+ /*
+ * check to see if this stacking will form a recursion loop
+ */
+ entry_indicies = NULL;
+ adj->sub_type.midchain.fei = fei;
+
+ if (fib_entry_recursive_loop_detect(adj->sub_type.midchain.fei, &entry_indicies))
+ {
+ /*
+ * loop formed, stack on the drop.
+ */
+ dpo_copy(&tmp, drop_dpo_get(fib_forw_chain_type_to_dpo_proto(fct)));
+ }
+ else
+ {
+ fib_entry_contribute_forwarding (fei, fct, &tmp);
+
+ if ((adj->ia_flags & ADJ_FLAG_MIDCHAIN_IP_STACK) &&
+ (DPO_LOAD_BALANCE == tmp.dpoi_type))
+ {
+ /*
+ * do that hash now and stack on the choice.
+ * If the choice is an incomplete adj then we will need a poke when
+ * it becomes complete. This happens since the adj update walk propagates
+ * as far as recursive paths.
+ */
+ const dpo_id_t *choice;
+ load_balance_t *lb;
+ int hash;
+
+ lb = load_balance_get (tmp.dpoi_index);
+
+ if (FIB_FORW_CHAIN_TYPE_UNICAST_IP4 == fct)
+ {
+ hash = ip4_compute_flow_hash ((ip4_header_t *) adj_get_rewrite (ai),
+ lb->lb_hash_config);
+ }
+ else if (FIB_FORW_CHAIN_TYPE_UNICAST_IP6 == fct)
+ {
+ hash = ip6_compute_flow_hash ((ip6_header_t *) adj_get_rewrite (ai),
+ lb->lb_hash_config);
+ }
+ else
+ {
+ hash = 0;
+ ASSERT(0);
+ }
+
+ choice = load_balance_get_bucket_i (lb, hash & lb->lb_n_buckets_minus_1);
+ dpo_copy (&tmp, choice);
+ }
+ }
+ adj_nbr_midchain_stack (ai, &tmp);
+ dpo_reset(&tmp);
+ vec_free(entry_indicies);
+}
+
/**
* adj_nbr_midchain_stack
*/
@@ -585,6 +669,33 @@ adj_nbr_midchain_stack (adj_index_t adj_index,
next);
}
+int
+adj_ndr_midchain_recursive_loop_detect (adj_index_t ai,
+ fib_node_index_t **entry_indicies)
+{
+ fib_node_index_t *entry_index, *entries;
+ ip_adjacency_t * adj;
+
+ adj = adj_get(ai);
+ entries = *entry_indicies;
+
+ vec_foreach(entry_index, entries)
+ {
+ if (*entry_index == adj->sub_type.midchain.fei)
+ {
+ /*
+ * The entry this midchain links to is already in the set
+ * of visited entries; this is a loop
+ */
+ adj->ia_flags |= ADJ_FLAG_MIDCHAIN_LOOPED;
+ return (1);
+ }
+ }
+
+ adj->ia_flags &= ~ADJ_FLAG_MIDCHAIN_LOOPED;
+ return (0);
+}
+
u8*
format_adj_midchain (u8* s, va_list *ap)
{
@@ -599,8 +710,15 @@ format_adj_midchain (u8* s, va_list *ap)
s = format (s, " %U",
format_vnet_rewrite,
&adj->rewrite_header, sizeof (adj->rewrite_data), indent);
- s = format (s, "\n%Ustacked-on:\n%U%U",
- format_white_space, indent,
+ s = format (s, "\n%Ustacked-on",
+ format_white_space, indent);
+
+ if (FIB_NODE_INDEX_INVALID != adj->sub_type.midchain.fei)
+ {
+ s = format (s, " entry:%d", adj->sub_type.midchain.fei);
+
+ }
+ s = format (s, ":\n%U%U",
format_white_space, indent+2,
format_dpo_id, &adj->sub_type.midchain.next_dpo, indent+2);
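
adj_nbr_midchain_stack_on_fib_entry() above records the FIB entry on the adjacency, runs fib_entry_recursive_loop_detect(), and stacks on the drop if a loop would form; otherwise, when ADJ_FLAG_MIDCHAIN_IP_STACK is set and the entry contributes a load-balance, it computes the IP flow hash over the adjacency's own rewrite once and stacks directly on the selected bucket, because the tunnel encap, and therefore the hash, is fixed per adjacency. The self-contained sketch below illustrates only the "hash once, pick one bucket" step, with a toy hash and bucket table standing in for VPP's load_balance_t (the demo_* names are illustrative, not VPP APIs):

    #include <stdint.h>
    #include <stdio.h>

    /* toy load-balance: a power-of-2 bucket count, masked like lb_n_buckets_minus_1 */
    typedef struct
    {
        uint32_t n_buckets_minus_1;  /* n_buckets - 1, n_buckets a power of 2 */
        const char *buckets[4];      /* next-hop labels standing in for the dpo children */
    } demo_load_balance_t;

    /* stand-in for ip4_compute_flow_hash(): any deterministic hash of the
     * fixed encap header works for the illustration */
    static uint32_t
    demo_flow_hash (uint32_t encap_src, uint32_t encap_dst)
    {
        uint32_t h = encap_src ^ encap_dst;

        h ^= h >> 16;
        h *= 0x7feb352dU;
        h ^= h >> 15;
        return h;
    }

    /* pick the single bucket this adjacency will always map to: the tunnel
     * rewrite (src/dst) is constant per adjacency, so the hash is computed
     * once at stacking time rather than per packet in the data plane */
    static const char *
    demo_stack_choice (const demo_load_balance_t *lb,
                       uint32_t encap_src, uint32_t encap_dst)
    {
        uint32_t hash = demo_flow_hash (encap_src, encap_dst);

        return lb->buckets[hash & lb->n_buckets_minus_1];
    }

    int
    main (void)
    {
        demo_load_balance_t lb = {
            .n_buckets_minus_1 = 3,
            .buckets = { "via-10.0.0.1", "via-10.0.0.2",
                         "via-10.0.0.3", "via-10.0.0.4" },
        };

        /* tunnel encap src/dst addresses encoded as u32s for the sketch */
        printf ("midchain stacks on: %s\n",
                demo_stack_choice (&lb, 0x0a000001, 0x01010102));
        return 0;
    }

Since the hash input never changes for a given adjacency, recomputing it per packet would be wasted work; the trade-off, noted in the comment above, is that the midchain needs a poke (re-stack) if the chosen bucket is an incomplete adjacency that later becomes complete.
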
diff --git a/src/vnet/adj/adj_midchain.h b/src/vnet/adj/adj_midchain.h
index 65892314f40..24fea427a6b 100644
--- a/src/vnet/adj/adj_midchain.h
+++ b/src/vnet/adj/adj_midchain.h
@@ -53,7 +53,8 @@ extern void adj_nbr_midchain_update_rewrite(adj_index_t adj_index,
/**
* @brief
* [re]stack a midchain. 'Stacking' is the act of forming parent-child
- * relationships in the data-plane graph.
+ * relationships in the data-plane graph. Do NOT use this function to
+ * stack on a DPO type that might form a loop.
*
* @param adj_index
* The index of the midchain to stack
@@ -66,6 +67,25 @@ extern void adj_nbr_midchain_stack(adj_index_t adj_index,
/**
* @brief
+ * [re]stack a midchain. 'Stacking' is the act of forming parent-child
+ * relationships in the data-plane graph. This function also performs
+ * recursive loop detection.
+ *
+ * @param adj_index
+ * The index of the midchain to stack
+ *
+ * @param fei
+ * The FIB entry to stack on
+ *
+ * @param fct
+ * The chain type to use from the fib entry forwarding
+ */
+extern void adj_nbr_midchain_stack_on_fib_entry(adj_index_t adj_index,
+ fib_node_index_t fei,
+ fib_forward_chain_type_t fct);
+
+/**
+ * @brief
* unstack a midchain. This will break the chain between the midchain and
* the next graph section. This is a implemented as stack-on-drop
*
@@ -75,6 +95,18 @@ extern void adj_nbr_midchain_stack(adj_index_t adj_index,
extern void adj_nbr_midchain_unstack(adj_index_t adj_index);
/**
+ * @brief descend the FIB graph looking for loops
+ *
+ * @param ai
+ * The adj index to traverse
+ *
+ * @param entry_indicies
+ * A pointer to a vector of FIB entries already visited.
+ */
+extern int adj_ndr_midchain_recursive_loop_detect(adj_index_t ai,
+ fib_node_index_t **entry_indicies);
+
+/**
* @brief
* Module initialisation
*/
diff --git a/src/vnet/fib/fib_path.c b/src/vnet/fib/fib_path.c
index baf8275d181..f528c67677f 100644
--- a/src/vnet/fib/fib_path.c
+++ b/src/vnet/fib/fib_path.c
@@ -1802,7 +1802,7 @@ fib_path_recursive_loop_detect (fib_node_index_t path_index,
{
/*
* no loop here yet. keep forward walking the graph.
- */
+ */
if (fib_entry_recursive_loop_detect(path->fp_via_fib, entry_indicies))
{
FIB_PATH_DBG(path, "recursive loop formed");
@@ -1818,6 +1818,18 @@ fib_path_recursive_loop_detect (fib_node_index_t path_index,
}
case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
case FIB_PATH_TYPE_ATTACHED:
+ if (adj_recursive_loop_detect(path->fp_dpo.dpoi_index,
+ entry_indicies))
+ {
+ FIB_PATH_DBG(path, "recursive loop formed");
+ path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
+ }
+ else
+ {
+ FIB_PATH_DBG(path, "recursive loop cleared");
+ path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
+ }
+ break;
case FIB_PATH_TYPE_SPECIAL:
case FIB_PATH_TYPE_DEAG:
case FIB_PATH_TYPE_DVR:
@@ -2690,7 +2702,7 @@ show_fib_path_command (vlib_main_t * vm,
path = fib_path_get(pi);
u8 *s = format(NULL, "%U", format_fib_path, pi, 1,
FIB_PATH_FORMAT_FLAGS_NONE);
- s = format(s, "children:");
+ s = format(s, "\n children:");
s = fib_node_children_format(path->fp_node.fn_children, s);
vlib_cli_output (vm, "%s", s);
vec_free(s);
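
The fib_path.c hunk above extends recursive loop detection to attached and attached-next-hop paths by calling adj_recursive_loop_detect(), which for midchain adjacencies reduces to checking whether the FIB entry the midchain is stacked on already appears in the vector of entries visited on the current walk (adj_ndr_midchain_recursive_loop_detect() in adj_midchain.c above). A minimal sketch of that visited-set check, using a plain array in place of a VPP vector and illustrative demo_* names:

    #include <stdio.h>

    typedef unsigned int demo_fib_node_index_t;
    #define DEMO_INDEX_INVALID (~0u)

    /* analogous to adj_ndr_midchain_recursive_loop_detect(): the walk carries
     * a vector of FIB entry indices already visited; if the entry this
     * midchain resolves through is in that set, the graph contains a cycle */
    static int
    demo_midchain_loop_detect (demo_fib_node_index_t midchain_fei,
                               const demo_fib_node_index_t *visited,
                               unsigned int n_visited)
    {
        unsigned int i;

        if (DEMO_INDEX_INVALID == midchain_fei)
            return 0;            /* not stacked on an entry: no loop possible */

        for (i = 0; i < n_visited; i++)
        {
            if (visited[i] == midchain_fei)
                return 1;        /* entry already on this walk: loop */
        }
        return 0;
    }

    int
    main (void)
    {
        /* entries visited so far on the walk that reached this midchain */
        demo_fib_node_index_t visited[] = { 12, 40, 7 };

        printf ("fei 7  -> loop: %d\n", demo_midchain_loop_detect (7, visited, 3));
        printf ("fei 99 -> loop: %d\n", demo_midchain_loop_detect (99, visited, 3));
        return 0;
    }

In the real code the result is also latched into ADJ_FLAG_MIDCHAIN_LOOPED so "show adj" can report the looped state, and adj_nbr_midchain_unstack() re-runs the detection so entries previously marked as looped are cleared once the loop is broken.
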
diff --git a/src/vnet/gre/gre.c b/src/vnet/gre/gre.c
index 449968c1be0..e30319f5f99 100644
--- a/src/vnet/gre/gre.c
+++ b/src/vnet/gre/gre.c
@@ -301,17 +301,20 @@ gre_update_adj (vnet_main_t * vnm, u32 sw_if_index, adj_index_t ai)
{
gre_main_t *gm = &gre_main;
gre_tunnel_t *t;
- u32 ti;
+ adj_flags_t af;
u8 is_ipv6;
+ u32 ti;
ti = gm->tunnel_index_by_sw_if_index[sw_if_index];
t = pool_elt_at_index (gm->tunnels, ti);
is_ipv6 = t->tunnel_dst.fp_proto == FIB_PROTOCOL_IP6 ? 1 : 0;
+ af = ADJ_FLAG_MIDCHAIN_IP_STACK;
+
+ if (VNET_LINK_ETHERNET == adj_get_link_type (ai))
+ af |= ADJ_FLAG_MIDCHAIN_NO_COUNT;
adj_nbr_midchain_update_rewrite
- (ai, !is_ipv6 ? gre4_fixup : gre6_fixup, NULL,
- (VNET_LINK_ETHERNET == adj_get_link_type (ai) ?
- ADJ_FLAG_MIDCHAIN_NO_COUNT : ADJ_FLAG_NONE),
+ (ai, !is_ipv6 ? gre4_fixup : gre6_fixup, NULL, af,
gre_build_rewrite (vnm, sw_if_index, adj_get_link_type (ai), NULL));
gre_tunnel_stack (ai);
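
gre_update_adj() now always requests ADJ_FLAG_MIDCHAIN_IP_STACK, so the shared adj_nbr_midchain_stack_on_fib_entry() performs the hash-and-pick step, and it adds ADJ_FLAG_MIDCHAIN_NO_COUNT only when the adjacency link type is Ethernet; the same flag selection recurs in the ipip.c and lisp_gpe_adjacency.c hunks below. A tiny self-contained sketch of that selection (the DEMO_* values are illustrative stand-ins, not the VPP enums):

    #include <stdio.h>

    /* illustrative stand-ins for VNET_LINK_* and the ADJ_FLAG_MIDCHAIN_* bits */
    typedef enum
    {
        DEMO_LINK_IP4,
        DEMO_LINK_IP6,
        DEMO_LINK_ETHERNET,
    } demo_link_t;

    enum
    {
        DEMO_FLAG_MIDCHAIN_NO_COUNT = (1 << 1),
        DEMO_FLAG_MIDCHAIN_IP_STACK = (1 << 2),
    };

    /* every tunnel midchain requests IP_STACK so the generic stack-on-fib-entry
     * code hashes and picks a load-balance bucket; NO_COUNT is added only for
     * Ethernet (L2) payloads, which per the adj.h description have already been
     * counted at the interface TX node */
    static unsigned int
    demo_tunnel_adj_flags (demo_link_t link)
    {
        unsigned int af = DEMO_FLAG_MIDCHAIN_IP_STACK;

        if (DEMO_LINK_ETHERNET == link)
            af |= DEMO_FLAG_MIDCHAIN_NO_COUNT;

        return af;
    }

    int
    main (void)
    {
        printf ("ip4 payload flags: 0x%x\n", demo_tunnel_adj_flags (DEMO_LINK_IP4));
        printf ("l2  payload flags: 0x%x\n", demo_tunnel_adj_flags (DEMO_LINK_ETHERNET));
        return 0;
    }
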
diff --git a/src/vnet/gre/interface.c b/src/vnet/gre/interface.c
index 6be934af56c..b9bfb79c172 100644
--- a/src/vnet/gre/interface.c
+++ b/src/vnet/gre/interface.c
@@ -128,9 +128,7 @@ gre_tunnel_from_fib_node (fib_node_t * node)
void
gre_tunnel_stack (adj_index_t ai)
{
- fib_forward_chain_type_t fib_fwd;
gre_main_t *gm = &gre_main;
- dpo_id_t tmp = DPO_INVALID;
ip_adjacency_t *adj;
gre_tunnel_t *gt;
u32 sw_if_index;
@@ -149,42 +147,14 @@ gre_tunnel_stack (adj_index_t ai)
VNET_HW_INTERFACE_FLAG_LINK_UP) == 0)
{
adj_nbr_midchain_unstack (ai);
- return;
}
-
- fib_fwd = fib_forw_chain_type_from_fib_proto (gt->tunnel_dst.fp_proto);
-
- fib_entry_contribute_forwarding (gt->fib_entry_index, fib_fwd, &tmp);
- if (DPO_LOAD_BALANCE == tmp.dpoi_type)
+ else
{
- /*
- * post GRE rewrite we will load-balance. However, the GRE encap
- * is always the same for this adjacency/tunnel and hence the IP/GRE
- * src,dst hash is always the same result too. So we do that hash now and
- * stack on the choice.
- * If the choice is an incomplete adj then we will need a poke when
- * it becomes complete. This happens since the adj update walk propagates
- * as far a recursive paths.
- */
- const dpo_id_t *choice;
- load_balance_t *lb;
- int hash;
-
- lb = load_balance_get (tmp.dpoi_index);
-
- if (fib_fwd == FIB_FORW_CHAIN_TYPE_UNICAST_IP4)
- hash = ip4_compute_flow_hash ((ip4_header_t *) adj_get_rewrite (ai),
- lb->lb_hash_config);
- else
- hash = ip6_compute_flow_hash ((ip6_header_t *) adj_get_rewrite (ai),
- lb->lb_hash_config);
- choice =
- load_balance_get_bucket_i (lb, hash & lb->lb_n_buckets_minus_1);
- dpo_copy (&tmp, choice);
+ adj_nbr_midchain_stack_on_fib_entry (ai,
+ gt->fib_entry_index,
+ fib_forw_chain_type_from_fib_proto
+ (gt->tunnel_dst.fp_proto));
}
-
- adj_nbr_midchain_stack (ai, &tmp);
- dpo_reset (&tmp);
}
/**
diff --git a/src/vnet/ipip/ipip.c b/src/vnet/ipip/ipip.c
index 9c58e520623..a5e46c41d6c 100644
--- a/src/vnet/ipip/ipip.c
+++ b/src/vnet/ipip/ipip.c
@@ -186,46 +186,15 @@ ipip_tunnel_stack (adj_index_t ai)
VNET_HW_INTERFACE_FLAG_LINK_UP) == 0)
{
adj_nbr_midchain_unstack (ai);
- return;
}
-
- dpo_id_t tmp = DPO_INVALID;
- fib_forward_chain_type_t fib_fwd =
- t->transport ==
- IPIP_TRANSPORT_IP6 ? FIB_FORW_CHAIN_TYPE_UNICAST_IP6 :
- FIB_FORW_CHAIN_TYPE_UNICAST_IP4;
-
- fib_entry_contribute_forwarding (t->p2p.fib_entry_index, fib_fwd, &tmp);
- if (DPO_LOAD_BALANCE == tmp.dpoi_type)
+ else
{
- /*
- * post IPIP rewrite we will load-balance. However, the IPIP encap
- * is always the same for this adjacency/tunnel and hence the IP/IPIP
- * src,dst hash is always the same result too. So we do that hash now and
- * stack on the choice.
- * If the choice is an incomplete adj then we will need a poke when
- * it becomes complete. This happens since the adj update walk propagates
- * as far a recursive paths.
- */
- const dpo_id_t *choice;
- load_balance_t *lb;
- int hash;
-
- lb = load_balance_get (tmp.dpoi_index);
-
- if (fib_fwd == FIB_FORW_CHAIN_TYPE_UNICAST_IP4)
- hash = ip4_compute_flow_hash ((ip4_header_t *) adj_get_rewrite (ai),
- lb->lb_hash_config);
- else
- hash = ip6_compute_flow_hash ((ip6_header_t *) adj_get_rewrite (ai),
- lb->lb_hash_config);
- choice =
- load_balance_get_bucket_i (lb, hash & lb->lb_n_buckets_minus_1);
- dpo_copy (&tmp, choice);
+ adj_nbr_midchain_stack_on_fib_entry
+ (ai,
+ t->p2p.fib_entry_index,
+ (t->transport == IPIP_TRANSPORT_IP6) ?
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP6 : FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
}
-
- adj_nbr_midchain_stack (ai, &tmp);
- dpo_reset (&tmp);
}
static adj_walk_rc_t
@@ -253,24 +222,24 @@ ipip_tunnel_restack (ipip_tunnel_t * gt)
void
ipip_update_adj (vnet_main_t * vnm, u32 sw_if_index, adj_index_t ai)
{
- ipip_tunnel_t *t;
adj_midchain_fixup_t f;
+ ipip_tunnel_t *t;
+ adj_flags_t af;
t = ipip_tunnel_db_find_by_sw_if_index (sw_if_index);
if (!t)
return;
f = t->transport == IPIP_TRANSPORT_IP6 ? ipip6_fixup : ipip4_fixup;
-
- adj_nbr_midchain_update_rewrite (ai, f, t,
- (VNET_LINK_ETHERNET ==
- adj_get_link_type (ai) ?
- ADJ_FLAG_MIDCHAIN_NO_COUNT :
- ADJ_FLAG_NONE), ipip_build_rewrite (vnm,
- sw_if_index,
- adj_get_link_type
- (ai),
- NULL));
+ af = ADJ_FLAG_MIDCHAIN_IP_STACK;
+ if (VNET_LINK_ETHERNET == adj_get_link_type (ai))
+ af |= ADJ_FLAG_MIDCHAIN_NO_COUNT;
+
+ adj_nbr_midchain_update_rewrite (ai, f, t, af,
+ ipip_build_rewrite (vnm,
+ sw_if_index,
+ adj_get_link_type
+ (ai), NULL));
ipip_tunnel_stack (ai);
}
diff --git a/src/vnet/ipip/sixrd.c b/src/vnet/ipip/sixrd.c
index cc5bfa33d91..30c37c80fe8 100644
--- a/src/vnet/ipip/sixrd.c
+++ b/src/vnet/ipip/sixrd.c
@@ -152,9 +152,9 @@ ip6ip_tunnel_stack (adj_index_t ai, u32 fib_entry_index)
if (vnet_hw_interface_get_flags (vnet_get_main (), t->hw_if_index) &
VNET_HW_INTERFACE_FLAG_LINK_UP)
{
- adj_nbr_midchain_stack (ai,
- fib_entry_contribute_ip_forwarding
- (fib_entry_index));
+ adj_nbr_midchain_stack_on_fib_entry (ai,
+ fib_entry_index,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
}
else
{
diff --git a/src/vnet/lisp-gpe/lisp_gpe_adjacency.c b/src/vnet/lisp-gpe/lisp_gpe_adjacency.c
index 6f85dc4a761..7361e8eb0d6 100644
--- a/src/vnet/lisp-gpe/lisp_gpe_adjacency.c
+++ b/src/vnet/lisp-gpe/lisp_gpe_adjacency.c
@@ -131,48 +131,13 @@ static void
lisp_gpe_adj_stack_one (lisp_gpe_adjacency_t * ladj, adj_index_t ai)
{
const lisp_gpe_tunnel_t *lgt;
- dpo_id_t tmp = DPO_INVALID;
lgt = lisp_gpe_tunnel_get (ladj->tunnel_index);
- fib_entry_contribute_forwarding (lgt->fib_entry_index,
- lisp_gpe_adj_get_fib_chain_type (ladj),
- &tmp);
- if (DPO_LOAD_BALANCE == tmp.dpoi_type)
- {
- /*
- * post LISP rewrite we will load-balance. However, the LISP encap
- * is always the same for this adjacency/tunnel and hence the IP/UDP src,dst
- * hash is always the same result too. So we do that hash now and
- * stack on the choice.
- * If the choice is an incomplete adj then we will need a poke when
- * it becomes complete. This happens since the adj update walk propagates
- * as far a recursive paths.
- */
- const dpo_id_t *choice;
- load_balance_t *lb;
- int hash;
-
- lb = load_balance_get (tmp.dpoi_index);
-
- if (IP4 == ip_addr_version (&ladj->remote_rloc))
- {
- hash = ip4_compute_flow_hash ((ip4_header_t *) adj_get_rewrite (ai),
- lb->lb_hash_config);
- }
- else
- {
- hash = ip6_compute_flow_hash ((ip6_header_t *) adj_get_rewrite (ai),
- lb->lb_hash_config);
- }
-
- choice =
- load_balance_get_bucket_i (lb, hash & lb->lb_n_buckets_minus_1);
- dpo_copy (&tmp, choice);
- }
-
- adj_nbr_midchain_stack (ai, &tmp);
- dpo_reset (&tmp);
+ adj_nbr_midchain_stack_on_fib_entry (ai,
+ lgt->fib_entry_index,
+ lisp_gpe_adj_get_fib_chain_type
+ (ladj));
}
/**
@@ -332,6 +297,7 @@ lisp_gpe_update_adjacency (vnet_main_t * vnm, u32 sw_if_index, adj_index_t ai)
ip_adjacency_t *adj;
ip_address_t rloc;
vnet_link_t linkt;
+ adj_flags_t af;
index_t lai;
adj = adj_get (ai);
@@ -347,12 +313,12 @@ lisp_gpe_update_adjacency (vnet_main_t * vnm, u32 sw_if_index, adj_index_t ai)
ladj = pool_elt_at_index (lisp_adj_pool, lai);
lgt = lisp_gpe_tunnel_get (ladj->tunnel_index);
linkt = adj_get_link_type (ai);
+ af = ADJ_FLAG_MIDCHAIN_IP_STACK;
+ if (VNET_LINK_ETHERNET == linkt)
+ af |= ADJ_FLAG_MIDCHAIN_NO_COUNT;
+
adj_nbr_midchain_update_rewrite
- (ai, lisp_gpe_fixup,
- NULL,
- (VNET_LINK_ETHERNET == linkt ?
- ADJ_FLAG_MIDCHAIN_NO_COUNT :
- ADJ_FLAG_NONE),
+ (ai, lisp_gpe_fixup, NULL, af,
lisp_gpe_tunnel_build_rewrite (lgt, ladj,
lisp_gpe_adj_proto_from_vnet_link_type
(linkt)));
diff --git a/test/test_gre.py b/test/test_gre.py
index eed3d8b2c6f..dd7b8bc6b8f 100644
--- a/test/test_gre.py
+++ b/test/test_gre.py
@@ -867,6 +867,67 @@ class TestGRE(VppTestCase):
route_tun1_dst.add_vpp_config()
route_tun2_dst.add_vpp_config()
+ def test_gre_loop(self):
+ """ GRE tunnel loop Tests """
+
+ #
+ # Create an L3 GRE tunnel.
+ # - set it admin up
+ # - assign an IP Address
+ #
+ gre_if = VppGreInterface(self,
+ self.pg0.local_ip4,
+ "1.1.1.2")
+ gre_if.add_vpp_config()
+ gre_if.admin_up()
+ gre_if.config_ip4()
+
+ #
+ # add a route to the tunnel's destination that points
+ # through the tunnel, hence forming a loop in the forwarding
+ # graph
+ #
+ route_dst = VppIpRoute(self, "1.1.1.2", 32,
+ [VppRoutePath("0.0.0.0",
+ gre_if.sw_if_index)])
+ route_dst.add_vpp_config()
+
+ #
+ # packets to the tunnel's destination should be dropped
+ #
+ tx = self.create_stream_ip4(self.pg0, "1.1.1.1", "1.1.1.2")
+ self.send_and_assert_no_replies(self.pg2, tx)
+
+ self.logger.info(self.vapi.ppcli("sh adj 7"))
+
+ #
+ # break the loop
+ #
+ route_dst.modify([VppRoutePath(self.pg1.remote_ip4,
+ self.pg1.sw_if_index)])
+ route_dst.add_vpp_config()
+
+ rx = self.send_and_expect(self.pg0, tx, self.pg1)
+
+ #
+ # a good route through the tunnel to check it restacked
+ #
+ route_via_tun_2 = VppIpRoute(self, "2.2.2.2", 32,
+ [VppRoutePath("0.0.0.0",
+ gre_if.sw_if_index)])
+ route_via_tun_2.add_vpp_config()
+
+ tx = self.create_stream_ip4(self.pg0, "2.2.2.3", "2.2.2.2")
+ rx = self.send_and_expect(self.pg0, tx, self.pg1)
+ self.verify_tunneled_4o4(self.pg1, rx, tx,
+ self.pg0.local_ip4, "1.1.1.2")
+
+ #
+ # cleanup
+ #
+ route_via_tun_2.remove_vpp_config()
+ gre_if.remove_vpp_config()
+
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)