summary | refs | log | tree | commit | diff | stats
path: root/plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.c
diff options
context:
space:
mode:
author: Vengada Govindan <venggovi@cisco.com> 2016-12-13 21:59:54 -0800
committer: Damjan Marion <dmarion.lists@gmail.com> 2016-12-21 17:52:33 +0000
commit 7e4edc8d1ee9ae26747cf6ec23b76b78f0f550da (patch)
tree 4ba527cdb747846bc62b97dcb340bab04b776968 /plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.c
parent fff67c89c5135263989c48b9ed923eb785efa67e (diff)
VPP-470: Introduce VxLAN-GPE as transport for iOAM.
This is the second of the two commits. This commit introduces VxLAN-GPE transit functionality. Change-Id: I12f13e00f456df0047f2b7a0737addfeb683c420 Signed-off-by: Vengada Govindan <venggovi@cisco.com>
Diffstat (limited to 'plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.c')
-rw-r--r-- plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.c | 453
1 file changed, 415 insertions, 38 deletions
diff --git a/plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.c b/plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.c
index 066f5821bde..6c04d9af210 100644
--- a/plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.c
+++ b/plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.c
@@ -16,6 +16,9 @@
#include <vnet/vxlan-gpe/vxlan_gpe_packet.h>
#include <vnet/ip/format.h>
#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h>
+#include <vnet/fib/ip6_fib.h>
+#include <vnet/fib/ip4_fib.h>
+#include <vnet/fib/fib_entry.h>
vxlan_gpe_ioam_main_t vxlan_gpe_ioam_main;
@@ -163,11 +166,257 @@ vxlan_gpe_ioam_set (vxlan_gpe_tunnel_t * t,
}
+/* Enable (is_add=1) or disable (is_add=0) the "vxlan-gpe-transit-ioam"
+ * feature on the "ip4-output" arc of a single interface.  Thin wrapper
+ * so the interface-walking callers below stay readable. */
+static void
+vxlan_gpe_set_clear_output_feature_on_intf (vlib_main_t * vm,
+ u32 sw_if_index0, u8 is_add)
+{
+
+
+
+ vnet_feature_enable_disable ("ip4-output", "vxlan-gpe-transit-ioam",
+ sw_if_index0, is_add,
+ 0 /* void *feature_config */ ,
+ 0 /* u32 n_feature_config_bytes */ );
+ return;
+}
+
+/* Remove the transit iOAM output feature from every software interface.
+ * Used to wipe the slate before the per-destination refresh re-programs
+ * features on the interfaces that still need them. */
+void
+vxlan_gpe_clear_output_feature_on_all_intfs (vlib_main_t * vm)
+{
+ vnet_sw_interface_t *si = 0;
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_interface_main_t *im = &vnm->interface_main;
+
+ pool_foreach (si, im->sw_interfaces, (
+ {
+ vxlan_gpe_set_clear_output_feature_on_intf
+ (vm, si->sw_if_index, 0);
+ }));
+ return;
+}
+
+
+extern fib_forward_chain_type_t
+fib_entry_get_default_chain_type (const fib_entry_t * fib_entry);
+
+/*
+ * Enable (is_add=1) or disable (is_add=0) transit iOAM processing for a
+ * VxLAN-GPE destination address.
+ *
+ * On add: looks up the /32 route to dst_addr, walks its (possibly
+ * two-level) load-balance to find adjacency egress interfaces, enables
+ * the "vxlan-gpe-transit-ioam" output feature on each, records the
+ * destination in hm->dst_tunnels / hm->dst_by_ip4, and attaches as a
+ * FIB child of the route so back-walks can re-program the feature when
+ * the route changes.
+ *
+ * On delete: marks the egress interfaces as unreferenced (~0) in
+ * hm->bool_ref_by_sw_if_index and removes the hash/pool bookkeeping;
+ * the actual feature removal is performed later by
+ * vxlan_gpe_clear_output_feature_on_select_intfs ().
+ *
+ * Returns 0 on success (including the IPv6 no-op case), -1 when the
+ * route has no valid load-balance DPO.
+ *
+ * NOTE(review): the route lookup uses fib_index0 == 0 rather than the
+ * outer_fib_index argument -- confirm behavior for non-default FIBs.
+ */
+int
+vxlan_gpe_enable_disable_ioam_for_dest (vlib_main_t * vm,
+ ip46_address_t dst_addr,
+ u32 outer_fib_index,
+ u8 is_ipv4, u8 is_add)
+{
+ vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+ u32 fib_index0 = 0;
+ u32 sw_if_index0 = ~0;
+
+ fib_node_index_t fei = ~0;
+ fib_entry_t *fib_entry;
+ u32 adj_index0;
+ ip_adjacency_t *adj0;
+ fib_prefix_t fib_prefix;
+ //fib_forward_chain_type_t fct;
+ load_balance_t *lb_m, *lb_b;
+ const dpo_id_t *dpo0, *dpo1;
+ u32 i, j;
+ //vnet_hw_interface_t *hw;
+
+ /* Only IPv4 destinations are supported; IPv6 is a silent no-op. */
+ if (is_ipv4)
+ {
+ memset (&fib_prefix, 0, sizeof (fib_prefix_t));
+ fib_prefix.fp_len = 32;
+ fib_prefix.fp_proto = FIB_PROTOCOL_IP4;
+ fib_prefix.fp_addr = dst_addr;
+ }
+ else
+ {
+ return 0;
+ }
+
+ fei = fib_table_lookup (fib_index0, &fib_prefix);
+ fib_entry = fib_entry_get (fei);
+
+ //fct = fib_entry_get_default_chain_type (fib_entry);
+
+ if (!dpo_id_is_valid (&fib_entry->fe_lb /*[fct] */ ))
+ {
+ return (-1);
+ }
+
+ lb_m = load_balance_get (fib_entry->fe_lb /*[fct] */ .dpoi_index);
+
+ /* Walk the outer load-balance; each bucket may itself be a nested
+ * load-balance whose buckets hold the actual adjacencies. */
+ for (i = 0; i < lb_m->lb_n_buckets; i++)
+ {
+ dpo0 = load_balance_get_bucket_i (lb_m, i);
+
+ if (dpo0->dpoi_type == DPO_LOAD_BALANCE)
+ {
+ lb_b = load_balance_get (dpo0->dpoi_index);
+
+ for (j = 0; j < lb_b->lb_n_buckets; j++)
+ {
+ dpo1 = load_balance_get_bucket_i (lb_b, j);
+
+ if (dpo1->dpoi_type == DPO_ADJACENCY)
+ {
+ adj_index0 = dpo1->dpoi_index;
+
+ if (ADJ_INDEX_INVALID == adj_index0)
+ {
+ continue;
+ }
+
+ adj0 =
+ ip_get_adjacency (&(ip4_main.lookup_main), adj_index0);
+ sw_if_index0 = adj0->rewrite_header.sw_if_index;
+
+ if (~0 == sw_if_index0)
+ {
+ continue;
+ }
+
+
+ if (is_add)
+ {
+ vnet_feature_enable_disable ("ip4-output",
+ "vxlan-gpe-transit-ioam",
+ sw_if_index0, is_add, 0
+ /* void *feature_config */
+ , 0 /* u32 n_feature_config_bytes */
+ );
+
+ /* Mark the interface as referenced (1). */
+ vec_validate_init_empty (hm->bool_ref_by_sw_if_index,
+ sw_if_index0, ~0);
+ hm->bool_ref_by_sw_if_index[sw_if_index0] = 1;
+ }
+ else
+ {
+ /* NOTE(review): no vec_validate on this path; assumes
+ * a prior add sized the vector -- confirm. */
+ hm->bool_ref_by_sw_if_index[sw_if_index0] = ~0;
+ }
+ }
+ }
+ }
+ }
+
+ /* Per-destination bookkeeping: pool entry plus hash keyed by the
+ * IPv4 /32 prefix. */
+ if (is_ipv4)
+ {
+
+ uword *t = NULL;
+ vxlan_gpe_ioam_dest_tunnels_t *t1;
+ fib_prefix_t key4, *key4_copy;
+ hash_pair_t *hp;
+ memset (&key4, 0, sizeof (key4));
+ key4.fp_proto = FIB_PROTOCOL_IP4;
+ key4.fp_addr.ip4.as_u32 = fib_prefix.fp_addr.ip4.as_u32;
+ t = hash_get_mem (hm->dst_by_ip4, &key4);
+ if (is_add)
+ {
+ /* Already tracked: nothing further to do. */
+ if (t)
+ {
+ return 0;
+ }
+ pool_get_aligned (hm->dst_tunnels, t1, CLIB_CACHE_LINE_BYTES);
+ memset (t1, 0, sizeof (*t1));
+ t1->fp_proto = FIB_PROTOCOL_IP4;
+ t1->dst_addr.ip4.as_u32 = fib_prefix.fp_addr.ip4.as_u32;
+ key4_copy = clib_mem_alloc (sizeof (*key4_copy));
+ clib_memcpy (key4_copy, &key4, sizeof (*key4_copy));
+ hash_set_mem (hm->dst_by_ip4, key4_copy, t1 - hm->dst_tunnels);
+ /*
+ * Attach to the FIB entry for the VxLAN-GPE destination
+ * and become its child. The dest route will invoke a callback
+ * when the fib entry changes, it can be used to
+ * re-program the output feature on the egress interface.
+ */
+
+ const fib_prefix_t tun_dst_pfx = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {.ip4 = t1->dst_addr.ip4,}
+ };
+
+ t1->fib_entry_index =
+ fib_table_entry_special_add (outer_fib_index,
+ &tun_dst_pfx,
+ FIB_SOURCE_RR,
+ FIB_ENTRY_FLAG_NONE,
+ ADJ_INDEX_INVALID);
+ t1->sibling_index =
+ fib_entry_child_add (t1->fib_entry_index,
+ hm->fib_entry_type, t1 - hm->dst_tunnels);
+ t1->outer_fib_index = outer_fib_index;
+
+ }
+ else
+ {
+ if (!t)
+ {
+ return 0;
+ }
+ /* NOTE(review): delete path frees hash key and pool entry but
+ * does not undo fib_entry_child_add /
+ * fib_table_entry_special_add from the add path -- looks like
+ * a leaked FIB child/lock; confirm against later fixes. */
+ t1 = pool_elt_at_index (hm->dst_tunnels, t[0]);
+ hp = hash_get_pair (hm->dst_by_ip4, &key4);
+ key4_copy = (void *) (hp->key);
+ hash_unset_mem (hm->dst_by_ip4, &key4);
+ clib_mem_free (key4_copy);
+ pool_put (hm->dst_tunnels, t1);
+ }
+ }
+ else
+ {
+ // TBD for IPv6
+ }
+
+ return 0;
+}
+
+/* Re-program the transit iOAM output feature for every tracked
+ * destination.  Called from the FIB back-walk: clears the feature from
+ * all interfaces, resets the reference vector, then re-enables the
+ * feature per destination so only currently-used egress interfaces keep
+ * it.  No-op when no destinations are tracked. */
+void
+vxlan_gpe_refresh_output_feature_on_all_dest (void)
+{
+ vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+ vxlan_gpe_ioam_dest_tunnels_t *t;
+ u32 i;
+ if (pool_elts (hm->dst_tunnels) == 0)
+ return;
+ vxlan_gpe_clear_output_feature_on_all_intfs (hm->vlib_main);
+ /* Reset the reference vector to all-~0 at its previous length. */
+ i = vec_len (hm->bool_ref_by_sw_if_index);
+ vec_free (hm->bool_ref_by_sw_if_index);
+ vec_validate_init_empty (hm->bool_ref_by_sw_if_index, i, ~0);
+ pool_foreach (t, hm->dst_tunnels, (
+ {
+ vxlan_gpe_enable_disable_ioam_for_dest
+ (hm->vlib_main,
+ t->dst_addr,
+ t->outer_fib_index,
+ (t->fp_proto == FIB_PROTOCOL_IP4), 1
+ /* is_add */
+ );
+ }
+ ));
+ return;
+}
+
+/* Disable the transit iOAM output feature on every interface whose
+ * reference entry is the "unreferenced" sentinel.  0xFF is (u8)~0, the
+ * value written by the disable path of
+ * vxlan_gpe_enable_disable_ioam_for_dest (). */
+void
+vxlan_gpe_clear_output_feature_on_select_intfs (void)
+{
+ vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+ u32 sw_if_index0 = 0;
+ for (sw_if_index0 = 0;
+ sw_if_index0 < vec_len (hm->bool_ref_by_sw_if_index); sw_if_index0++)
+ {
+ if (hm->bool_ref_by_sw_if_index[sw_if_index0] == 0xFF)
+ {
+ vxlan_gpe_set_clear_output_feature_on_intf
+ (hm->vlib_main, sw_if_index0, 0);
+ }
+ }
+
+ return;
+}
static clib_error_t *
-vxlan_gpe_set_ioam_rewrite_command_fn (vlib_main_t * vm,
- unformat_input_t * input,
- vlib_cli_command_t * cmd)
+vxlan_gpe_set_ioam_rewrite_command_fn (vlib_main_t *
+ vm,
+ unformat_input_t
+ * input, vlib_cli_command_t * cmd)
{
vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
ip46_address_t local, remote;
@@ -184,7 +433,6 @@ vxlan_gpe_set_ioam_rewrite_command_fn (vlib_main_t * vm,
uword *p;
vxlan_gpe_main_t *gm = &vxlan_gpe_main;
vxlan_gpe_tunnel_t *t = 0;
-
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
{
if (unformat (input, "local %U", unformat_ip4_address, &local.ip4))
@@ -192,8 +440,8 @@ vxlan_gpe_set_ioam_rewrite_command_fn (vlib_main_t * vm,
local_set = 1;
ipv4_set = 1;
}
- else if (unformat (input, "remote %U",
- unformat_ip4_address, &remote.ip4))
+ else
+ if (unformat (input, "remote %U", unformat_ip4_address, &remote.ip4))
{
remote_set = 1;
ipv4_set = 1;
@@ -203,8 +451,8 @@ vxlan_gpe_set_ioam_rewrite_command_fn (vlib_main_t * vm,
local_set = 1;
ipv6_set = 1;
}
- else if (unformat (input, "remote %U",
- unformat_ip6_address, &remote.ip6))
+ else
+ if (unformat (input, "remote %U", unformat_ip6_address, &remote.ip6))
{
remote_set = 1;
ipv6_set = 1;
@@ -219,29 +467,27 @@ vxlan_gpe_set_ioam_rewrite_command_fn (vlib_main_t * vm,
if (local_set == 0)
return clib_error_return (0, "tunnel local address not specified");
-
if (remote_set == 0)
return clib_error_return (0, "tunnel remote address not specified");
-
if (ipv4_set && ipv6_set)
return clib_error_return (0, "both IPv4 and IPv6 addresses specified");
-
- if ((ipv4_set && memcmp (&local.ip4, &remote.ip4, sizeof (local.ip4)) == 0)
- || (ipv6_set
- && memcmp (&local.ip6, &remote.ip6, sizeof (local.ip6)) == 0))
+ if ((ipv4_set
+ && memcmp (&local.ip4, &remote.ip4,
+ sizeof (local.ip4)) == 0) || (ipv6_set
+ &&
+ memcmp
+ (&local.ip6,
+ &remote.ip6,
+ sizeof (local.ip6)) == 0))
return clib_error_return (0, "src and dst addresses are identical");
-
if (vni_set == 0)
return clib_error_return (0, "vni not specified");
-
-
if (!ipv6_set)
{
key4.local = local.ip4.as_u32;
key4.remote = remote.ip4.as_u32;
key4.vni = clib_host_to_net_u32 (vni << 8);
key4.pad = 0;
-
p = hash_get_mem (gm->vxlan4_gpe_tunnel_by_key, &key4);
}
else
@@ -251,20 +497,17 @@ vxlan_gpe_set_ioam_rewrite_command_fn (vlib_main_t * vm,
key6.remote.as_u64[0] = remote.ip6.as_u64[0];
key6.remote.as_u64[1] = remote.ip6.as_u64[1];
key6.vni = clib_host_to_net_u32 (vni << 8);
-
p = hash_get_mem (gm->vxlan6_gpe_tunnel_by_key, &key6);
}
if (!p)
return clib_error_return (0, "VxLAN Tunnel not found");
-
t = pool_elt_at_index (gm->tunnels, p[0]);
-
if (!disable)
{
- rv = vxlan_gpe_ioam_set (t, hm->has_trace_option,
- hm->has_pot_option,
- hm->has_ppc_option, ipv6_set);
+ rv =
+ vxlan_gpe_ioam_set (t, hm->has_trace_option,
+ hm->has_pot_option, hm->has_ppc_option, ipv6_set);
}
else
{
@@ -285,11 +528,10 @@ VLIB_CLI_COMMAND (vxlan_gpe_set_ioam_rewrite_cmd, static) = {
clib_error_t *
-vxlan_gpe_ioam_enable (int has_trace_option, int has_pot_option,
- int has_ppc_option)
+vxlan_gpe_ioam_enable (int has_trace_option,
+ int has_pot_option, int has_ppc_option)
{
vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
-
hm->has_trace_option = has_trace_option;
hm->has_pot_option = has_pot_option;
hm->has_ppc_option = has_ppc_option;
@@ -302,11 +544,11 @@ vxlan_gpe_ioam_enable (int has_trace_option, int has_pot_option,
}
clib_error_t *
-vxlan_gpe_ioam_disable (int has_trace_option, int has_pot_option,
- int has_ppc_option)
+vxlan_gpe_ioam_disable (int
+ has_trace_option,
+ int has_pot_option, int has_ppc_option)
{
vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
-
hm->has_trace_option = has_trace_option;
hm->has_pot_option = has_pot_option;
hm->has_ppc_option = has_ppc_option;
@@ -328,14 +570,13 @@ vxlan_gpe_set_next_override (uword next)
static clib_error_t *
vxlan_gpe_set_ioam_flags_command_fn (vlib_main_t * vm,
- unformat_input_t * input,
- vlib_cli_command_t * cmd)
+ unformat_input_t
+ * input, vlib_cli_command_t * cmd)
{
int has_trace_option = 0;
int has_pot_option = 0;
int has_ppc_option = 0;
clib_error_t *rv = 0;
-
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
{
if (unformat (input, "trace"))
@@ -355,7 +596,6 @@ vxlan_gpe_set_ioam_flags_command_fn (vlib_main_t * vm,
rv =
vxlan_gpe_ioam_enable (has_trace_option, has_pot_option, has_ppc_option);
-
return rv;
}
@@ -368,12 +608,100 @@ VLIB_CLI_COMMAND (vxlan_gpe_set_ioam_flags_cmd, static) =
/* *INDENT-ON* */
+/* Disable transit iOAM for one destination, then repair the feature
+ * state for the destinations that remain: mark this destination's
+ * interfaces unreferenced, re-add the feature for every surviving
+ * destination (which re-marks shared interfaces as referenced), and
+ * finally strip the feature from interfaces still unreferenced.
+ * Always returns 0.  Note: operates on hm->vlib_main; the vm argument
+ * is unused. */
+int vxlan_gpe_ioam_disable_for_dest
+ (vlib_main_t * vm, ip46_address_t dst_addr, u32 outer_fib_index,
+ u8 ipv4_set)
+{
+ vxlan_gpe_ioam_dest_tunnels_t *t;
+ vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+
+ vxlan_gpe_enable_disable_ioam_for_dest (hm->vlib_main,
+ dst_addr, outer_fib_index, ipv4_set,
+ 0);
+ if (pool_elts (hm->dst_tunnels) == 0)
+ {
+ vxlan_gpe_clear_output_feature_on_select_intfs ();
+ return 0;
+ }
+
+ pool_foreach (t, hm->dst_tunnels, (
+ {
+ vxlan_gpe_enable_disable_ioam_for_dest
+ (hm->vlib_main,
+ t->dst_addr,
+ t->outer_fib_index,
+ (t->fp_proto ==
+ FIB_PROTOCOL_IP4), 1 /* is_add */ );
+ }
+ ));
+ vxlan_gpe_clear_output_feature_on_select_intfs ();
+ return (0);
+}
-clib_error_t *
-clear_vxlan_gpe_ioam_rewrite_command_fn (vlib_main_t * vm,
- unformat_input_t * input,
- vlib_cli_command_t * cmd)
+/* CLI handler for "set vxlan-gpe-ioam-transit": parses a destination
+ * address (IPv4 or IPv6), an optional outer FIB index (default 0) and
+ * an optional "disable" flag, then enables or disables transit iOAM
+ * for that destination.  Returns a clib error for missing/conflicting
+ * arguments, otherwise 0.
+ * NOTE(review): dst_addr is not zero-initialized; with an IPv4
+ * destination only .ip4 is written -- confirm downstream code never
+ * reads the uninitialized bytes. */
+static clib_error_t *vxlan_gpe_set_ioam_transit_rewrite_command_fn
+ (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+ ip46_address_t dst_addr;
+ u8 dst_addr_set = 0;
+ u8 ipv4_set = 0;
+ u8 ipv6_set = 0;
+ u8 disable = 0;
+ clib_error_t *rv = 0;
+ u32 outer_fib_index = 0;
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "dst-ip %U", unformat_ip4_address, &dst_addr.ip4))
+ {
+ dst_addr_set = 1;
+ ipv4_set = 1;
+ }
+ else
+ if (unformat
+ (input, "dst-ip %U", unformat_ip6_address, &dst_addr.ip6))
+ {
+ dst_addr_set = 1;
+ ipv6_set = 1;
+ }
+ else if (unformat (input, "outer-fib-index %d", &outer_fib_index))
+ {
+ }
+
+ else if (unformat (input, "disable"))
+ disable = 1;
+ else
+ break;
+ }
+
+ if (dst_addr_set == 0)
+ return clib_error_return (0, "tunnel destination address not specified");
+ if (ipv4_set && ipv6_set)
+ return clib_error_return (0, "both IPv4 and IPv6 addresses specified");
+ if (!disable)
+ {
+ vxlan_gpe_enable_disable_ioam_for_dest (hm->vlib_main,
+ dst_addr, outer_fib_index,
+ ipv4_set, 1);
+ }
+ else
+ {
+ vxlan_gpe_ioam_disable_for_dest
+ (vm, dst_addr, outer_fib_index, ipv4_set);
+ }
+ return rv;
+}
+
+ /* *INDENT-OFF* */
+/* CLI registration for the transit iOAM enable/disable command. */
+VLIB_CLI_COMMAND (vxlan_gpe_set_ioam_transit_rewrite_cmd, static) = {
+  .path = "set vxlan-gpe-ioam-transit",
+  .short_help = "set vxlan-gpe-ioam-transit dst-ip <dst_ip> [outer-fib-index <outer_fib_index>] [disable]",
+  .function = vxlan_gpe_set_ioam_transit_rewrite_command_fn,
+};
+/* *INDENT-ON* */
+
+/* CLI handler that clears all iOAM rewrite options (trace, PoT, PPC)
+ * by disabling them globally; the parsed input is not consulted. */
+clib_error_t *clear_vxlan_gpe_ioam_rewrite_command_fn
+ (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+  return (vxlan_gpe_ioam_disable (0, 0, 0));
+}
@@ -387,6 +715,55 @@ VLIB_CLI_COMMAND (vxlan_gpe_clear_ioam_flags_cmd, static) =
};
/* *INDENT-ON* */
+
+/**
+ * Function definition to backwalk a FIB node.
+ * Invoked when a tracked destination route changes; refreshes the
+ * output feature on all destinations and lets the walk continue.
+ */
+static fib_node_back_walk_rc_t
+vxlan_gpe_ioam_back_walk (fib_node_t * node, fib_node_back_walk_ctx_t * ctx)
+{
+  vxlan_gpe_refresh_output_feature_on_all_dest ();
+  return (FIB_NODE_BACK_WALK_CONTINUE);
+}
+
+/**
+ * Function definition to get a FIB node from its index.
+ * A single shared node lives in the plugin main struct, so the index
+ * argument is ignored.
+ */
+static fib_node_t *
+vxlan_gpe_ioam_fib_node_get (fib_node_index_t index)
+{
+  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+  return (&hm->node);
+}
+
+/**
+ * Function definition to inform the FIB node that its last lock has gone.
+ * The node is a static singleton and must never be freed, hence the
+ * hard assert.
+ */
+static void
+vxlan_gpe_ioam_last_lock_gone (fib_node_t * node)
+{
+  ASSERT (0);
+}
+
+
+/*
+ * Virtual function table registered by the VxLAN-GPE iOAM node
+ * for participation in the FIB object graph.
+ */
+const static fib_node_vft_t vxlan_gpe_ioam_vft = {
+  .fnv_get = vxlan_gpe_ioam_fib_node_get,
+  .fnv_last_lock = vxlan_gpe_ioam_last_lock_gone,
+  .fnv_back_walk = vxlan_gpe_ioam_back_walk,
+};
+
+/* Register the plugin's FIB node type so tracked destination routes can
+ * deliver back-walk notifications; stores the type for later
+ * fib_entry_child_add () calls. */
+void
+vxlan_gpe_ioam_interface_init (void)
+{
+  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+  hm->fib_entry_type = fib_node_register_new_type (&vxlan_gpe_ioam_vft);
+  return;
+}
+
/*
* fd.io coding-style-patch-verification: ON
*