diff options
author | Neale Ranns <nranns@cisco.com> | 2019-07-05 00:53:45 -0700 |
---|---|---|
committer | Neale Ranns <nranns@cisco.com> | 2019-07-09 15:45:52 +0000 |
commit | e28c87cd00644205e9bebca054029a8e655ed015 (patch) | |
tree | 8e6e0bac3b6d269c00b569a3ebf60338237c8a29 /src/plugins/gbp/gbp_itf.c | |
parent | 777d2aee8054e77e6f265879e1dfe3776d90d758 (diff) |
gbp: Ownership of dynamically created vxlan-gbp tunnels managed via gbp_itf
Type: fix
This solves the ownership of vxlan-gbp tunnels. When the last reference of these goes away they need to be deleted. Currently there are two owners: gbp_itf via gef_itf, and the lock held by the gbp_endpoint_location_t. The problem is that the
loc removes its reference whilst the fwd still holds the gbp_itf, and things go wrong.
This change moves the lifecycle management of the vxlan-gbp tunnel to the gbp_itf. When the last lock of the gbp_itf goes, so does the tunnel. Now both the EP's loc and fwd can hold a lock on the gbp_itf, and it is only removed when required.
The other change is the management of the 'user' of the gbp_itf. Since each user can enable and disable different features, it's the job of the gbp_itf to apply the combined set. Determining a unique 'user' from the caller was near impossible, so I moved that into the gbp_itf and return the allocated user — hence the 'handle', which encodes both the user and the interface.
The hash table maps from sw_if_index to pool index.
Change-Id: I4c7bf4c0e5dcf33d1c545f262365e69151febcf4
Signed-off-by: Neale Ranns <nranns@cisco.com>
Diffstat (limited to 'src/plugins/gbp/gbp_itf.c')
-rw-r--r-- | src/plugins/gbp/gbp_itf.c | 441 |
1 files changed, 385 insertions, 56 deletions
diff --git a/src/plugins/gbp/gbp_itf.c b/src/plugins/gbp/gbp_itf.c index 59c96291279..a9e9225ae72 100644 --- a/src/plugins/gbp/gbp_itf.c +++ b/src/plugins/gbp/gbp_itf.c @@ -15,6 +15,18 @@ #include <plugins/gbp/gbp_itf.h> #include <plugins/gbp/gbp_bridge_domain.h> +#include <plugins/gbp/gbp_route_domain.h> + +#define foreach_gbp_itf_mode \ + _(L2, "l2") \ + _(L3, "L3") + +typedef enum gbp_ift_mode_t_ +{ +#define _(s,v) GBP_ITF_MODE_##s, + foreach_gbp_itf_mode +#undef _ +} gbp_itf_mode_t; /** * Attributes and configurations attached to interfaces by GBP @@ -26,100 +38,371 @@ typedef struct gbp_itf_t_ */ u32 gi_locks; + /** + * The interface this wrapper is managing + */ u32 gi_sw_if_index; - u32 gi_bd_index; + + /** + * The mode of the interface + */ + gbp_itf_mode_t gi_mode; + + /** + * Users of this interface - this is encoded in the user's handle + */ + u32 *gi_users; /** * L2/L3 Features configured by each user */ - u32 *gi_l2_input_fbs; - u32 gi_l2_input_fb; - u32 *gi_l2_output_fbs; - u32 gi_l2_output_fb; + u32 *gi_input_fbs; + u32 gi_input_fb; + u32 *gi_output_fbs; + u32 gi_output_fb; + + /** + * function to call when the interface is deleted. 
+ */ + gbp_itf_free_fn_t gi_free_fn; + + union + { + /** + * GBP BD or RD index + */ + u32 gi_gbi; + index_t gi_gri; + }; } gbp_itf_t; -static gbp_itf_t *gbp_itfs; +static gbp_itf_t *gbp_itf_pool; +static uword *gbp_itf_db; + +static const char *gbp_itf_feat_bit_pos_to_arc[] = { +#define _(s,v,a) [GBP_ITF_L3_FEAT_POS_##s] = a, + foreach_gdb_l3_feature +#undef _ +}; + +static const char *gbp_itf_feat_bit_pos_to_feat[] = { +#define _(s,v,a) [GBP_ITF_L3_FEAT_POS_##s] = v, + foreach_gdb_l3_feature +#undef _ +}; + +u8 * +format_gbp_itf_l3_feat (u8 * s, va_list * args) +{ + gbp_itf_l3_feat_t flags = va_arg (*args, gbp_itf_l3_feat_t); + +#define _(a, b, c) \ + if (flags & GBP_ITF_L3_FEAT_##a) \ + s = format (s, "%s ", b); + foreach_gdb_l3_feature +#undef _ + return (s); +} + +void +gbp_itf_hdl_reset (gbp_itf_hdl_t * gh) +{ + *gh = GBP_ITF_HDL_INVALID; +} + +bool +gbp_itf_hdl_is_valid (gbp_itf_hdl_t gh) +{ + return (gh.gh_which != GBP_ITF_HDL_INVALID.gh_which); +} static gbp_itf_t * gbp_itf_get (index_t gii) { - vec_validate (gbp_itfs, gii); + if (pool_is_free_index (gbp_itf_pool, gii)) + return (NULL); - return (&gbp_itfs[gii]); + return (pool_elt_at_index (gbp_itf_pool, gii)); } -static index_t -gbp_itf_get_itf (u32 sw_if_index) +static gbp_itf_t * +gbp_itf_find (u32 sw_if_index) { - return (sw_if_index); + uword *p; + + p = hash_get (gbp_itf_db, sw_if_index); + + if (NULL != p) + return (gbp_itf_get (p[0])); + + return (NULL); +} + +static gbp_itf_t * +gbp_itf_find_hdl (gbp_itf_hdl_t gh) +{ + return (gbp_itf_find (gh.gh_which)); } -index_t -gbp_itf_add_and_lock (u32 sw_if_index, u32 bd_index) +u32 +gbp_itf_get_sw_if_index (gbp_itf_hdl_t hdl) +{ + return (hdl.gh_which); +} + +static gbp_itf_hdl_t +gbp_itf_mk_hdl (gbp_itf_t * gi) +{ + gbp_itf_hdl_t gh; + u32 *useri; + + pool_get (gi->gi_users, useri); + *useri = 0; + + gh.gh_who = useri - gi->gi_users; + gh.gh_which = gi->gi_sw_if_index; + + return (gh); +} + +static gbp_itf_hdl_t +gbp_itf_l2_add_and_lock_i (u32 
sw_if_index, index_t gbi, gbp_itf_free_fn_t ff) { gbp_itf_t *gi; - gi = gbp_itf_get (gbp_itf_get_itf (sw_if_index)); + gi = gbp_itf_find (sw_if_index); - if (0 == gi->gi_locks) + if (NULL == gi) { + pool_get_zero (gbp_itf_pool, gi); + gi->gi_sw_if_index = sw_if_index; - gi->gi_bd_index = bd_index; + gi->gi_gbi = gbi; + gi->gi_mode = GBP_ITF_MODE_L2; + gi->gi_free_fn = ff; - if (~0 != gi->gi_bd_index) - gbp_bridge_domain_itf_add (sw_if_index, bd_index, - L2_BD_PORT_TYPE_NORMAL); + gbp_bridge_domain_itf_add (gi->gi_gbi, gi->gi_sw_if_index, + L2_BD_PORT_TYPE_NORMAL); + + hash_set (gbp_itf_db, gi->gi_sw_if_index, gi - gbp_itf_pool); } gi->gi_locks++; - return (sw_if_index); + return (gbp_itf_mk_hdl (gi)); +} + +gbp_itf_hdl_t +gbp_itf_l2_add_and_lock (u32 sw_if_index, index_t gbi) +{ + return (gbp_itf_l2_add_and_lock_i (sw_if_index, gbi, NULL)); +} + +gbp_itf_hdl_t +gbp_itf_l2_add_and_lock_w_free (u32 sw_if_index, + index_t gbi, gbp_itf_free_fn_t ff) +{ + return (gbp_itf_l2_add_and_lock_i (sw_if_index, gbi, ff)); +} + +gbp_itf_hdl_t +gbp_itf_l3_add_and_lock_i (u32 sw_if_index, index_t gri, gbp_itf_free_fn_t ff) +{ + gbp_itf_t *gi; + + gi = gbp_itf_find (sw_if_index); + + if (NULL == gi) + { + const gbp_route_domain_t *grd; + fib_protocol_t fproto; + + pool_get_zero (gbp_itf_pool, gi); + + gi->gi_sw_if_index = sw_if_index; + gi->gi_mode = GBP_ITF_MODE_L3; + gi->gi_gri = gri; + gi->gi_free_fn = ff; + + grd = gbp_route_domain_get (gi->gi_gri); + + ip4_sw_interface_enable_disable (gi->gi_sw_if_index, 1); + ip6_sw_interface_enable_disable (gi->gi_sw_if_index, 1); + + FOR_EACH_FIB_IP_PROTOCOL (fproto) + ip_table_bind (fproto, gi->gi_sw_if_index, + grd->grd_table_id[fproto], 1); + + hash_set (gbp_itf_db, gi->gi_sw_if_index, gi - gbp_itf_pool); + } + + gi->gi_locks++; + + return (gbp_itf_mk_hdl (gi)); +} + +gbp_itf_hdl_t +gbp_itf_l3_add_and_lock (u32 sw_if_index, index_t gri) +{ + return (gbp_itf_l3_add_and_lock_i (sw_if_index, gri, NULL)); +} + +gbp_itf_hdl_t 
+gbp_itf_l3_add_and_lock_w_free (u32 sw_if_index, + index_t gri, gbp_itf_free_fn_t ff) +{ + return (gbp_itf_l3_add_and_lock_i (sw_if_index, gri, ff)); } void -gbp_itf_unlock (index_t gii) +gbp_itf_lock (gbp_itf_hdl_t gh) { gbp_itf_t *gi; - gi = gbp_itf_get (gii); + if (!gbp_itf_hdl_is_valid (gh)) + return; + + gi = gbp_itf_find_hdl (gh); + + gi->gi_locks++; +} + +gbp_itf_hdl_t +gbp_itf_clone_and_lock (gbp_itf_hdl_t gh) +{ + gbp_itf_t *gi; + + if (!gbp_itf_hdl_is_valid (gh)) + return (GBP_ITF_HDL_INVALID); + + gi = gbp_itf_find_hdl (gh); + + gi->gi_locks++; + + return (gbp_itf_mk_hdl (gi)); +} + +void +gbp_itf_unlock (gbp_itf_hdl_t * gh) +{ + gbp_itf_t *gi; + + if (!gbp_itf_hdl_is_valid (*gh)) + return; + + gi = gbp_itf_find_hdl (*gh); ASSERT (gi->gi_locks > 0); gi->gi_locks--; if (0 == gi->gi_locks) { - if (~0 != gi->gi_bd_index) - gbp_bridge_domain_itf_del (gi->gi_sw_if_index, gi->gi_bd_index, - L2_BD_PORT_TYPE_NORMAL); - vec_free (gi->gi_l2_input_fbs); - vec_free (gi->gi_l2_output_fbs); + if (GBP_ITF_MODE_L2 == gi->gi_mode) + { + gbp_itf_l2_set_input_feature (*gh, L2INPUT_FEAT_NONE); + gbp_itf_l2_set_output_feature (*gh, L2OUTPUT_FEAT_NONE); + gbp_bridge_domain_itf_del (gi->gi_gbi, + gi->gi_sw_if_index, + L2_BD_PORT_TYPE_NORMAL); + } + else + { + fib_protocol_t fproto; + + gbp_itf_l3_set_input_feature (*gh, GBP_ITF_L3_FEAT_NONE); + FOR_EACH_FIB_IP_PROTOCOL (fproto) + ip_table_bind (fproto, gi->gi_sw_if_index, 0, 0); + + ip4_sw_interface_enable_disable (gi->gi_sw_if_index, 0); + ip6_sw_interface_enable_disable (gi->gi_sw_if_index, 0); + } + + hash_unset (gbp_itf_db, gi->gi_sw_if_index); + + if (gi->gi_free_fn) + gi->gi_free_fn (gi->gi_sw_if_index); + + pool_free (gi->gi_users); + vec_free (gi->gi_input_fbs); + vec_free (gi->gi_output_fbs); memset (gi, 0, sizeof (*gi)); } + + gbp_itf_hdl_reset (gh); } void -gbp_itf_set_l2_input_feature (index_t gii, - index_t useri, l2input_feat_masks_t feats) +gbp_itf_l3_set_input_feature (gbp_itf_hdl_t gh, gbp_itf_l3_feat_t 
feats) { u32 diff_fb, new_fb, *fb, feat; gbp_itf_t *gi; - gi = gbp_itf_get (gii); + gi = gbp_itf_find_hdl (gh); - if (gi->gi_bd_index == ~0) + if (NULL == gi || GBP_ITF_MODE_L3 != gi->gi_mode) return; - vec_validate (gi->gi_l2_input_fbs, useri); - gi->gi_l2_input_fbs[useri] = feats; + vec_validate (gi->gi_input_fbs, gh.gh_who); + gi->gi_input_fbs[gh.gh_who] = feats; new_fb = 0; - vec_foreach (fb, gi->gi_l2_input_fbs) + vec_foreach (fb, gi->gi_input_fbs) { new_fb |= *fb; } /* add new features */ - diff_fb = (gi->gi_l2_input_fb ^ new_fb) & new_fb; + diff_fb = (gi->gi_input_fb ^ new_fb) & new_fb; + + /* *INDENT-OFF* */ + foreach_set_bit (feat, diff_fb, + ({ + vnet_feature_enable_disable (gbp_itf_feat_bit_pos_to_arc[feat], + gbp_itf_feat_bit_pos_to_feat[feat], + gi->gi_sw_if_index, 1, 0, 0); + })); + /* *INDENT-ON* */ + + /* remove unneeded features */ + diff_fb = (gi->gi_input_fb ^ new_fb) & gi->gi_input_fb; + + /* *INDENT-OFF* */ + foreach_set_bit (feat, diff_fb, + ({ + vnet_feature_enable_disable (gbp_itf_feat_bit_pos_to_arc[feat], + gbp_itf_feat_bit_pos_to_feat[feat], + gi->gi_sw_if_index, 0, 0, 0); + })); + /* *INDENT-ON* */ + + gi->gi_input_fb = new_fb; +} + +void +gbp_itf_l2_set_input_feature (gbp_itf_hdl_t gh, l2input_feat_masks_t feats) +{ + u32 diff_fb, new_fb, *fb, feat; + gbp_itf_t *gi; + + gi = gbp_itf_find_hdl (gh); + + if (NULL == gi || GBP_ITF_MODE_L2 != gi->gi_mode) + { + ASSERT (0); + return; + } + + vec_validate (gi->gi_input_fbs, gh.gh_who); + gi->gi_input_fbs[gh.gh_who] = feats; + + new_fb = 0; + vec_foreach (fb, gi->gi_input_fbs) + { + new_fb |= *fb; + } + + /* add new features */ + diff_fb = (gi->gi_input_fb ^ new_fb) & new_fb; /* *INDENT-OFF* */ foreach_set_bit (feat, diff_fb, @@ -129,7 +412,7 @@ gbp_itf_set_l2_input_feature (index_t gii, /* *INDENT-ON* */ /* remove unneeded features */ - diff_fb = (gi->gi_l2_input_fb ^ new_fb) & gi->gi_l2_input_fb; + diff_fb = (gi->gi_input_fb ^ new_fb) & gi->gi_input_fb; /* *INDENT-OFF* */ foreach_set_bit 
(feat, diff_fb, @@ -138,32 +421,34 @@ gbp_itf_set_l2_input_feature (index_t gii, })); /* *INDENT-ON* */ - gi->gi_l2_input_fb = new_fb; + gi->gi_input_fb = new_fb; } void -gbp_itf_set_l2_output_feature (index_t gii, - index_t useri, l2output_feat_masks_t feats) +gbp_itf_l2_set_output_feature (gbp_itf_hdl_t gh, l2output_feat_masks_t feats) { u32 diff_fb, new_fb, *fb, feat; gbp_itf_t *gi; - gi = gbp_itf_get (gii); + gi = gbp_itf_find_hdl (gh); - if (gi->gi_bd_index == ~0) - return; + if (NULL == gi || GBP_ITF_MODE_L2 != gi->gi_mode) + { + ASSERT (0); + return; + } - vec_validate (gi->gi_l2_output_fbs, useri); - gi->gi_l2_output_fbs[useri] = feats; + vec_validate (gi->gi_output_fbs, gh.gh_who); + gi->gi_output_fbs[gh.gh_who] = feats; new_fb = 0; - vec_foreach (fb, gi->gi_l2_output_fbs) + vec_foreach (fb, gi->gi_output_fbs) { new_fb |= *fb; } /* add new features */ - diff_fb = (gi->gi_l2_output_fb ^ new_fb) & new_fb; + diff_fb = (gi->gi_output_fb ^ new_fb) & new_fb; /* *INDENT-OFF* */ foreach_set_bit (feat, diff_fb, @@ -173,7 +458,7 @@ gbp_itf_set_l2_output_feature (index_t gii, /* *INDENT-ON* */ /* remove unneeded features */ - diff_fb = (gi->gi_l2_output_fb ^ new_fb) & gi->gi_l2_output_fb; + diff_fb = (gi->gi_output_fb ^ new_fb) & gi->gi_output_fb; /* *INDENT-OFF* */ foreach_set_bit (feat, diff_fb, @@ -182,27 +467,69 @@ gbp_itf_set_l2_output_feature (index_t gii, })); /* *INDENT-ON* */ - gi->gi_l2_output_fb = new_fb; + gi->gi_output_fb = new_fb; } -u8 * +static u8 * +format_gbp_itf_mode (u8 * s, va_list * args) +{ + gbp_itf_mode_t mode = va_arg (*args, gbp_itf_mode_t); + + switch (mode) + { +#define _(a,v) \ + case GBP_ITF_MODE_##a: \ + return format(s, "%s", v); + foreach_gbp_itf_mode +#undef _ + } + return (s); +} + +static u8 * format_gbp_itf (u8 * s, va_list * args) { index_t gii = va_arg (*args, index_t); gbp_itf_t *gi; + if (INDEX_INVALID == gii) + return (format (s, "unset")); + gi = gbp_itf_get (gii); - s = format (s, "%U locks:%d bd-index:%d input-feats:%U 
output-feats:%U", + s = format (s, "%U locks:%d mode:%U ", format_vnet_sw_if_index_name, vnet_get_main (), gi->gi_sw_if_index, gi->gi_locks, - gi->gi_bd_index, - format_l2_input_features, gi->gi_l2_input_fb, 0, - format_l2_output_features, gi->gi_l2_output_fb, 0); + format_gbp_itf_mode, gi->gi_mode); + + if (GBP_ITF_MODE_L2 == gi->gi_mode) + s = format (s, "gbp-bd:%d input-feats:[%U] output-feats:[%U]", + gi->gi_gbi, + format_l2_input_features, gi->gi_input_fb, 0, + format_l2_output_features, gi->gi_output_fb, 0); + else + s = format (s, "gbp-rd:%d input-feats:[%U] output-feats:[%U]", + gi->gi_gbi, + format_gbp_itf_l3_feat, gi->gi_input_fb, + format_gbp_itf_l3_feat, gi->gi_output_fb); return (s); } +u8 * +format_gbp_itf_hdl (u8 * s, va_list * args) +{ + gbp_itf_hdl_t gh = va_arg (*args, gbp_itf_hdl_t); + gbp_itf_t *gi; + + gi = gbp_itf_find_hdl (gh); + + if (NULL == gi) + return format (s, "INVALID"); + + return (format (s, "%U", format_gbp_itf, gi - gbp_itf_pool)); +} + static clib_error_t * gbp_itf_show (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) @@ -211,10 +538,12 @@ gbp_itf_show (vlib_main_t * vm, vlib_cli_output (vm, "Interfaces:"); - vec_foreach_index (gii, gbp_itfs) - { + /* *INDENT-OFF* */ + pool_foreach_index (gii, gbp_itf_pool, + ({ vlib_cli_output (vm, " [%d] %U", gii, format_gbp_itf, gii); - } + })); + /* *INDENT-ON* */ return (NULL); } |