From 235c64f0678165a2cddee67514052d4bc2bedadb Mon Sep 17 00:00:00 2001
From: Neale Ranns
Date: Wed, 4 Jan 2017 12:41:21 +0000
Subject: FIB memory leaks (VPP-578)

1) vec_free the fe_srcs of a fib_entry_t when the fib_entry_t is itself
   freed
2) in the load-balance fixup, if a drop path is required, add it to a new
   vector of next-hops 'fixed_nhs'. This vector is managed by the
   load-balance function; the caller continues to manage its own set. The
   function is now const, implying that the caller is safe to assume the
   next-hops do not change.

Change-Id: I0f29203ee16b9a270f40edf237488fa99ba65320
Signed-off-by: Neale Ranns
---
 vnet/vnet/dpo/load_balance.c  | 33 ++++++++++++++++++---------------
 vnet/vnet/dpo/load_balance.h  |  2 +-
 vnet/vnet/fib/fib_entry.c     |  1 +
 vnet/vnet/fib/fib_entry_src.c |  8 ++++++++
 vnet/vnet/fib/fib_path_list.c | 26 ++++++++++++++++++--------
 vnet/vnet/fib/fib_path_list.h |  2 ++
 6 files changed, 48 insertions(+), 24 deletions(-)

diff --git a/vnet/vnet/dpo/load_balance.c b/vnet/vnet/dpo/load_balance.c
index a244776ffb8..e70a7a306e1 100644
--- a/vnet/vnet/dpo/load_balance.c
+++ b/vnet/vnet/dpo/load_balance.c
@@ -279,8 +279,8 @@ load_balance_get_bucket (index_t lbi,
 }
 
 static int
-next_hop_sort_by_weight (load_balance_path_t * n1,
-                         load_balance_path_t * n2)
+next_hop_sort_by_weight (const load_balance_path_t * n1,
+                         const load_balance_path_t * n2)
 {
     return ((int) n1->path_weight - (int) n2->path_weight);
 }
 
@@ -289,7 +289,7 @@ next_hop_sort_by_weight (load_balance_path_t * n1,
    with weights corresponding to the number of adjacencies for each next hop.
    Returns number of adjacencies in block. */
 u32
-ip_multipath_normalize_next_hops (load_balance_path_t * raw_next_hops,
+ip_multipath_normalize_next_hops (const load_balance_path_t * raw_next_hops,
                                   load_balance_path_t ** normalized_next_hops,
                                   u32 *sum_weight_in,
                                   f64 multipath_next_hop_error_tolerance)
@@ -409,23 +409,25 @@ done:
 }
 
 static load_balance_path_t *
-load_balance_multipath_next_hop_fixup (load_balance_path_t *nhs,
+load_balance_multipath_next_hop_fixup (const load_balance_path_t *nhs,
                                        dpo_proto_t drop_proto)
 {
     if (0 == vec_len(nhs))
     {
-        load_balance_path_t *nh;
+        load_balance_path_t *new_nhs = NULL, *nh;
 
         /*
         * we need something for the load-balance. so use the drop
         */
-        vec_add2(nhs, nh, 1);
+        vec_add2(new_nhs, nh, 1);
 
         nh->path_weight = 1;
         dpo_copy(&nh->path_dpo, drop_dpo_get(drop_proto));
+
+        return (new_nhs);
     }
 
-    return (nhs);
+    return (NULL);
 }
 
 /*
@@ -467,11 +469,11 @@ load_balance_set_n_buckets (load_balance_t *lb,
 
 void
 load_balance_multipath_update (const dpo_id_t *dpo,
-                               load_balance_path_t * raw_next_hops,
+                               const load_balance_path_t * raw_nhs,
                                load_balance_flags_t flags)
 {
-    u32 sum_of_weights,n_buckets, ii;
-    load_balance_path_t * nh, * nhs;
+    load_balance_path_t *nh, *nhs, *fixed_nhs;
+    u32 sum_of_weights, n_buckets, ii;
     index_t lbmi, old_lbmi;
     load_balance_t *lb;
     dpo_id_t *tmp_dpo;
@@ -480,16 +482,16 @@ load_balance_multipath_update (const dpo_id_t *dpo,
     ASSERT(DPO_LOAD_BALANCE == dpo->dpoi_type);
     lb = load_balance_get(dpo->dpoi_index);
 
-    raw_next_hops =
-        load_balance_multipath_next_hop_fixup(raw_next_hops,
-                                              lb->lb_proto);
+    fixed_nhs = load_balance_multipath_next_hop_fixup(raw_nhs, lb->lb_proto);
     n_buckets =
-        ip_multipath_normalize_next_hops(raw_next_hops,
+        ip_multipath_normalize_next_hops((NULL == fixed_nhs ?
+                                          raw_nhs :
+                                          fixed_nhs),
                                          &nhs,
                                          &sum_of_weights,
                                          multipath_next_hop_error_tolerance);
 
-    ASSERT (n_buckets >= vec_len (raw_next_hops));
+    ASSERT (n_buckets >= vec_len (raw_nhs));
 
     /*
      * Save the old load-balance map used, and get a new one if required.
@@ -694,6 +696,7 @@ load_balance_multipath_update (const dpo_id_t *dpo,
         dpo_reset(&nh->path_dpo);
     }
     vec_free(nhs);
+    vec_free(fixed_nhs);
 
     load_balance_map_unlock(old_lbmi);
 }
diff --git a/vnet/vnet/dpo/load_balance.h b/vnet/vnet/dpo/load_balance.h
index dc6485e688a..1799653628d 100644
--- a/vnet/vnet/dpo/load_balance.h
+++ b/vnet/vnet/dpo/load_balance.h
@@ -159,7 +159,7 @@ extern index_t load_balance_create(u32 num_buckets,
                                    flow_hash_config_t fhc);
 extern void load_balance_multipath_update(
     const dpo_id_t *dpo,
-    load_balance_path_t * raw_next_hops,
+    const load_balance_path_t * raw_next_hops,
     load_balance_flags_t flags);
 
 extern void load_balance_set_bucket(index_t lbi,
diff --git a/vnet/vnet/fib/fib_entry.c b/vnet/vnet/fib/fib_entry.c
index 24b506379ac..3aa3632c596 100644
--- a/vnet/vnet/fib/fib_entry.c
+++ b/vnet/vnet/fib/fib_entry.c
@@ -230,6 +230,7 @@ fib_entry_last_lock_gone (fib_node_t *node)
 
     ASSERT(0 == vec_len(fib_entry->fe_delegates));
     vec_free(fib_entry->fe_delegates);
+    vec_free(fib_entry->fe_srcs);
 
     pool_put(fib_entry_pool, fib_entry);
 }
diff --git a/vnet/vnet/fib/fib_entry_src.c b/vnet/vnet/fib/fib_entry_src.c
index 060fac941d2..1fb040608b4 100644
--- a/vnet/vnet/fib/fib_entry_src.c
+++ b/vnet/vnet/fib/fib_entry_src.c
@@ -382,6 +382,14 @@ fib_entry_src_mk_lb (fib_entry_t *fib_entry,
         .fct = fct,
     };
 
+    /*
+     * As an optimisation we allocate the vector of next-hops to be sized
+     * equal to the maximum number of paths we will need, which is also the
+     * most likely number we will need, since in most cases the paths are 'up'.
+     */
+    vec_validate(ctx.next_hops, fib_path_list_get_n_paths(esrc->fes_pl));
+    vec_reset_length(ctx.next_hops);
+
     lb_proto = fib_proto_to_dpo(fib_entry->fe_prefix.fp_proto);
 
     fib_path_list_walk(esrc->fes_pl,
diff --git a/vnet/vnet/fib/fib_path_list.c b/vnet/vnet/fib/fib_path_list.c
index 5b35e9b87e7..db9d1af9e3f 100644
--- a/vnet/vnet/fib/fib_path_list.c
+++ b/vnet/vnet/fib/fib_path_list.c
@@ -365,10 +365,10 @@ fib_path_list_mk_lb (fib_path_list_t *path_list,
                      fib_forward_chain_type_t fct,
                      dpo_id_t *dpo)
 {
-    load_balance_path_t *hash_key;
+    load_balance_path_t *nhs;
     fib_node_index_t *path_index;
 
-    hash_key = NULL;
+    nhs = NULL;
 
     if (!dpo_id_is_valid(dpo))
     {
@@ -388,21 +388,20 @@ fib_path_list_mk_lb (fib_path_list_t *path_list,
      */
     vec_foreach (path_index, path_list->fpl_paths)
     {
-        hash_key = fib_path_append_nh_for_multipath_hash(
-                       *path_index,
-                       fct,
-                       hash_key);
+        nhs = fib_path_append_nh_for_multipath_hash(*path_index,
+                                                    fct,
+                                                    nhs);
     }
 
     /*
      * Path-list load-balances, which if used, would be shared and hence
      * never need a load-balance map.
      */
-    load_balance_multipath_update(dpo, hash_key, LOAD_BALANCE_FLAG_NONE);
+    load_balance_multipath_update(dpo, nhs, LOAD_BALANCE_FLAG_NONE);
 
     FIB_PATH_LIST_DBG(path_list, "mk lb: %d", dpo->dpoi_index);
 
-    vec_free(hash_key);
+    vec_free(nhs);
 }
 
 /**
@@ -591,6 +590,17 @@ fib_path_list_resolve (fib_path_list_t *path_list)
     return (path_list);
 }
 
+u32
+fib_path_list_get_n_paths (fib_node_index_t path_list_index)
+{
+    fib_path_list_t *path_list;
+
+    path_list = fib_path_list_get(path_list_index);
+
+    return (vec_len(path_list->fpl_paths));
+}
+
+
 u32
 fib_path_list_get_resolving_interface (fib_node_index_t path_list_index)
 {
diff --git a/vnet/vnet/fib/fib_path_list.h b/vnet/vnet/fib/fib_path_list.h
index 8bc1b20b6bf..f4f94a1b04a 100644
--- a/vnet/vnet/fib/fib_path_list.h
+++ b/vnet/vnet/fib/fib_path_list.h
@@ -104,6 +104,8 @@ extern fib_node_index_t fib_path_list_copy_and_path_remove(
     fib_node_index_t pl_index,
     fib_path_list_flags_t flags,
     const fib_route_path_t *path);
+extern u32 fib_path_list_get_n_paths(fib_node_index_t pl_index);
+
 extern void fib_path_list_contribute_forwarding(fib_node_index_t path_list_index,
                                                 fib_forward_chain_type_t type,
                                                 dpo_id_t *dpo);
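
The second fix above changes the memory-ownership contract between
load_balance_multipath_next_hop_fixup() and load_balance_multipath_update():
the fixup now allocates a replacement next-hop vector only when the caller
supplied an empty one, and the update routine frees that replacement itself
while leaving the caller's (now const) input untouched. The plain-C sketch
below is only an illustration of that contract under simplified assumptions;
path_t, fixup_drop_path() and update_paths() are hypothetical stand-ins and
do not use VPP's vec.h.

#include <stdio.h>
#include <stdlib.h>

/* hypothetical stand-in for load_balance_path_t */
typedef struct
{
    int weight;
} path_t;

/*
 * Sketch of the fixup: if the caller supplied no next-hops, allocate a
 * one-element replacement set holding the "drop" path and return it;
 * otherwise return NULL and leave the caller's (const) set untouched.
 */
static path_t *
fixup_drop_path (const path_t *nhs, size_t n_nhs, size_t *n_fixed)
{
    if (0 == n_nhs)
    {
        path_t *fixed = calloc (1, sizeof (*fixed));
        fixed->weight = 1;  /* stands in for the drop DPO */
        *n_fixed = 1;
        return fixed;
    }
    *n_fixed = 0;
    return NULL;
}

/*
 * Sketch of the update: use the fixed-up set only if one was created and
 * free it before returning; the caller's own set is never modified or
 * freed here, which is the leak the commit closes.
 */
static void
update_paths (const path_t *raw_nhs, size_t n_raw)
{
    size_t n_fixed;
    path_t *fixed_nhs = fixup_drop_path (raw_nhs, n_raw, &n_fixed);
    const path_t *use = (NULL == fixed_nhs) ? raw_nhs : fixed_nhs;
    size_t n_use = (NULL == fixed_nhs) ? n_raw : n_fixed;
    size_t ii;

    for (ii = 0; ii < n_use; ii++)
        printf ("install path, weight %d\n", use[ii].weight);

    /* frees only what this function allocated; free(NULL) is a no-op */
    free (fixed_nhs);
}

int
main (void)
{
    path_t mine[2] = { { .weight = 3 }, { .weight = 1 } };

    update_paths (mine, 2); /* caller-owned set; fixup returns NULL */
    update_paths (NULL, 0); /* empty set; fixup supplies the drop path */
    return 0;
}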
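
The comment added in fib_entry_src_mk_lb() describes pre-sizing the next-hop
vector (vec_validate followed by vec_reset_length) so the subsequent
path-list walk appends without repeated reallocation. A rough analogue of
that idiom in plain C is sketched below; nh_vec_t, nh_vec_presize() and
collect_next_hops() are illustrative names, not VPP API.

#include <stdlib.h>

/* hypothetical stand-ins; not VPP's types */
typedef struct
{
    int is_up;
    int nh_index;
} path_t;

typedef struct
{
    int    *elts;
    size_t  len;
    size_t  cap;
} nh_vec_t;

/*
 * Analogue of vec_validate() + vec_reset_length(): reserve room for the
 * worst case (every path usable) once, then start filling from length 0.
 */
static void
nh_vec_presize (nh_vec_t *v, size_t max_elts)
{
    if (v->cap < max_elts)
    {
        v->elts = realloc (v->elts, max_elts * sizeof (*v->elts));
        v->cap = max_elts;
    }
    v->len = 0;
}

/*
 * Collect the next-hops of all 'up' paths. Because the vector was
 * pre-sized to n_paths, no reallocation happens inside the loop; that is
 * the common case, since most paths are usually up.
 */
static void
collect_next_hops (nh_vec_t *v, const path_t *paths, size_t n_paths)
{
    size_t ii;

    nh_vec_presize (v, n_paths);
    for (ii = 0; ii < n_paths; ii++)
    {
        if (paths[ii].is_up)
            v->elts[v->len++] = paths[ii].nh_index;
    }
}

int
main (void)
{
    path_t paths[3] = { { 1, 10 }, { 0, 11 }, { 1, 12 } };
    nh_vec_t v = { 0 };

    collect_next_hops (&v, paths, 3); /* v.len == 2: indices 10 and 12 */
    free (v.elts);
    return 0;
}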