From 7f4d577d6bc243f53a044d92ca9367b3f1fa170e Mon Sep 17 00:00:00 2001
From: Andrew Yourtchenko
Date: Wed, 24 May 2017 13:20:47 +0200
Subject: acl-plugin: bihash-based ACL lookup

Add a bihash-based ACL lookup mechanism and make it the new default.
This changes the time required to look up a 5-tuple match from
O(total_N_entries) to O(total_N_mask_types), where a "mask type" is an
overall mask on the 5-tuple required to represent an ACE.

For testing/comparison there is a temporary debug CLI
"set acl-plugin use-hash-acl-matching {0|1}", which, when set to 0,
makes the plugin use the "old" linear lookup, and when set to 1, makes
it use the hash-based lookup.

Based on the discussions on the vpp-dev mailing list, prevent assigning
an ACL index to an interface when the ACL with that index is not
defined; also prevent deleting an ACL while that ACL is applied.

Also, for easier debugging of the state, there are new debug CLI
commands to see the ACL plugin state at several layers:

"show acl-plugin acl [index N]" - show a high-level ACL representation,
used for the linear lookup and as a base for building the
hashtable-based lookup. Also shows if a given ACL is applied somewhere.

"show acl-plugin interface [sw_if_index N]" - show which interfaces
have which ACL(s) applied.

"show acl-plugin tables" - a lower-level debug command used to see the
state of all of the related data structures at once. There are optional
specifiers, which make for a more focused and possibly augmented
output:

"show acl-plugin tables acl [index N]" - show the "bitmask-ready"
representations of the ACLs, as well as the mask types and their
associated indices.

"show acl-plugin tables mask" - show the derived mask types and their
indices only.

"show acl-plugin tables applied [sw_if_index N]" - show the table of
all of the ACEs applied for a given sw_if_index or all interfaces.

"show acl-plugin tables hash [verbose N]" - show the 48x8 bihash used
for the ACL lookup.

Change-Id: I89fff051424cb44bcb189e3cee04c1b8f76efc28
Signed-off-by: Andrew Yourtchenko
---
 src/plugins/acl/hash_lookup.c | 742 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 742 insertions(+)
 create mode 100644 src/plugins/acl/hash_lookup.c

(limited to 'src/plugins/acl/hash_lookup.c')

diff --git a/src/plugins/acl/hash_lookup.c b/src/plugins/acl/hash_lookup.c
new file mode 100644
index 00000000..8e8c5274
--- /dev/null
+++ b/src/plugins/acl/hash_lookup.c
@@ -0,0 +1,742 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include
+#include
+
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "hash_lookup.h"
+#include "hash_lookup_private.h"
+
+/*
+ * This returns true if there is indeed a match on the portranges.
+ * With all these levels of indirections, this is not going to be very fast, + * so, best use the individual ports or wildcard ports for performance. + */ +static int +match_portranges(acl_main_t *am, fa_5tuple_t *match, u32 index) +{ + applied_hash_ace_entry_t **applied_hash_aces = match->pkt.is_input ? &am->input_hash_entry_vec_by_sw_if_index[match->pkt.sw_if_index] : + &am->output_hash_entry_vec_by_sw_if_index[match->pkt.sw_if_index]; + applied_hash_ace_entry_t *pae = &((*applied_hash_aces)[index]); + + // hash_acl_info_t *ha = &am->hash_acl_infos[pae->acl_index]; + acl_rule_t *r = &(am->acls[pae->acl_index].rules[pae->ace_index]); + DBG("PORTMATCH: %d <= %d <= %d && %d <= %d <= %d ?", + r->src_port_or_type_first, match->l4.port[0], r->src_port_or_type_last, + r->dst_port_or_code_first, match->l4.port[1], r->dst_port_or_code_last); + + return ( ((r->src_port_or_type_first <= match->l4.port[0]) && r->src_port_or_type_last >= match->l4.port[0]) && + ((r->dst_port_or_code_first <= match->l4.port[1]) && r->dst_port_or_code_last >= match->l4.port[1]) ); +} + +static u32 +multi_acl_match_get_applied_ace_index(acl_main_t *am, fa_5tuple_t *match) +{ + clib_bihash_kv_48_8_t kv; + clib_bihash_kv_48_8_t result; + fa_5tuple_t *kv_key = (fa_5tuple_t *)kv.key; + hash_acl_lookup_value_t *result_val = (hash_acl_lookup_value_t *)&result.value; + u64 *pmatch = (u64 *)match; + u64 *pmask; + u64 *pkey; + int mask_type_index; + u32 curr_match_index = ~0; + + u32 sw_if_index = match->pkt.sw_if_index; + u8 is_input = match->pkt.is_input; + applied_hash_ace_entry_t **applied_hash_aces = is_input ? &am->input_hash_entry_vec_by_sw_if_index[sw_if_index] : + &am->output_hash_entry_vec_by_sw_if_index[sw_if_index]; + applied_hash_acl_info_t **applied_hash_acls = is_input ? &am->input_applied_hash_acl_info_by_sw_if_index : + &am->output_applied_hash_acl_info_by_sw_if_index; + + DBG("TRYING TO MATCH: %016llx %016llx %016llx %016llx %016llx %016llx", + pmatch[0], pmatch[1], pmatch[2], pmatch[3], pmatch[4], pmatch[5]); + + for(mask_type_index=0; mask_type_index < pool_len(am->ace_mask_type_pool); mask_type_index++) { + if (!clib_bitmap_get((*applied_hash_acls)[sw_if_index].mask_type_index_bitmap, mask_type_index)) { + /* This bit is not set. Avoid trying to match */ + continue; + } + ace_mask_type_entry_t *mte = &am->ace_mask_type_pool[mask_type_index]; + pmatch = (u64 *)match; + pmask = (u64 *)&mte->mask; + pkey = (u64 *)kv.key; + /* + * unrolling the below loop results in a noticeable performance increase. + int i; + for(i=0; i<6; i++) { + kv.key[i] = pmatch[i] & pmask[i]; + } + */ + + *pkey++ = *pmatch++ & *pmask++; + *pkey++ = *pmatch++ & *pmask++; + *pkey++ = *pmatch++ & *pmask++; + *pkey++ = *pmatch++ & *pmask++; + *pkey++ = *pmatch++ & *pmask++; + *pkey++ = *pmatch++ & *pmask++; + + kv_key->pkt.mask_type_index_lsb = mask_type_index; + DBG(" KEY %3d: %016llx %016llx %016llx %016llx %016llx %016llx", mask_type_index, + kv.key[0], kv.key[1], kv.key[2], kv.key[3], kv.key[4], kv.key[5]); + int res = BV (clib_bihash_search) (&am->acl_lookup_hash, &kv, &result); + if (res == 0) { + DBG("ACL-MATCH! result_val: %016llx", result_val->as_u64); + if (result_val->applied_entry_index < curr_match_index) { + if (PREDICT_FALSE(result_val->need_portrange_check)) { + /* + * This is going to be slow, since we can have multiple superset + * entries for narrow-ish portranges, e.g.: + * 0..42 100..400, 230..60000, + * so we need to walk linearly and check if they match. 
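+ *
+ * In other words: all the ACEs whose masked 5-tuples collide on the
+ * same key (e.g. hypothetical ranges 1000..2000 and 3000..4000 under
+ * a mask with zeroed port bits) hang off a single bihash entry; the
+ * walk below follows next_applied_entry_index and calls
+ * match_portranges() on each entry until the first real match.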
+ */ + + u32 curr_index = result_val->applied_entry_index; + while ((curr_index != ~0) && !match_portranges(am, match, curr_index)) { + /* while no match and there are more entries, walk... */ + applied_hash_ace_entry_t *pae = &((*applied_hash_aces)[curr_index]); + DBG("entry %d did not portmatch, advancing to %d", curr_index, pae->next_applied_entry_index); + curr_index = pae->next_applied_entry_index; + } + if (curr_index < curr_match_index) { + DBG("The index %d is the new candidate in portrange matches.", curr_index); + curr_match_index = result_val->applied_entry_index; + if (!result_val->shadowed) { + /* new result is known to not be shadowed, so no point to look up further */ + break; + } + } else { + DBG("Curr portmatch index %d is too big vs. current matched one %d", curr_index, curr_match_index); + } + } else { + /* The usual path is here. Found an entry in front of the current candiate - so it's a new one */ + DBG("This match is the new candidate"); + curr_match_index = result_val->applied_entry_index; + if (!result_val->shadowed) { + /* new result is known to not be shadowed, so no point to look up further */ + break; + } + } + } + } + } + DBG("MATCH-RESULT: %d", curr_match_index); + return curr_match_index; +} + +static void +hashtable_add_del(acl_main_t *am, clib_bihash_kv_48_8_t *kv, int is_add) +{ + DBG("HASH ADD/DEL: %016llx %016llx %016llx %016llx %016llx %016llx add %d", + kv->key[0], kv->key[1], kv->key[2], + kv->key[3], kv->key[4], kv->key[5], is_add); + BV (clib_bihash_add_del) (&am->acl_lookup_hash, kv, is_add); +} + +static void +fill_applied_hash_ace_kv(acl_main_t *am, + applied_hash_ace_entry_t **applied_hash_aces, + u32 sw_if_index, u8 is_input, + u32 new_index, clib_bihash_kv_48_8_t *kv) +{ + fa_5tuple_t *kv_key = (fa_5tuple_t *)kv->key; + hash_acl_lookup_value_t *kv_val = (hash_acl_lookup_value_t *)&kv->value; + applied_hash_ace_entry_t *pae = &((*applied_hash_aces)[new_index]); + hash_acl_info_t *ha = &am->hash_acl_infos[pae->acl_index]; + + memcpy(kv_key, &ha->rules[pae->hash_ace_info_index].match, sizeof(*kv_key)); + /* initialize the sw_if_index and direction */ + kv_key->pkt.sw_if_index = sw_if_index; + kv_key->pkt.is_input = is_input; + kv_val->as_u64 = 0; + kv_val->applied_entry_index = new_index; + kv_val->need_portrange_check = ha->rules[pae->hash_ace_info_index].src_portrange_not_powerof2 || + ha->rules[pae->hash_ace_info_index].dst_portrange_not_powerof2; + /* by default assume all values are shadowed -> check all mask types */ + kv_val->shadowed = 1; +} + +static void +add_del_hashtable_entry(acl_main_t *am, + u32 sw_if_index, u8 is_input, + applied_hash_ace_entry_t **applied_hash_aces, + u32 index, int is_add) +{ + clib_bihash_kv_48_8_t kv; + + fill_applied_hash_ace_kv(am, applied_hash_aces, sw_if_index, is_input, index, &kv); + hashtable_add_del(am, &kv, is_add); +} + + + +static void +activate_applied_ace_hash_entry(acl_main_t *am, + u32 sw_if_index, u8 is_input, + applied_hash_ace_entry_t **applied_hash_aces, + u32 new_index) +{ + clib_bihash_kv_48_8_t kv; + applied_hash_ace_entry_t *pae = &((*applied_hash_aces)[new_index]); + + fill_applied_hash_ace_kv(am, applied_hash_aces, sw_if_index, is_input, new_index, &kv); + + DBG("APPLY ADD KY: %016llx %016llx %016llx %016llx %016llx %016llx", + kv.key[0], kv.key[1], kv.key[2], + kv.key[3], kv.key[4], kv.key[5]); + + clib_bihash_kv_48_8_t result; + hash_acl_lookup_value_t *result_val = (hash_acl_lookup_value_t *)&result.value; + int res = BV (clib_bihash_search) (&am->acl_lookup_hash, &kv, &result); + 
if (res == 0) { + /* There already exists an entry or more. Append at the end. */ + u32 existing_index = result_val->applied_entry_index; + DBG("A key already exists, with applied entry index: %d", existing_index); + ASSERT(existing_index != ~0); + applied_hash_ace_entry_t *existing_pae = &((*applied_hash_aces)[existing_index]); + /* walk the list to the last element */ + while (existing_pae->next_applied_entry_index != ~0) { + if (existing_index == existing_pae->next_applied_entry_index) { + DBG("existing and next index of entry %d are the same, abort", existing_index); + ASSERT(existing_index != existing_pae->next_applied_entry_index); + } + existing_index = existing_pae->next_applied_entry_index; + existing_pae = &((*applied_hash_aces)[existing_index]); + DBG("...advance to chained entry index: %d", existing_index); + } + /* link ourseves in */ + existing_pae->next_applied_entry_index = new_index; + pae->prev_applied_entry_index = existing_index; + } else { + /* It's the very first entry */ + hashtable_add_del(am, &kv, 1); + } +} + +static void +applied_hash_entries_analyze(acl_main_t *am, applied_hash_ace_entry_t **applied_hash_aces) +{ + /* + * Go over the rules and check which ones are shadowed and which aren't. + * Naive approach: try to match the match value from every ACE as if it + * was a live packet, and see if the resulting match happens earlier in the list. + * if it does not match or it is later in the ACL - then the entry is not shadowed. + * + * This approach fails, an example: + * deny tcp 2001:db8::/32 2001:db8::/32 + * permit ip 2001:db8::1/128 2001:db8::2/128 + */ +} + +void +hash_acl_apply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index) +{ + int i; + + DBG("HASH ACL apply: sw_if_index %d is_input %d acl %d", sw_if_index, is_input, acl_index); + u32 *acl_vec = is_input ? am->input_acl_vec_by_sw_if_index[sw_if_index] : + am->output_acl_vec_by_sw_if_index[sw_if_index]; + if (is_input) { + vec_validate(am->input_hash_entry_vec_by_sw_if_index, sw_if_index); + } else { + vec_validate(am->output_hash_entry_vec_by_sw_if_index, sw_if_index); + } + vec_validate(am->hash_acl_infos, acl_index); + applied_hash_ace_entry_t **applied_hash_aces = is_input ? &am->input_hash_entry_vec_by_sw_if_index[sw_if_index] : + &am->output_hash_entry_vec_by_sw_if_index[sw_if_index]; + u32 order_index = vec_search(acl_vec, acl_index); + hash_acl_info_t *ha = &am->hash_acl_infos[acl_index]; + ASSERT(order_index != ~0); + + if (!am->acl_lookup_hash_initialized) { + BV (clib_bihash_init) (&am->acl_lookup_hash, "ACL plugin rule lookup bihash", + 65536, 2 << 25); + am->acl_lookup_hash_initialized = 1; + } + int base_offset = vec_len(*applied_hash_aces); + + /* Update the bitmap of the mask types with which the lookup + needs to happen for the ACLs applied to this sw_if_index */ + applied_hash_acl_info_t **applied_hash_acls = is_input ? &am->input_applied_hash_acl_info_by_sw_if_index : + &am->output_applied_hash_acl_info_by_sw_if_index; + vec_validate((*applied_hash_acls), sw_if_index); + applied_hash_acl_info_t *pal = &(*applied_hash_acls)[sw_if_index]; + pal->mask_type_index_bitmap = clib_bitmap_or(pal->mask_type_index_bitmap, + ha->mask_type_index_bitmap); + /* + * if the applied ACL is empty, the current code will cause a + * different behavior compared to current linear search: an empty ACL will + * simply fallthrough to the next ACL, or the default deny in the end. 
+ * + * This is not a problem, because after vpp-dev discussion, + * the consensus was it should not be possible to apply the non-existent + * ACL, so the change adding this code also takes care of that. + */ + + /* expand the applied aces vector by the necessary amount */ + vec_resize((*applied_hash_aces), vec_len(ha->rules)); + + /* add the rules from the ACL to the hash table for lookup and append to the vector*/ + for(i=0; i < vec_len(ha->rules); i++) { + u32 new_index = base_offset + i; + applied_hash_ace_entry_t *pae = &((*applied_hash_aces)[new_index]); + pae->acl_index = acl_index; + pae->ace_index = ha->rules[i].ace_index; + pae->action = ha->rules[i].action; + pae->hash_ace_info_index = i; + /* we might link it in later */ + pae->next_applied_entry_index = ~0; + pae->prev_applied_entry_index = ~0; + activate_applied_ace_hash_entry(am, sw_if_index, is_input, applied_hash_aces, new_index); + } + applied_hash_entries_analyze(am, applied_hash_aces); +} + +static void +move_applied_ace_hash_entry(acl_main_t *am, + u32 sw_if_index, u8 is_input, + applied_hash_ace_entry_t **applied_hash_aces, + u32 old_index, u32 new_index) +{ + + /* move the entry */ + (*applied_hash_aces)[new_index] = (*applied_hash_aces)[old_index]; + + /* update the linkage and hash table if necessary */ + applied_hash_ace_entry_t *pae = &((*applied_hash_aces)[old_index]); + + if (pae->prev_applied_entry_index != ~0) { + applied_hash_ace_entry_t *prev_pae = &((*applied_hash_aces)[pae->prev_applied_entry_index]); + ASSERT(prev_pae->next_applied_entry_index == old_index); + prev_pae->next_applied_entry_index = new_index; + } else { + /* first entry - so the hash points to it, update */ + add_del_hashtable_entry(am, sw_if_index, is_input, + applied_hash_aces, new_index, 1); + } + if (pae->next_applied_entry_index != ~0) { + applied_hash_ace_entry_t *next_pae = &((*applied_hash_aces)[pae->next_applied_entry_index]); + ASSERT(next_pae->prev_applied_entry_index == old_index); + next_pae->prev_applied_entry_index = new_index; + } + /* invalidate the old entry */ + pae->prev_applied_entry_index = ~0; + pae->next_applied_entry_index = ~0; +} + +static void +deactivate_applied_ace_hash_entry(acl_main_t *am, + u32 sw_if_index, u8 is_input, + applied_hash_ace_entry_t **applied_hash_aces, + u32 old_index) +{ + applied_hash_ace_entry_t *pae = &((*applied_hash_aces)[old_index]); + + if (pae->next_applied_entry_index != ~0) { + applied_hash_ace_entry_t *next_pae = &((*applied_hash_aces)[pae->next_applied_entry_index]); + ASSERT(next_pae->prev_applied_entry_index == old_index); + next_pae->prev_applied_entry_index = pae->prev_applied_entry_index; + } + + if (pae->prev_applied_entry_index != ~0) { + applied_hash_ace_entry_t *prev_pae = &((*applied_hash_aces)[pae->prev_applied_entry_index]); + ASSERT(prev_pae->next_applied_entry_index == old_index); + prev_pae->next_applied_entry_index = pae->next_applied_entry_index; + } else { + /* It was the first entry. We need either to reset the hash entry or delete it */ + if (pae->next_applied_entry_index != ~0) { + add_del_hashtable_entry(am, sw_if_index, is_input, + applied_hash_aces, pae->next_applied_entry_index, 1); + } else { + /* no next entry, so just delete the entry in the hash table */ + add_del_hashtable_entry(am, sw_if_index, is_input, + applied_hash_aces, old_index, 0); + } + } +} + + +static void +hash_acl_build_applied_lookup_bitmap(acl_main_t *am, u32 sw_if_index, u8 is_input) +{ + int i; + uword *new_lookup_bitmap = 0; + u32 **applied_acls = is_input ? 
&am->input_acl_vec_by_sw_if_index[sw_if_index] : + &am->output_acl_vec_by_sw_if_index[sw_if_index]; + applied_hash_acl_info_t **applied_hash_acls = is_input ? &am->input_applied_hash_acl_info_by_sw_if_index : + &am->output_applied_hash_acl_info_by_sw_if_index; + applied_hash_acl_info_t *pal = &(*applied_hash_acls)[sw_if_index]; + for(i=0; i < vec_len(*applied_acls); i++) { + u32 a_acl_index = (*applied_acls)[i]; + hash_acl_info_t *ha = &am->hash_acl_infos[a_acl_index]; + new_lookup_bitmap = clib_bitmap_or(new_lookup_bitmap, + ha->mask_type_index_bitmap); + } + uword *old_lookup_bitmap = pal->mask_type_index_bitmap; + pal->mask_type_index_bitmap = new_lookup_bitmap; + clib_bitmap_free(old_lookup_bitmap); +} + +void +hash_acl_unapply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index) +{ + int i; + + DBG("HASH ACL unapply: sw_if_index %d is_input %d acl %d", sw_if_index, is_input, acl_index); + hash_acl_info_t *ha = &am->hash_acl_infos[acl_index]; + applied_hash_ace_entry_t **applied_hash_aces = is_input ? &am->input_hash_entry_vec_by_sw_if_index[sw_if_index] : + &am->output_hash_entry_vec_by_sw_if_index[sw_if_index]; + + for(i=0; i < vec_len((*applied_hash_aces)); i++) { + if ((*applied_hash_aces)[i].acl_index == acl_index) { + DBG("Found applied ACL#%d at applied index %d", acl_index, i); + break; + } + } + if (vec_len((*applied_hash_aces)) <= i) { + DBG("Did not find applied ACL#%d at sw_if_index %d", acl_index, sw_if_index); + /* we went all the way without finding any entries. Probably a list was empty. */ + return; + } + int base_offset = i; + int tail_offset = base_offset + vec_len(ha->rules); + int tail_len = vec_len((*applied_hash_aces)) - tail_offset; + DBG("base_offset: %d, tail_offset: %d, tail_len: %d", base_offset, tail_offset, tail_len); + + for(i=0; i < vec_len(ha->rules); i ++) { + DBG("UNAPPLY DEACTIVATE: sw_if_index %d is_input %d, applied index %d", sw_if_index, is_input, base_offset + i); + deactivate_applied_ace_hash_entry(am, sw_if_index, is_input, + applied_hash_aces, base_offset + i); + } + for(i=0; i < tail_len; i ++) { + /* move the entry at tail offset to base offset */ + /* that is, from (tail_offset+i) -> (base_offset+i) */ + DBG("UNAPPLY MOVE: sw_if_index %d is_input %d, applied index %d ->", sw_if_index, is_input, tail_offset+i, base_offset + i); + move_applied_ace_hash_entry(am, sw_if_index, is_input, applied_hash_aces, tail_offset + i, base_offset + i); + } + /* trim the end of the vector */ + _vec_len((*applied_hash_aces)) -= vec_len(ha->rules); + + applied_hash_entries_analyze(am, applied_hash_aces); + + /* After deletion we might not need some of the mask-types anymore... */ + hash_acl_build_applied_lookup_bitmap(am, sw_if_index, is_input); +} + +/* + * Create the applied ACEs and update the hash table, + * taking into account that the ACL may not be the last + * in the vector of applied ACLs. + * + * For now, walk from the end of the vector and unapply the ACLs, + * then apply the one in question and reapply the rest. + */ + +void +hash_acl_reapply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index) +{ + u32 **applied_acls = is_input ? &am->input_acl_vec_by_sw_if_index[sw_if_index] : + &am->output_acl_vec_by_sw_if_index[sw_if_index]; + int i; + int start_index = vec_search((*applied_acls), acl_index); + /* + * This function is called after we find out the sw_if_index where ACL is applied. + * If the by-sw_if_index vector does not have the ACL#, then it's a bug. 
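+   *
+   * For example: if the applied vector is [A, B, C] and B is being
+   * reapplied, the loops below unapply C and then B, then apply B and C
+   * again, preserving the relative order (and thus the matching
+   * precedence) of the applied ACLs.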
+ */ + ASSERT(start_index < vec_len(*applied_acls)); + + /* unapply all the ACLs till the current one */ + for(i = vec_len(*applied_acls) - 1; i >= start_index; i--) { + hash_acl_unapply(am, sw_if_index, is_input, (*applied_acls)[i]); + } + for(i = start_index; i < vec_len(*applied_acls); i++) { + hash_acl_apply(am, sw_if_index, is_input, (*applied_acls)[i]); + } +} + +static void +make_address_mask(ip46_address_t *addr, u8 is_ipv6, u8 prefix_len) +{ + if (is_ipv6) { + ip6_address_mask_from_width(&addr->ip6, prefix_len); + } else { + /* FIXME: this may not be correct way */ + ip6_address_mask_from_width(&addr->ip6, prefix_len + 3*32); + ip46_address_mask_ip4(addr); + } +} + +static u8 +make_port_mask(u16 *portmask, u16 port_first, u16 port_last) +{ + if (port_first == port_last) { + *portmask = 0xffff; + /* single port is representable by masked value */ + return 0; + } + if ((port_first == 0) && (port_last == 65535)) { + *portmask = 0; + /* wildcard port is representable by a masked value */ + return 0; + } + + /* + * For now match all the ports, later + * here might be a better optimization which would + * pick out bitmaskable portranges. + * + * However, adding a new mask type potentially + * adds a per-packet extra lookup, so the benefit is not clear. + */ + *portmask = 0; + /* This port range can't be represented via bitmask exactly. */ + return 1; +} + +static void +make_mask_and_match_from_rule(fa_5tuple_t *mask, acl_rule_t *r, hash_ace_info_t *hi, int match_nonfirst_fragment) +{ + memset(mask, 0, sizeof(*mask)); + memset(&hi->match, 0, sizeof(hi->match)); + hi->action = r->is_permit; + + /* we will need to be matching based on sw_if_index, direction, and mask_type_index when applied */ + mask->pkt.sw_if_index = ~0; + mask->pkt.is_input = 1; + /* we will assign the match of mask_type_index later when we find it*/ + mask->pkt.mask_type_index_lsb = ~0; + + mask->pkt.is_ip6 = 1; + hi->match.pkt.is_ip6 = r->is_ipv6; + + make_address_mask(&mask->addr[0], r->is_ipv6, r->src_prefixlen); + hi->match.addr[0] = r->src; + make_address_mask(&mask->addr[1], r->is_ipv6, r->dst_prefixlen); + hi->match.addr[1] = r->dst; + + if (r->proto != 0) { + mask->l4.proto = ~0; /* L4 proto needs to be matched */ + hi->match.l4.proto = r->proto; + if (match_nonfirst_fragment) { + /* match the non-first fragments only */ + mask->pkt.is_nonfirst_fragment = 1; + hi->match.pkt.is_nonfirst_fragment = 1; + } else { + /* Calculate the src/dst port masks and make the src/dst port matches accordingly */ + hi->src_portrange_not_powerof2 = make_port_mask(&mask->l4.port[0], r->src_port_or_type_first, r->src_port_or_type_last); + hi->match.l4.port[0] = r->src_port_or_type_first & mask->l4.port[0]; + hi->dst_portrange_not_powerof2 = make_port_mask(&mask->l4.port[1], r->dst_port_or_code_first, r->dst_port_or_code_last); + hi->match.l4.port[1] = r->dst_port_or_code_first & mask->l4.port[1]; + /* L4 info must be valid in order to match */ + mask->pkt.l4_valid = 1; + hi->match.pkt.l4_valid = 1; + /* And we must set the mask to check that it is an initial fragment */ + mask->pkt.is_nonfirst_fragment = 1; + hi->match.pkt.is_nonfirst_fragment = 0; + if ((r->proto == IPPROTO_TCP) && (r->tcp_flags_mask != 0)) { + /* if we want to match on TCP flags, they must be masked off as well */ + mask->pkt.tcp_flags = r->tcp_flags_mask; + hi->match.pkt.tcp_flags = r->tcp_flags_value; + /* and the flags need to be present within the packet being matched */ + mask->pkt.tcp_flags_valid = 1; + hi->match.pkt.tcp_flags_valid = 1; + } + } + } + /* 
Sanitize the mask and the match */ + u64 *pmask = (u64 *)mask; + u64 *pmatch = (u64 *)&hi->match; + int j; + for(j=0; j<6; j++) { + pmatch[j] = pmatch[j] & pmask[j]; + } +} + +static u32 +find_mask_type_index(acl_main_t *am, fa_5tuple_t *mask) +{ + ace_mask_type_entry_t *mte; + /* *INDENT-OFF* */ + pool_foreach(mte, am->ace_mask_type_pool, + ({ + if(memcmp(&mte->mask, mask, sizeof(*mask)) == 0) + return (mte - am->ace_mask_type_pool); + })); + /* *INDENT-ON* */ + return ~0; +} + +static u32 +assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask) +{ + u32 mask_type_index = find_mask_type_index(am, mask); + ace_mask_type_entry_t *mte; + if(~0 == mask_type_index) { + pool_get_aligned (am->ace_mask_type_pool, mte, CLIB_CACHE_LINE_BYTES); + mask_type_index = mte - am->ace_mask_type_pool; + clib_memcpy(&mte->mask, mask, sizeof(mte->mask)); + mte->refcount = 0; + /* + * We can use only 16 bits, since in the match there is only u16 field. + * Realistically, once you go to 64K of mask types, it is a huge + * problem anyway, so we might as well stop half way. + */ + ASSERT(mask_type_index < 32768); + } + mte = am->ace_mask_type_pool + mask_type_index; + mte->refcount++; + return mask_type_index; +} + +static void +release_mask_type_index(acl_main_t *am, u32 mask_type_index) +{ + ace_mask_type_entry_t *mte = &am->ace_mask_type_pool[mask_type_index]; + mte->refcount--; + if (mte->refcount == 0) { + /* we are not using this entry anymore */ + pool_put(am->ace_mask_type_pool, mte); + } +} + +void hash_acl_add(acl_main_t *am, int acl_index) +{ + DBG("HASH ACL add : %d", acl_index); + int i; + acl_list_t *a = &am->acls[acl_index]; + vec_validate(am->hash_acl_infos, acl_index); + hash_acl_info_t *ha = &am->hash_acl_infos[acl_index]; + memset(ha, 0, sizeof(*ha)); + + /* walk the newly added ACL entries and ensure that for each of them there + is a mask type, increment a reference count for that mask type */ + for(i=0; i < a->count; i++) { + hash_ace_info_t ace_info; + fa_5tuple_t mask; + memset(&ace_info, 0, sizeof(ace_info)); + ace_info.acl_index = acl_index; + ace_info.ace_index = i; + + make_mask_and_match_from_rule(&mask, &a->rules[i], &ace_info, 0); + ace_info.mask_type_index = assign_mask_type_index(am, &mask); + /* assign the mask type index for matching itself */ + ace_info.match.pkt.mask_type_index_lsb = ace_info.mask_type_index; + DBG("ACE: %d mask_type_index: %d", i, ace_info.mask_type_index); + /* Ensure a given index is set in the mask type index bitmap for this ACL */ + ha->mask_type_index_bitmap = clib_bitmap_set(ha->mask_type_index_bitmap, ace_info.mask_type_index, 1); + vec_add1(ha->rules, ace_info); + if (am->l4_match_nonfirst_fragment) { + /* add the second rule which matches the noninitial fragments with the respective mask */ + make_mask_and_match_from_rule(&mask, &a->rules[i], &ace_info, 1); + ace_info.mask_type_index = assign_mask_type_index(am, &mask); + ace_info.match.pkt.mask_type_index_lsb = ace_info.mask_type_index; + DBG("ACE: %d (non-initial frags) mask_type_index: %d", i, ace_info.mask_type_index); + /* Ensure a given index is set in the mask type index bitmap for this ACL */ + ha->mask_type_index_bitmap = clib_bitmap_set(ha->mask_type_index_bitmap, ace_info.mask_type_index, 1); + vec_add1(ha->rules, ace_info); + } + } + /* + * if an ACL is applied somewhere, fill the corresponding lookup data structures. + * We need to take care if the ACL is not the last one in the vector of ACLs applied to the interface. 
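+   *
+   * That is why the code below goes through hash_acl_reapply() rather
+   * than plain hash_acl_apply(): reapply first unapplies the ACLs from
+   * the tail of the per-interface vector down to this one, then applies
+   * them back in order, keeping the relative precedence intact.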
+ */ + if (acl_index < vec_len(am->input_sw_if_index_vec_by_acl)) { + u32 *sw_if_index; + vec_foreach(sw_if_index, am->input_sw_if_index_vec_by_acl[acl_index]) { + hash_acl_reapply(am, *sw_if_index, 1, acl_index); + } + } + if (acl_index < vec_len(am->output_sw_if_index_vec_by_acl)) { + u32 *sw_if_index; + vec_foreach(sw_if_index, am->output_sw_if_index_vec_by_acl[acl_index]) { + hash_acl_reapply(am, *sw_if_index, 0, acl_index); + } + } +} + +void hash_acl_delete(acl_main_t *am, int acl_index) +{ + DBG("HASH ACL delete : %d", acl_index); + /* + * If the ACL is applied somewhere, remove the references of it (call hash_acl_unapply) + * this is a different behavior from the linear lookup where an empty ACL is "deny all", + * + * However, following vpp-dev discussion the ACL that is referenced elsewhere + * should not be possible to delete, and the change adding this also adds + * the safeguards to that respect, so this is not a problem. + */ + if (acl_index < vec_len(am->input_sw_if_index_vec_by_acl)) { + u32 *sw_if_index; + vec_foreach(sw_if_index, am->input_sw_if_index_vec_by_acl[acl_index]) { + hash_acl_unapply(am, *sw_if_index, 1, acl_index); + } + } + if (acl_index < vec_len(am->output_sw_if_index_vec_by_acl)) { + u32 *sw_if_index; + vec_foreach(sw_if_index, am->output_sw_if_index_vec_by_acl[acl_index]) { + hash_acl_unapply(am, *sw_if_index, 0, acl_index); + } + } + + /* walk the mask types for the ACL about-to-be-deleted, and decrease + * the reference count, possibly freeing up some of them */ + int i; + hash_acl_info_t *ha = &am->hash_acl_infos[acl_index]; + for(i=0; i < vec_len(ha->rules); i++) { + release_mask_type_index(am, ha->rules[i].mask_type_index); + } + clib_bitmap_free(ha->mask_type_index_bitmap); + vec_free(ha->rules); +} + +u8 +hash_multi_acl_match_5tuple (u32 sw_if_index, fa_5tuple_t * pkt_5tuple, int is_l2, + int is_ip6, int is_input, u32 * acl_match_p, + u32 * rule_match_p, u32 * trace_bitmap) +{ + acl_main_t *am = &acl_main; + applied_hash_ace_entry_t **applied_hash_aces = is_input ? &am->input_hash_entry_vec_by_sw_if_index[sw_if_index] : + &am->output_hash_entry_vec_by_sw_if_index[sw_if_index]; + u32 match_index = multi_acl_match_get_applied_ace_index(am, pkt_5tuple); + if (match_index < vec_len((*applied_hash_aces))) { + applied_hash_ace_entry_t *pae = &((*applied_hash_aces)[match_index]); + *acl_match_p = pae->acl_index; + *rule_match_p = pae->ace_index; + return pae->action; + } + return 0; +} + + +void +show_hash_acl_hash (vlib_main_t * vm, acl_main_t *am, u32 verbose) +{ + vlib_cli_output(vm, "\nACL lookup hash table:\n%U\n", + BV (format_bihash), &am->acl_lookup_hash, verbose); +} -- cgit 1.2.3-korg From a016216753c955683bda76ed250daa540ff35107 Mon Sep 17 00:00:00 2001 From: Andrew Yourtchenko Date: Mon, 3 Jul 2017 12:32:44 +0200 Subject: acl-plugin: VPP-897: applying of large number of ACEs is slow When applying ACEs, in the new hash-based scheme, for each ACE the lookup in the hash table is done, and either that ACE is added to the end of the existing list if there is a match, or a new list is created if there is no match. Usually ACEs do not overlap, so this operation is fast, however, the fragment-permit entries in case of a large number of ACLs create a huge list which needs to be traversed for every other ACE being added, slowing down the process dramatically. The solution is to add an explicit flag to denote the first element of the chain, and use the "prev" index of that element to point to the tail element. 
The "next" field of the last element is still ~0 and if we touch that one, we do the linear search to find the first one, but that is a relatively infrequent operation. Change-Id: I352a3becd7854cf39aae65f0950afad7d18a70aa Signed-off-by: Andrew Yourtchenko (cherry picked from commit 204cf74aed51ca07933df7c606754abb4b26fd82) --- src/plugins/acl/hash_lookup.c | 57 +++++++++++++++++++++++++------------ src/plugins/acl/hash_lookup_types.h | 8 ++++-- 2 files changed, 45 insertions(+), 20 deletions(-) (limited to 'src/plugins/acl/hash_lookup.c') diff --git a/src/plugins/acl/hash_lookup.c b/src/plugins/acl/hash_lookup.c index 8e8c5274..3600faf1 100644 --- a/src/plugins/acl/hash_lookup.c +++ b/src/plugins/acl/hash_lookup.c @@ -216,26 +216,26 @@ activate_applied_ace_hash_entry(acl_main_t *am, int res = BV (clib_bihash_search) (&am->acl_lookup_hash, &kv, &result); if (res == 0) { /* There already exists an entry or more. Append at the end. */ - u32 existing_index = result_val->applied_entry_index; + u32 first_index = result_val->applied_entry_index; DBG("A key already exists, with applied entry index: %d", existing_index); - ASSERT(existing_index != ~0); - applied_hash_ace_entry_t *existing_pae = &((*applied_hash_aces)[existing_index]); - /* walk the list to the last element */ - while (existing_pae->next_applied_entry_index != ~0) { - if (existing_index == existing_pae->next_applied_entry_index) { - DBG("existing and next index of entry %d are the same, abort", existing_index); - ASSERT(existing_index != existing_pae->next_applied_entry_index); - } - existing_index = existing_pae->next_applied_entry_index; - existing_pae = &((*applied_hash_aces)[existing_index]); - DBG("...advance to chained entry index: %d", existing_index); - } + ASSERT(first_index != ~0); + applied_hash_ace_entry_t *first_pae = &((*applied_hash_aces)[first_index]); + /* move to "prev" by one, this should land us in the end of the list */ + u32 last_index = first_pae->prev_applied_entry_index; + ASSERT(last_index != ~0); + applied_hash_ace_entry_t *last_pae = &((*applied_hash_aces)[last_index]); + ASSERT(last_pae->next_applied_entry_index == ~0); /* link ourseves in */ - existing_pae->next_applied_entry_index = new_index; - pae->prev_applied_entry_index = existing_index; + last_pae->next_applied_entry_index = new_index; + pae->prev_applied_entry_index = last_index; + /* make a new reference from the very first element to new tail */ + first_pae->prev_applied_entry_index = new_index; } else { /* It's the very first entry */ hashtable_add_del(am, &kv, 1); + pae->is_first_entry = 1; + /* we are the tail */ + pae->prev_applied_entry_index = new_index; } } @@ -306,6 +306,7 @@ hash_acl_apply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index) for(i=0; i < vec_len(ha->rules); i++) { u32 new_index = base_offset + i; applied_hash_ace_entry_t *pae = &((*applied_hash_aces)[new_index]); + pae->is_first_entry = 0; pae->acl_index = acl_index; pae->ace_index = ha->rules[i].ace_index; pae->action = ha->rules[i].action; @@ -324,14 +325,13 @@ move_applied_ace_hash_entry(acl_main_t *am, applied_hash_ace_entry_t **applied_hash_aces, u32 old_index, u32 new_index) { - /* move the entry */ (*applied_hash_aces)[new_index] = (*applied_hash_aces)[old_index]; /* update the linkage and hash table if necessary */ applied_hash_ace_entry_t *pae = &((*applied_hash_aces)[old_index]); - if (pae->prev_applied_entry_index != ~0) { + if (!pae->is_first_entry) { applied_hash_ace_entry_t *prev_pae = &((*applied_hash_aces)[pae->prev_applied_entry_index]); 
ASSERT(prev_pae->next_applied_entry_index == old_index); prev_pae->next_applied_entry_index = new_index; @@ -344,6 +344,20 @@ move_applied_ace_hash_entry(acl_main_t *am, applied_hash_ace_entry_t *next_pae = &((*applied_hash_aces)[pae->next_applied_entry_index]); ASSERT(next_pae->prev_applied_entry_index == old_index); next_pae->prev_applied_entry_index = new_index; + } else { + /* + * Moving the very last entry, so we need to update the tail pointer in the first one. + * find back the first entry. Inefficient so might need to be a bit cleverer + * if this proves to be a problem.. + */ + u32 an_index = pae->prev_applied_entry_index; + applied_hash_ace_entry_t *head_pae = &((*applied_hash_aces)[pae->prev_applied_entry_index]); + while(!head_pae->is_first_entry) { + an_index = head_pae->prev_applied_entry_index; + head_pae = &((*applied_hash_aces)[an_index]); + } + ASSERT(head_pae->prev_applied_entry_index == old_index); + head_pae->prev_applied_entry_index = new_index; } /* invalidate the old entry */ pae->prev_applied_entry_index = ~0; @@ -364,13 +378,20 @@ deactivate_applied_ace_hash_entry(acl_main_t *am, next_pae->prev_applied_entry_index = pae->prev_applied_entry_index; } - if (pae->prev_applied_entry_index != ~0) { + if (!pae->is_first_entry) { applied_hash_ace_entry_t *prev_pae = &((*applied_hash_aces)[pae->prev_applied_entry_index]); ASSERT(prev_pae->next_applied_entry_index == old_index); prev_pae->next_applied_entry_index = pae->next_applied_entry_index; } else { /* It was the first entry. We need either to reset the hash entry or delete it */ + pae->is_first_entry = 0; if (pae->next_applied_entry_index != ~0) { + /* a custom case of relinking for the first node, with the tail not forward linked to it */ + applied_hash_ace_entry_t *next_pae = &((*applied_hash_aces)[pae->next_applied_entry_index]); + /* this is the tail the new head should be aware of */ + next_pae->prev_applied_entry_index = pae->prev_applied_entry_index; + next_pae->is_first_entry = 1; + add_del_hashtable_entry(am, sw_if_index, is_input, applied_hash_aces, pae->next_applied_entry_index, 1); } else { diff --git a/src/plugins/acl/hash_lookup_types.h b/src/plugins/acl/hash_lookup_types.h index 1c044591..1848c5ff 100644 --- a/src/plugins/acl/hash_lookup_types.h +++ b/src/plugins/acl/hash_lookup_types.h @@ -53,10 +53,14 @@ typedef struct { */ u32 next_applied_entry_index; /* - * previous entry in the list of the chained ones, - * if ~0 then this is entry in the hash. + * previous entry in the ring list of the chained ones. */ u32 prev_applied_entry_index; + /* + * 1 if it is the very first entry in the list, + * referenced from the hash. + */ + u8 is_first_entry; /* * Action of this applied ACE */ -- cgit 1.2.3-korg From faee17e8b866fe16ce706af31d1e9cbc6d06b961 Mon Sep 17 00:00:00 2001 From: Andrew Yourtchenko Date: Wed, 19 Jul 2017 13:23:59 -0400 Subject: acl-plugin: assertion failed at hash_lookup.c:226 when modifying ACLs applied as part of many (VPP-910) change 7385 has added the code which has the first ACE's "prev" entry within the linked list of shadowed ACEs pointing to the last ACE, in order to avoid the frequent linear list traversal. That change was not complete and did not update this "prev" entry whenever the last ACE was deleted. As a result the changes within the applied ACLs which caused the calls to hash_acl_unapply/hash_acl_apply may result in hitting assert which does the sanity check. The solution is to add the missing update logic. 
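A minimal sketch of the missing update (simplified from the diff below;
"pae" is the entry being deleted and "head_pae" is the chain head, found
by walking prev_applied_entry_index until is_first_entry is set):

    if (pae->next_applied_entry_index == ~0) {
      /* deleting the tail: the head caches the tail in its "prev" index,
         so it must be re-pointed at the new tail */
      head_pae->prev_applied_entry_index = pae->prev_applied_entry_index;
    }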
Change-Id: I9cbe9a7c68b92fa3a22a8efd11b679667d38f186
Signed-off-by: Andrew Yourtchenko
(cherry picked from commit 45fe7399152f5ca511ba0b03fee3d5a3dffd1897)
---
 src/plugins/acl/hash_lookup.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

(limited to 'src/plugins/acl/hash_lookup.c')

diff --git a/src/plugins/acl/hash_lookup.c b/src/plugins/acl/hash_lookup.c
index 3600faf1..027dabd0 100644
--- a/src/plugins/acl/hash_lookup.c
+++ b/src/plugins/acl/hash_lookup.c
@@ -382,6 +382,17 @@ deactivate_applied_ace_hash_entry(acl_main_t *am,
     applied_hash_ace_entry_t *prev_pae = &((*applied_hash_aces)[pae->prev_applied_entry_index]);
     ASSERT(prev_pae->next_applied_entry_index == old_index);
     prev_pae->next_applied_entry_index = pae->next_applied_entry_index;
+    if (pae->next_applied_entry_index == ~0) {
+      /* it was the last entry we removed, update the pointer on the first one */
+      u32 an_index = pae->prev_applied_entry_index;
+      applied_hash_ace_entry_t *head_pae = &((*applied_hash_aces)[pae->prev_applied_entry_index]);
+      while(!head_pae->is_first_entry) {
+        an_index = head_pae->prev_applied_entry_index;
+        head_pae = &((*applied_hash_aces)[an_index]);
+      }
+      ASSERT(head_pae->prev_applied_entry_index == old_index);
+      head_pae->prev_applied_entry_index = pae->prev_applied_entry_index;
+    }
   } else {
     /* It was the first entry. We need either to reset the hash entry or delete it */
     pae->is_first_entry = 0;
--
cgit 1.2.3-korg

From a5e614f76fda9efb7bd1577ec18f9c0407d95d03 Mon Sep 17 00:00:00 2001
From: Andrew Yourtchenko
Date: Thu, 27 Jul 2017 15:39:50 +0200
Subject: acl-plugin: rework the optimization 7383, fortify acl-plugin memory behavior (VPP-910)

Further prolonged testing on the testbed that reported VPP-910
uncovered a couple of deeper issues with the optimization from 7384,
and the usage of subscripts rather than vec_elt_at_index() allowed a
couple of further errors in the code to go unnoticed.

Also, the current acl-plugin behavior of using the global heap for its
dynamic data is problematic - it makes troubleshooting much harder by
potentially spreading the problem around.

Based on this experience, this commit makes a few changes to fix the
issues seen, also improving the serviceability of the acl-plugin code
for the future:

- Use separate mheaps for the ACL-related control plane operations and
  for the hash lookup data structures, to compartmentalize any
  memory-related issues for the ACL plugin.
- Ensure vec_elt_at_index() usage throughout the hash_lookup.c file.
- Use vectors rather than raw memory for storing the "ordinary" ACL
  rules.
- Rework the optimization from 7384 to use a separate tail pointer
  rather than overloading the "prev" field.
- Make get_session_ptr() more conservative and adjust is_valid_session_ptr accordingly Change-Id: Ifda85193f361de5ed3782a4acd39622bd33c5830 Signed-off-by: Andrew Yourtchenko (cherry picked from commit bd9c5ffe39e9ce61db95d74d150e07d738f24da1) --- src/plugins/acl/acl.c | 156 ++++++++++++++++------ src/plugins/acl/acl.h | 6 + src/plugins/acl/fa_node.c | 16 ++- src/plugins/acl/hash_lookup.c | 253 +++++++++++++++++++++--------------- src/plugins/acl/hash_lookup_types.h | 8 +- 5 files changed, 289 insertions(+), 150 deletions(-) (limited to 'src/plugins/acl/hash_lookup.c') diff --git a/src/plugins/acl/acl.c b/src/plugins/acl/acl.c index 6cd2d0c6..bc38265a 100644 --- a/src/plugins/acl/acl.c +++ b/src/plugins/acl/acl.c @@ -83,6 +83,18 @@ VLIB_PLUGIN_REGISTER () = { }; /* *INDENT-ON* */ + +static void * +acl_set_heap(acl_main_t *am) +{ + if (0 == am->acl_mheap) { + am->acl_mheap = mheap_alloc (0 /* use VM */ , 2 << 29); + } + void *oldheap = clib_mem_set_heap(am->acl_mheap); + return oldheap; +} + + static void vl_api_acl_plugin_get_version_t_handler (vl_api_acl_plugin_get_version_t * mp) { @@ -130,7 +142,7 @@ acl_add_list (u32 count, vl_api_acl_rule_t rules[], acl_main_t *am = &acl_main; acl_list_t *a; acl_rule_t *r; - acl_rule_t *acl_new_rules; + acl_rule_t *acl_new_rules = 0; int i; if (*acl_list_index != ~0) @@ -139,22 +151,23 @@ acl_add_list (u32 count, vl_api_acl_rule_t rules[], if (pool_is_free_index (am->acls, *acl_list_index)) { /* tried to replace a non-existent ACL, no point doing anything */ + clib_warning("acl-plugin-error: Trying to replace nonexistent ACL %d (tag %s)", *acl_list_index, tag); return -1; } } + if (0 == count) { + clib_warning("acl-plugin-warning: supplied no rules for ACL %d (tag %s)", *acl_list_index, tag); + } + + void *oldheap = acl_set_heap(am); /* Create and populate the rules */ - acl_new_rules = clib_mem_alloc_aligned (sizeof (acl_rule_t) * count, - CLIB_CACHE_LINE_BYTES); - if (!acl_new_rules) - { - /* Could not allocate rules. 
New or existing ACL - bail out regardless */ - return -1; - } + if (count > 0) + vec_validate(acl_new_rules, count-1); for (i = 0; i < count; i++) { - r = &acl_new_rules[i]; + r = vec_elt_at_index(acl_new_rules, i); memset(r, 0, sizeof(*r)); r->is_permit = rules[i].is_permit; r->is_ipv6 = rules[i].is_ipv6; @@ -192,13 +205,14 @@ acl_add_list (u32 count, vl_api_acl_rule_t rules[], a = am->acls + *acl_list_index; hash_acl_delete(am, *acl_list_index); /* Get rid of the old rules */ - clib_mem_free (a->rules); + if (a->rules) + vec_free (a->rules); } a->rules = acl_new_rules; a->count = count; memcpy (a->tag, tag, sizeof (a->tag)); hash_acl_add(am, *acl_list_index); - + clib_mem_set_heap (oldheap); return 0; } @@ -226,6 +240,7 @@ acl_del_list (u32 acl_list_index) } } + void *oldheap = acl_set_heap(am); /* delete any references to the ACL */ for (i = 0; i < vec_len (am->output_acl_vec_by_sw_if_index); i++) { @@ -263,10 +278,10 @@ acl_del_list (u32 acl_list_index) /* now we can delete the ACL itself */ a = &am->acls[acl_list_index]; if (a->rules) - { - clib_mem_free (a->rules); - } + vec_free (a->rules); + pool_put (am->acls, a); + clib_mem_set_heap (oldheap); return 0; } @@ -359,13 +374,15 @@ acl_classify_add_del_table_tiny (vnet_classify_main_t * cm, u8 * mask, if (0 == match) match = 1; - - return vnet_classify_add_del_table (cm, skip_mask_ptr, nbuckets, + void *oldheap = clib_mem_set_heap (cm->vlib_main->heap_base); + int ret = vnet_classify_add_del_table (cm, skip_mask_ptr, nbuckets, memory_size, skip, match, next_table_index, miss_next_index, table_index, current_data_flag, current_data_offset, is_add, 1 /* delete_chain */); + clib_mem_set_heap (oldheap); + return ret; } static int @@ -385,12 +402,15 @@ acl_classify_add_del_table_small (vnet_classify_main_t * cm, u8 * mask, if (0 == match) match = 1; - return vnet_classify_add_del_table (cm, skip_mask_ptr, nbuckets, + void *oldheap = clib_mem_set_heap (cm->vlib_main->heap_base); + int ret = vnet_classify_add_del_table (cm, skip_mask_ptr, nbuckets, memory_size, skip, match, next_table_index, miss_next_index, table_index, current_data_flag, current_data_offset, is_add, 1 /* delete_chain */); + return ret; + clib_mem_set_heap (oldheap); } @@ -400,12 +420,15 @@ acl_unhook_l2_input_classify (acl_main_t * am, u32 sw_if_index) vnet_classify_main_t *cm = &vnet_classify_main; u32 ip4_table_index = ~0; u32 ip6_table_index = ~0; + void *oldheap = acl_set_heap(am); vec_validate_init_empty (am->acl_ip4_input_classify_table_by_sw_if_index, sw_if_index, ~0); vec_validate_init_empty (am->acl_ip6_input_classify_table_by_sw_if_index, sw_if_index, ~0); + /* switch to global heap while calling vnet_* functions */ + clib_mem_set_heap (cm->vlib_main->heap_base); vnet_l2_input_classify_enable_disable (sw_if_index, 0); if (am->acl_ip4_input_classify_table_by_sw_if_index[sw_if_index] != ~0) @@ -428,7 +451,7 @@ acl_unhook_l2_input_classify (acl_main_t * am, u32 sw_if_index) am->l2_input_classify_next_acl_ip6, &ip6_table_index, 0); } - + clib_mem_set_heap (oldheap); return 0; } @@ -438,12 +461,16 @@ acl_unhook_l2_output_classify (acl_main_t * am, u32 sw_if_index) vnet_classify_main_t *cm = &vnet_classify_main; u32 ip4_table_index = ~0; u32 ip6_table_index = ~0; + void *oldheap = acl_set_heap(am); vec_validate_init_empty (am->acl_ip4_output_classify_table_by_sw_if_index, sw_if_index, ~0); vec_validate_init_empty (am->acl_ip6_output_classify_table_by_sw_if_index, sw_if_index, ~0); + /* switch to global heap while calling vnet_* functions */ + clib_mem_set_heap 
(cm->vlib_main->heap_base); + vnet_l2_output_classify_enable_disable (sw_if_index, 0); if (am->acl_ip4_output_classify_table_by_sw_if_index[sw_if_index] != ~0) @@ -466,7 +493,7 @@ acl_unhook_l2_output_classify (acl_main_t * am, u32 sw_if_index) am->l2_output_classify_next_acl_ip6, &ip6_table_index, 0); } - + clib_mem_set_heap (oldheap); return 0; } @@ -478,6 +505,8 @@ acl_hook_l2_input_classify (acl_main_t * am, u32 sw_if_index) u32 ip6_table_index = ~0; int rv; + void *prevheap = clib_mem_set_heap (cm->vlib_main->heap_base); + /* in case there were previous tables attached */ acl_unhook_l2_input_classify (am, sw_if_index); rv = @@ -486,7 +515,7 @@ acl_hook_l2_input_classify (acl_main_t * am, u32 sw_if_index) am->l2_input_classify_next_acl_ip4, &ip4_table_index, 1); if (rv) - return rv; + goto done; rv = acl_classify_add_del_table_tiny (cm, ip6_5tuple_mask, sizeof (ip6_5tuple_mask) - 1, ~0, @@ -498,7 +527,7 @@ acl_hook_l2_input_classify (acl_main_t * am, u32 sw_if_index) sizeof (ip4_5tuple_mask) - 1, ~0, am->l2_input_classify_next_acl_ip4, &ip4_table_index, 0); - return rv; + goto done; } rv = vnet_l2_input_classify_set_tables (sw_if_index, ip4_table_index, @@ -516,7 +545,7 @@ acl_hook_l2_input_classify (acl_main_t * am, u32 sw_if_index) sizeof (ip4_5tuple_mask) - 1, ~0, am->l2_input_classify_next_acl_ip4, &ip4_table_index, 0); - return rv; + goto done; } am->acl_ip4_input_classify_table_by_sw_if_index[sw_if_index] = @@ -525,6 +554,8 @@ acl_hook_l2_input_classify (acl_main_t * am, u32 sw_if_index) ip6_table_index; vnet_l2_input_classify_enable_disable (sw_if_index, 1); +done: + clib_mem_set_heap (prevheap); return rv; } @@ -536,6 +567,8 @@ acl_hook_l2_output_classify (acl_main_t * am, u32 sw_if_index) u32 ip6_table_index = ~0; int rv; + void *prevheap = clib_mem_set_heap (cm->vlib_main->heap_base); + /* in case there were previous tables attached */ acl_unhook_l2_output_classify (am, sw_if_index); rv = @@ -544,7 +577,7 @@ acl_hook_l2_output_classify (acl_main_t * am, u32 sw_if_index) am->l2_output_classify_next_acl_ip4, &ip4_table_index, 1); if (rv) - return rv; + goto done; rv = acl_classify_add_del_table_tiny (cm, ip6_5tuple_mask, sizeof (ip6_5tuple_mask) - 1, ~0, @@ -556,7 +589,7 @@ acl_hook_l2_output_classify (acl_main_t * am, u32 sw_if_index) sizeof (ip4_5tuple_mask) - 1, ~0, am->l2_output_classify_next_acl_ip4, &ip4_table_index, 0); - return rv; + goto done; } rv = vnet_l2_output_classify_set_tables (sw_if_index, ip4_table_index, @@ -574,7 +607,7 @@ acl_hook_l2_output_classify (acl_main_t * am, u32 sw_if_index) sizeof (ip4_5tuple_mask) - 1, ~0, am->l2_output_classify_next_acl_ip4, &ip4_table_index, 0); - return rv; + goto done; } am->acl_ip4_output_classify_table_by_sw_if_index[sw_if_index] = @@ -583,6 +616,8 @@ acl_hook_l2_output_classify (acl_main_t * am, u32 sw_if_index) ip6_table_index; vnet_l2_output_classify_enable_disable (sw_if_index, 1); +done: + clib_mem_set_heap (prevheap); return rv; } @@ -653,6 +688,7 @@ acl_interface_add_inout_acl (u32 sw_if_index, u8 is_input, u32 acl_list_index) /* ACL is not defined. 
Can not apply */ return -1; } + void *oldheap = acl_set_heap(am); if (is_input) { @@ -663,6 +699,7 @@ acl_interface_add_inout_acl (u32 sw_if_index, u8 is_input, u32 acl_list_index) clib_warning("ACL %d is already applied inbound on sw_if_index %d (index %d)", acl_list_index, sw_if_index, index); /* the entry is already there */ + clib_mem_set_heap (oldheap); return -1; } /* if there was no ACL applied before, enable the ACL processing */ @@ -684,6 +721,7 @@ acl_interface_add_inout_acl (u32 sw_if_index, u8 is_input, u32 acl_list_index) clib_warning("ACL %d is already applied outbound on sw_if_index %d (index %d)", acl_list_index, sw_if_index, index); /* the entry is already there */ + clib_mem_set_heap (oldheap); return -1; } /* if there was no ACL applied before, enable the ACL processing */ @@ -696,6 +734,7 @@ acl_interface_add_inout_acl (u32 sw_if_index, u8 is_input, u32 acl_list_index) vec_add (am->output_sw_if_index_vec_by_acl[acl_list_index], &sw_if_index, 1); } + clib_mem_set_heap (oldheap); return 0; } @@ -706,6 +745,7 @@ acl_interface_del_inout_acl (u32 sw_if_index, u8 is_input, u32 acl_list_index) acl_main_t *am = &acl_main; int i; int rv = -1; + void *oldheap = acl_set_heap(am); if (is_input) { vec_validate (am->input_acl_vec_by_sw_if_index, sw_if_index); @@ -764,6 +804,7 @@ acl_interface_del_inout_acl (u32 sw_if_index, u8 is_input, u32 acl_list_index) acl_interface_out_enable_disable (am, sw_if_index, 0); } } + clib_mem_set_heap (oldheap); return rv; } @@ -772,6 +813,7 @@ acl_interface_reset_inout_acls (u32 sw_if_index, u8 is_input) { acl_main_t *am = &acl_main; int i; + void *oldheap = acl_set_heap(am); if (is_input) { vec_validate (am->input_acl_vec_by_sw_if_index, sw_if_index); @@ -812,6 +854,7 @@ acl_interface_reset_inout_acls (u32 sw_if_index, u8 is_input) vec_reset_length (am->output_acl_vec_by_sw_if_index[sw_if_index]); } + clib_mem_set_heap (oldheap); } static int @@ -820,6 +863,7 @@ acl_interface_add_del_inout_acl (u32 sw_if_index, u8 is_add, u8 is_input, { int rv = -1; acl_main_t *am = &acl_main; + void *oldheap = acl_set_heap(am); if (is_add) { rv = @@ -835,6 +879,7 @@ acl_interface_add_del_inout_acl (u32 sw_if_index, u8 is_add, u8 is_input, rv = acl_interface_del_inout_acl (sw_if_index, is_input, acl_list_index); } + clib_mem_set_heap (oldheap); return rv; } @@ -1015,6 +1060,7 @@ macip_create_classify_tables (acl_main_t * am, u32 macip_acl_index) macip_find_match_type (mvec, a->rules[i].src_mac_mask, a->rules[i].src_prefixlen, a->rules[i].is_ipv6); + ASSERT(match_type_index != ~0); /* add session to table mvec[match_type_index].table_index; */ vnet_classify_add_del_session (cm, mvec[match_type_index].table_index, mask, a->rules[i].is_permit ? ~0 : 0, i, @@ -1066,17 +1112,15 @@ macip_acl_add_list (u32 count, vl_api_macip_acl_rule_t rules[], acl_main_t *am = &acl_main; macip_acl_list_t *a; macip_acl_rule_t *r; - macip_acl_rule_t *acl_new_rules; + macip_acl_rule_t *acl_new_rules = 0; int i; - + if (0 == count) { + clib_warning("acl-plugin-warning: Trying to create empty MACIP ACL (tag %s)", tag); + } + void *oldheap = acl_set_heap(am); /* Create and populate the rules */ - acl_new_rules = clib_mem_alloc_aligned (sizeof (macip_acl_rule_t) * count, - CLIB_CACHE_LINE_BYTES); - if (!acl_new_rules) - { - /* Could not allocate rules. 
New or existing ACL - bail out regardless */ - return -1; - } + if (count > 0) + vec_validate(acl_new_rules, count-1); for (i = 0; i < count; i++) { @@ -1104,7 +1148,7 @@ macip_acl_add_list (u32 count, vl_api_macip_acl_rule_t rules[], /* Create and populate the classifer tables */ macip_create_classify_tables (am, *acl_list_index); - + clib_mem_set_heap (oldheap); return 0; } @@ -1117,7 +1161,9 @@ macip_acl_interface_del_acl (acl_main_t * am, u32 sw_if_index) int rv; u32 macip_acl_index; macip_acl_list_t *a; + void *oldheap = acl_set_heap(am); vec_validate_init_empty (am->macip_acl_by_sw_if_index, sw_if_index, ~0); + clib_mem_set_heap (oldheap); macip_acl_index = am->macip_acl_by_sw_if_index[sw_if_index]; /* No point in deleting MACIP ACL which is not applied */ if (~0 == macip_acl_index) @@ -1144,12 +1190,15 @@ macip_acl_interface_add_acl (acl_main_t * am, u32 sw_if_index, { return -1; } + void *oldheap = acl_set_heap(am); a = &am->macip_acls[macip_acl_index]; vec_validate_init_empty (am->macip_acl_by_sw_if_index, sw_if_index, ~0); /* If there already a MACIP ACL applied, unapply it */ if (~0 != am->macip_acl_by_sw_if_index[sw_if_index]) macip_acl_interface_del_acl(am, sw_if_index); am->macip_acl_by_sw_if_index[sw_if_index] = macip_acl_index; + clib_mem_set_heap (oldheap); + /* Apply the classifier tables for L2 ACLs */ rv = vnet_set_input_acl_intfc (am->vlib_main, sw_if_index, a->ip4_table_index, @@ -1161,6 +1210,7 @@ static int macip_acl_del_list (u32 acl_list_index) { acl_main_t *am = &acl_main; + void *oldheap = acl_set_heap(am); macip_acl_list_t *a; int i; if (pool_is_free_index (am->macip_acls, acl_list_index)) @@ -1184,9 +1234,10 @@ macip_acl_del_list (u32 acl_list_index) a = &am->macip_acls[acl_list_index]; if (a->rules) { - clib_mem_free (a->rules); + vec_free (a->rules); } pool_put (am->macip_acls, a); + clib_mem_set_heap (oldheap); return 0; } @@ -1196,6 +1247,7 @@ macip_acl_interface_add_del_acl (u32 sw_if_index, u8 is_add, u32 acl_list_index) { acl_main_t *am = &acl_main; + void *oldheap = acl_set_heap(am); int rv = -1; if (is_add) { @@ -1205,6 +1257,7 @@ macip_acl_interface_add_del_acl (u32 sw_if_index, u8 is_add, { rv = macip_acl_interface_del_acl (am, sw_if_index); } + clib_mem_set_heap (oldheap); return rv; } @@ -1363,6 +1416,7 @@ send_acl_details (acl_main_t * am, unix_shared_memory_queue_t * q, vl_api_acl_rule_t *rules; int i; int msg_size = sizeof (*mp) + sizeof (mp->r[0]) * acl->count; + void *oldheap = acl_set_heap(am); mp = vl_msg_api_alloc (msg_size); memset (mp, 0, msg_size); @@ -1381,6 +1435,7 @@ send_acl_details (acl_main_t * am, unix_shared_memory_queue_t * q, } clib_warning("Sending acl details for ACL index %d", ntohl(mp->acl_index)); + clib_mem_set_heap (oldheap); vl_msg_api_send_shmem (q, (u8 *) & mp); } @@ -1439,6 +1494,7 @@ send_acl_interface_list_details (acl_main_t * am, int n_output; int count; int i = 0; + void *oldheap = acl_set_heap(am); vec_validate (am->input_acl_vec_by_sw_if_index, sw_if_index); vec_validate (am->output_acl_vec_by_sw_if_index, sw_if_index); @@ -1469,7 +1525,7 @@ send_acl_interface_list_details (acl_main_t * am, mp->acls[n_input + i] = htonl (am->output_acl_vec_by_sw_if_index[sw_if_index][i]); } - + clib_mem_set_heap (oldheap); vl_msg_api_send_shmem (q, (u8 *) & mp); } @@ -1784,6 +1840,7 @@ static int acl_set_skip_ipv6_eh(u32 eh, u32 value) { acl_main_t *am = &acl_main; + if ((eh < 256) && (value < 2)) { am->fa_ipv6_known_eh_bitmap = clib_bitmap_set(am->fa_ipv6_known_eh_bitmap, eh, value); @@ -2104,6 +2161,21 @@ 
acl_show_aclplugin_fn (vlib_main_t * vm, vlib_cli_output(vm, "\n%s\n", out0); vec_free(out0); } + else if (unformat (input, "memory")) + { + vlib_cli_output (vm, "ACL plugin main heap statistics:\n"); + if (am->acl_mheap) { + vlib_cli_output (vm, " %U\n", format_mheap, am->acl_mheap, 1); + } else { + vlib_cli_output (vm, " Not initialized\n"); + } + vlib_cli_output (vm, "ACL hash lookup support heap statistics:\n"); + if (am->hash_lookup_mheap) { + vlib_cli_output (vm, " %U\n", format_mheap, am->hash_lookup_mheap, 1); + } else { + vlib_cli_output (vm, " Not initialized\n"); + } + } else if (unformat (input, "tables")) { ace_mask_type_entry_t *mte; @@ -2198,9 +2270,9 @@ acl_show_aclplugin_fn (vlib_main_t * vm, out0 = format(out0, " input lookup applied entries:\n"); for(j=0; jinput_hash_entry_vec_by_sw_if_index[swi]); j++) { applied_hash_ace_entry_t *pae = &am->input_hash_entry_vec_by_sw_if_index[swi][j]; - out0 = format(out0, " %4d: acl %d rule %d action %d bitmask-ready rule %d next %d prev %d\n", + out0 = format(out0, " %4d: acl %d rule %d action %d bitmask-ready rule %d next %d prev %d tail %d\n", j, pae->acl_index, pae->ace_index, pae->action, pae->hash_ace_info_index, - pae->next_applied_entry_index, pae->prev_applied_entry_index); + pae->next_applied_entry_index, pae->prev_applied_entry_index, pae->tail_applied_entry_index); } } @@ -2212,9 +2284,9 @@ acl_show_aclplugin_fn (vlib_main_t * vm, out0 = format(out0, " output lookup applied entries:\n"); for(j=0; joutput_hash_entry_vec_by_sw_if_index[swi]); j++) { applied_hash_ace_entry_t *pae = &am->output_hash_entry_vec_by_sw_if_index[swi][j]; - out0 = format(out0, " %4d: acl %d rule %d action %d bitmask-ready rule %d next %d prev %d\n", + out0 = format(out0, " %4d: acl %d rule %d action %d bitmask-ready rule %d next %d prev %d tail %d\n", j, pae->acl_index, pae->ace_index, pae->action, pae->hash_ace_info_index, - pae->next_applied_entry_index, pae->prev_applied_entry_index); + pae->next_applied_entry_index, pae->prev_applied_entry_index, pae->tail_applied_entry_index); } } @@ -2288,6 +2360,7 @@ acl_init (vlib_main_t * vm) vec_free (name); acl_setup_fa_nodes(); + am->session_timeout_sec[ACL_TIMEOUT_TCP_TRANSIENT] = TCP_SESSION_TRANSIENT_TIMEOUT_SEC; am->session_timeout_sec[ACL_TIMEOUT_TCP_IDLE] = TCP_SESSION_IDLE_TIMEOUT_SEC; am->session_timeout_sec[ACL_TIMEOUT_UDP_IDLE] = UDP_SESSION_IDLE_TIMEOUT_SEC; @@ -2329,7 +2402,6 @@ acl_init (vlib_main_t * vm) am->l4_match_nonfirst_fragment = 1; /* use the new fancy hash-based matching */ - // NOT IMMEDIATELY am->use_hash_acl_matching = 1; return error; diff --git a/src/plugins/acl/acl.h b/src/plugins/acl/acl.h index b84ed7a4..14c6129c 100644 --- a/src/plugins/acl/acl.h +++ b/src/plugins/acl/acl.h @@ -122,12 +122,18 @@ typedef struct } ace_mask_type_entry_t; typedef struct { + /* mheap to hold all the ACL module related allocations, other than hash */ + void *acl_mheap; + /* API message ID base */ u16 msg_id_base; acl_list_t *acls; /* Pool of ACLs */ hash_acl_info_t *hash_acl_infos; /* corresponding hash matching housekeeping info */ clib_bihash_48_8_t acl_lookup_hash; /* ACL lookup hash table. 
*/ + + /* mheap to hold all the miscellaneous allocations related to hash-based lookups */ + void *hash_lookup_mheap; int acl_lookup_hash_initialized; applied_hash_ace_entry_t **input_hash_entry_vec_by_sw_if_index; applied_hash_ace_entry_t **output_hash_entry_vec_by_sw_if_index; diff --git a/src/plugins/acl/fa_node.c b/src/plugins/acl/fa_node.c index 3181a22c..74079a2d 100644 --- a/src/plugins/acl/fa_node.c +++ b/src/plugins/acl/fa_node.c @@ -621,14 +621,14 @@ acl_fa_verify_init_sessions (acl_main_t * am) static inline fa_session_t *get_session_ptr(acl_main_t *am, u16 thread_index, u32 session_index) { acl_fa_per_worker_data_t *pw = &am->per_worker_data[thread_index]; - fa_session_t *sess = pw->fa_sessions_pool + session_index; + fa_session_t *sess = pool_is_free_index (pw->fa_sessions_pool, session_index) ? 0 : pool_elt_at_index(pw->fa_sessions_pool, session_index); return sess; } static inline int is_valid_session_ptr(acl_main_t *am, u16 thread_index, fa_session_t *sess) { acl_fa_per_worker_data_t *pw = &am->per_worker_data[thread_index]; - return ((sess - pw->fa_sessions_pool) < pool_len(pw->fa_sessions_pool)); + return ((sess != 0) && ((sess - pw->fa_sessions_pool) < pool_len(pw->fa_sessions_pool))); } static void @@ -731,6 +731,7 @@ acl_fa_track_session (acl_main_t * am, int is_input, u32 sw_if_index, u64 now, static void acl_fa_delete_session (acl_main_t * am, u32 sw_if_index, fa_full_session_id_t sess_id) { + void *oldheap = clib_mem_set_heap(am->acl_mheap); fa_session_t *sess = get_session_ptr(am, sess_id.thread_index, sess_id.session_index); ASSERT(sess->thread_index == os_get_thread_index ()); BV (clib_bihash_add_del) (&am->fa_sessions_hash, @@ -740,6 +741,7 @@ acl_fa_delete_session (acl_main_t * am, u32 sw_if_index, fa_full_session_id_t se /* Deleting from timer structures not needed, as the caller must have dealt with the timers. 
*/ vec_validate (pw->fa_session_dels_by_sw_if_index, sw_if_index); + clib_mem_set_heap (oldheap); pw->fa_session_dels_by_sw_if_index[sw_if_index]++; clib_smp_atomic_add(&am->fa_session_total_dels, 1); } @@ -877,6 +879,7 @@ acl_fa_add_session (acl_main_t * am, int is_input, u32 sw_if_index, u64 now, clib_bihash_kv_40_8_t kv; fa_full_session_id_t f_sess_id; uword thread_index = os_get_thread_index(); + void *oldheap = clib_mem_set_heap(am->acl_mheap); acl_fa_per_worker_data_t *pw = &am->per_worker_data[thread_index]; f_sess_id.thread_index = thread_index; @@ -909,6 +912,7 @@ acl_fa_add_session (acl_main_t * am, int is_input, u32 sw_if_index, u64 now, acl_fa_conn_list_add_session(am, f_sess_id, now); vec_validate (pw->fa_session_adds_by_sw_if_index, sw_if_index); + clib_mem_set_heap (oldheap); pw->fa_session_adds_by_sw_if_index[sw_if_index]++; clib_smp_atomic_add(&am->fa_session_total_adds, 1); } @@ -1616,8 +1620,10 @@ acl_fa_enable_disable (u32 sw_if_index, int is_input, int enable_disable) if (enable_disable) { acl_fa_verify_init_sessions(am); am->fa_total_enabled_count++; + void *oldheap = clib_mem_set_heap (am->vlib_main->heap_base); vlib_process_signal_event (am->vlib_main, am->fa_cleaner_node_index, ACL_FA_CLEANER_RESCHEDULE, 0); + clib_mem_set_heap (oldheap); } else { am->fa_total_enabled_count--; } @@ -1625,10 +1631,12 @@ acl_fa_enable_disable (u32 sw_if_index, int is_input, int enable_disable) if (is_input) { ASSERT(clib_bitmap_get(am->fa_in_acl_on_sw_if_index, sw_if_index) != enable_disable); + void *oldheap = clib_mem_set_heap (am->vlib_main->heap_base); vnet_feature_enable_disable ("ip4-unicast", "acl-plugin-in-ip4-fa", sw_if_index, enable_disable, 0, 0); vnet_feature_enable_disable ("ip6-unicast", "acl-plugin-in-ip6-fa", sw_if_index, enable_disable, 0, 0); + clib_mem_set_heap (oldheap); am->fa_in_acl_on_sw_if_index = clib_bitmap_set (am->fa_in_acl_on_sw_if_index, sw_if_index, enable_disable); @@ -1636,10 +1644,12 @@ acl_fa_enable_disable (u32 sw_if_index, int is_input, int enable_disable) else { ASSERT(clib_bitmap_get(am->fa_out_acl_on_sw_if_index, sw_if_index) != enable_disable); + void *oldheap = clib_mem_set_heap (am->vlib_main->heap_base); vnet_feature_enable_disable ("ip4-output", "acl-plugin-out-ip4-fa", sw_if_index, enable_disable, 0, 0); vnet_feature_enable_disable ("ip6-output", "acl-plugin-out-ip6-fa", sw_if_index, enable_disable, 0, 0); + clib_mem_set_heap (oldheap); am->fa_out_acl_on_sw_if_index = clib_bitmap_set (am->fa_out_acl_on_sw_if_index, sw_if_index, enable_disable); @@ -1650,9 +1660,11 @@ acl_fa_enable_disable (u32 sw_if_index, int is_input, int enable_disable) #ifdef FA_NODE_VERBOSE_DEBUG clib_warning("ENABLE-DISABLE: clean the connections on interface %d", sw_if_index); #endif + void *oldheap = clib_mem_set_heap (am->vlib_main->heap_base); vlib_process_signal_event (am->vlib_main, am->fa_cleaner_node_index, ACL_FA_CLEANER_DELETE_BY_SW_IF_INDEX, sw_if_index); + clib_mem_set_heap (oldheap); } } diff --git a/src/plugins/acl/hash_lookup.c b/src/plugins/acl/hash_lookup.c index 027dabd0..a337eb84 100644 --- a/src/plugins/acl/hash_lookup.c +++ b/src/plugins/acl/hash_lookup.c @@ -33,6 +33,16 @@ #include "hash_lookup.h" #include "hash_lookup_private.h" + +static inline applied_hash_ace_entry_t **get_applied_hash_aces(acl_main_t *am, int is_input, u32 sw_if_index) +{ + applied_hash_ace_entry_t **applied_hash_aces = is_input ? 
vec_elt_at_index(am->input_hash_entry_vec_by_sw_if_index, sw_if_index) + : vec_elt_at_index(am->output_hash_entry_vec_by_sw_if_index, sw_if_index); + return applied_hash_aces; +} + + + /* * This returns true if there is indeed a match on the portranges. * With all these levels of indirections, this is not going to be very fast, @@ -41,11 +51,10 @@ static int match_portranges(acl_main_t *am, fa_5tuple_t *match, u32 index) { - applied_hash_ace_entry_t **applied_hash_aces = match->pkt.is_input ? &am->input_hash_entry_vec_by_sw_if_index[match->pkt.sw_if_index] : - &am->output_hash_entry_vec_by_sw_if_index[match->pkt.sw_if_index]; - applied_hash_ace_entry_t *pae = &((*applied_hash_aces)[index]); - // hash_acl_info_t *ha = &am->hash_acl_infos[pae->acl_index]; + applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, match->pkt.is_input, match->pkt.sw_if_index); + applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), index); + acl_rule_t *r = &(am->acls[pae->acl_index].rules[pae->ace_index]); DBG("PORTMATCH: %d <= %d <= %d && %d <= %d <= %d ?", r->src_port_or_type_first, match->l4.port[0], r->src_port_or_type_last, @@ -70,8 +79,7 @@ multi_acl_match_get_applied_ace_index(acl_main_t *am, fa_5tuple_t *match) u32 sw_if_index = match->pkt.sw_if_index; u8 is_input = match->pkt.is_input; - applied_hash_ace_entry_t **applied_hash_aces = is_input ? &am->input_hash_entry_vec_by_sw_if_index[sw_if_index] : - &am->output_hash_entry_vec_by_sw_if_index[sw_if_index]; + applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, is_input, sw_if_index); applied_hash_acl_info_t **applied_hash_acls = is_input ? &am->input_applied_hash_acl_info_by_sw_if_index : &am->output_applied_hash_acl_info_by_sw_if_index; @@ -79,11 +87,11 @@ multi_acl_match_get_applied_ace_index(acl_main_t *am, fa_5tuple_t *match) pmatch[0], pmatch[1], pmatch[2], pmatch[3], pmatch[4], pmatch[5]); for(mask_type_index=0; mask_type_index < pool_len(am->ace_mask_type_pool); mask_type_index++) { - if (!clib_bitmap_get((*applied_hash_acls)[sw_if_index].mask_type_index_bitmap, mask_type_index)) { + if (!clib_bitmap_get(vec_elt_at_index((*applied_hash_acls), sw_if_index)->mask_type_index_bitmap, mask_type_index)) { /* This bit is not set. Avoid trying to match */ continue; } - ace_mask_type_entry_t *mte = &am->ace_mask_type_pool[mask_type_index]; + ace_mask_type_entry_t *mte = vec_elt_at_index(am->ace_mask_type_pool, mask_type_index); pmatch = (u64 *)match; pmask = (u64 *)&mte->mask; pkey = (u64 *)kv.key; @@ -120,7 +128,7 @@ multi_acl_match_get_applied_ace_index(acl_main_t *am, fa_5tuple_t *match) u32 curr_index = result_val->applied_entry_index; while ((curr_index != ~0) && !match_portranges(am, match, curr_index)) { /* while no match and there are more entries, walk... 
*/ - applied_hash_ace_entry_t *pae = &((*applied_hash_aces)[curr_index]); + applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces),curr_index); DBG("entry %d did not portmatch, advancing to %d", curr_index, pae->next_applied_entry_index); curr_index = pae->next_applied_entry_index; } @@ -153,9 +161,9 @@ multi_acl_match_get_applied_ace_index(acl_main_t *am, fa_5tuple_t *match) static void hashtable_add_del(acl_main_t *am, clib_bihash_kv_48_8_t *kv, int is_add) { - DBG("HASH ADD/DEL: %016llx %016llx %016llx %016llx %016llx %016llx add %d", + DBG("HASH ADD/DEL: %016llx %016llx %016llx %016llx %016llx %016llx %016llx add %d", kv->key[0], kv->key[1], kv->key[2], - kv->key[3], kv->key[4], kv->key[5], is_add); + kv->key[3], kv->key[4], kv->key[5], kv->value, is_add); BV (clib_bihash_add_del) (&am->acl_lookup_hash, kv, is_add); } @@ -167,17 +175,17 @@ fill_applied_hash_ace_kv(acl_main_t *am, { fa_5tuple_t *kv_key = (fa_5tuple_t *)kv->key; hash_acl_lookup_value_t *kv_val = (hash_acl_lookup_value_t *)&kv->value; - applied_hash_ace_entry_t *pae = &((*applied_hash_aces)[new_index]); - hash_acl_info_t *ha = &am->hash_acl_infos[pae->acl_index]; + applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), new_index); + hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, pae->acl_index); - memcpy(kv_key, &ha->rules[pae->hash_ace_info_index].match, sizeof(*kv_key)); + memcpy(kv_key, &(vec_elt_at_index(ha->rules, pae->hash_ace_info_index)->match), sizeof(*kv_key)); /* initialize the sw_if_index and direction */ kv_key->pkt.sw_if_index = sw_if_index; kv_key->pkt.is_input = is_input; kv_val->as_u64 = 0; kv_val->applied_entry_index = new_index; - kv_val->need_portrange_check = ha->rules[pae->hash_ace_info_index].src_portrange_not_powerof2 || - ha->rules[pae->hash_ace_info_index].dst_portrange_not_powerof2; + kv_val->need_portrange_check = vec_elt_at_index(ha->rules, pae->hash_ace_info_index)->src_portrange_not_powerof2 || + vec_elt_at_index(ha->rules, pae->hash_ace_info_index)->dst_portrange_not_powerof2; /* by default assume all values are shadowed -> check all mask types */ kv_val->shadowed = 1; } @@ -203,7 +211,9 @@ activate_applied_ace_hash_entry(acl_main_t *am, u32 new_index) { clib_bihash_kv_48_8_t kv; - applied_hash_ace_entry_t *pae = &((*applied_hash_aces)[new_index]); + ASSERT(new_index != ~0); + applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), new_index); + DBG("activate_applied_ace_hash_entry sw_if_index %d is_input %d new_index %d", sw_if_index, is_input, new_index); fill_applied_hash_ace_kv(am, applied_hash_aces, sw_if_index, is_input, new_index, &kv); @@ -214,28 +224,28 @@ activate_applied_ace_hash_entry(acl_main_t *am, clib_bihash_kv_48_8_t result; hash_acl_lookup_value_t *result_val = (hash_acl_lookup_value_t *)&result.value; int res = BV (clib_bihash_search) (&am->acl_lookup_hash, &kv, &result); + ASSERT(new_index != ~0); + ASSERT(new_index < vec_len((*applied_hash_aces))); if (res == 0) { /* There already exists an entry or more. Append at the end. 
*/ u32 first_index = result_val->applied_entry_index; - DBG("A key already exists, with applied entry index: %d", existing_index); ASSERT(first_index != ~0); - applied_hash_ace_entry_t *first_pae = &((*applied_hash_aces)[first_index]); - /* move to "prev" by one, this should land us in the end of the list */ - u32 last_index = first_pae->prev_applied_entry_index; + DBG("A key already exists, with applied entry index: %d", first_index); + applied_hash_ace_entry_t *first_pae = vec_elt_at_index((*applied_hash_aces), first_index); + u32 last_index = first_pae->tail_applied_entry_index; ASSERT(last_index != ~0); - applied_hash_ace_entry_t *last_pae = &((*applied_hash_aces)[last_index]); - ASSERT(last_pae->next_applied_entry_index == ~0); + applied_hash_ace_entry_t *last_pae = vec_elt_at_index((*applied_hash_aces), last_index); + DBG("...advance to chained entry index: %d", last_index); /* link ourselves in */ last_pae->next_applied_entry_index = new_index; pae->prev_applied_entry_index = last_index; - /* make a new reference from the very first element to new tail */ - first_pae->prev_applied_entry_index = new_index; + /* adjust the pointer to the new tail */ + first_pae->tail_applied_entry_index = new_index; } else { /* It's the very first entry */ hashtable_add_del(am, &kv, 1); - pae->is_first_entry = 1; - /* we are the tail */ - pae->prev_applied_entry_index = new_index; + ASSERT(new_index != ~0); + pae->tail_applied_entry_index = new_index; } } @@ -254,31 +264,44 @@ applied_hash_entries_analyze(acl_main_t *am, applied_hash_ace_entry_t **applied_ */ } +static void * +hash_acl_set_heap(acl_main_t *am) +{ + if (0 == am->hash_lookup_mheap) { + am->hash_lookup_mheap = mheap_alloc (0 /* use VM */ , 2 << 25); + } + void *oldheap = clib_mem_set_heap(am->hash_lookup_mheap); + return oldheap; +} + void hash_acl_apply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index) { int i; DBG("HASH ACL apply: sw_if_index %d is_input %d acl %d", sw_if_index, is_input, acl_index); - u32 *acl_vec = is_input ? am->input_acl_vec_by_sw_if_index[sw_if_index] : - am->output_acl_vec_by_sw_if_index[sw_if_index]; + if (!am->acl_lookup_hash_initialized) { + BV (clib_bihash_init) (&am->acl_lookup_hash, "ACL plugin rule lookup bihash", + 65536, 2 << 25); + am->acl_lookup_hash_initialized = 1; + } + + u32 *acl_vec = is_input ? *vec_elt_at_index(am->input_acl_vec_by_sw_if_index, sw_if_index) + : *vec_elt_at_index(am->output_acl_vec_by_sw_if_index, sw_if_index); + + void *oldheap = hash_acl_set_heap(am); if (is_input) { vec_validate(am->input_hash_entry_vec_by_sw_if_index, sw_if_index); } else { vec_validate(am->output_hash_entry_vec_by_sw_if_index, sw_if_index); } vec_validate(am->hash_acl_infos, acl_index); - applied_hash_ace_entry_t **applied_hash_aces = is_input ?
&am->input_hash_entry_vec_by_sw_if_index[sw_if_index] : - &am->output_hash_entry_vec_by_sw_if_index[sw_if_index]; + applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, is_input, sw_if_index); + u32 order_index = vec_search(acl_vec, acl_index); - hash_acl_info_t *ha = &am->hash_acl_infos[acl_index]; + hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index); ASSERT(order_index != ~0); - if (!am->acl_lookup_hash_initialized) { - BV (clib_bihash_init) (&am->acl_lookup_hash, "ACL plugin rule lookup bihash", - 65536, 2 << 25); - am->acl_lookup_hash_initialized = 1; - } int base_offset = vec_len(*applied_hash_aces); /* Update the bitmap of the mask types with which the lookup @@ -286,7 +309,7 @@ hash_acl_apply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index) applied_hash_acl_info_t **applied_hash_acls = is_input ? &am->input_applied_hash_acl_info_by_sw_if_index : &am->output_applied_hash_acl_info_by_sw_if_index; vec_validate((*applied_hash_acls), sw_if_index); - applied_hash_acl_info_t *pal = &(*applied_hash_acls)[sw_if_index]; + applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), sw_if_index); pal->mask_type_index_bitmap = clib_bitmap_or(pal->mask_type_index_bitmap, ha->mask_type_index_bitmap); /* @@ -305,8 +328,7 @@ hash_acl_apply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index) /* add the rules from the ACL to the hash table for lookup and append to the vector*/ for(i=0; i < vec_len(ha->rules); i++) { u32 new_index = base_offset + i; - applied_hash_ace_entry_t *pae = &((*applied_hash_aces)[new_index]); - pae->is_first_entry = 0; + applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), new_index); pae->acl_index = acl_index; pae->ace_index = ha->rules[i].ace_index; pae->action = ha->rules[i].action; @@ -314,9 +336,29 @@ hash_acl_apply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index) /* we might link it in later */ pae->next_applied_entry_index = ~0; pae->prev_applied_entry_index = ~0; + pae->tail_applied_entry_index = ~0; activate_applied_ace_hash_entry(am, sw_if_index, is_input, applied_hash_aces, new_index); } applied_hash_entries_analyze(am, applied_hash_aces); + clib_mem_set_heap (oldheap); +} + +static u32 +find_head_applied_ace_index(applied_hash_ace_entry_t **applied_hash_aces, u32 curr_index) +{ + /* + * find back the first entry. Inefficient so might need to be a bit cleverer + * if this proves to be a problem.. 
+ */ + u32 an_index = curr_index; + ASSERT(an_index != ~0); + applied_hash_ace_entry_t *head_pae = vec_elt_at_index((*applied_hash_aces), an_index); + while(head_pae->prev_applied_entry_index != ~0) { + an_index = head_pae->prev_applied_entry_index; + ASSERT(an_index != ~0); + head_pae = vec_elt_at_index((*applied_hash_aces), an_index); + } + return an_index; } static void @@ -325,43 +367,43 @@ move_applied_ace_hash_entry(acl_main_t *am, applied_hash_ace_entry_t **applied_hash_aces, u32 old_index, u32 new_index) { + ASSERT(old_index != ~0); + ASSERT(new_index != ~0); /* move the entry */ - (*applied_hash_aces)[new_index] = (*applied_hash_aces)[old_index]; + *vec_elt_at_index((*applied_hash_aces), new_index) = *vec_elt_at_index((*applied_hash_aces), old_index); /* update the linkage and hash table if necessary */ - applied_hash_ace_entry_t *pae = &((*applied_hash_aces)[old_index]); + applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), old_index); - if (!pae->is_first_entry) { - applied_hash_ace_entry_t *prev_pae = &((*applied_hash_aces)[pae->prev_applied_entry_index]); + if (pae->prev_applied_entry_index != ~0) { + applied_hash_ace_entry_t *prev_pae = vec_elt_at_index((*applied_hash_aces), pae->prev_applied_entry_index); ASSERT(prev_pae->next_applied_entry_index == old_index); prev_pae->next_applied_entry_index = new_index; } else { /* first entry - so the hash points to it, update */ add_del_hashtable_entry(am, sw_if_index, is_input, applied_hash_aces, new_index, 1); + ASSERT(pae->tail_applied_entry_index != ~0); } if (pae->next_applied_entry_index != ~0) { - applied_hash_ace_entry_t *next_pae = &((*applied_hash_aces)[pae->next_applied_entry_index]); + applied_hash_ace_entry_t *next_pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index); ASSERT(next_pae->prev_applied_entry_index == old_index); next_pae->prev_applied_entry_index = new_index; } else { /* * Moving the very last entry, so we need to update the tail pointer in the first one. - * find back the first entry. Inefficient so might need to be a bit cleverer - * if this proves to be a problem.. 
*/ - u32 an_index = pae->prev_applied_entry_index; - applied_hash_ace_entry_t *head_pae = &((*applied_hash_aces)[pae->prev_applied_entry_index]); - while(!head_pae->is_first_entry) { - an_index = head_pae->prev_applied_entry_index; - head_pae = &((*applied_hash_aces)[an_index]); - } - ASSERT(head_pae->prev_applied_entry_index == old_index); - head_pae->prev_applied_entry_index = new_index; + u32 head_index = find_head_applied_ace_index(applied_hash_aces, old_index); + ASSERT(head_index != ~0); + applied_hash_ace_entry_t *head_pae = vec_elt_at_index((*applied_hash_aces), head_index); + + ASSERT(head_pae->tail_applied_entry_index == old_index); + head_pae->tail_applied_entry_index = new_index; } /* invalidate the old entry */ pae->prev_applied_entry_index = ~0; pae->next_applied_entry_index = ~0; + pae->tail_applied_entry_index = ~0; } static void @@ -370,39 +412,37 @@ deactivate_applied_ace_hash_entry(acl_main_t *am, applied_hash_ace_entry_t **applied_hash_aces, u32 old_index) { - applied_hash_ace_entry_t *pae = &((*applied_hash_aces)[old_index]); - - if (pae->next_applied_entry_index != ~0) { - applied_hash_ace_entry_t *next_pae = &((*applied_hash_aces)[pae->next_applied_entry_index]); - ASSERT(next_pae->prev_applied_entry_index == old_index); - next_pae->prev_applied_entry_index = pae->prev_applied_entry_index; - } + applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), old_index); + DBG("UNAPPLY DEACTIVATE: sw_if_index %d is_input %d, applied index %d", sw_if_index, is_input, old_index); - if (!pae->is_first_entry) { - applied_hash_ace_entry_t *prev_pae = &((*applied_hash_aces)[pae->prev_applied_entry_index]); + if (pae->prev_applied_entry_index != ~0) { + DBG("UNAPPLY = index %d has prev_applied_entry_index %d", old_index, pae->prev_applied_entry_index); + applied_hash_ace_entry_t *prev_pae = vec_elt_at_index((*applied_hash_aces), pae->prev_applied_entry_index); ASSERT(prev_pae->next_applied_entry_index == old_index); prev_pae->next_applied_entry_index = pae->next_applied_entry_index; if (pae->next_applied_entry_index == ~0) { /* it was a last entry we removed, update the pointer on the first one */ - u32 an_index = pae->prev_applied_entry_index; - applied_hash_ace_entry_t *head_pae = &((*applied_hash_aces)[pae->prev_applied_entry_index]); - while(!head_pae->is_first_entry) { - an_index = head_pae->prev_applied_entry_index; - head_pae = &((*applied_hash_aces)[an_index]); - } - ASSERT(head_pae->prev_applied_entry_index == old_index); - head_pae->prev_applied_entry_index = pae->prev_applied_entry_index; + u32 head_index = find_head_applied_ace_index(applied_hash_aces, old_index); + DBG("UNAPPLY = index %d head index to update %d", old_index, head_index); + ASSERT(head_index != ~0); + applied_hash_ace_entry_t *head_pae = vec_elt_at_index((*applied_hash_aces), head_index); + + ASSERT(head_pae->tail_applied_entry_index == old_index); + head_pae->tail_applied_entry_index = pae->prev_applied_entry_index; + } else { + applied_hash_ace_entry_t *next_pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index); + next_pae->prev_applied_entry_index = pae->prev_applied_entry_index; } } else { /* It was the first entry. 
We need either to reset the hash entry or delete it */ - pae->is_first_entry = 0; if (pae->next_applied_entry_index != ~0) { - /* a custom case of relinking for the first node, with the tail not forward linked to it */ - applied_hash_ace_entry_t *next_pae = &((*applied_hash_aces)[pae->next_applied_entry_index]); - /* this is the tail the new head should be aware of */ - next_pae->prev_applied_entry_index = pae->prev_applied_entry_index; - next_pae->is_first_entry = 1; - + /* the next element becomes the new first one, so needs the tail pointer to be set */ + applied_hash_ace_entry_t *next_pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index); + ASSERT(pae->tail_applied_entry_index != ~0); + next_pae->tail_applied_entry_index = pae->tail_applied_entry_index; + DBG("Resetting the hash table entry from %d to %d, setting tail index to %d", old_index, pae->next_applied_entry_index, pae->tail_applied_entry_index); + /* unlink from the next element */ + next_pae->prev_applied_entry_index = ~0; add_del_hashtable_entry(am, sw_if_index, is_input, applied_hash_aces, pae->next_applied_entry_index, 1); } else { @@ -411,6 +451,10 @@ deactivate_applied_ace_hash_entry(acl_main_t *am, applied_hash_aces, old_index, 0); } } + /* invalidate the old entry */ + pae->prev_applied_entry_index = ~0; + pae->next_applied_entry_index = ~0; + pae->tail_applied_entry_index = ~0; } @@ -419,14 +463,14 @@ hash_acl_build_applied_lookup_bitmap(acl_main_t *am, u32 sw_if_index, u8 is_inpu { int i; uword *new_lookup_bitmap = 0; - u32 **applied_acls = is_input ? &am->input_acl_vec_by_sw_if_index[sw_if_index] : - &am->output_acl_vec_by_sw_if_index[sw_if_index]; - applied_hash_acl_info_t **applied_hash_acls = is_input ? &am->input_applied_hash_acl_info_by_sw_if_index : - &am->output_applied_hash_acl_info_by_sw_if_index; - applied_hash_acl_info_t *pal = &(*applied_hash_acls)[sw_if_index]; + u32 **applied_acls = is_input ? vec_elt_at_index(am->input_acl_vec_by_sw_if_index, sw_if_index) + : vec_elt_at_index(am->output_acl_vec_by_sw_if_index, sw_if_index); + applied_hash_acl_info_t **applied_hash_acls = is_input ? &am->input_applied_hash_acl_info_by_sw_if_index + : &am->output_applied_hash_acl_info_by_sw_if_index; + applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), sw_if_index); for(i=0; i < vec_len(*applied_acls); i++) { - u32 a_acl_index = (*applied_acls)[i]; - hash_acl_info_t *ha = &am->hash_acl_infos[a_acl_index]; + u32 a_acl_index = *vec_elt_at_index((*applied_acls), i); + hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, a_acl_index); new_lookup_bitmap = clib_bitmap_or(new_lookup_bitmap, ha->mask_type_index_bitmap); } @@ -441,12 +485,12 @@ hash_acl_unapply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index) int i; DBG("HASH ACL unapply: sw_if_index %d is_input %d acl %d", sw_if_index, is_input, acl_index); - hash_acl_info_t *ha = &am->hash_acl_infos[acl_index]; - applied_hash_ace_entry_t **applied_hash_aces = is_input ? 
&am->input_hash_entry_vec_by_sw_if_index[sw_if_index] : - &am->output_hash_entry_vec_by_sw_if_index[sw_if_index]; + + hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index); + applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, is_input, sw_if_index); for(i=0; i < vec_len((*applied_hash_aces)); i++) { - if ((*applied_hash_aces)[i].acl_index == acl_index) { + if (vec_elt_at_index(*applied_hash_aces,i)->acl_index == acl_index) { DBG("Found applied ACL#%d at applied index %d", acl_index, i); break; } @@ -456,13 +500,14 @@ hash_acl_unapply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index) /* we went all the way without finding any entries. Probably a list was empty. */ return; } + + void *oldheap = hash_acl_set_heap(am); int base_offset = i; int tail_offset = base_offset + vec_len(ha->rules); int tail_len = vec_len((*applied_hash_aces)) - tail_offset; DBG("base_offset: %d, tail_offset: %d, tail_len: %d", base_offset, tail_offset, tail_len); for(i=0; i < vec_len(ha->rules); i ++) { - DBG("UNAPPLY DEACTIVATE: sw_if_index %d is_input %d, applied index %d", sw_if_index, is_input, base_offset + i); deactivate_applied_ace_hash_entry(am, sw_if_index, is_input, applied_hash_aces, base_offset + i); } @@ -479,6 +524,7 @@ hash_acl_unapply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index) /* After deletion we might not need some of the mask-types anymore... */ hash_acl_build_applied_lookup_bitmap(am, sw_if_index, is_input); + clib_mem_set_heap (oldheap); } /* @@ -493,8 +539,8 @@ hash_acl_unapply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index) void hash_acl_reapply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index) { - u32 **applied_acls = is_input ? &am->input_acl_vec_by_sw_if_index[sw_if_index] : - &am->output_acl_vec_by_sw_if_index[sw_if_index]; + u32 **applied_acls = is_input ? 
vec_elt_at_index(am->input_acl_vec_by_sw_if_index, sw_if_index) + : vec_elt_at_index(am->output_acl_vec_by_sw_if_index, sw_if_index); int i; int start_index = vec_search((*applied_acls), acl_index); /* @@ -505,10 +551,10 @@ hash_acl_reapply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index) /* unapply all the ACLs till the current one */ for(i = vec_len(*applied_acls) - 1; i >= start_index; i--) { - hash_acl_unapply(am, sw_if_index, is_input, (*applied_acls)[i]); + hash_acl_unapply(am, sw_if_index, is_input, *vec_elt_at_index(*applied_acls, i)); } for(i = start_index; i < vec_len(*applied_acls); i++) { - hash_acl_apply(am, sw_if_index, is_input, (*applied_acls)[i]); + hash_acl_apply(am, sw_if_index, is_input, *vec_elt_at_index(*applied_acls, i)); } } @@ -649,7 +695,7 @@ assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask) static void release_mask_type_index(acl_main_t *am, u32 mask_type_index) { - ace_mask_type_entry_t *mte = &am->ace_mask_type_pool[mask_type_index]; + ace_mask_type_entry_t *mte = pool_elt_at_index(am->ace_mask_type_pool, mask_type_index); mte->refcount--; if (mte->refcount == 0) { /* we are not using this entry anymore */ @@ -659,11 +705,12 @@ release_mask_type_index(acl_main_t *am, u32 mask_type_index) void hash_acl_add(acl_main_t *am, int acl_index) { + void *oldheap = hash_acl_set_heap(am); DBG("HASH ACL add : %d", acl_index); int i; acl_list_t *a = &am->acls[acl_index]; vec_validate(am->hash_acl_infos, acl_index); - hash_acl_info_t *ha = &am->hash_acl_infos[acl_index]; + hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index); memset(ha, 0, sizeof(*ha)); /* walk the newly added ACL entries and ensure that for each of them there @@ -710,10 +757,12 @@ void hash_acl_add(acl_main_t *am, int acl_index) hash_acl_reapply(am, *sw_if_index, 0, acl_index); } } + clib_mem_set_heap (oldheap); } void hash_acl_delete(acl_main_t *am, int acl_index) { + void *oldheap = hash_acl_set_heap(am); DBG("HASH ACL delete : %d", acl_index); /* * If the ACL is applied somewhere, remove the references of it (call hash_acl_unapply) @@ -739,12 +788,13 @@ void hash_acl_delete(acl_main_t *am, int acl_index) /* walk the mask types for the ACL about-to-be-deleted, and decrease * the reference count, possibly freeing up some of them */ int i; - hash_acl_info_t *ha = &am->hash_acl_infos[acl_index]; + hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index); for(i=0; i < vec_len(ha->rules); i++) { release_mask_type_index(am, ha->rules[i].mask_type_index); } clib_bitmap_free(ha->mask_type_index_bitmap); vec_free(ha->rules); + clib_mem_set_heap (oldheap); } u8 @@ -753,11 +803,10 @@ hash_multi_acl_match_5tuple (u32 sw_if_index, fa_5tuple_t * pkt_5tuple, int is_l u32 * rule_match_p, u32 * trace_bitmap) { acl_main_t *am = &acl_main; - applied_hash_ace_entry_t **applied_hash_aces = is_input ? 
&am->input_hash_entry_vec_by_sw_if_index[sw_if_index] : - &am->output_hash_entry_vec_by_sw_if_index[sw_if_index]; + applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, is_input, sw_if_index); u32 match_index = multi_acl_match_get_applied_ace_index(am, pkt_5tuple); if (match_index < vec_len((*applied_hash_aces))) { - applied_hash_ace_entry_t *pae = &((*applied_hash_aces)[match_index]); + applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), match_index); *acl_match_p = pae->acl_index; *rule_match_p = pae->ace_index; return pae->action; diff --git a/src/plugins/acl/hash_lookup_types.h b/src/plugins/acl/hash_lookup_types.h index 1848c5ff..fbc9777b 100644 --- a/src/plugins/acl/hash_lookup_types.h +++ b/src/plugins/acl/hash_lookup_types.h @@ -53,14 +53,14 @@ typedef struct { */ u32 next_applied_entry_index; /* - * previous entry in the ring list of the chained ones. + * previous entry in the list of the chained ones, + * if ~0 then this is the entry in the hash. */ u32 prev_applied_entry_index; /* - * 1 if it is the very first entry in the list, - * referenced from the hash. + * chain tail, if this is the first entry */ - u8 is_first_entry; + u32 tail_applied_entry_index; /* * Action of this applied ACE */ -- cgit 1.2.3-korg From ef5dd4f2aec6df1b58aa8d07493acf486eccf802 Mon Sep 17 00:00:00 2001 From: Andrew Yourtchenko Date: Tue, 8 Aug 2017 20:10:12 +0200 Subject: acl-plugin: avoid crash in multithreaded setup adding/deleting ACLs with traffic (VPP-910/VPP-929) The commit fixing VPP-910 and separating the memory operations into separate heaps has missed setting the MHEAP_FLAG_THREAD_SAFE, which quite obviously caused the issues in a multithreaded setup. Fix that. Also, add the debug CLIs "set acl-plugin heap {main|hash} {validate|trace} {1|0}" to toggle the memory instrumentation, in case we ever need it in the future.
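For illustration, a hypothetical debug session with these new commands could look like the following ("show acl-plugin memory" is the statistics command added earlier in this series; the first line enables mheap validation on the main ACL heap, the second enables allocation tracing on the hash lookup heap, the third inspects the per-heap statistics):

    vpp# set acl-plugin heap main validate 1
    vpp# set acl-plugin heap hash trace 1
    vpp# show acl-plugin memory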
Change-Id: I8bd4f7978613f5ea75a030cfb90674dac34ae7bf Signed-off-by: Andrew Yourtchenko (cherry picked from commit e6423bef32ca2ffcfcd7a092eb4673badd53ea4c) --- src/plugins/acl/acl.c | 50 +++++++++++++++++++++++++++++++++++++++++++ src/plugins/acl/hash_lookup.c | 29 +++++++++++++++++++++++++ src/plugins/acl/hash_lookup.h | 4 ++++ 3 files changed, 83 insertions(+) (limited to 'src/plugins/acl/hash_lookup.c') diff --git a/src/plugins/acl/acl.c b/src/plugins/acl/acl.c index 70dc8c63..80ef5661 100644 --- a/src/plugins/acl/acl.c +++ b/src/plugins/acl/acl.c @@ -91,11 +91,39 @@ acl_set_heap(acl_main_t *am) { if (0 == am->acl_mheap) { am->acl_mheap = mheap_alloc (0 /* use VM */ , 2 << 29); + mheap_t *h = mheap_header (am->acl_mheap); + h->flags |= MHEAP_FLAG_THREAD_SAFE; } void *oldheap = clib_mem_set_heap(am->acl_mheap); return oldheap; } +void +acl_plugin_acl_set_validate_heap(acl_main_t *am, int on) +{ + clib_mem_set_heap(acl_set_heap(am)); + mheap_t *h = mheap_header (am->acl_mheap); + if (on) { + h->flags |= MHEAP_FLAG_VALIDATE; + h->flags &= ~MHEAP_FLAG_SMALL_OBJECT_CACHE; + mheap_validate(h); + } else { + h->flags &= ~MHEAP_FLAG_VALIDATE; + h->flags |= MHEAP_FLAG_SMALL_OBJECT_CACHE; + } +} + +void +acl_plugin_acl_set_trace_heap(acl_main_t *am, int on) +{ + clib_mem_set_heap(acl_set_heap(am)); + mheap_t *h = mheap_header (am->acl_mheap); + if (on) { + h->flags |= MHEAP_FLAG_TRACE; + } else { + h->flags &= ~MHEAP_FLAG_TRACE; + } +} static void vl_api_acl_plugin_get_version_t_handler (vl_api_acl_plugin_get_version_t * mp) @@ -1930,6 +1958,8 @@ acl_sw_interface_add_del (vnet_main_t * vnm, u32 sw_if_index, u32 is_add) VNET_SW_INTERFACE_ADD_DEL_FUNCTION (acl_sw_interface_add_del); + + static clib_error_t * acl_set_aclplugin_fn (vlib_main_t * vm, unformat_input_t * input, @@ -1958,6 +1988,26 @@ acl_set_aclplugin_fn (vlib_main_t * vm, am->l4_match_nonfirst_fragment = (val != 0); goto done; } + if (unformat (input, "heap")) + { + if (unformat(input, "main")) + { + if (unformat(input, "validate %u", &val)) + acl_plugin_acl_set_validate_heap(am, val); + else if (unformat(input, "trace %u", &val)) + acl_plugin_acl_set_trace_heap(am, val); + goto done; + } + else if (unformat(input, "hash")) + { + if (unformat(input, "validate %u", &val)) + acl_plugin_hash_acl_set_validate_heap(am, val); + else if (unformat(input, "trace %u", &val)) + acl_plugin_hash_acl_set_trace_heap(am, val); + goto done; + } + goto done; + } if (unformat (input, "session")) { if (unformat (input, "table")) { /* The commands here are for tuning/testing. 
No user-serviceable parts inside */ diff --git a/src/plugins/acl/hash_lookup.c b/src/plugins/acl/hash_lookup.c index a337eb84..ae522d92 100644 --- a/src/plugins/acl/hash_lookup.c +++ b/src/plugins/acl/hash_lookup.c @@ -269,11 +269,40 @@ hash_acl_set_heap(acl_main_t *am) { if (0 == am->hash_lookup_mheap) { am->hash_lookup_mheap = mheap_alloc (0 /* use VM */ , 2 << 25); + mheap_t *h = mheap_header (am->hash_lookup_mheap); + h->flags |= MHEAP_FLAG_THREAD_SAFE; } void *oldheap = clib_mem_set_heap(am->hash_lookup_mheap); return oldheap; } +void +acl_plugin_hash_acl_set_validate_heap(acl_main_t *am, int on) +{ + clib_mem_set_heap(hash_acl_set_heap(am)); + mheap_t *h = mheap_header (am->hash_lookup_mheap); + if (on) { + h->flags |= MHEAP_FLAG_VALIDATE; + h->flags &= ~MHEAP_FLAG_SMALL_OBJECT_CACHE; + mheap_validate(h); + } else { + h->flags &= ~MHEAP_FLAG_VALIDATE; + h->flags |= MHEAP_FLAG_SMALL_OBJECT_CACHE; + } +} + +void +acl_plugin_hash_acl_set_trace_heap(acl_main_t *am, int on) +{ + clib_mem_set_heap(hash_acl_set_heap(am)); + mheap_t *h = mheap_header (am->hash_lookup_mheap); + if (on) { + h->flags |= MHEAP_FLAG_TRACE; + } else { + h->flags &= ~MHEAP_FLAG_TRACE; + } +} + void hash_acl_apply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index) { diff --git a/src/plugins/acl/hash_lookup.h b/src/plugins/acl/hash_lookup.h index c5913624..2d7058e8 100644 --- a/src/plugins/acl/hash_lookup.h +++ b/src/plugins/acl/hash_lookup.h @@ -57,4 +57,8 @@ hash_multi_acl_match_5tuple (u32 sw_if_index, fa_5tuple_t * pkt_5tuple, int is_l */ void show_hash_acl_hash(vlib_main_t * vm, acl_main_t *am, u32 verbose); +/* Debug functions to turn validate/trace on and off */ +void acl_plugin_hash_acl_set_validate_heap(acl_main_t *am, int on); +void acl_plugin_hash_acl_set_trace_heap(acl_main_t *am, int on); + #endif -- cgit 1.2.3-korg From faef07fdd048cf96626daa8e09ed995af8e30f00 Mon Sep 17 00:00:00 2001 From: Andrew Yourtchenko Date: Thu, 10 Aug 2017 14:19:58 +0200 Subject: acl-plugin: hash lookup bitmask not cleared when ACL is unapplied from interface (VPP-935) The logic in hash ACL bitmask update was using the vector of ACLs applied to the interface to rebuild the hash lookup mask. However, in transient cases (like doing group manipulation with hash ACLs), that will not hold true. Thus, keep a local copy of the ACL indices for which hash_acl_apply was previously called, and maintain that copy locally in the hash_lookup.c logic.
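To make the intended bookkeeping concrete, here is a minimal sketch of the pattern the diff below introduces (condensed and slightly simplified; pal is the per-interface applied_hash_acl_info_t, and vec_search/vec_add1/vec_del1 are the standard vppinfra vector helpers):

    /* on apply: record the ACL index in the plugin-local list,
       guarding against applying the same ACL twice */
    u32 index = vec_search (pal->applied_acls, acl_index);
    if (index != ~0)
      return;                               /* already applied - bail out */
    vec_add1 (pal->applied_acls, acl_index);

    /* on unapply: remove it again, so the mask-type bitmap is rebuilt
       only from the ACLs still applied on this interface */
    index = vec_search (pal->applied_acls, acl_index);
    if (index != ~0)
      vec_del1 (pal->applied_acls, index);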
Change-Id: I30187d68febce8bba2ab6ffbb1eee13b5c96a44b Signed-off-by: Andrew Yourtchenko (cherry picked from commit 1de7d7044434196610190011ebb431f054701259) --- src/plugins/acl/acl.c | 2 ++ src/plugins/acl/hash_lookup.c | 31 +++++++++++++++++++++++++++---- src/plugins/acl/hash_lookup_types.h | 2 ++ 3 files changed, 31 insertions(+), 4 deletions(-) (limited to 'src/plugins/acl/hash_lookup.c') diff --git a/src/plugins/acl/acl.c b/src/plugins/acl/acl.c index 80ef5661..db54d4e8 100644 --- a/src/plugins/acl/acl.c +++ b/src/plugins/acl/acl.c @@ -2377,6 +2377,7 @@ acl_show_aclplugin_fn (vlib_main_t * vm, if (swi < vec_len(am->input_applied_hash_acl_info_by_sw_if_index)) { applied_hash_acl_info_t *pal = &am->input_applied_hash_acl_info_by_sw_if_index[swi]; out0 = format(out0, " input lookup mask_type_index_bitmap: %U\n", format_bitmap_hex, pal->mask_type_index_bitmap); + out0 = format(out0, " input applied acls: %U\n", format_vec32, pal->applied_acls, "%d"); } if (swi < vec_len(am->input_hash_entry_vec_by_sw_if_index)) { out0 = format(out0, " input lookup applied entries:\n"); @@ -2391,6 +2392,7 @@ acl_show_aclplugin_fn (vlib_main_t * vm, if (swi < vec_len(am->output_applied_hash_acl_info_by_sw_if_index)) { applied_hash_acl_info_t *pal = &am->output_applied_hash_acl_info_by_sw_if_index[swi]; out0 = format(out0, " output lookup mask_type_index_bitmap: %U\n", format_bitmap_hex, pal->mask_type_index_bitmap); + out0 = format(out0, " output applied acls: %U\n", format_vec32, pal->applied_acls, "%d"); } if (swi < vec_len(am->output_hash_entry_vec_by_sw_if_index)) { out0 = format(out0, " output lookup applied entries:\n"); diff --git a/src/plugins/acl/hash_lookup.c b/src/plugins/acl/hash_lookup.c index ae522d92..a2edb9f3 100644 --- a/src/plugins/acl/hash_lookup.c +++ b/src/plugins/acl/hash_lookup.c @@ -339,6 +339,16 @@ hash_acl_apply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index) &am->output_applied_hash_acl_info_by_sw_if_index; vec_validate((*applied_hash_acls), sw_if_index); applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), sw_if_index); + + /* ensure the list of applied hash acls is initialized and add this acl# to it */ + u32 index = vec_search(pal->applied_acls, acl_index); + if (index != ~0) { + clib_warning("BUG: trying to apply twice acl_index %d on sw_if_index %d is_input %d", + acl_index, sw_if_index, is_input); + goto done; + } + vec_add1(pal->applied_acls, acl_index); + pal->mask_type_index_bitmap = clib_bitmap_or(pal->mask_type_index_bitmap, ha->mask_type_index_bitmap); /* @@ -369,6 +379,7 @@ hash_acl_apply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index) activate_applied_ace_hash_entry(am, sw_if_index, is_input, applied_hash_aces, new_index); } applied_hash_entries_analyze(am, applied_hash_aces); +done: clib_mem_set_heap (oldheap); } @@ -492,14 +503,14 @@ hash_acl_build_applied_lookup_bitmap(acl_main_t *am, u32 sw_if_index, u8 is_inpu { int i; uword *new_lookup_bitmap = 0; - u32 **applied_acls = is_input ? vec_elt_at_index(am->input_acl_vec_by_sw_if_index, sw_if_index) - : vec_elt_at_index(am->output_acl_vec_by_sw_if_index, sw_if_index); applied_hash_acl_info_t **applied_hash_acls = is_input ? 
&am->input_applied_hash_acl_info_by_sw_if_index : &am->output_applied_hash_acl_info_by_sw_if_index; applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), sw_if_index); - for(i=0; i < vec_len(*applied_acls); i++) { - u32 a_acl_index = *vec_elt_at_index((*applied_acls), i); + for(i=0; i < vec_len(pal->applied_acls); i++) { + u32 a_acl_index = *vec_elt_at_index((pal->applied_acls), i); hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, a_acl_index); + DBG("Update bitmask = %U or %U (acl_index %d)\n", format_bitmap_hex, new_lookup_bitmap, + format_bitmap_hex, ha->mask_type_index_bitmap, a_acl_index); new_lookup_bitmap = clib_bitmap_or(new_lookup_bitmap, ha->mask_type_index_bitmap); } @@ -514,6 +525,18 @@ hash_acl_unapply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index) { int i; DBG("HASH ACL unapply: sw_if_index %d is_input %d acl %d", sw_if_index, is_input, acl_index); + applied_hash_acl_info_t **applied_hash_acls = is_input ? &am->input_applied_hash_acl_info_by_sw_if_index : &am->output_applied_hash_acl_info_by_sw_if_index; + applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), sw_if_index); + + /* remove this acl# from the list of applied hash acls */ + u32 index = vec_search(pal->applied_acls, acl_index); + if (index == ~0) { + clib_warning("BUG: trying to unapply unapplied acl_index %d on sw_if_index %d is_input %d", + acl_index, sw_if_index, is_input); + return; + } + vec_del1(pal->applied_acls, index); hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index); applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, is_input, sw_if_index); diff --git a/src/plugins/acl/hash_lookup_types.h b/src/plugins/acl/hash_lookup_types.h index fbc9777b..837cc0a8 100644 --- a/src/plugins/acl/hash_lookup_types.h +++ b/src/plugins/acl/hash_lookup_types.h @@ -73,6 +73,8 @@ typedef struct { * hash_ace_info_t=>mask_type_index bits set */ uword *mask_type_index_bitmap; + /* applied ACLs so we can track them independently from main ACL module */ + u32 *applied_acls; } applied_hash_acl_info_t; -- cgit 1.2.3-korg From 6d157c2f91a5cf334968df5a6ac4963fedb8c706 Mon Sep 17 00:00:00 2001 From: Andrew Yourtchenko Date: Fri, 18 Aug 2017 19:10:39 +0200 Subject: acl-plugin: warning printed when acl_add_replace already applied ACLs (complete the fix for VPP-935) The fix for VPP-935 missed the case where hash_acl_add() and hash_acl_delete() may be called during the replacement of an existing applied ACL; as a result, the "applied" logic needs to be replicated separately for the hash ACLs, since they sit at a lower layer.
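As a condensed sketch (names as in the diff below), the delete path now walks the per-ACL interface lists kept by the hash layer itself, taking a copy first because hash_acl_unapply() mutates the list while we iterate:

    hash_acl_info_t *ha = vec_elt_at_index (am->hash_acl_infos, acl_index);
    u32 *list_copy = vec_dup (ha->inbound_sw_if_index_list);
    u32 *sw_if_index;
    vec_foreach (sw_if_index, list_copy)
      hash_acl_unapply (am, *sw_if_index, 1 /* is_input */, acl_index);
    vec_free (list_copy);
    /* ...then the same for ha->outbound_sw_if_index_list with is_input = 0 */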
Change-Id: I7dcb2b120fcbdceb5e59acb5029f9eb77bd0f240 Signed-off-by: Andrew Yourtchenko (cherry picked from commit ce9714032d36d18abe72981552219dff871ff392) --- src/plugins/acl/acl.c | 2 ++ src/plugins/acl/hash_lookup.c | 55 ++++++++++++++++++++++++----------- src/plugins/acl/hash_lookup_private.h | 6 ++++ src/plugins/acl/hash_lookup_types.h | 3 ++ 4 files changed, 49 insertions(+), 17 deletions(-) (limited to 'src/plugins/acl/hash_lookup.c') diff --git a/src/plugins/acl/acl.c b/src/plugins/acl/acl.c index 2448a928..7790b30b 100644 --- a/src/plugins/acl/acl.c +++ b/src/plugins/acl/acl.c @@ -2421,6 +2421,8 @@ acl_show_aclplugin_fn (vlib_main_t * vm, } hash_acl_info_t *ha = &am->hash_acl_infos[i]; out0 = format(out0, "acl-index %u bitmask-ready layout\n", i); + out0 = format(out0, " applied inbound on sw_if_index list: %U\n", format_vec32, ha->inbound_sw_if_index_list, "%d"); + out0 = format(out0, " applied outbound on sw_if_index list: %U\n", format_vec32, ha->outbound_sw_if_index_list, "%d"); out0 = format(out0, " mask type index bitmap: %U\n", format_bitmap_hex, ha->mask_type_index_bitmap); for(j=0; j<vec_len(ha->rules); j++) { hash_ace_info_t *pa = &ha->rules[j]; diff --git a/src/plugins/acl/hash_lookup.c b/src/plugins/acl/hash_lookup.c index a2edb9f3..f987b36e 100644 --- a/src/plugins/acl/hash_lookup.c +++ b/src/plugins/acl/hash_lookup.c @@ -308,16 +308,13 @@ hash_acl_apply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index) { int i; - DBG("HASH ACL apply: sw_if_index %d is_input %d acl %d", sw_if_index, is_input, acl_index); + DBG0("HASH ACL apply: sw_if_index %d is_input %d acl %d", sw_if_index, is_input, acl_index); if (!am->acl_lookup_hash_initialized) { BV (clib_bihash_init) (&am->acl_lookup_hash, "ACL plugin rule lookup bihash", 65536, 2 << 25); am->acl_lookup_hash_initialized = 1; } - u32 *acl_vec = is_input ? *vec_elt_at_index(am->input_acl_vec_by_sw_if_index, sw_if_index) - : *vec_elt_at_index(am->output_acl_vec_by_sw_if_index, sw_if_index); - void *oldheap = hash_acl_set_heap(am); if (is_input) { vec_validate(am->input_hash_entry_vec_by_sw_if_index, sw_if_index); @@ -327,9 +324,9 @@ hash_acl_apply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index) vec_validate(am->hash_acl_infos, acl_index); applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, is_input, sw_if_index); - u32 order_index = vec_search(acl_vec, acl_index); hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index); - ASSERT(order_index != ~0); + u32 **hash_acl_applied_sw_if_index = is_input ?
&ha->inbound_sw_if_index_list + : &ha->outbound_sw_if_index_list; int base_offset = vec_len(*applied_hash_aces); @@ -348,6 +345,13 @@ hash_acl_apply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index) goto done; } vec_add1(pal->applied_acls, acl_index); + u32 index2 = vec_search((*hash_acl_applied_sw_if_index), sw_if_index); + if (index2 != ~0) { + clib_warning("BUG: trying to apply twice acl_index %d on (sw_if_index %d) is_input %d", + acl_index, sw_if_index, is_input); + goto done; + } + vec_add1((*hash_acl_applied_sw_if_index), sw_if_index); pal->mask_type_index_bitmap = clib_bitmap_or(pal->mask_type_index_bitmap, ha->mask_type_index_bitmap); @@ -524,11 +528,15 @@ hash_acl_unapply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index) { int i; - DBG("HASH ACL unapply: sw_if_index %d is_input %d acl %d", sw_if_index, is_input, acl_index); + DBG0("HASH ACL unapply: sw_if_index %d is_input %d acl %d", sw_if_index, is_input, acl_index); applied_hash_acl_info_t **applied_hash_acls = is_input ? &am->input_applied_hash_acl_info_by_sw_if_index : &am->output_applied_hash_acl_info_by_sw_if_index; applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), sw_if_index); + hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index); + u32 **hash_acl_applied_sw_if_index = is_input ? &ha->inbound_sw_if_index_list + : &ha->outbound_sw_if_index_list; + /* remove this acl# from the list of applied hash acls */ u32 index = vec_search(pal->applied_acls, acl_index); if (index == ~0) { @@ -538,7 +546,14 @@ hash_acl_unapply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index) } vec_del1(pal->applied_acls, index); - hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index); + u32 index2 = vec_search((*hash_acl_applied_sw_if_index), sw_if_index); + if (index2 == ~0) { + clib_warning("BUG: trying to unapply twice acl_index %d on (sw_if_index %d) is_input %d", + acl_index, sw_if_index, is_input); + return; + } + vec_del1((*hash_acl_applied_sw_if_index), index2); + applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, is_input, sw_if_index); for(i=0; i < vec_len((*applied_hash_aces)); i++) { @@ -602,7 +617,7 @@ hash_acl_reapply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index) ASSERT(start_index < vec_len(*applied_acls)); /* unapply all the ACLs till the current one */ - for(i = vec_len(*applied_acls) - 1; i >= start_index; i--) { + for(i = vec_len(*applied_acls) - 1; i > start_index; i--) { hash_acl_unapply(am, sw_if_index, is_input, *vec_elt_at_index(*applied_acls, i)); } for(i = start_index; i < vec_len(*applied_acls); i++) { @@ -815,7 +830,7 @@ void hash_acl_add(acl_main_t *am, int acl_index) void hash_acl_delete(acl_main_t *am, int acl_index) { void *oldheap = hash_acl_set_heap(am); - DBG("HASH ACL delete : %d", acl_index); + DBG0("HASH ACL delete : %d", acl_index); /* * If the ACL is applied somewhere, remove the references of it (call hash_acl_unapply) * this is a different behavior from the linear lookup where an empty ACL is "deny all", @@ -823,16 +838,23 @@ void hash_acl_delete(acl_main_t *am, int acl_index) * However, following vpp-dev discussion the ACL that is referenced elsewhere * should not be possible to delete, and the change adding this also adds * the safeguards to that respect, so this is not a problem. 
+ * + * The part to remember is that this routine is called in the process of reapplication + * during the acl_add_replace() API call - the old acl ruleset is deleted, then + * the new one is added, without the change in the applied ACLs - so this case + * has to be handled. */ - if (acl_index < vec_len(am->input_sw_if_index_vec_by_acl)) { + hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index); + u32 *interface_list_copy = 0; + { u32 *sw_if_index; - vec_foreach(sw_if_index, am->input_sw_if_index_vec_by_acl[acl_index]) { + interface_list_copy = vec_dup(ha->inbound_sw_if_index_list); + vec_foreach(sw_if_index, interface_list_copy) { hash_acl_unapply(am, *sw_if_index, 1, acl_index); } - } - if (acl_index < vec_len(am->output_sw_if_index_vec_by_acl)) { - u32 *sw_if_index; - vec_foreach(sw_if_index, am->output_sw_if_index_vec_by_acl[acl_index]) { + vec_free(interface_list_copy); + interface_list_copy = vec_dup(ha->outbound_sw_if_index_list); + vec_foreach(sw_if_index, interface_list_copy) { hash_acl_unapply(am, *sw_if_index, 0, acl_index); } } @@ -840,7 +862,6 @@ void hash_acl_delete(acl_main_t *am, int acl_index) /* walk the mask types for the ACL about-to-be-deleted, and decrease * the reference count, possibly freeing up some of them */ int i; - hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index); for(i=0; i < vec_len(ha->rules); i++) { release_mask_type_index(am, ha->rules[i].mask_type_index); } diff --git a/src/plugins/acl/hash_lookup_private.h b/src/plugins/acl/hash_lookup_private.h index 7b6449cd..bc621416 100644 --- a/src/plugins/acl/hash_lookup_private.h +++ b/src/plugins/acl/hash_lookup_private.h @@ -18,9 +18,15 @@ #define ACL_HASH_LOOKUP_DEBUG 0 #if ACL_HASH_LOOKUP_DEBUG == 1 +#define DBG0(...) clib_warning(__VA_ARGS__) +#define DBG(...) +#define DBG_UNIX_LOG(...) +#elif ACL_HASH_LOOKUP_DEBUG == 2 +#define DBG0(...) clib_warning(__VA_ARGS__) #define DBG(...) clib_warning(__VA_ARGS__) #define DBG_UNIX_LOG(...) clib_unix_warning(__VA_ARGS__) #else +#define DBG0(...) #define DBG(...) #define DBG_UNIX_LOG(...) #endif diff --git a/src/plugins/acl/hash_lookup_types.h b/src/plugins/acl/hash_lookup_types.h index 837cc0a8..f7110007 100644 --- a/src/plugins/acl/hash_lookup_types.h +++ b/src/plugins/acl/hash_lookup_types.h @@ -38,6 +38,9 @@ typedef struct { typedef struct { /* The mask types present in this ACL */ uword *mask_type_index_bitmap; + /* hash ACL applied on these interfaces */ + u32 *inbound_sw_if_index_list; + u32 *outbound_sw_if_index_list; hash_ace_info_t *rules; } hash_acl_info_t; -- cgit 1.2.3-korg From 6be72cd89a3068130fea1e0a44f2885bff599c55 Mon Sep 17 00:00:00 2001 From: Andrew Yourtchenko Date: Thu, 10 Aug 2017 17:02:58 +0200 Subject: acl-plugin: match index set to first portrange element if non-first portrange matches on the same hash key (VPP-937) Multiple portranges that land on the same hash key will always report the match on the first portrange - even when the subsequent portranges have matched. This was a test escape, so add a corresponding test case and fix the code so it passes.
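The essence of the fix, as a condensed sketch (full context in the diff below): when the linear walk over a chain of port-range entries finds a match, keep the index of the entry that actually matched instead of reusing the head-of-chain index from the bihash result:

    if (curr_index < curr_match_index) {
      /* was: curr_match_index = result_val->applied_entry_index; */
      curr_match_index = curr_index;
    }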
(the commit on stable/1707 has erroneously mentioned VPP-938 jira ticket) Change-Id: Idbeb8a122252ead2468f5f9dbaf72cf0e8bb78f1 Signed-off-by: Andrew Yourtchenko (cherry picked from commit fb088f0a201270e949469c915c529d75ad13353e) Signed-off-by: Andrew Yourtchenko --- src/plugins/acl/hash_lookup.c | 6 +- test/test_acl_plugin.py | 191 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 192 insertions(+), 5 deletions(-) (limited to 'src/plugins/acl/hash_lookup.c') diff --git a/src/plugins/acl/hash_lookup.c b/src/plugins/acl/hash_lookup.c index f987b36e..5dbc3589 100644 --- a/src/plugins/acl/hash_lookup.c +++ b/src/plugins/acl/hash_lookup.c @@ -134,11 +134,7 @@ multi_acl_match_get_applied_ace_index(acl_main_t *am, fa_5tuple_t *match) } if (curr_index < curr_match_index) { DBG("The index %d is the new candidate in portrange matches.", curr_index); - curr_match_index = result_val->applied_entry_index; - if (!result_val->shadowed) { - /* new result is known to not be shadowed, so no point to look up further */ - break; - } + curr_match_index = curr_index; } else { DBG("Curr portmatch index %d is too big vs. current matched one %d", curr_index, curr_match_index); } diff --git a/test/test_acl_plugin.py b/test/test_acl_plugin.py index 5f830ea2..3a180d21 100644 --- a/test/test_acl_plugin.py +++ b/test/test_acl_plugin.py @@ -42,6 +42,7 @@ class TestACLplugin(VppTestCase): # port ranges PORTS_ALL = -1 PORTS_RANGE = 0 + PORTS_RANGE_2 = 1 udp_sport_from = 10 udp_sport_to = udp_sport_from + 5 udp_dport_from = 20000 @@ -51,11 +52,27 @@ class TestACLplugin(VppTestCase): tcp_dport_from = 40000 tcp_dport_to = tcp_dport_from + 5000 + udp_sport_from_2 = 90 + udp_sport_to_2 = udp_sport_from_2 + 5 + udp_dport_from_2 = 30000 + udp_dport_to_2 = udp_dport_from_2 + 5000 + tcp_sport_from_2 = 130 + tcp_sport_to_2 = tcp_sport_from_2 + 5 + tcp_dport_from_2 = 20000 + tcp_dport_to_2 = tcp_dport_from_2 + 5000 + icmp4_type = 8 # echo request icmp4_code = 3 icmp6_type = 128 # echo request icmp6_code = 3 + icmp4_type_2 = 8 + icmp4_code_from_2 = 5 + icmp4_code_to_2 = 20 + icmp6_type_2 = 128 + icmp6_code_from_2 = 8 + icmp6_code_to_2 = 42 + # Test variables bd_id = 1 @@ -182,6 +199,27 @@ class TestACLplugin(VppTestCase): sport_to = self.udp_sport_to dport_from = self.udp_dport_from dport_to = self.udp_dport_to + elif ports == self.PORTS_RANGE_2: + if proto == 1: + sport_from = self.icmp4_type_2 + sport_to = self.icmp4_type_2 + dport_from = self.icmp4_code_from_2 + dport_to = self.icmp4_code_to_2 + elif proto == 58: + sport_from = self.icmp6_type_2 + sport_to = self.icmp6_type_2 + dport_from = self.icmp6_code_from_2 + dport_to = self.icmp6_code_to_2 + elif proto == self.proto[self.IP][self.TCP]: + sport_from = self.tcp_sport_from_2 + sport_to = self.tcp_sport_to_2 + dport_from = self.tcp_dport_from_2 + dport_to = self.tcp_dport_to_2 + elif proto == self.proto[self.IP][self.UDP]: + sport_from = self.udp_sport_from_2 + sport_to = self.udp_sport_to_2 + dport_from = self.udp_dport_from_2 + dport_to = self.udp_dport_to_2 else: sport_from = ports sport_to = ports @@ -1123,5 +1161,158 @@ class TestACLplugin(VppTestCase): self.logger.info("ACLP_TEST_FINISH_0023") + def test_0108_tcp_permit_v4(self): + """ permit TCPv4 + non-match range + """ + self.logger.info("ACLP_TEST_START_0108") + + # Add an ACL + rules = [] + rules.append(self.create_rule(self.IPV4, self.DENY, self.PORTS_RANGE_2, + self.proto[self.IP][self.TCP])) + rules.append(self.create_rule(self.IPV4, self.PERMIT, self.PORTS_RANGE, + self.proto[self.IP][self.TCP])) + # 
deny ip any any in the end + rules.append(self.create_rule(self.IPV4, self.DENY, self.PORTS_ALL, 0)) + + # Apply rules + self.apply_rules(rules, "permit ipv4 tcp") + + # Traffic should still pass + self.run_verify_test(self.IP, self.IPV4, self.proto[self.IP][self.TCP]) + + self.logger.info("ACLP_TEST_FINISH_0108") + + def test_0109_tcp_permit_v6(self): + """ permit TCPv6 + non-match range + """ + self.logger.info("ACLP_TEST_START_0109") + + # Add an ACL + rules = [] + rules.append(self.create_rule(self.IPV6, self.DENY, self.PORTS_RANGE_2, + self.proto[self.IP][self.TCP])) + rules.append(self.create_rule(self.IPV6, self.PERMIT, self.PORTS_RANGE, + self.proto[self.IP][self.TCP])) + # deny ip any any in the end + rules.append(self.create_rule(self.IPV6, self.DENY, self.PORTS_ALL, 0)) + + # Apply rules + self.apply_rules(rules, "permit ip6 tcp") + + # Traffic should still pass + self.run_verify_test(self.IP, self.IPV6, self.proto[self.IP][self.TCP]) + + self.logger.info("ACLP_TEST_FINISH_0109") + + def test_0110_udp_permit_v4(self): + """ permit UDPv4 + non-match range + """ + self.logger.info("ACLP_TEST_START_0110") + + # Add an ACL + rules = [] + rules.append(self.create_rule(self.IPV4, self.DENY, self.PORTS_RANGE_2, + self.proto[self.IP][self.UDP])) + rules.append(self.create_rule(self.IPV4, self.PERMIT, self.PORTS_RANGE, + self.proto[self.IP][self.UDP])) + # deny ip any any in the end + rules.append(self.create_rule(self.IPV4, self.DENY, self.PORTS_ALL, 0)) + + # Apply rules + self.apply_rules(rules, "permit ipv4 udp") + + # Traffic should still pass + self.run_verify_test(self.IP, self.IPV4, self.proto[self.IP][self.UDP]) + + self.logger.info("ACLP_TEST_FINISH_0110") + + def test_0111_udp_permit_v6(self): + """ permit UDPv6 + non-match range + """ + self.logger.info("ACLP_TEST_START_0111") + + # Add an ACL + rules = [] + rules.append(self.create_rule(self.IPV6, self.DENY, self.PORTS_RANGE_2, + self.proto[self.IP][self.UDP])) + rules.append(self.create_rule(self.IPV6, self.PERMIT, self.PORTS_RANGE, + self.proto[self.IP][self.UDP])) + # deny ip any any in the end + rules.append(self.create_rule(self.IPV6, self.DENY, self.PORTS_ALL, 0)) + + # Apply rules + self.apply_rules(rules, "permit ip6 udp") + + # Traffic should still pass + self.run_verify_test(self.IP, self.IPV6, self.proto[self.IP][self.UDP]) + + self.logger.info("ACLP_TEST_FINISH_0111") + + def test_0112_tcp_deny(self): + """ deny TCPv4/v6 + non-match range + """ + self.logger.info("ACLP_TEST_START_0112") + + # Add an ACL + rules = [] + rules.append(self.create_rule(self.IPV4, self.PERMIT, + self.PORTS_RANGE_2, + self.proto[self.IP][self.TCP])) + rules.append(self.create_rule(self.IPV6, self.PERMIT, + self.PORTS_RANGE_2, + self.proto[self.IP][self.TCP])) + rules.append(self.create_rule(self.IPV4, self.DENY, self.PORTS_RANGE, + self.proto[self.IP][self.TCP])) + rules.append(self.create_rule(self.IPV6, self.DENY, self.PORTS_RANGE, + self.proto[self.IP][self.TCP])) + # permit ip any any in the end + rules.append(self.create_rule(self.IPV4, self.PERMIT, + self.PORTS_ALL, 0)) + rules.append(self.create_rule(self.IPV6, self.PERMIT, + self.PORTS_ALL, 0)) + + # Apply rules + self.apply_rules(rules, "deny ip4/ip6 tcp") + + # Traffic should not pass + self.run_verify_negat_test(self.IP, self.IPRANDOM, + self.proto[self.IP][self.TCP]) + + self.logger.info("ACLP_TEST_FINISH_0112") + + def test_0113_udp_deny(self): + """ deny UDPv4/v6 + non-match range + """ + self.logger.info("ACLP_TEST_START_0113") + + # Add an ACL + rules = [] + 
From a546ef96a8170aeea70d771ee45662cadc628344 Mon Sep 17 00:00:00 2001
From: Andrew Yourtchenko
Date: Thu, 7 Sep 2017 13:49:07 +0200
Subject: acl-plugin: add hitcount to applied hash-acl entries

Add a counter incremented upon the ACL check, so it is easier to see
which kind of traffic is being checked by the policy, and add the
corresponding output to the debug CLI "show acl-plugin tables" command.

Change-Id: Id811dddf204e63eeceabfcc509e3e9c5aae1dbc8
Signed-off-by: Andrew Yourtchenko
---
 src/plugins/acl/acl.c               | 8 ++++----
 src/plugins/acl/hash_lookup.c       | 2 ++
 src/plugins/acl/hash_lookup_types.h | 4 ++++
 3 files changed, 10 insertions(+), 4 deletions(-)

(limited to 'src/plugins/acl/hash_lookup.c')

diff --git a/src/plugins/acl/acl.c b/src/plugins/acl/acl.c
index 7790b30b..1ded1afa 100644
--- a/src/plugins/acl/acl.c
+++ b/src/plugins/acl/acl.c
@@ -2460,9 +2460,9 @@ acl_show_aclplugin_fn (vlib_main_t * vm,
       out0 = format(out0, "  input lookup applied entries:\n");
       for(j=0; j < vec_len(am->input_hash_entry_vec_by_sw_if_index[swi]); j++) {
         applied_hash_ace_entry_t *pae = &am->input_hash_entry_vec_by_sw_if_index[swi][j];
-        out0 = format(out0, "    %4d: acl %d rule %d action %d bitmask-ready rule %d next %d prev %d tail %d\n",
+        out0 = format(out0, "    %4d: acl %d rule %d action %d bitmask-ready rule %d next %d prev %d tail %d hitcount %lld\n",
                       j, pae->acl_index, pae->ace_index, pae->action, pae->hash_ace_info_index,
-                      pae->next_applied_entry_index, pae->prev_applied_entry_index, pae->tail_applied_entry_index);
+                      pae->next_applied_entry_index, pae->prev_applied_entry_index, pae->tail_applied_entry_index, pae->hitcount);
       }
     }
@@ -2475,9 +2475,9 @@ acl_show_aclplugin_fn (vlib_main_t * vm,
       out0 = format(out0, "  output lookup applied entries:\n");
       for(j=0; j < vec_len(am->output_hash_entry_vec_by_sw_if_index[swi]); j++) {
         applied_hash_ace_entry_t *pae = &am->output_hash_entry_vec_by_sw_if_index[swi][j];
-        out0 = format(out0, "    %4d: acl %d rule %d action %d bitmask-ready rule %d next %d prev %d tail %d\n",
+        out0 = format(out0, "    %4d: acl %d rule %d action %d bitmask-ready rule %d next %d prev %d tail %d hitcount %lld\n",
                       j, pae->acl_index, pae->ace_index, pae->action, pae->hash_ace_info_index,
-                      pae->next_applied_entry_index, pae->prev_applied_entry_index, pae->tail_applied_entry_index);
+                      pae->next_applied_entry_index, pae->prev_applied_entry_index, pae->tail_applied_entry_index, pae->hitcount);
       }
     }
diff --git a/src/plugins/acl/hash_lookup.c b/src/plugins/acl/hash_lookup.c
index 5dbc3589..13bc6b46 100644
--- a/src/plugins/acl/hash_lookup.c
+++ b/src/plugins/acl/hash_lookup.c
@@ -371,6 +371,7 @@ hash_acl_apply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index)
     pae->acl_index = acl_index;
     pae->ace_index = ha->rules[i].ace_index;
     pae->action = ha->rules[i].action;
+    pae->hitcount = 0;
     pae->hash_ace_info_index = i;
     /* we might link it in later */
     pae->next_applied_entry_index = ~0;
@@ -876,6 +877,7 @@ hash_multi_acl_match_5tuple (u32 sw_if_index, fa_5tuple_t * pkt_5tuple, int is_l
   u32 match_index = multi_acl_match_get_applied_ace_index(am, pkt_5tuple);
   if (match_index < vec_len((*applied_hash_aces))) {
     applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), match_index);
+    pae->hitcount++;
     *acl_match_p = pae->acl_index;
     *rule_match_p = pae->ace_index;
     return pae->action;
diff --git a/src/plugins/acl/hash_lookup_types.h b/src/plugins/acl/hash_lookup_types.h
index f7110007..1fa197ec 100644
--- a/src/plugins/acl/hash_lookup_types.h
+++ b/src/plugins/acl/hash_lookup_types.h
@@ -64,6 +64,10 @@ typedef struct {
    * chain tail, if this is the first entry
    */
   u32 tail_applied_entry_index;
+  /*
+   * number of hits on this entry
+   */
+  u64 hitcount;
   /*
    * Action of this applied ACE
    */
-- cgit 1.2.3-korg
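With the change above, every successful hash-path match increments the matched entry's 64-bit hitcount, and the "applied entries" lines printed by the "show acl-plugin tables" debug CLI gain a trailing "hitcount" column per the format string in the diff. A hypothetical output line (all values illustrative only, with ~0 indices printing as -1 through %d) would look like:

       0: acl 2 rule 0 action 1 bitmask-ready rule 0 next -1 prev -1 tail 0 hitcount 1234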
From cada5a92faaf1dd2887df5ca200195011d2a0b8d Mon Sep 17 00:00:00 2001
From: Andrew Yourtchenko
Date: Mon, 11 Sep 2017 17:22:03 +0200
Subject: acl-plugin: add startup-config section "acl-plugin" and heap/hash parameters

This adds the ability to tweak the memory allocation parameters of the
ACL plugin from the startup config. This may be useful in cases that
involve a higher connection limit than the default 1M, or a high number
of cores.

Change-Id: I2b6fb3f61126ff3ee998424b762b6aefe8fb1b8e
Signed-off-by: Andrew Yourtchenko
---
 src/plugins/acl/acl.c         | 50 +++++++++++++++++++++++++++++++++++++++++--
 src/plugins/acl/acl.h         | 10 +++++++++
 src/plugins/acl/hash_lookup.c |  4 ++--
 3 files changed, 60 insertions(+), 4 deletions(-)

(limited to 'src/plugins/acl/hash_lookup.c')

diff --git a/src/plugins/acl/acl.c b/src/plugins/acl/acl.c
index 611efbb7..cdfe0682 100644
--- a/src/plugins/acl/acl.c
+++ b/src/plugins/acl/acl.c
@@ -91,7 +91,7 @@ static void *
 acl_set_heap(acl_main_t *am)
 {
   if (0 == am->acl_mheap) {
-    am->acl_mheap = mheap_alloc (0 /* use VM */ , 2 << 29);
+    am->acl_mheap = mheap_alloc (0 /* use VM */ , am->acl_mheap_size);
     mheap_t *h = mheap_header (am->acl_mheap);
     h->flags |= MHEAP_FLAG_THREAD_SAFE;
   }
@@ -2596,7 +2596,47 @@
 };
 /* *INDENT-ON* */
 
-
+static clib_error_t *
+acl_plugin_config (vlib_main_t * vm, unformat_input_t * input)
+{
+  acl_main_t *am = &acl_main;
+  u32 conn_table_hash_buckets;
+  u32 conn_table_hash_memory_size;
+  u32 conn_table_max_entries;
+  u32 main_heap_size;
+  u32 hash_heap_size;
+  u32 hash_lookup_hash_buckets;
+  u32 hash_lookup_hash_memory;
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "connection hash buckets %d", &conn_table_hash_buckets))
+        am->fa_conn_table_hash_num_buckets = conn_table_hash_buckets;
+      else if (unformat (input, "connection hash memory %d",
+                         &conn_table_hash_memory_size))
+        am->fa_conn_table_hash_memory_size = conn_table_hash_memory_size;
+      else if (unformat (input, "connection count max %d",
+                         &conn_table_max_entries))
+        am->fa_conn_table_max_entries = conn_table_max_entries;
+      else if (unformat (input, "main heap size %d",
+                         &main_heap_size))
+        am->acl_mheap_size = main_heap_size;
+      else if (unformat (input, "hash lookup heap size %d",
+                         &hash_heap_size))
+        am->hash_lookup_mheap_size = hash_heap_size;
+      else if (unformat (input, "hash lookup hash buckets %d",
+                         &hash_lookup_hash_buckets))
+        am->hash_lookup_hash_buckets = hash_lookup_hash_buckets;
+      else if (unformat (input, "hash lookup hash memory %d",
+                         &hash_lookup_hash_memory))
+        am->hash_lookup_hash_memory = hash_lookup_hash_memory;
+      else
+        return clib_error_return (0, "unknown input '%U'",
+                                  format_unformat_error, input);
+    }
+  return 0;
+}
+VLIB_CONFIG_FUNCTION (acl_plugin_config, "acl-plugin");
 
 static clib_error_t *
 acl_init (vlib_main_t * vm)
@@ -2622,6 +2662,12 @@ acl_init (vlib_main_t * vm)
 
   acl_setup_fa_nodes();
 
+  am->acl_mheap_size = ACL_FA_DEFAULT_HEAP_SIZE;
+  am->hash_lookup_mheap_size = ACL_PLUGIN_HASH_LOOKUP_HEAP_SIZE;
+
+  am->hash_lookup_hash_buckets = ACL_PLUGIN_HASH_LOOKUP_HASH_BUCKETS;
+  am->hash_lookup_hash_memory = ACL_PLUGIN_HASH_LOOKUP_HASH_MEMORY;
+
   am->session_timeout_sec[ACL_TIMEOUT_TCP_TRANSIENT] = TCP_SESSION_TRANSIENT_TIMEOUT_SEC;
   am->session_timeout_sec[ACL_TIMEOUT_TCP_IDLE] = TCP_SESSION_IDLE_TIMEOUT_SEC;
   am->session_timeout_sec[ACL_TIMEOUT_UDP_IDLE] = UDP_SESSION_IDLE_TIMEOUT_SEC;
diff --git a/src/plugins/acl/acl.h b/src/plugins/acl/acl.h
index 26084a66..bed22e5f 100644
--- a/src/plugins/acl/acl.h
+++ b/src/plugins/acl/acl.h
@@ -37,6 +37,12 @@
 #define TCP_SESSION_IDLE_TIMEOUT_SEC (3600*24)
 #define TCP_SESSION_TRANSIENT_TIMEOUT_SEC 120
 
+#define ACL_FA_DEFAULT_HEAP_SIZE (2 << 29)
+
+#define ACL_PLUGIN_HASH_LOOKUP_HEAP_SIZE (2 << 25)
+#define ACL_PLUGIN_HASH_LOOKUP_HASH_BUCKETS 65536
+#define ACL_PLUGIN_HASH_LOOKUP_HASH_MEMORY (2 << 25)
+
 extern vlib_node_registration_t acl_in_node;
 extern vlib_node_registration_t acl_out_node;
 
@@ -125,6 +131,7 @@ typedef struct
 typedef struct {
   /* mheap to hold all the ACL module related allocations, other than hash */
   void *acl_mheap;
+  u32 acl_mheap_size;
 
   /* API message ID base */
   u16 msg_id_base;
@@ -132,9 +139,12 @@ typedef struct {
   acl_list_t *acls;	/* Pool of ACLs */
   hash_acl_info_t *hash_acl_infos; /* corresponding hash matching housekeeping info */
   clib_bihash_48_8_t acl_lookup_hash; /* ACL lookup hash table. */
+  u32 hash_lookup_hash_buckets;
+  u32 hash_lookup_hash_memory;
 
   /* mheap to hold all the miscellaneous allocations related to hash-based lookups */
   void *hash_lookup_mheap;
+  u32 hash_lookup_mheap_size;
   int acl_lookup_hash_initialized;
   applied_hash_ace_entry_t **input_hash_entry_vec_by_sw_if_index;
   applied_hash_ace_entry_t **output_hash_entry_vec_by_sw_if_index;
diff --git a/src/plugins/acl/hash_lookup.c b/src/plugins/acl/hash_lookup.c
index 13bc6b46..7869027b 100644
--- a/src/plugins/acl/hash_lookup.c
+++ b/src/plugins/acl/hash_lookup.c
@@ -264,7 +264,7 @@ static void *
 hash_acl_set_heap(acl_main_t *am)
 {
   if (0 == am->hash_lookup_mheap) {
-    am->hash_lookup_mheap = mheap_alloc (0 /* use VM */ , 2 << 25);
+    am->hash_lookup_mheap = mheap_alloc (0 /* use VM */ , am->hash_lookup_mheap_size);
     mheap_t *h = mheap_header (am->hash_lookup_mheap);
     h->flags |= MHEAP_FLAG_THREAD_SAFE;
   }
@@ -307,7 +307,7 @@ hash_acl_apply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index)
   DBG0("HASH ACL apply: sw_if_index %d is_input %d acl %d", sw_if_index, is_input, acl_index);
   if (!am->acl_lookup_hash_initialized) {
     BV (clib_bihash_init) (&am->acl_lookup_hash, "ACL plugin rule lookup bihash",
-                           65536, 2 << 25);
+                           am->hash_lookup_hash_buckets, am->hash_lookup_hash_memory);
     am->acl_lookup_hash_initialized = 1;
   }
-- cgit 1.2.3-korg