author    Andrew Yourtchenko <ayourtch@gmail.com>    2018-08-29 17:23:03 +0200
committer Neale Ranns <nranns@cisco.com>             2018-09-05 10:36:46 +0000
commit    3328e9011463855b5b2d52b849cff3114d264b68
tree      03e36aacc07b8af7a4246af90e01dcb772ae7f8c
parent    5d28c7afbc0abd172d0053768b2ebe37b7a6c348
acl-plugin: VPP-1400: VPP may crash when performing ACL modifications on applied ACLs
The split_partition() did not increment the refcount when reusing an
existing mask type index, so subsequent ACL modifications could double-free
that entry: in the best case an immediate crash, in the worst case a
delayed crash in another place.

Introduce lock_mask_type_index() and call it, and move the mask type index
related functions closer to the top of the file. Make the assignment of
the new mask type indices for the tuplemerge case use
assign_mask_type_index(). Keep some debugs in case we need to investigate
this further at some point.

Change-Id: Iae370f5cd92e1fe1442480db34656a8a3442dbc0
Signed-off-by: Andrew Yourtchenko <ayourtch@gmail.com>
(cherry picked from commit 1edc406da3d4f6e63de2f278360b5753f55c00df)
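The failure mode reduces to a missing reference-count increment when an
existing pooled entry is reused. Below is a minimal, self-contained sketch
of the discipline this patch restores, in plain C with simplified types;
entry_t, assign_entry(), lock_entry() and release_entry() are illustrative
stand-ins for the mask type pool machinery, not the plugin's actual API.

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    /* Simplified stand-in for ace_mask_type_entry_t: a mask plus a refcount. */
    typedef struct { unsigned mask; unsigned refcount; int in_use; } entry_t;

    #define POOL_SIZE 8
    static entry_t pool[POOL_SIZE];

    /* Find an entry with this mask or create one; either way the caller
     * leaves holding one reference (mirrors assign_mask_type_index()). */
    static unsigned
    assign_entry (unsigned mask)
    {
      unsigned i;
      for (i = 0; i < POOL_SIZE; i++)
        if (pool[i].in_use && pool[i].mask == mask)
          goto found;
      for (i = 0; i < POOL_SIZE && pool[i].in_use; i++)
        ;
      assert (i < POOL_SIZE);
      pool[i] = (entry_t) { mask, 0, 1 };
    found:
      pool[i].refcount++;
      return i;
    }

    /* Take an extra reference on an index already in hand, the step the
     * split path was missing (mirrors lock_mask_type_index()). */
    static void
    lock_entry (unsigned i)
    {
      pool[i].refcount++;
    }

    /* Drop a reference; the entry is freed exactly when the count hits
     * zero (mirrors release_mask_type_index()). */
    static void
    release_entry (unsigned i)
    {
      assert (pool[i].refcount > 0);
      if (--pool[i].refcount == 0)
        memset (&pool[i], 0, sizeof (pool[i]));
    }

    int
    main (void)
    {
      unsigned a = assign_entry (0xff);       /* first user: refcount 1 */
      lock_entry (a);                         /* second user: refcount 2 */
      release_entry (a);                      /* refcount 1: still alive */
      release_entry (a);                      /* refcount 0: freed exactly once */
      printf ("in_use=%d\n", pool[a].in_use); /* prints in_use=0 */
      return 0;
    }

Without the lock_entry() call, the second release_entry() would drop the
count to zero while another user still holds the index; the entry gets
freed and eventually reallocated out from under that user, which is exactly
the double-free and delayed-crash pattern described above.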
Diffstat (limited to 'src')

 -rw-r--r--  src/plugins/acl/acl.h          |   1
 -rw-r--r--  src/plugins/acl/hash_lookup.c  | 151
 2 files changed, 91 insertions, 61 deletions
diff --git a/src/plugins/acl/acl.h b/src/plugins/acl/acl.h
index b994b8506b7..d030681ac3a 100644
--- a/src/plugins/acl/acl.h
+++ b/src/plugins/acl/acl.h
@@ -122,6 +122,7 @@ typedef struct
CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
fa_5tuple_t mask;
u32 refcount;
+ u8 from_tm;
} ace_mask_type_entry_t;
typedef struct {
diff --git a/src/plugins/acl/hash_lookup.c b/src/plugins/acl/hash_lookup.c
index f16f7b2fd59..82df45ea304 100644
--- a/src/plugins/acl/hash_lookup.c
+++ b/src/plugins/acl/hash_lookup.c
@@ -258,13 +258,75 @@ relax_tuple(fa_5tuple_t *mask, int is_ip6, int relax2){
DBG( "TM-relaxing-end");
}
+static u32
+find_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
+{
+ ace_mask_type_entry_t *mte;
+ /* *INDENT-OFF* */
+ pool_foreach(mte, am->ace_mask_type_pool,
+ ({
+ if(memcmp(&mte->mask, mask, sizeof(*mask)) == 0)
+ return (mte - am->ace_mask_type_pool);
+ }));
+ /* *INDENT-ON* */
+ return ~0;
+}
+
+static u32
+assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
+{
+ u32 mask_type_index = find_mask_type_index(am, mask);
+ ace_mask_type_entry_t *mte;
+ if(~0 == mask_type_index) {
+ pool_get_aligned (am->ace_mask_type_pool, mte, CLIB_CACHE_LINE_BYTES);
+ mask_type_index = mte - am->ace_mask_type_pool;
+ clib_memcpy(&mte->mask, mask, sizeof(mte->mask));
+ mte->refcount = 0;
+
+ /*
+ * We can use only 16 bits, since in the match there is only u16 field.
+ * Realistically, once you go to 64K of mask types, it is a huge
+ * problem anyway, so we might as well stop half way.
+ */
+ ASSERT(mask_type_index < 32768);
+ }
+ mte = am->ace_mask_type_pool + mask_type_index;
+ mte->refcount++;
+ DBG0("ASSIGN MTE index %d new refcount %d", mask_type_index, mte->refcount);
+ return mask_type_index;
+}
+
+static void
+lock_mask_type_index(acl_main_t *am, u32 mask_type_index)
+{
+ DBG0("LOCK MTE index %d", mask_type_index);
+ ace_mask_type_entry_t *mte = pool_elt_at_index(am->ace_mask_type_pool, mask_type_index);
+ mte->refcount++;
+ DBG0("LOCK MTE index %d new refcount %d", mask_type_index, mte->refcount);
+}
+
+
+static void
+release_mask_type_index(acl_main_t *am, u32 mask_type_index)
+{
+ DBG0("RELEAS MTE index %d", mask_type_index);
+ ace_mask_type_entry_t *mte = pool_elt_at_index(am->ace_mask_type_pool, mask_type_index);
+ mte->refcount--;
+ DBG0("RELEAS MTE index %d new refcount %d", mask_type_index, mte->refcount);
+ if (mte->refcount == 0) {
+ /* we are not using this entry anymore */
+ memset(mte, 0xae, sizeof(*mte));
+ pool_put(am->ace_mask_type_pool, mte);
+ }
+}
+
static u32
tm_assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask, int is_ip6, u32 lc_index)
{
u32 mask_type_index = ~0;
u32 for_mask_type_index = ~0;
- ace_mask_type_entry_t *mte;
+ ace_mask_type_entry_t *mte = 0;
int order_index;
/* look for existing mask comparable with the one in input */
@@ -278,6 +340,7 @@ tm_assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask, int is_ip6, u32 lc_index)
mte = vec_elt_at_index(am->ace_mask_type_pool, for_mask_type_index);
if(first_mask_contains_second_mask(is_ip6, &mte->mask, mask)){
mask_type_index = (mte - am->ace_mask_type_pool);
+ lock_mask_type_index(am, mask_type_index);
break;
}
}
@@ -286,8 +349,9 @@ tm_assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask, int is_ip6, u32 lc_index)
if(~0 == mask_type_index) {
/* if no mask is found, then let's use a relaxed version of the original one, in order to be used by new ace_entries */
DBG( "TM-assigning mask type index-new one");
- pool_get_aligned (am->ace_mask_type_pool, mte, CLIB_CACHE_LINE_BYTES);
- mask_type_index = mte - am->ace_mask_type_pool;
+ fa_5tuple_t relaxed_mask = *mask;
+ relax_tuple(&relaxed_mask, is_ip6, 0);
+ mask_type_index = assign_mask_type_index(am, &relaxed_mask);
hash_applied_mask_info_t **hash_applied_mask_info_vec = vec_elt_at_index(am->hash_applied_mask_info_vec_by_lc_index, lc_index);
@@ -299,10 +363,6 @@ tm_assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask, int is_ip6, u32 lc_index)
minfo->max_collisions = 0;
minfo->first_rule_index = ~0;
- clib_memcpy(&mte->mask, mask, sizeof(mte->mask));
- relax_tuple(&mte->mask, is_ip6, 0);
-
- mte->refcount = 0;
/*
* We can use only 16 bits, since in the match there is only u16 field.
* Realistically, once you go to 64K of mask types, it is a huge
@@ -311,7 +371,7 @@ tm_assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask, int is_ip6, u32 lc_index)
ASSERT(mask_type_index < 32768);
}
mte = am->ace_mask_type_pool + mask_type_index;
- mte->refcount++;
+ DBG0("TM-ASSIGN MTE index %d new refcount %d", mask_type_index, mte->refcount);
return mask_type_index;
}
@@ -361,58 +421,12 @@ add_del_hashtable_entry(acl_main_t *am,
}
-static u32
-find_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
-{
- ace_mask_type_entry_t *mte;
- /* *INDENT-OFF* */
- pool_foreach(mte, am->ace_mask_type_pool,
- ({
- if(memcmp(&mte->mask, mask, sizeof(*mask)) == 0)
- return (mte - am->ace_mask_type_pool);
- }));
- /* *INDENT-ON* */
- return ~0;
-}
-
-static u32
-assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
-{
- u32 mask_type_index = find_mask_type_index(am, mask);
- ace_mask_type_entry_t *mte;
- if(~0 == mask_type_index) {
- pool_get_aligned (am->ace_mask_type_pool, mte, CLIB_CACHE_LINE_BYTES);
- mask_type_index = mte - am->ace_mask_type_pool;
- clib_memcpy(&mte->mask, mask, sizeof(mte->mask));
- mte->refcount = 0;
- /*
- * We can use only 16 bits, since in the match there is only u16 field.
- * Realistically, once you go to 64K of mask types, it is a huge
- * problem anyway, so we might as well stop half way.
- */
- ASSERT(mask_type_index < 32768);
- }
- mte = am->ace_mask_type_pool + mask_type_index;
- mte->refcount++;
- return mask_type_index;
-}
-
-static void
-release_mask_type_index(acl_main_t *am, u32 mask_type_index)
-{
- ace_mask_type_entry_t *mte = pool_elt_at_index(am->ace_mask_type_pool, mask_type_index);
- mte->refcount--;
- if (mte->refcount == 0) {
- /* we are not using this entry anymore */
- pool_put(am->ace_mask_type_pool, mte);
- }
-}
-
static void
remake_hash_applied_mask_info_vec (acl_main_t * am,
applied_hash_ace_entry_t **
applied_hash_aces, u32 lc_index)
{
+ DBG0("remake applied hash mask info lc_index %d", lc_index);
hash_applied_mask_info_t *new_hash_applied_mask_info_vec =
vec_new (hash_applied_mask_info_t, 0);
@@ -438,6 +452,7 @@ remake_hash_applied_mask_info_vec (acl_main_t * am,
minfo = vec_elt_at_index ((new_hash_applied_mask_info_vec), search);
if (search == new_pointer)
{
+ DBG0("remaking index %d", search);
minfo->mask_type_index = pae->mask_type_index;
minfo->num_entries = 0;
minfo->max_collisions = 0;
@@ -857,6 +872,10 @@ deactivate_applied_ace_hash_entry(acl_main_t *am,
{
applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), old_index);
DBG("UNAPPLY DEACTIVATE: lc_index %d applied index %d", lc_index, old_index);
+ if (ACL_HASH_LOOKUP_DEBUG > 0) {
+ clib_warning("Deactivating pae at index %d", old_index);
+ acl_plugin_print_pae(am->vlib_main, old_index, pae);
+ }
if (pae->prev_applied_entry_index != ~0) {
DBG("UNAPPLY = index %d has prev_applied_entry_index %d", old_index, pae->prev_applied_entry_index);
@@ -898,7 +917,7 @@ deactivate_applied_ace_hash_entry(acl_main_t *am,
applied_hash_aces, old_index, 0);
}
}
-
+ DBG0("Releasing mask type index %d for pae index %d on lc_index %d", pae->mask_type_index, old_index, lc_index);
release_mask_type_index(am, pae->mask_type_index);
/* invalidate the old entry */
pae->mask_type_index = ~0;
@@ -922,6 +941,13 @@ hash_acl_unapply(acl_main_t *am, u32 lc_index, int acl_index)
hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
u32 **hash_acl_applied_lc_index = &ha->lc_index_list;
+ if (ACL_HASH_LOOKUP_DEBUG > 0) {
+ clib_warning("unapplying acl %d", acl_index);
+ acl_plugin_show_tables_mask_type();
+ acl_plugin_show_tables_acl_hash_info(acl_index);
+ acl_plugin_show_tables_applied_info(lc_index);
+ }
+
/* remove this acl# from the list of applied hash acls */
u32 index = vec_search(pal->applied_acls, acl_index);
if (index == ~0) {
@@ -966,7 +992,7 @@ hash_acl_unapply(acl_main_t *am, u32 lc_index, int acl_index)
for(i=0; i < tail_len; i ++) {
/* move the entry at tail offset to base offset */
/* that is, from (tail_offset+i) -> (base_offset+i) */
- DBG("UNAPPLY MOVE: lc_index %d, applied index %d -> %d", lc_index, tail_offset+i, base_offset + i);
+ DBG0("UNAPPLY MOVE: lc_index %d, applied index %d -> %d", lc_index, tail_offset+i, base_offset + i);
move_applied_ace_hash_entry(am, lc_index, applied_hash_aces, tail_offset + i, base_offset + i);
}
/* trim the end of the vector */
@@ -1278,9 +1304,9 @@ static void
acl_plugin_print_pae (vlib_main_t * vm, int j, applied_hash_ace_entry_t * pae)
{
vlib_cli_output (vm,
- " %4d: acl %d rule %d action %d bitmask-ready rule %d colliding_rules: %d next %d prev %d tail %d hitcount %lld acl_pos: %d",
+ " %4d: acl %d rule %d action %d bitmask-ready rule %d mask type index: %d colliding_rules: %d next %d prev %d tail %d hitcount %lld acl_pos: %d",
j, pae->acl_index, pae->ace_index, pae->action,
- pae->hash_ace_info_index, vec_len(pae->colliding_rules), pae->next_applied_entry_index,
+ pae->hash_ace_info_index, pae->mask_type_index, vec_len(pae->colliding_rules), pae->next_applied_entry_index,
pae->prev_applied_entry_index,
pae->tail_applied_entry_index, pae->hitcount, pae->acl_position);
int jj;
@@ -1456,7 +1482,6 @@ split_partition(acl_main_t *am, u32 first_index,
int i=0;
u64 collisions = vec_len(pae->colliding_rules);
-// while(pae->next_applied_entry_index == ~0){
for(i=0; i<collisions; i++){
DBG( "TM-collision: base_ace:%d (ace_mask:%d, first_collision_mask:%d)",
@@ -1507,7 +1532,7 @@ split_partition(acl_main_t *am, u32 first_index,
max_tuple->pkt.as_u64 = mask->pkt.as_u64;
}
- pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
+ pae = pae->next_applied_entry_index == ~0 ? 0 : vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
}
/* Computing field with max difference between (min/max)_mask */
@@ -1632,6 +1657,7 @@ split_partition(acl_main_t *am, u32 first_index,
/* populate new partition */
DBG( "TM-Populate new partition");
u32 r_ace_index = first_index;
+ int repopulate_count = 0;
// for(i=0; i<collisions; i++){
for(r_ace_index=0; r_ace_index < vec_len((*applied_hash_aces)); r_ace_index++) {
@@ -1657,6 +1683,9 @@ split_partition(acl_main_t *am, u32 first_index,
/* insert the new entry */
pop_pae->mask_type_index = new_mask_type_index;
+ /* The very first repopulation gets the lock by virtue of a new mask being created above */
+ if (++repopulate_count > 1)
+ lock_mask_type_index(am, new_mask_type_index);
activate_applied_ace_hash_entry(am, lc_index, applied_hash_aces, r_ace_index);
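
One counting note on the repopulation hunk above: when n entries end up on
the new mask, its refcount must also end up at n (one reference from the
creation of the new mask, per the comment in the hunk, plus one
lock_mask_type_index() per entry after the first), so that the single
release_mask_type_index() per entry in deactivate_applied_ace_hash_entry()
frees the entry exactly once. A toy check of that invariant (illustrative
only, not plugin code):

    #include <assert.h>

    /* Toy model of the repopulation loop: n entries move to the new mask,
     * so its refcount must end at n, i.e. 1 from the mask's creation plus
     * (n - 1) from lock_mask_type_index(). */
    int
    main (void)
    {
      int n = 5;                  /* entries repopulated onto the new mask */
      int refcount = 0;
      int repopulate_count = 0;

      refcount++;                 /* reference taken when the mask was created */
      for (int i = 0; i < n; i++)
        if (++repopulate_count > 1)
          refcount++;             /* lock_mask_type_index() for entries 2..n */

      assert (refcount == n);     /* one release per entry frees it exactly once */
      return 0;
    }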