-rw-r--r--  src/plugins/acl/acl.h            1
-rw-r--r--  src/plugins/acl/hash_lookup.c  151
2 files changed, 91 insertions, 61 deletions
diff --git a/src/plugins/acl/acl.h b/src/plugins/acl/acl.h
index b994b8506b7..d030681ac3a 100644
--- a/src/plugins/acl/acl.h
+++ b/src/plugins/acl/acl.h
@@ -122,6 +122,7 @@ typedef struct
CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
fa_5tuple_t mask;
u32 refcount;
+ u8 from_tm;
} ace_mask_type_entry_t;
typedef struct {
diff --git a/src/plugins/acl/hash_lookup.c b/src/plugins/acl/hash_lookup.c
index 6b14612b49d..0c51ab4fc19 100644
--- a/src/plugins/acl/hash_lookup.c
+++ b/src/plugins/acl/hash_lookup.c
@@ -258,13 +258,75 @@ relax_tuple(fa_5tuple_t *mask, int is_ip6, int relax2){
DBG( "TM-relaxing-end");
}
+static u32
+find_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
+{
+ ace_mask_type_entry_t *mte;
+ /* *INDENT-OFF* */
+ pool_foreach(mte, am->ace_mask_type_pool,
+ ({
+ if(memcmp(&mte->mask, mask, sizeof(*mask)) == 0)
+ return (mte - am->ace_mask_type_pool);
+ }));
+ /* *INDENT-ON* */
+ return ~0;
+}
+
+static u32
+assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
+{
+ u32 mask_type_index = find_mask_type_index(am, mask);
+ ace_mask_type_entry_t *mte;
+ if(~0 == mask_type_index) {
+ pool_get_aligned (am->ace_mask_type_pool, mte, CLIB_CACHE_LINE_BYTES);
+ mask_type_index = mte - am->ace_mask_type_pool;
+ clib_memcpy(&mte->mask, mask, sizeof(mte->mask));
+ mte->refcount = 0;
+
+ /*
+ * Only 16 bits are usable, since the match result has just a u16 field.
+ * Realistically, once you get to 64K mask types it is a huge problem
+ * anyway, so we might as well stop halfway.
+ */
+ ASSERT(mask_type_index < 32768);
+ }
+ mte = am->ace_mask_type_pool + mask_type_index;
+ mte->refcount++;
+ DBG0("ASSIGN MTE index %d new refcount %d", mask_type_index, mte->refcount);
+ return mask_type_index;
+}
+
+static void
+lock_mask_type_index(acl_main_t *am, u32 mask_type_index)
+{
+ DBG0("LOCK MTE index %d", mask_type_index);
+ ace_mask_type_entry_t *mte = pool_elt_at_index(am->ace_mask_type_pool, mask_type_index);
+ mte->refcount++;
+ DBG0("LOCK MTE index %d new refcount %d", mask_type_index, mte->refcount);
+}
+
+
+static void
+release_mask_type_index(acl_main_t *am, u32 mask_type_index)
+{
+ DBG0("RELEAS MTE index %d", mask_type_index);
+ ace_mask_type_entry_t *mte = pool_elt_at_index(am->ace_mask_type_pool, mask_type_index);
+ mte->refcount--;
+ DBG0("RELEAS MTE index %d new refcount %d", mask_type_index, mte->refcount);
+ if (mte->refcount == 0) {
+ /* we are not using this entry anymore */
+ memset(mte, 0xae, sizeof(*mte));
+ pool_put(am->ace_mask_type_pool, mte);
+ }
+}
+
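The three helpers above implement a small reference-counting scheme over the mask-type pool: assign_mask_type_index() reuses an entry with an identical mask or allocates a new one, lock_mask_type_index() adds a reference to an existing entry, and release_mask_type_index() drops one and frees the entry on the last release (poisoning it with 0xae first so stale references are easy to spot). Below is a minimal standalone sketch of that lifecycle; the pool is modeled as a plain array so it compiles without vppinfra, and entry_t / MAX_ENTRIES are illustrative stand-ins, not plugin types.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_ENTRIES 8

    typedef struct {
      uint64_t mask;     /* stand-in for fa_5tuple_t */
      uint32_t refcount; /* 0 means the slot is free */
    } entry_t;

    static entry_t pool[MAX_ENTRIES];

    /* assign: reuse an identical mask or claim a free slot; always +1 ref */
    static uint32_t
    assign_index (uint64_t mask)
    {
      uint32_t i, free_slot = MAX_ENTRIES;
      for (i = 0; i < MAX_ENTRIES; i++) {
        if (pool[i].refcount && pool[i].mask == mask)
          break;
        if (!pool[i].refcount && free_slot == MAX_ENTRIES)
          free_slot = i;
      }
      if (i == MAX_ENTRIES) {
        assert (free_slot < MAX_ENTRIES); /* sketch: no pool growth */
        i = free_slot;
        pool[i].mask = mask;
      }
      pool[i].refcount++;
      return i;
    }

    /* lock: an additional user of an already-assigned index */
    static void
    lock_index (uint32_t i) { pool[i].refcount++; }

    /* release: drop a reference; the last release frees the slot */
    static void
    release_index (uint32_t i)
    {
      if (--pool[i].refcount == 0)
        memset (&pool[i], 0, sizeof (pool[i]));
    }

    int
    main (void)
    {
      uint32_t a = assign_index (0xff00); /* refcount 1 */
      uint32_t b = assign_index (0xff00); /* same mask reuses the slot: refcount 2 */
      assert (a == b);
      lock_index (a);                     /* refcount 3 */
      release_index (a);
      release_index (a);
      release_index (a);                  /* slot freed here */
      printf ("slot %u freed: %s\n", a, pool[a].refcount == 0 ? "yes" : "no");
      return 0;
    }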
static u32
tm_assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask, int is_ip6, u32 lc_index)
{
u32 mask_type_index = ~0;
u32 for_mask_type_index = ~0;
- ace_mask_type_entry_t *mte;
+ ace_mask_type_entry_t *mte = 0;
int order_index;
/* look for existing mask comparable with the one in input */
@@ -278,6 +340,7 @@ tm_assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask, int is_ip6, u32 lc_
mte = vec_elt_at_index(am->ace_mask_type_pool, for_mask_type_index);
if(first_mask_contains_second_mask(is_ip6, &mte->mask, mask)){
mask_type_index = (mte - am->ace_mask_type_pool);
+ lock_mask_type_index(am, mask_type_index);
break;
}
}
@@ -286,8 +349,9 @@ tm_assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask, int is_ip6, u32 lc_
if(~0 == mask_type_index) {
/* if no mask is found, use a relaxed version of the original one, so that it can be shared by new ace entries */
DBG( "TM-assigning mask type index-new one");
- pool_get_aligned (am->ace_mask_type_pool, mte, CLIB_CACHE_LINE_BYTES);
- mask_type_index = mte - am->ace_mask_type_pool;
+ fa_5tuple_t relaxed_mask = *mask;
+ relax_tuple(&relaxed_mask, is_ip6, 0);
+ mask_type_index = assign_mask_type_index(am, &relaxed_mask);
hash_applied_mask_info_t **hash_applied_mask_info_vec = vec_elt_at_index(am->hash_applied_mask_info_vec_by_lc_index, lc_index);
@@ -299,10 +363,6 @@ tm_assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask, int is_ip6, u32 lc_
minfo->max_collisions = 0;
minfo->first_rule_index = ~0;
- clib_memcpy(&mte->mask, mask, sizeof(mte->mask));
- relax_tuple(&mte->mask, is_ip6, 0);
-
- mte->refcount = 0;
/*
* We can use only 16 bits, since in the match there is only u16 field.
* Realistically, once you go to 64K of mask types, it is a huge
@@ -311,7 +371,7 @@ tm_assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask, int is_ip6, u32 lc_
ASSERT(mask_type_index < 32768);
}
mte = am->ace_mask_type_pool + mask_type_index;
- mte->refcount++;
+ DBG0("TM-ASSIGN MTE index %d new refcount %d", mask_type_index, mte->refcount);
return mask_type_index;
}
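With this change, when no compatible mask exists, tm_assign_mask_type_index() relaxes a copy of the requested mask before handing it to assign_mask_type_index(), so the new partition is wide enough to absorb later, more specific masks. relax_tuple()'s actual policy is not shown in this hunk; the sketch below only illustrates the general idea with a hypothetical helper that halves the prefix length of a 32-bit address mask.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical relaxation: keep only half of the set prefix bits of a
     * 32-bit address mask (e.g. /24 -> /12), so the coarser mask covers a
     * superset of what the original matched. */
    static uint32_t
    relax_addr_mask (uint32_t mask)
    {
      int bits = __builtin_popcount (mask); /* current prefix length */
      int relaxed = bits / 2;
      return relaxed ? ~0u << (32 - relaxed) : 0;
    }

    int
    main (void)
    {
      uint32_t m = 0xffffff00u; /* a /24 mask */
      printf ("relaxed: 0x%08x\n", relax_addr_mask (m)); /* 0xfff00000, a /12 */
      return 0;
    }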
@@ -361,58 +421,12 @@ add_del_hashtable_entry(acl_main_t *am,
}
-static u32
-find_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
-{
- ace_mask_type_entry_t *mte;
- /* *INDENT-OFF* */
- pool_foreach(mte, am->ace_mask_type_pool,
- ({
- if(memcmp(&mte->mask, mask, sizeof(*mask)) == 0)
- return (mte - am->ace_mask_type_pool);
- }));
- /* *INDENT-ON* */
- return ~0;
-}
-
-static u32
-assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
-{
- u32 mask_type_index = find_mask_type_index(am, mask);
- ace_mask_type_entry_t *mte;
- if(~0 == mask_type_index) {
- pool_get_aligned (am->ace_mask_type_pool, mte, CLIB_CACHE_LINE_BYTES);
- mask_type_index = mte - am->ace_mask_type_pool;
- clib_memcpy(&mte->mask, mask, sizeof(mte->mask));
- mte->refcount = 0;
- /*
- * We can use only 16 bits, since in the match there is only u16 field.
- * Realistically, once you go to 64K of mask types, it is a huge
- * problem anyway, so we might as well stop half way.
- */
- ASSERT(mask_type_index < 32768);
- }
- mte = am->ace_mask_type_pool + mask_type_index;
- mte->refcount++;
- return mask_type_index;
-}
-
-static void
-release_mask_type_index(acl_main_t *am, u32 mask_type_index)
-{
- ace_mask_type_entry_t *mte = pool_elt_at_index(am->ace_mask_type_pool, mask_type_index);
- mte->refcount--;
- if (mte->refcount == 0) {
- /* we are not using this entry anymore */
- pool_put(am->ace_mask_type_pool, mte);
- }
-}
-
static void
remake_hash_applied_mask_info_vec (acl_main_t * am,
applied_hash_ace_entry_t **
applied_hash_aces, u32 lc_index)
{
+ DBG0("remake applied hash mask info lc_index %d", lc_index);
hash_applied_mask_info_t *new_hash_applied_mask_info_vec =
vec_new (hash_applied_mask_info_t, 0);
@@ -438,6 +452,7 @@ remake_hash_applied_mask_info_vec (acl_main_t * am,
minfo = vec_elt_at_index ((new_hash_applied_mask_info_vec), search);
if (search == new_pointer)
{
+ DBG0("remaking index %d", search);
minfo->mask_type_index = pae->mask_type_index;
minfo->num_entries = 0;
minfo->max_collisions = 0;
@@ -852,6 +867,10 @@ deactivate_applied_ace_hash_entry(acl_main_t *am,
{
applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), old_index);
DBG("UNAPPLY DEACTIVATE: lc_index %d applied index %d", lc_index, old_index);
+ if (ACL_HASH_LOOKUP_DEBUG > 0) {
+ clib_warning("Deactivating pae at index %d", old_index);
+ acl_plugin_print_pae(am->vlib_main, old_index, pae);
+ }
if (pae->prev_applied_entry_index != ~0) {
DBG("UNAPPLY = index %d has prev_applied_entry_index %d", old_index, pae->prev_applied_entry_index);
@@ -893,7 +912,7 @@ deactivate_applied_ace_hash_entry(acl_main_t *am,
applied_hash_aces, old_index, 0);
}
}
-
+ DBG0("Releasing mask type index %d for pae index %d on lc_index %d", pae->mask_type_index, old_index, lc_index);
release_mask_type_index(am, pae->mask_type_index);
/* invalidate the old entry */
pae->mask_type_index = ~0;
@@ -917,6 +936,13 @@ hash_acl_unapply(acl_main_t *am, u32 lc_index, int acl_index)
hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
u32 **hash_acl_applied_lc_index = &ha->lc_index_list;
+ if (ACL_HASH_LOOKUP_DEBUG > 0) {
+ clib_warning("unapplying acl %d", acl_index);
+ acl_plugin_show_tables_mask_type();
+ acl_plugin_show_tables_acl_hash_info(acl_index);
+ acl_plugin_show_tables_applied_info(lc_index);
+ }
+
/* remove this acl# from the list of applied hash acls */
u32 index = vec_search(pal->applied_acls, acl_index);
if (index == ~0) {
@@ -961,7 +987,7 @@ hash_acl_unapply(acl_main_t *am, u32 lc_index, int acl_index)
for(i=0; i < tail_len; i ++) {
/* move the entry at tail offset to base offset */
/* that is, from (tail_offset+i) -> (base_offset+i) */
- DBG("UNAPPLY MOVE: lc_index %d, applied index %d -> %d", lc_index, tail_offset+i, base_offset + i);
+ DBG0("UNAPPLY MOVE: lc_index %d, applied index %d -> %d", lc_index, tail_offset+i, base_offset + i);
move_applied_ace_hash_entry(am, lc_index, applied_hash_aces, tail_offset + i, base_offset + i);
}
/* trim the end of the vector */
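The move loop above compacts the applied-entries vector after an unapply: the unapplied ACL's entries occupied [base_offset, tail_offset), and each tail entry slides down into that gap before the end of the vector is trimmed. A standalone sketch of the same compaction, with plain ints standing in for applied_hash_ace_entry_t:

    #include <stdio.h>

    int
    main (void)
    {
      /* three applied ACLs: A = {10,11}, B = {20,21,22}, C = {30,31} */
      int vec[] = { 10, 11, 20, 21, 22, 30, 31 };
      int n = 7, base_offset = 2, tail_offset = 5; /* unapply B */
      int tail_len = n - tail_offset;

      for (int i = 0; i < tail_len; i++)
        vec[base_offset + i] = vec[tail_offset + i]; /* (tail_offset+i) -> (base_offset+i) */
      n -= (tail_offset - base_offset); /* trim the end of the vector */

      for (int i = 0; i < n; i++)
        printf ("%d ", vec[i]); /* prints: 10 11 30 31 */
      printf ("\n");
      return 0;
    }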
@@ -1273,9 +1299,9 @@ static void
acl_plugin_print_pae (vlib_main_t * vm, int j, applied_hash_ace_entry_t * pae)
{
vlib_cli_output (vm,
- " %4d: acl %d rule %d action %d bitmask-ready rule %d colliding_rules: %d next %d prev %d tail %d hitcount %lld acl_pos: %d",
+ " %4d: acl %d rule %d action %d bitmask-ready rule %d mask type index: %d colliding_rules: %d next %d prev %d tail %d hitcount %lld acl_pos: %d",
j, pae->acl_index, pae->ace_index, pae->action,
- pae->hash_ace_info_index, vec_len(pae->colliding_rules), pae->next_applied_entry_index,
+ pae->hash_ace_info_index, pae->mask_type_index, vec_len(pae->colliding_rules), pae->next_applied_entry_index,
pae->prev_applied_entry_index,
pae->tail_applied_entry_index, pae->hitcount, pae->acl_position);
int jj;
@@ -1451,7 +1477,6 @@ split_partition(acl_main_t *am, u32 first_index,
int i=0;
u64 collisions = vec_len(pae->colliding_rules);
-// while(pae->next_applied_entry_index == ~0){
for(i=0; i<collisions; i++){
DBG( "TM-collision: base_ace:%d (ace_mask:%d, first_collision_mask:%d)",
@@ -1502,7 +1527,7 @@ split_partition(acl_main_t *am, u32 first_index,
max_tuple->pkt.as_u64 = mask->pkt.as_u64;
}
- pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
+ pae = pae->next_applied_entry_index == ~0 ? 0 : vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
}
/* Computing field with max difference between (min/max)_mask */
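The one-line change above hardens the walk over a collision chain: entries are linked through next_applied_entry_index and the chain ends with ~0, so the walker must yield a null pointer at the sentinel instead of using it as a vector index. A minimal sketch of the corrected walk:

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint32_t next_applied_entry_index; } entry_t;

    int
    main (void)
    {
      entry_t vec[3] = { { 1 }, { 2 }, { ~0u } }; /* chain: 0 -> 1 -> 2 -> end */
      entry_t *pae = &vec[0];
      int hops = 0;
      while (pae) {
        hops++;
        /* stop cleanly at the ~0 sentinel instead of indexing with it */
        pae = pae->next_applied_entry_index == ~0u
                ? 0 : &vec[pae->next_applied_entry_index];
      }
      printf ("visited %d entries\n", hops); /* prints: visited 3 entries */
      return 0;
    }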
@@ -1627,6 +1652,7 @@ split_partition(acl_main_t *am, u32 first_index,
/* populate new partition */
DBG( "TM-Populate new partition");
u32 r_ace_index = first_index;
+ int repopulate_count = 0;
// for(i=0; i<collisions; i++){
for(r_ace_index=0; r_ace_index < vec_len((*applied_hash_aces)); r_ace_index++) {
@@ -1652,6 +1678,9 @@ split_partition(acl_main_t *am, u32 first_index,
/* insert the new entry */
pop_pae->mask_type_index = new_mask_type_index;
+ /* The very first repopulation gets the lock by virtue of a new mask being created above */
+ if (++repopulate_count > 1)
+ lock_mask_type_index(am, new_mask_type_index);
activate_applied_ace_hash_entry(am, lc_index, applied_hash_aces, r_ace_index);
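The repopulate_count guard preserves the invariant of one mask-type reference per applied entry: assign_mask_type_index() on the relaxed mask already took the first reference when the new partition was created, so only the second and later repopulated entries call lock_mask_type_index(). A tiny arithmetic check of that accounting, with n_moved as an illustrative entry count:

    #include <assert.h>
    #include <stdio.h>

    int
    main (void)
    {
      int refcount = 1;        /* taken by assign_mask_type_index() on the new mask */
      int n_moved = 5;         /* entries repopulated into the new partition */
      int repopulate_count = 0;

      for (int i = 0; i < n_moved; i++)
        if (++repopulate_count > 1)
          refcount++;          /* lock_mask_type_index() for each extra user */

      assert (refcount == n_moved); /* exactly one reference per applied entry */
      printf ("refcount %d for %d entries\n", refcount, n_moved);
      return 0;
    }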