-rw-r--r--  src/plugins/acl/acl.h                  24
-rw-r--r--  src/plugins/acl/hash_lookup.c         388
-rw-r--r--  src/plugins/acl/hash_lookup_private.h   2
-rw-r--r--  src/plugins/acl/hash_lookup_types.h    47
-rw-r--r--  src/plugins/acl/public_inlines.h      207
-rw-r--r--  src/plugins/acl/types.h                40
6 files changed, 456 insertions(+), 252 deletions(-)
diff --git a/src/plugins/acl/acl.h b/src/plugins/acl/acl.h
index f63771f4dac..733d785d886 100644
--- a/src/plugins/acl/acl.h
+++ b/src/plugins/acl/acl.h
@@ -28,6 +28,7 @@
#include <vppinfra/bihash_40_8.h>
#include <vppinfra/bihash_16_8.h>
+#include "types.h"
#include "fa_node.h"
#include "hash_lookup_types.h"
#include "lookup_context.h"
@@ -72,26 +73,6 @@ typedef struct
} addr;
} address_t;
-/*
- * ACL rules
- */
-typedef struct
-{
- u8 is_permit;
- u8 is_ipv6;
- ip46_address_t src;
- u8 src_prefixlen;
- ip46_address_t dst;
- u8 dst_prefixlen;
- u8 proto;
- u16 src_port_or_type_first;
- u16 src_port_or_type_last;
- u16 dst_port_or_code_first;
- u16 dst_port_or_code_last;
- u8 tcp_flags_value;
- u8 tcp_flags_mask;
-} acl_rule_t;
-
typedef struct
{
u8 is_permit;
@@ -216,6 +197,9 @@ typedef struct {
/* a pool of all mask types present in all ACEs */
ace_mask_type_entry_t *ace_mask_type_pool;
+ /* per-lc_index vector of vectors holding the info about the mask types present in the ACEs applied on that lc_index */
+ hash_applied_mask_info_t **hash_applied_mask_info_vec_by_lc_index;
+
/*
* Classify tables used to grab the packets for the ACL check,
* and serving as the 5-tuple session tables at the same time
diff --git a/src/plugins/acl/hash_lookup.c b/src/plugins/acl/hash_lookup.c
index 935ef2680ab..8c884dc997d 100644
--- a/src/plugins/acl/hash_lookup.c
+++ b/src/plugins/acl/hash_lookup.c
@@ -64,15 +64,25 @@ fill_applied_hash_ace_kv(acl_main_t *am,
applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), new_index);
hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, pae->acl_index);
- memcpy(kv_key, &(vec_elt_at_index(ha->rules, pae->hash_ace_info_index)->match), sizeof(*kv_key));
- /* initialize the sw_if_index and direction */
+ /* apply the mask to ace key */
+ hash_ace_info_t *ace_info = vec_elt_at_index(ha->rules, pae->hash_ace_info_index);
+ ace_mask_type_entry_t *mte = vec_elt_at_index(am->ace_mask_type_pool, pae->mask_type_index);
+
+ u64 *pmatch = (u64 *) &ace_info->match;
+ u64 *pmask = (u64 *)&mte->mask;
+ u64 *pkey = (u64 *)kv->key;
+
+ *pkey++ = *pmatch++ & *pmask++;
+ *pkey++ = *pmatch++ & *pmask++;
+ *pkey++ = *pmatch++ & *pmask++;
+ *pkey++ = *pmatch++ & *pmask++;
+ *pkey++ = *pmatch++ & *pmask++;
+ *pkey++ = *pmatch++ & *pmask++;
+
+ kv_key->pkt.mask_type_index_lsb = pae->mask_type_index;
kv_key->pkt.lc_index = lc_index;
kv_val->as_u64 = 0;
kv_val->applied_entry_index = new_index;
- kv_val->need_portrange_check = vec_elt_at_index(ha->rules, pae->hash_ace_info_index)->src_portrange_not_powerof2 ||
- vec_elt_at_index(ha->rules, pae->hash_ace_info_index)->dst_portrange_not_powerof2;
- /* by default assume all values are shadowed -> check all mask types */
- kv_val->shadowed = 1;
}
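
The hunk above replaces the straight memcpy of the precomputed match value with an on-the-fly AND of the ACE match against its assigned mask. A minimal standalone sketch of that six-word masked-key construction, using a hypothetical key48_t stand-in for the 48-byte fa_5tuple_t:

    #include <stdint.h>

    typedef uint64_t u64;

    /* hypothetical stand-in for the 48-byte fa_5tuple_t: six u64 words */
    typedef struct { u64 w[6]; } key48_t;

    /* Build the hash key as the per-word AND of match value and mask.
     * The fixed six-word loop is unrolled by hand, as in the plugin;
     * the commit's own comments note the unroll is measurably faster. */
    static inline void
    make_masked_key (const key48_t * match, const key48_t * mask, key48_t * key)
    {
      const u64 *pmatch = match->w;
      const u64 *pmask = mask->w;
      u64 *pkey = key->w;

      *pkey++ = *pmatch++ & *pmask++;
      *pkey++ = *pmatch++ & *pmask++;
      *pkey++ = *pmatch++ & *pmask++;
      *pkey++ = *pmatch++ & *pmask++;
      *pkey++ = *pmatch++ & *pmask++;
      *pkey++ = *pmatch++ & *pmask++;
    }
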
static void
@@ -88,6 +98,148 @@ add_del_hashtable_entry(acl_main_t *am,
}
+static u32
+find_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
+{
+ ace_mask_type_entry_t *mte;
+ /* *INDENT-OFF* */
+ pool_foreach(mte, am->ace_mask_type_pool,
+ ({
+ if(memcmp(&mte->mask, mask, sizeof(*mask)) == 0)
+ return (mte - am->ace_mask_type_pool);
+ }));
+ /* *INDENT-ON* */
+ return ~0;
+}
+
+static u32
+assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
+{
+ u32 mask_type_index = find_mask_type_index(am, mask);
+ ace_mask_type_entry_t *mte;
+ if(~0 == mask_type_index) {
+ pool_get_aligned (am->ace_mask_type_pool, mte, CLIB_CACHE_LINE_BYTES);
+ mask_type_index = mte - am->ace_mask_type_pool;
+ clib_memcpy(&mte->mask, mask, sizeof(mte->mask));
+ mte->refcount = 0;
+ /*
+ * We can use only 16 bits, since the match has only a u16 field for it.
+ * Realistically, once you go to 64K of mask types, it is a huge
+ * problem anyway, so we might as well stop half way.
+ */
+ ASSERT(mask_type_index < 32768);
+ }
+ mte = am->ace_mask_type_pool + mask_type_index;
+ mte->refcount++;
+ return mask_type_index;
+}
+
+static void
+release_mask_type_index(acl_main_t *am, u32 mask_type_index)
+{
+ ace_mask_type_entry_t *mte = pool_elt_at_index(am->ace_mask_type_pool, mask_type_index);
+ mte->refcount--;
+ if (mte->refcount == 0) {
+ /* we are not using this entry anymore */
+ pool_put(am->ace_mask_type_pool, mte);
+ }
+}
+
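
find_mask_type_index / assign_mask_type_index / release_mask_type_index (moved up from lower in this file) implement a find-or-create pool with reference counting: identical masks are shared, and an entry is reclaimed when its last user releases it. A hedged sketch of the same pattern over a plain array, without the VPP pool_* macros:

    #include <stdint.h>
    #include <string.h>

    #define MAX_MASKS 32768          /* the plugin asserts the index < 32768 */

    typedef struct { uint64_t mask[6]; uint32_t refcount; } mask_entry_t;
    static mask_entry_t pool[MAX_MASKS];   /* refcount == 0 marks a free slot */

    static uint32_t
    assign_mask (const uint64_t mask[6])
    {
      uint32_t i, free_slot = MAX_MASKS;
      for (i = 0; i < MAX_MASKS; i++)
        {
          if (pool[i].refcount
              && memcmp (pool[i].mask, mask, sizeof (pool[i].mask)) == 0)
            {
              pool[i].refcount++;          /* existing identical mask: share */
              return i;
            }
          if (!pool[i].refcount && free_slot == MAX_MASKS)
            free_slot = i;                 /* remember the first free slot */
        }
      if (free_slot == MAX_MASKS)
        return ~0u;                        /* pool exhausted */
      memcpy (pool[free_slot].mask, mask, sizeof (pool[free_slot].mask));
      pool[free_slot].refcount = 1;        /* new mask: first reference */
      return free_slot;
    }

    static void
    release_mask (uint32_t idx)
    {
      pool[idx].refcount--;                /* slot becomes free again at 0 */
    }
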
+static void
+remake_hash_applied_mask_info_vec (acl_main_t * am,
+ applied_hash_ace_entry_t **
+ applied_hash_aces, u32 lc_index)
+{
+ hash_applied_mask_info_t *new_hash_applied_mask_info_vec =
+ vec_new (hash_applied_mask_info_t, 0);
+
+ hash_applied_mask_info_t *minfo;
+ int i;
+ for (i = 0; i < vec_len ((*applied_hash_aces)); i++)
+ {
+ applied_hash_ace_entry_t *pae =
+ vec_elt_at_index ((*applied_hash_aces), i);
+
+ /* check if mask_type_index is already there */
+ u32 new_pointer = vec_len (new_hash_applied_mask_info_vec);
+ int search;
+ for (search = 0; search < vec_len (new_hash_applied_mask_info_vec);
+ search++)
+ {
+ minfo = vec_elt_at_index (new_hash_applied_mask_info_vec, search);
+ if (minfo->mask_type_index == pae->mask_type_index)
+ break;
+ }
+
+ vec_validate ((new_hash_applied_mask_info_vec), search);
+ minfo = vec_elt_at_index ((new_hash_applied_mask_info_vec), search);
+ if (search == new_pointer)
+ {
+ minfo->mask_type_index = pae->mask_type_index;
+ minfo->num_entries = 0;
+ minfo->max_collisions = 0;
+ minfo->first_rule_index = ~0;
+ }
+
+ minfo->num_entries = minfo->num_entries + 1;
+
+ if (vec_len (pae->colliding_rules) > minfo->max_collisions)
+ minfo->max_collisions = vec_len (pae->colliding_rules);
+
+ if (minfo->first_rule_index > i)
+ minfo->first_rule_index = i;
+ }
+
+ hash_applied_mask_info_t **hash_applied_mask_info_vec =
+ vec_elt_at_index (am->hash_applied_mask_info_vec_by_lc_index, lc_index);
+
+ vec_free ((*hash_applied_mask_info_vec));
+ (*hash_applied_mask_info_vec) = new_hash_applied_mask_info_vec;
+}
+
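
remake_hash_applied_mask_info_vec rebuilds, from scratch, one summary record per distinct mask type applied on the lookup context: how many entries use it, the longest collision chain among them, and the smallest applied index. The same single-pass aggregation over plain arrays, as an illustrative sketch with hypothetical flattened inputs:

    #include <stdint.h>

    typedef struct { uint32_t mask_type_index; uint32_t num_entries;
                     uint32_t max_collisions; uint32_t first_rule_index; } minfo_t;

    /* entries[i] describes applied entry i: its mask type and the
     * length of its collision chain */
    typedef struct { uint32_t mask_type_index; uint32_t n_collisions; } aentry_t;

    static int
    remake_mask_info (const aentry_t * entries, int n, minfo_t * out)
    {
      int n_out = 0, i, s;
      for (i = 0; i < n; i++)
        {
          for (s = 0; s < n_out; s++)
            if (out[s].mask_type_index == entries[i].mask_type_index)
              break;
          if (s == n_out)   /* first entry using this mask: start a record */
            out[n_out++] = (minfo_t) { entries[i].mask_type_index, 0, 0, ~0u };
          out[s].num_entries++;
          if (entries[i].n_collisions > out[s].max_collisions)
            out[s].max_collisions = entries[i].n_collisions;
          if ((uint32_t) i < out[s].first_rule_index)
            out[s].first_rule_index = (uint32_t) i;
        }
      return n_out;     /* number of distinct mask types found */
    }
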
+static void
+vec_del_collision_rule (collision_match_rule_t ** pvec,
+ u32 applied_entry_index)
+{
+ u32 i;
+ for (i = 0; i < vec_len ((*pvec)); i++)
+ {
+ collision_match_rule_t *cr = vec_elt_at_index ((*pvec), i);
+ if (cr->applied_entry_index == applied_entry_index)
+ {
+ vec_del1 ((*pvec), i);
+ }
+ }
+}
+
+static void
+del_colliding_rule (applied_hash_ace_entry_t ** applied_hash_aces,
+ u32 head_index, u32 applied_entry_index)
+{
+ applied_hash_ace_entry_t *head_pae =
+ vec_elt_at_index ((*applied_hash_aces), head_index);
+ vec_del_collision_rule (&head_pae->colliding_rules, applied_entry_index);
+}
+
+static void
+add_colliding_rule (acl_main_t * am,
+ applied_hash_ace_entry_t ** applied_hash_aces,
+ u32 head_index, u32 applied_entry_index)
+{
+ applied_hash_ace_entry_t *head_pae =
+ vec_elt_at_index ((*applied_hash_aces), head_index);
+ applied_hash_ace_entry_t *pae =
+ vec_elt_at_index ((*applied_hash_aces), applied_entry_index);
+
+ collision_match_rule_t cr;
+
+ cr.acl_index = pae->acl_index;
+ cr.ace_index = pae->ace_index;
+ cr.acl_position = pae->acl_position;
+ cr.applied_entry_index = applied_entry_index;
+ cr.rule = am->acls[pae->acl_index].rules[pae->ace_index];
+ vec_add1 (head_pae->colliding_rules, cr);
+}
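
add_colliding_rule / del_colliding_rule maintain, on the head entry of every hash chain, a vector of collision_match_rule_t records: a full copy of the original rule plus enough indices to map a match back to its ACL, ACE, and applied entry. Deletion relies on vec_del1(), which swaps the last element into the deleted slot; a standalone rendition of that delete-by-key idiom (order not preserved):

    #include <stdint.h>

    typedef struct { uint32_t applied_entry_index; /* + rule copy, indices */ } crule_t;

    /* Remove every record whose applied_entry_index matches, using the
     * swap-with-last shrink that vec_del1() implements. */
    static int
    del_collision_rule (crule_t * v, int n, uint32_t applied_entry_index)
    {
      int i;
      for (i = 0; i < n; )
        {
          if (v[i].applied_entry_index == applied_entry_index)
            v[i] = v[--n];          /* overwrite with last element, shrink */
          else
            i++;
        }
      return n;                     /* new length */
    }
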
static void
activate_applied_ace_hash_entry(acl_main_t *am,
@@ -126,28 +278,16 @@ activate_applied_ace_hash_entry(acl_main_t *am,
pae->prev_applied_entry_index = last_index;
/* adjust the pointer to the new tail */
first_pae->tail_applied_entry_index = new_index;
+ add_colliding_rule(am, applied_hash_aces, first_index, new_index);
} else {
/* It's the very first entry */
hashtable_add_del(am, &kv, 1);
ASSERT(new_index != ~0);
pae->tail_applied_entry_index = new_index;
+ add_colliding_rule(am, applied_hash_aces, new_index, new_index);
}
}
-static void
-applied_hash_entries_analyze(acl_main_t *am, applied_hash_ace_entry_t **applied_hash_aces)
-{
- /*
- * Go over the rules and check which ones are shadowed and which aren't.
- * Naive approach: try to match the match value from every ACE as if it
- * was a live packet, and see if the resulting match happens earlier in the list.
- * if it does not match or it is later in the ACL - then the entry is not shadowed.
- *
- * This approach fails, an example:
- * deny tcp 2001:db8::/32 2001:db8::/32
- * permit ip 2001:db8::1/128 2001:db8::2/128
- */
-}
static void *
hash_acl_set_heap(acl_main_t *am)
@@ -193,6 +333,23 @@ acl_plugin_hash_acl_set_trace_heap(int on)
}
}
+static void
+assign_mask_type_index_to_pae(acl_main_t *am, applied_hash_ace_entry_t *pae)
+{
+ hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, pae->acl_index);
+ hash_ace_info_t *ace_info = vec_elt_at_index(ha->rules, pae->hash_ace_info_index);
+
+ ace_mask_type_entry_t *mte;
+ fa_5tuple_t *mask;
+ /*
+ * Start taking base_mask associated to ace, and essentially copy it.
+ * With TupleMerge we will assign a relaxed mask here.
+ */
+ mte = vec_elt_at_index(am->ace_mask_type_pool, ace_info->base_mask_type_index);
+ mask = &mte->mask;
+ pae->mask_type_index = assign_mask_type_index(am, mask);
+}
+
void
hash_acl_apply(acl_main_t *am, u32 lc_index, int acl_index, u32 acl_position)
{
@@ -237,8 +394,6 @@ hash_acl_apply(acl_main_t *am, u32 lc_index, int acl_index, u32 acl_position)
}
vec_add1((*hash_acl_applied_lc_index), lc_index);
- pal->mask_type_index_bitmap = clib_bitmap_or(pal->mask_type_index_bitmap,
- ha->mask_type_index_bitmap);
/*
* if the applied ACL is empty, the current code will cause a
* different behavior compared to current linear search: an empty ACL will
@@ -252,6 +407,7 @@ hash_acl_apply(acl_main_t *am, u32 lc_index, int acl_index, u32 acl_position)
/* expand the applied aces vector by the necessary amount */
vec_resize((*applied_hash_aces), vec_len(ha->rules));
+ vec_validate(am->hash_applied_mask_info_vec_by_lc_index, lc_index);
/* add the rules from the ACL to the hash table for lookup and append to the vector*/
for(i=0; i < vec_len(ha->rules); i++) {
u32 new_index = base_offset + i;
@@ -266,9 +422,12 @@ hash_acl_apply(acl_main_t *am, u32 lc_index, int acl_index, u32 acl_position)
pae->next_applied_entry_index = ~0;
pae->prev_applied_entry_index = ~0;
pae->tail_applied_entry_index = ~0;
+ pae->colliding_rules = NULL;
+ pae->mask_type_index = ~0;
+ assign_mask_type_index_to_pae(am, pae);
activate_applied_ace_hash_entry(am, lc_index, applied_hash_aces, new_index);
}
- applied_hash_entries_analyze(am, applied_hash_aces);
+ remake_hash_applied_mask_info_vec(am, applied_hash_aces, lc_index);
done:
clib_mem_set_heap (oldheap);
}
@@ -350,13 +509,14 @@ deactivate_applied_ace_hash_entry(acl_main_t *am,
applied_hash_ace_entry_t *prev_pae = vec_elt_at_index((*applied_hash_aces), pae->prev_applied_entry_index);
ASSERT(prev_pae->next_applied_entry_index == old_index);
prev_pae->next_applied_entry_index = pae->next_applied_entry_index;
+
+ u32 head_index = find_head_applied_ace_index(applied_hash_aces, old_index);
+ ASSERT(head_index != ~0);
+ applied_hash_ace_entry_t *head_pae = vec_elt_at_index((*applied_hash_aces), head_index);
+ del_colliding_rule(applied_hash_aces, head_index, old_index);
+
if (pae->next_applied_entry_index == ~0) {
/* it was a last entry we removed, update the pointer on the first one */
- u32 head_index = find_head_applied_ace_index(applied_hash_aces, old_index);
- DBG("UNAPPLY = index %d head index to update %d", old_index, head_index);
- ASSERT(head_index != ~0);
- applied_hash_ace_entry_t *head_pae = vec_elt_at_index((*applied_hash_aces), head_index);
-
ASSERT(head_pae->tail_applied_entry_index == old_index);
head_pae->tail_applied_entry_index = pae->prev_applied_entry_index;
} else {
@@ -370,7 +530,9 @@ deactivate_applied_ace_hash_entry(acl_main_t *am,
applied_hash_ace_entry_t *next_pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
ASSERT(pae->tail_applied_entry_index != ~0);
next_pae->tail_applied_entry_index = pae->tail_applied_entry_index;
- DBG("Resetting the hash table entry from %d to %d, setting tail index to %d", old_index, pae->next_applied_entry_index, pae->tail_applied_entry_index);
+ /* Remove ourselves and transfer the ownership of the colliding rules vector */
+ del_colliding_rule(applied_hash_aces, old_index, old_index);
+ next_pae->colliding_rules = pae->colliding_rules;
/* unlink from the next element */
next_pae->prev_applied_entry_index = ~0;
add_del_hashtable_entry(am, lc_index,
@@ -381,36 +543,18 @@ deactivate_applied_ace_hash_entry(acl_main_t *am,
applied_hash_aces, old_index, 0);
}
}
+
+ release_mask_type_index(am, pae->mask_type_index);
/* invalidate the old entry */
+ pae->mask_type_index = ~0;
pae->prev_applied_entry_index = ~0;
pae->next_applied_entry_index = ~0;
pae->tail_applied_entry_index = ~0;
+ /* always has to be 0 */
+ pae->colliding_rules = NULL;
}
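
The deactivation path above also handles the case where the removed entry was the head of a collision chain: the colliding_rules vector is not copied to the new head but handed over wholesale, and the outgoing entry's pointer is then cleared so the vector has exactly one owner at all times. In isolation, assuming a minimal entry type:

    /* move semantics by pointer hand-off: the vector is allocated once
     * and owned by exactly one chain head at any time */
    typedef struct { struct crule_ *colliding_rules; } chain_entry_t;

    static void
    transfer_collision_vector (chain_entry_t * old_head, chain_entry_t * new_head)
    {
      new_head->colliding_rules = old_head->colliding_rules;  /* move, not copy */
      old_head->colliding_rules = 0;   /* stale entry can no longer free it */
    }
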
-static void
-hash_acl_build_applied_lookup_bitmap(acl_main_t *am, u32 lc_index)
-{
- int i;
- uword *new_lookup_bitmap = 0;
-
- applied_hash_acl_info_t **applied_hash_acls = &am->applied_hash_acl_info_by_lc_index;
- vec_validate((*applied_hash_acls), lc_index);
- applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), lc_index);
-
- for(i=0; i < vec_len(pal->applied_acls); i++) {
- u32 a_acl_index = *vec_elt_at_index((pal->applied_acls), i);
- hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, a_acl_index);
- DBG("Update bitmask = %U or %U (acl_index %d)\n", format_bitmap_hex, new_lookup_bitmap,
- format_bitmap_hex, ha->mask_type_index_bitmap, a_acl_index);
- new_lookup_bitmap = clib_bitmap_or(new_lookup_bitmap,
- ha->mask_type_index_bitmap);
- }
- uword *old_lookup_bitmap = pal->mask_type_index_bitmap;
- pal->mask_type_index_bitmap = new_lookup_bitmap;
- clib_bitmap_free(old_lookup_bitmap);
-}
-
void
hash_acl_unapply(acl_main_t *am, u32 lc_index, int acl_index)
{
@@ -473,10 +617,8 @@ hash_acl_unapply(acl_main_t *am, u32 lc_index, int acl_index)
/* trim the end of the vector */
_vec_len((*applied_hash_aces)) -= vec_len(ha->rules);
- applied_hash_entries_analyze(am, applied_hash_aces);
+ remake_hash_applied_mask_info_vec(am, applied_hash_aces, lc_index);
- /* After deletion we might not need some of the mask-types anymore... */
- hash_acl_build_applied_lookup_bitmap(am, lc_index);
clib_mem_set_heap (oldheap);
}
@@ -499,8 +641,8 @@ hash_acl_reapply(acl_main_t *am, u32 lc_index, int acl_index)
DBG0("Start index for acl %d in lc_index %d is %d", acl_index, lc_index, start_index);
/*
- * This function is called after we find out the sw_if_index where ACL is applied.
- * If the by-sw_if_index vector does not have the ACL#, then it's a bug.
+ * This function is called after we find out the lc_index where the ACL is applied.
+ * If the by-lc_index vector does not have the ACL#, then it's a bug.
*/
ASSERT(start_index < vec_len(*applied_acls));
@@ -543,31 +685,17 @@ make_ip4_address_mask(ip4_address_t *addr, u8 prefix_len)
ip4_address_mask_from_width(addr, prefix_len);
}
-static u8
+static void
make_port_mask(u16 *portmask, u16 port_first, u16 port_last)
{
if (port_first == port_last) {
*portmask = 0xffff;
/* single port is representable by masked value */
- return 0;
- }
- if ((port_first == 0) && (port_last == 65535)) {
- *portmask = 0;
- /* wildcard port is representable by a masked value */
- return 0;
+ return;
}
- /*
- * For now match all the ports, later
- * here might be a better optimization which would
- * pick out bitmaskable portranges.
- *
- * However, adding a new mask type potentially
- * adds a per-packet extra lookup, so the benefit is not clear.
- */
*portmask = 0;
- /* This port range can't be represented via bitmask exactly. */
- return 1;
+ return;
}
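
With the collision vector now doing exact per-rule verification, make_port_mask no longer needs to report non-power-of-two ranges: an exact port gets a full mask, and everything else, including the formerly special-cased 0..65535 wildcard, collapses to a zero mask and is re-checked per rule. The whole function reduces to:

    static void
    make_port_mask (u16 * portmask, u16 port_first, u16 port_last)
    {
      /* single port: exactly representable under the 0xffff mask;
       * any real range: wildcard the field and let the per-collision
       * linear verification enforce the bounds */
      *portmask = (port_first == port_last) ? 0xffff : 0;
    }
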
static void
@@ -602,9 +730,10 @@ make_mask_and_match_from_rule(fa_5tuple_t *mask, acl_rule_t *r, hash_ace_info_t
hi->match.l4.proto = r->proto;
/* Calculate the src/dst port masks and make the src/dst port matches accordingly */
- hi->src_portrange_not_powerof2 = make_port_mask(&mask->l4.port[0], r->src_port_or_type_first, r->src_port_or_type_last);
+ make_port_mask(&mask->l4.port[0], r->src_port_or_type_first, r->src_port_or_type_last);
hi->match.l4.port[0] = r->src_port_or_type_first & mask->l4.port[0];
- hi->dst_portrange_not_powerof2 = make_port_mask(&mask->l4.port[1], r->dst_port_or_code_first, r->dst_port_or_code_last);
+
+ make_port_mask(&mask->l4.port[1], r->dst_port_or_code_first, r->dst_port_or_code_last);
hi->match.l4.port[1] = r->dst_port_or_code_first & mask->l4.port[1];
/* L4 info must be valid in order to match */
mask->pkt.l4_valid = 1;
@@ -630,52 +759,6 @@ make_mask_and_match_from_rule(fa_5tuple_t *mask, acl_rule_t *r, hash_ace_info_t
}
}
-static u32
-find_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
-{
- ace_mask_type_entry_t *mte;
- /* *INDENT-OFF* */
- pool_foreach(mte, am->ace_mask_type_pool,
- ({
- if(memcmp(&mte->mask, mask, sizeof(*mask)) == 0)
- return (mte - am->ace_mask_type_pool);
- }));
- /* *INDENT-ON* */
- return ~0;
-}
-
-static u32
-assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
-{
- u32 mask_type_index = find_mask_type_index(am, mask);
- ace_mask_type_entry_t *mte;
- if(~0 == mask_type_index) {
- pool_get_aligned (am->ace_mask_type_pool, mte, CLIB_CACHE_LINE_BYTES);
- mask_type_index = mte - am->ace_mask_type_pool;
- clib_memcpy(&mte->mask, mask, sizeof(mte->mask));
- mte->refcount = 0;
- /*
- * We can use only 16 bits, since in the match there is only u16 field.
- * Realistically, once you go to 64K of mask types, it is a huge
- * problem anyway, so we might as well stop half way.
- */
- ASSERT(mask_type_index < 32768);
- }
- mte = am->ace_mask_type_pool + mask_type_index;
- mte->refcount++;
- return mask_type_index;
-}
-
-static void
-release_mask_type_index(acl_main_t *am, u32 mask_type_index)
-{
- ace_mask_type_entry_t *mte = pool_elt_at_index(am->ace_mask_type_pool, mask_type_index);
- mte->refcount--;
- if (mte->refcount == 0) {
- /* we are not using this entry anymore */
- pool_put(am->ace_mask_type_pool, mte);
- }
-}
int hash_acl_exists(acl_main_t *am, int acl_index)
{
@@ -707,12 +790,11 @@ void hash_acl_add(acl_main_t *am, int acl_index)
ace_info.ace_index = i;
make_mask_and_match_from_rule(&mask, &a->rules[i], &ace_info);
- ace_info.mask_type_index = assign_mask_type_index(am, &mask);
+ mask.pkt.flags_reserved = 0b000;
+ ace_info.base_mask_type_index = assign_mask_type_index(am, &mask);
/* assign the mask type index for matching itself */
- ace_info.match.pkt.mask_type_index_lsb = ace_info.mask_type_index;
- DBG("ACE: %d mask_type_index: %d", i, ace_info.mask_type_index);
- /* Ensure a given index is set in the mask type index bitmap for this ACL */
- ha->mask_type_index_bitmap = clib_bitmap_set(ha->mask_type_index_bitmap, ace_info.mask_type_index, 1);
+ ace_info.match.pkt.mask_type_index_lsb = ace_info.base_mask_type_index;
+ DBG("ACE: %d mask_type_index: %d", i, ace_info.base_mask_type_index);
vec_add1(ha->rules, ace_info);
}
/*
@@ -760,9 +842,8 @@ void hash_acl_delete(acl_main_t *am, int acl_index)
* the reference count, possibly freeing up some of them */
int i;
for(i=0; i < vec_len(ha->rules); i++) {
- release_mask_type_index(am, ha->rules[i].mask_type_index);
+ release_mask_type_index(am, ha->rules[i].base_mask_type_index);
}
- clib_bitmap_free(ha->mask_type_index_bitmap);
ha->hash_acl_exists = 0;
vec_free(ha->rules);
clib_mem_set_heap (oldheap);
@@ -813,31 +894,46 @@ acl_plugin_show_tables_acl_hash_info (u32 acl_index)
vlib_cli_output (vm, "acl-index %u bitmask-ready layout\n", i);
vlib_cli_output (vm, " applied lc_index list: %U\n",
format_vec32, ha->lc_index_list, "%d");
- vlib_cli_output (vm, " mask type index bitmap: %U\n",
- format_bitmap_hex, ha->mask_type_index_bitmap);
for (j = 0; j < vec_len (ha->rules); j++)
{
hash_ace_info_t *pa = &ha->rules[j];
m = (u64 *) & pa->match;
vlib_cli_output (vm,
- " %4d: %016llx %016llx %016llx %016llx %016llx %016llx mask index %d acl %d rule %d action %d src/dst portrange not ^2: %d,%d\n",
+ " %4d: %016llx %016llx %016llx %016llx %016llx %016llx base mask index %d acl %d rule %d action %d\n",
j, m[0], m[1], m[2], m[3], m[4], m[5],
- pa->mask_type_index, pa->acl_index, pa->ace_index,
- pa->action, pa->src_portrange_not_powerof2,
- pa->dst_portrange_not_powerof2);
+ pa->base_mask_type_index, pa->acl_index, pa->ace_index,
+ pa->action);
}
}
}
-void
+static void
+acl_plugin_print_colliding_rule (vlib_main_t * vm, int j, collision_match_rule_t *cr) {
+ vlib_cli_output(vm,
+ " %4d: acl %d ace %d acl pos %d pae index: %d",
+ j, cr->acl_index, cr->ace_index, cr->acl_position, cr->applied_entry_index);
+}
+
+static void
acl_plugin_print_pae (vlib_main_t * vm, int j, applied_hash_ace_entry_t * pae)
{
vlib_cli_output (vm,
- " %4d: acl %d rule %d action %d bitmask-ready rule %d next %d prev %d tail %d hitcount %lld",
+ " %4d: acl %d rule %d action %d bitmask-ready rule %d colliding_rules: %d next %d prev %d tail %d hitcount %lld acl_pos: %d",
j, pae->acl_index, pae->ace_index, pae->action,
- pae->hash_ace_info_index, pae->next_applied_entry_index,
+ pae->hash_ace_info_index, vec_len(pae->colliding_rules), pae->next_applied_entry_index,
pae->prev_applied_entry_index,
- pae->tail_applied_entry_index, pae->hitcount);
+ pae->tail_applied_entry_index, pae->hitcount, pae->acl_position);
+ int jj;
+ for(jj=0; jj<vec_len(pae->colliding_rules); jj++)
+ acl_plugin_print_colliding_rule(vm, jj, vec_elt_at_index(pae->colliding_rules, jj));
+}
+
+static void
+acl_plugin_print_applied_mask_info (vlib_main_t * vm, int j, hash_applied_mask_info_t *mi)
+{
+ vlib_cli_output (vm,
+ " %4d: mask type index %d first rule index %d num_entries %d max_collisions %d",
+ j, mi->mask_type_index, mi->first_rule_index, mi->num_entries, mi->max_collisions);
}
void
@@ -860,11 +956,21 @@ acl_plugin_show_tables_applied_info (u32 lc_index)
{
applied_hash_acl_info_t *pal =
&am->applied_hash_acl_info_by_lc_index[lci];
- vlib_cli_output (vm, " lookup mask_type_index_bitmap: %U",
- format_bitmap_hex, pal->mask_type_index_bitmap);
vlib_cli_output (vm, " applied acls: %U", format_vec32,
pal->applied_acls, "%d");
}
+ if (lci < vec_len (am->hash_applied_mask_info_vec_by_lc_index))
+ {
+ vlib_cli_output (vm, " applied mask info entries:");
+ for (j = 0;
+ j < vec_len (am->hash_applied_mask_info_vec_by_lc_index[lci]);
+ j++)
+ {
+ acl_plugin_print_applied_mask_info (vm, j,
+ &am->hash_applied_mask_info_vec_by_lc_index
+ [lci][j]);
+ }
+ }
if (lci < vec_len (am->hash_entry_vec_by_lc_index))
{
vlib_cli_output (vm, " lookup applied entries:");
diff --git a/src/plugins/acl/hash_lookup_private.h b/src/plugins/acl/hash_lookup_private.h
index bc621416125..0db02ea031f 100644
--- a/src/plugins/acl/hash_lookup_private.h
+++ b/src/plugins/acl/hash_lookup_private.h
@@ -23,7 +23,7 @@
#define DBG_UNIX_LOG(...)
#elif ACL_HASH_LOOKUP_DEBUG == 2
#define DBG0(...) clib_warning(__VA_ARGS__)
-#define DBG(...) clib_warning(__VA_ARGS__)
+#define DBG(...) do { void *prevheap = clib_mem_set_heap (vlib_global_main.heap_base); vlib_cli_output(&vlib_global_main, __VA_ARGS__); clib_mem_set_heap (prevheap); } while (0)
#define DBG_UNIX_LOG(...) clib_unix_warning(__VA_ARGS__)
#else
#define DBG0(...)
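
The reworked DBG macro routes debug text through vlib_cli_output rather than clib_warning, temporarily switching to the global heap for the duration of the call; the hash-lookup code often runs on the ACL plugin's own heap, and the inference (drawn from the macro itself) is that the CLI formatting must not allocate there. The save/restore pattern as a plain function:

    #include <vlib/vlib.h>

    static void
    debug_out_on_main_heap (vlib_main_t * vm, int value)
    {
      /* park the current heap, emit via the CLI, restore */
      void *prevheap = clib_mem_set_heap (vlib_global_main.heap_base);
      vlib_cli_output (vm, "debug value: %d", value);
      clib_mem_set_heap (prevheap);
    }
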
diff --git a/src/plugins/acl/hash_lookup_types.h b/src/plugins/acl/hash_lookup_types.h
index 1a20ebff8f8..3efcf4e372c 100644
--- a/src/plugins/acl/hash_lookup_types.h
+++ b/src/plugins/acl/hash_lookup_types.h
@@ -18,17 +18,17 @@
#ifndef _ACL_HASH_LOOKUP_TYPES_H_
#define _ACL_HASH_LOOKUP_TYPES_H_
+#include "types.h"
+
/* The structure representing the single entry with hash representation */
typedef struct {
+ fa_5tuple_t match;
/* these two entries refer to the original ACL# and rule# within that ACL */
u32 acl_index;
u32 ace_index;
- u32 mask_type_index;
- u8 src_portrange_not_powerof2;
- u8 dst_portrange_not_powerof2;
+ u32 base_mask_type_index;
- fa_5tuple_t match;
u8 action;
} hash_ace_info_t;
@@ -36,8 +36,6 @@ typedef struct {
* The structure holding the information necessary for the hash-based ACL operation
*/
typedef struct {
- /* The mask types present in this ACL */
- uword *mask_type_index_bitmap;
/* hash ACL applied on these lookup contexts */
u32 *lc_index_list;
hash_ace_info_t *rules;
@@ -45,12 +43,23 @@ typedef struct {
int hash_acl_exists;
} hash_acl_info_t;
+
+typedef struct {
+ acl_rule_t rule;
+ u32 acl_index;
+ u32 ace_index;
+ u32 acl_position;
+ u32 applied_entry_index;
+} collision_match_rule_t;
+
typedef struct {
/* original non-compiled ACL */
u32 acl_index;
u32 ace_index;
/* the index of the hash_ace_info_t */
u32 hash_ace_info_index;
+ /* applied mask type index */
+ u32 mask_type_index;
/*
* in case of the same key having multiple entries,
* this holds the index of the next entry.
@@ -66,6 +75,10 @@ typedef struct {
*/
u32 tail_applied_entry_index;
/*
+ * Collision rule vector for matching - set only on head entry
+ */
+ collision_match_rule_t *colliding_rules;
+ /*
* number of hits on this entry
*/
u64 hitcount;
@@ -80,11 +93,7 @@ typedef struct {
} applied_hash_ace_entry_t;
typedef struct {
- /*
- * A logical OR of all the applied_ace_hash_entry_t=>
- * hash_ace_info_t=>mask_type_index bits set
- */
- uword *mask_type_index_bitmap;
+
/* applied ACLs so we can track them independently from main ACL module */
u32 *applied_acls;
} applied_hash_acl_info_t;
@@ -96,13 +105,21 @@ typedef union {
u32 applied_entry_index;
u16 reserved_u16;
u8 reserved_u8;
- /* means there is some other entry in front intersecting with this one */
- u8 shadowed:1;
- u8 need_portrange_check:1;
- u8 reserved_flags:6;
+ u8 reserved_flags:8;
};
} hash_acl_lookup_value_t;
+
+typedef struct {
+ u32 mask_type_index;
+ /* first rule # for this mask */
+ u32 first_rule_index;
+ /* Debug Information */
+ u32 num_entries;
+ u32 max_collisions;
+} hash_applied_mask_info_t;
+
+
#define CT_ASSERT_EQUAL(name, x,y) typedef int assert_ ## name ## _compile_time_assertion_failed[((x) == (y))-1]
CT_ASSERT_EQUAL(hash_acl_lookup_value_t_is_u64, sizeof(hash_acl_lookup_value_t), sizeof(u64));
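
hash_acl_lookup_value_t must overlay exactly the 8-byte value slot of the 48_8 bihash, which the CT_ASSERT_EQUAL above pins down at compile time via the negative-array-size trick. A minimal standalone rendition of the same check, with the slimmed-down flags field from this commit:

    #include <stdint.h>

    typedef union {
      uint64_t as_u64;
      struct {            /* anonymous struct: C11 or a compiler extension */
        uint32_t applied_entry_index;
        uint16_t reserved_u16;
        uint8_t reserved_u8;
        uint8_t reserved_flags;  /* 'shadowed' / 'need_portrange_check' are gone */
      };
    } lookup_value_t;

    /* compiles only if the union is exactly one u64 wide: the array
     * type has size -1, i.e. is ill-formed, when the sizes differ */
    typedef int lookup_value_size_check[(sizeof (lookup_value_t) ==
                                         sizeof (uint64_t)) - 1];
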
diff --git a/src/plugins/acl/public_inlines.h b/src/plugins/acl/public_inlines.h
index f5ce0da6da4..ca4251903fa 100644
--- a/src/plugins/acl/public_inlines.h
+++ b/src/plugins/acl/public_inlines.h
@@ -489,91 +489,148 @@ match_portranges(acl_main_t *am, fa_5tuple_t *match, u32 index)
((r->dst_port_or_code_first <= match->l4.port[1]) && r->dst_port_or_code_last >= match->l4.port[1]) );
}
+always_inline int
+single_rule_match_5tuple (acl_rule_t * r, int is_ip6, fa_5tuple_t * pkt_5tuple)
+{
+ if (is_ip6 != r->is_ipv6)
+ {
+ return 0;
+ }
+
+ if (is_ip6)
+ {
+ if (!fa_acl_match_ip6_addr
+ (&pkt_5tuple->ip6_addr[1], &r->dst.ip6, r->dst_prefixlen))
+ return 0;
+ if (!fa_acl_match_ip6_addr
+ (&pkt_5tuple->ip6_addr[0], &r->src.ip6, r->src_prefixlen))
+ return 0;
+ }
+ else
+ {
+ if (!fa_acl_match_ip4_addr
+ (&pkt_5tuple->ip4_addr[1], &r->dst.ip4, r->dst_prefixlen))
+ return 0;
+ if (!fa_acl_match_ip4_addr
+ (&pkt_5tuple->ip4_addr[0], &r->src.ip4, r->src_prefixlen))
+ return 0;
+ }
+
+ if (r->proto)
+ {
+ if (pkt_5tuple->l4.proto != r->proto)
+ return 0;
+
+ /* A sanity check just to ensure we are about to match the ports extracted from the packet */
+ if (PREDICT_FALSE (!pkt_5tuple->pkt.l4_valid))
+ return 0;
+
+
+ if (!fa_acl_match_port
+ (pkt_5tuple->l4.port[0], r->src_port_or_type_first,
+ r->src_port_or_type_last, pkt_5tuple->pkt.is_ip6))
+ return 0;
+
+
+ if (!fa_acl_match_port
+ (pkt_5tuple->l4.port[1], r->dst_port_or_code_first,
+ r->dst_port_or_code_last, pkt_5tuple->pkt.is_ip6))
+ return 0;
+
+ if (pkt_5tuple->pkt.tcp_flags_valid
+ && ((pkt_5tuple->pkt.tcp_flags & r->tcp_flags_mask) !=
+ r->tcp_flags_value))
+ return 0;
+ }
+ /* everything matches! */
+ return 1;
+}
+
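
single_rule_match_5tuple above is the exact-match workhorse that replaces the old per-key portrange check: on a hash hit, each colliding rule is verified in full against the packet 5-tuple. The matcher below open-codes that walk; lifted into a hypothetical helper (a sketch against the declarations in this diff, not plugin API), it would read:

    /* verify each colliding rule and keep the earliest applied index;
     * equivalent to the loop inside multi_acl_match_get_applied_ace_index() */
    static u32
    check_colliding_rules (collision_match_rule_t * crs, int is_ip6,
                           fa_5tuple_t * match, u32 best)
    {
      int i;
      for (i = 0; i < vec_len (crs); i++)
        {
          if (crs[i].applied_entry_index >= best)
            continue;               /* cannot improve the candidate */
          if (single_rule_match_5tuple (&crs[i].rule, is_ip6, match))
            best = crs[i].applied_entry_index;
        }
      return best;
    }
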
always_inline u32
-multi_acl_match_get_applied_ace_index(acl_main_t *am, fa_5tuple_t *match)
+multi_acl_match_get_applied_ace_index (acl_main_t * am, int is_ip6, fa_5tuple_t * match)
{
clib_bihash_kv_48_8_t kv;
clib_bihash_kv_48_8_t result;
- fa_5tuple_t *kv_key = (fa_5tuple_t *)kv.key;
- hash_acl_lookup_value_t *result_val = (hash_acl_lookup_value_t *)&result.value;
- u64 *pmatch = (u64 *)match;
+ fa_5tuple_t *kv_key = (fa_5tuple_t *) kv.key;
+ hash_acl_lookup_value_t *result_val =
+ (hash_acl_lookup_value_t *) & result.value;
+ u64 *pmatch = (u64 *) match;
u64 *pmask;
u64 *pkey;
- int mask_type_index;
- u32 curr_match_index = ~0;
+ int mask_type_index, order_index;
+ u32 curr_match_index = (~0 - 1);
+
+
u32 lc_index = match->pkt.lc_index;
- applied_hash_ace_entry_t **applied_hash_aces = vec_elt_at_index(am->hash_entry_vec_by_lc_index, match->pkt.lc_index);
- applied_hash_acl_info_t **applied_hash_acls = &am->applied_hash_acl_info_by_lc_index;
+ applied_hash_ace_entry_t **applied_hash_aces =
+ vec_elt_at_index (am->hash_entry_vec_by_lc_index, lc_index);
- DBG("TRYING TO MATCH: %016llx %016llx %016llx %016llx %016llx %016llx",
- pmatch[0], pmatch[1], pmatch[2], pmatch[3], pmatch[4], pmatch[5]);
+ hash_applied_mask_info_t **hash_applied_mask_info_vec =
+ vec_elt_at_index (am->hash_applied_mask_info_vec_by_lc_index, lc_index);
- for(mask_type_index=0; mask_type_index < pool_len(am->ace_mask_type_pool); mask_type_index++) {
- if (!clib_bitmap_get(vec_elt_at_index((*applied_hash_acls), lc_index)->mask_type_index_bitmap, mask_type_index)) {
- /* This bit is not set. Avoid trying to match */
- continue;
- }
- ace_mask_type_entry_t *mte = vec_elt_at_index(am->ace_mask_type_pool, mask_type_index);
- pmatch = (u64 *)match;
- pmask = (u64 *)&mte->mask;
- pkey = (u64 *)kv.key;
- /*
- * unrolling the below loop results in a noticeable performance increase.
- int i;
- for(i=0; i<6; i++) {
- kv.key[i] = pmatch[i] & pmask[i];
- }
- */
-
- *pkey++ = *pmatch++ & *pmask++;
- *pkey++ = *pmatch++ & *pmask++;
- *pkey++ = *pmatch++ & *pmask++;
- *pkey++ = *pmatch++ & *pmask++;
- *pkey++ = *pmatch++ & *pmask++;
- *pkey++ = *pmatch++ & *pmask++;
-
- kv_key->pkt.mask_type_index_lsb = mask_type_index;
- DBG(" KEY %3d: %016llx %016llx %016llx %016llx %016llx %016llx", mask_type_index,
- kv.key[0], kv.key[1], kv.key[2], kv.key[3], kv.key[4], kv.key[5]);
- int res = clib_bihash_search_48_8 (&am->acl_lookup_hash, &kv, &result);
- if (res == 0) {
- DBG("ACL-MATCH! result_val: %016llx", result_val->as_u64);
- if (result_val->applied_entry_index < curr_match_index) {
- if (PREDICT_FALSE(result_val->need_portrange_check)) {
- /*
- * This is going to be slow, since we can have multiple superset
- * entries for narrow-ish portranges, e.g.:
- * 0..42 100..400, 230..60000,
- * so we need to walk linearly and check if they match.
- */
-
- u32 curr_index = result_val->applied_entry_index;
- while ((curr_index != ~0) && !match_portranges(am, match, curr_index)) {
- /* while no match and there are more entries, walk... */
- applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces),curr_index);
- DBG("entry %d did not portmatch, advancing to %d", curr_index, pae->next_applied_entry_index);
- curr_index = pae->next_applied_entry_index;
- }
- if (curr_index < curr_match_index) {
- DBG("The index %d is the new candidate in portrange matches.", curr_index);
- curr_match_index = curr_index;
- } else {
- DBG("Curr portmatch index %d is too big vs. current matched one %d", curr_index, curr_match_index);
- }
- } else {
- /* The usual path is here. Found an entry in front of the current candiate - so it's a new one */
- DBG("This match is the new candidate");
- curr_match_index = result_val->applied_entry_index;
- if (!result_val->shadowed) {
- /* new result is known to not be shadowed, so no point to look up further */
- break;
- }
- }
- }
+ hash_applied_mask_info_t *minfo;
+
+ DBG ("TRYING TO MATCH: %016llx %016llx %016llx %016llx %016llx %016llx",
+ pmatch[0], pmatch[1], pmatch[2], pmatch[3], pmatch[4], pmatch[5]);
+
+ for (order_index = 0; order_index < vec_len ((*hash_applied_mask_info_vec));
+ order_index++)
+ {
+ minfo = vec_elt_at_index ((*hash_applied_mask_info_vec), order_index);
+ if (minfo->first_rule_index > curr_match_index)
+ {
+ /* Indices in this and all following partitions (by construction) are greater than our candidate; no later match can improve the result, so stop looking. */
+ break;
+ }
+
+ mask_type_index = minfo->mask_type_index;
+ ace_mask_type_entry_t *mte =
+ vec_elt_at_index (am->ace_mask_type_pool, mask_type_index);
+ pmatch = (u64 *) match;
+ pmask = (u64 *) & mte->mask;
+ pkey = (u64 *) kv.key;
+ /*
+ * unrolling the below loop results in a noticeable performance increase.
+ int i;
+ for(i=0; i<6; i++) {
+ kv.key[i] = pmatch[i] & pmask[i];
+ }
+ */
+
+ *pkey++ = *pmatch++ & *pmask++;
+ *pkey++ = *pmatch++ & *pmask++;
+ *pkey++ = *pmatch++ & *pmask++;
+ *pkey++ = *pmatch++ & *pmask++;
+ *pkey++ = *pmatch++ & *pmask++;
+ *pkey++ = *pmatch++ & *pmask++;
+
+ kv_key->pkt.mask_type_index_lsb = mask_type_index;
+ int res =
+ clib_bihash_search_inline_2_48_8 (&am->acl_lookup_hash, &kv, &result);
+
+ if (res == 0)
+ {
+ /* There is a hit in the hash, so check the collision vector */
+ u32 curr_index = result_val->applied_entry_index;
+ applied_hash_ace_entry_t *pae =
+ vec_elt_at_index ((*applied_hash_aces), curr_index);
+ collision_match_rule_t *crs = pae->colliding_rules;
+ int i;
+ for (i = 0; i < vec_len (crs); i++)
+ {
+ if (crs[i].applied_entry_index >= curr_match_index)
+ {
+ continue;
+ }
+ if (single_rule_match_5tuple (&crs[i].rule, is_ip6, match))
+ {
+ curr_match_index = crs[i].applied_entry_index;
+ }
+ }
+ }
}
- }
- DBG("MATCH-RESULT: %d", curr_match_index);
+ DBG ("MATCH-RESULT: %d", curr_match_index);
return curr_match_index;
}
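
The rewritten matcher is thus a two-level search: mask partitions are visited in an order where first_rule_index is non-decreasing (which appears guaranteed by construction, since remake_hash_applied_mask_info_vec creates each record at the first applied entry that uses the mask), so as soon as a partition's smallest possible rule index exceeds the best candidate found so far, no later partition can win and the loop exits. A runnable toy of just that early-exit property, with hypothetical data standing in for hash hits:

    #include <stdint.h>
    #include <stdio.h>

    typedef struct
    {
      uint32_t first_rule_index;   /* smallest applied index under this mask */
      uint32_t candidates[4];      /* applied indices a hash hit would yield */
      int n_candidates;
    } partition_t;

    static uint32_t
    search (const partition_t * parts, int n_parts)
    {
      uint32_t best = ~0u - 1;     /* as in the matcher: one below invalid */
      int p, i;
      for (p = 0; p < n_parts; p++)
        {
          if (parts[p].first_rule_index > best)
            break;                 /* later partitions cannot improve */
          for (i = 0; i < parts[p].n_candidates; i++)
            if (parts[p].candidates[i] < best)
              best = parts[p].candidates[i]; /* stand-in for a verified match */
        }
      return best;
    }

    int
    main (void)
    {
      /* third partition starts at 9 > best (1): skipped, and by the
       * invariant all its candidates are >= 9 anyway */
      partition_t parts[3] = { { 0, { 7, 3 }, 2 },
                               { 1, { 1 }, 1 },
                               { 9, { 9 }, 1 } };
      printf ("winner: %u\n", search (parts, 3));   /* prints 1 */
      return 0;
    }
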
@@ -584,7 +641,7 @@ hash_multi_acl_match_5tuple (void *p_acl_main, u32 lc_index, fa_5tuple_t * pkt_5
{
acl_main_t *am = p_acl_main;
applied_hash_ace_entry_t **applied_hash_aces = vec_elt_at_index(am->hash_entry_vec_by_lc_index, lc_index);
- u32 match_index = multi_acl_match_get_applied_ace_index(am, pkt_5tuple);
+ u32 match_index = multi_acl_match_get_applied_ace_index(am, is_ip6, pkt_5tuple);
if (match_index < vec_len((*applied_hash_aces))) {
applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), match_index);
pae->hitcount++;
diff --git a/src/plugins/acl/types.h b/src/plugins/acl/types.h
new file mode 100644
index 00000000000..7011a28cbbe
--- /dev/null
+++ b/src/plugins/acl/types.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef included_acl_types_h
+#define included_acl_types_h
+
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+
+typedef struct
+{
+ u8 is_permit;
+ u8 is_ipv6;
+ ip46_address_t src;
+ u8 src_prefixlen;
+ ip46_address_t dst;
+ u8 dst_prefixlen;
+ u8 proto;
+ u16 src_port_or_type_first;
+ u16 src_port_or_type_last;
+ u16 dst_port_or_code_first;
+ u16 dst_port_or_code_last;
+ u8 tcp_flags_value;
+ u8 tcp_flags_mask;
+} acl_rule_t;
+
+
+#endif // included_acl_types_h
+
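
types.h simply relocates acl_rule_t, and nothing else, so that hash_lookup_types.h can embed a rule copy in collision_match_rule_t without pulling in all of acl.h. As a usage illustration, a hypothetical rule "permit tcp 192.0.2.0/24 -> any, dst port 80" would be populated like this:

    #include <vnet/ip/ip.h>
    #include "types.h"

    static acl_rule_t
    example_rule (void)
    {
      acl_rule_t r = { 0 };
      r.is_permit = 1;
      r.is_ipv6 = 0;
      r.src.ip4.as_u32 = clib_host_to_net_u32 (0xc0000200);  /* 192.0.2.0 */
      r.src_prefixlen = 24;
      r.dst_prefixlen = 0;              /* any destination address */
      r.proto = 6;                      /* TCP */
      r.src_port_or_type_first = 0;     /* any source port */
      r.src_port_or_type_last = 65535;
      r.dst_port_or_code_first = 80;    /* destination port exactly 80 */
      r.dst_port_or_code_last = 80;
      r.tcp_flags_value = 0;
      r.tcp_flags_mask = 0;             /* do not match on TCP flags */
      return r;
    }
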