author     Andrew Yourtchenko <ayourtch@gmail.com>   2017-07-03 12:32:44 +0200
committer  Ole Trøan <otroan@employees.org>          2017-07-04 11:59:14 +0000
commit     a016216753c955683bda76ed250daa540ff35107 (patch)
tree       c17c9608f77da004a34f9a7d946bada75f222de7 /src/plugins/acl
parent     c86e592f9a65e5098c9a70c38ee228252dbd32ce (diff)
acl-plugin: VPP-897: applying of large number of ACEs is slow
When applying ACEs, in the new hash-based scheme, a lookup in the hash table is done for each ACE, and either that ACE is added to the end of the existing list if there is a match, or a new list is created if there is no match.

Usually ACEs do not overlap, so this operation is fast; however, the fragment-permit entries, in the case of a large number of ACLs, create a huge list which needs to be traversed for every other ACE being added, slowing down the process dramatically.

The solution is to add an explicit flag to denote the first element of the chain, and use the "prev" index of that element to point to the tail element. The "next" field of the last element is still ~0, and if we touch that one, we do the linear search to find the first one, but that is a relatively infrequent operation.

Change-Id: I352a3becd7854cf39aae65f0950afad7d18a70aa
Signed-off-by: Andrew Yourtchenko <ayourtch@gmail.com>
(cherry picked from commit 204cf74aed51ca07933df7c606754abb4b26fd82)
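Editor's note: the core of the fix is easiest to see in isolation. The entry the hash table points at (the head of a collision chain) stores the tail's index in its otherwise head-unused "prev" field, so appending another ACE never walks the list. Below is a minimal standalone sketch of that idea; the names (chain_entry_t, chain_init, chain_append, NO_INDEX) are illustrative only and are not the plugin's actual types, which live in applied_hash_ace_entry_t.

/*
 * Minimal sketch (illustrative names, not the plugin's code): the head of
 * each collision chain stores the tail's index in its "prev" field, so an
 * append is O(1). ~0 marks "no link", as in the plugin.
 */
#include <assert.h>
#include <stdint.h>

#define NO_INDEX (~0u)

typedef struct {
  uint32_t next;    /* next entry in the chain; NO_INDEX at the tail       */
  uint32_t prev;    /* previous entry; on the head, the index of the tail  */
  uint8_t is_first; /* 1 on the entry the hash table points at             */
} chain_entry_t;

/* Start a new chain: the single entry is both head and tail. */
static void
chain_init (chain_entry_t *pool, uint32_t index)
{
  pool[index].is_first = 1;
  pool[index].prev = index;      /* head's prev is the tail, i.e. itself */
  pool[index].next = NO_INDEX;
}

/* Append entry new_index to the chain headed at head_index, in O(1):
 * the head already knows where the tail is. */
static void
chain_append (chain_entry_t *pool, uint32_t head_index, uint32_t new_index)
{
  uint32_t tail_index = pool[head_index].prev;
  assert (pool[tail_index].next == NO_INDEX);  /* the recorded tail is real */

  pool[tail_index].next = new_index;           /* old tail -> new entry     */
  pool[new_index].prev = tail_index;
  pool[new_index].next = NO_INDEX;
  pool[new_index].is_first = 0;
  pool[head_index].prev = new_index;           /* head now records new tail */
}

int
main (void)
{
  chain_entry_t pool[3];
  chain_init (pool, 0);
  chain_append (pool, 0, 1);
  chain_append (pool, 0, 2);
  assert (pool[0].prev == 2 && pool[2].next == NO_INDEX);
  return 0;
}

In the patch below, activate_applied_ace_hash_entry plays the role of chain_append: it reads the tail index from first_pae->prev_applied_entry_index instead of walking next_applied_entry_index links, which is what made applying many overlapping ACEs (such as the fragment-permit entries) slow before.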
Diffstat (limited to 'src/plugins/acl')
-rw-r--r--   src/plugins/acl/hash_lookup.c         | 57
-rw-r--r--   src/plugins/acl/hash_lookup_types.h   |  8
2 files changed, 45 insertions, 20 deletions
diff --git a/src/plugins/acl/hash_lookup.c b/src/plugins/acl/hash_lookup.c
index 8e8c52740ee..3600faf13ea 100644
--- a/src/plugins/acl/hash_lookup.c
+++ b/src/plugins/acl/hash_lookup.c
@@ -216,26 +216,26 @@ activate_applied_ace_hash_entry(acl_main_t *am,
int res = BV (clib_bihash_search) (&am->acl_lookup_hash, &kv, &result);
if (res == 0) {
/* There already exists an entry or more. Append at the end. */
- u32 existing_index = result_val->applied_entry_index;
+ u32 first_index = result_val->applied_entry_index;
DBG("A key already exists, with applied entry index: %d", existing_index);
- ASSERT(existing_index != ~0);
- applied_hash_ace_entry_t *existing_pae = &((*applied_hash_aces)[existing_index]);
- /* walk the list to the last element */
- while (existing_pae->next_applied_entry_index != ~0) {
- if (existing_index == existing_pae->next_applied_entry_index) {
- DBG("existing and next index of entry %d are the same, abort", existing_index);
- ASSERT(existing_index != existing_pae->next_applied_entry_index);
- }
- existing_index = existing_pae->next_applied_entry_index;
- existing_pae = &((*applied_hash_aces)[existing_index]);
- DBG("...advance to chained entry index: %d", existing_index);
- }
+ ASSERT(first_index != ~0);
+ applied_hash_ace_entry_t *first_pae = &((*applied_hash_aces)[first_index]);
+ /* move to "prev" by one, this should land us in the end of the list */
+ u32 last_index = first_pae->prev_applied_entry_index;
+ ASSERT(last_index != ~0);
+ applied_hash_ace_entry_t *last_pae = &((*applied_hash_aces)[last_index]);
+ ASSERT(last_pae->next_applied_entry_index == ~0);
/* link ourseves in */
- existing_pae->next_applied_entry_index = new_index;
- pae->prev_applied_entry_index = existing_index;
+ last_pae->next_applied_entry_index = new_index;
+ pae->prev_applied_entry_index = last_index;
+ /* make a new reference from the very first element to new tail */
+ first_pae->prev_applied_entry_index = new_index;
} else {
/* It's the very first entry */
hashtable_add_del(am, &kv, 1);
+ pae->is_first_entry = 1;
+ /* we are the tail */
+ pae->prev_applied_entry_index = new_index;
}
}
@@ -306,6 +306,7 @@ hash_acl_apply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index)
for(i=0; i < vec_len(ha->rules); i++) {
u32 new_index = base_offset + i;
applied_hash_ace_entry_t *pae = &((*applied_hash_aces)[new_index]);
+ pae->is_first_entry = 0;
pae->acl_index = acl_index;
pae->ace_index = ha->rules[i].ace_index;
pae->action = ha->rules[i].action;
@@ -324,14 +325,13 @@ move_applied_ace_hash_entry(acl_main_t *am,
applied_hash_ace_entry_t **applied_hash_aces,
u32 old_index, u32 new_index)
{
-
/* move the entry */
(*applied_hash_aces)[new_index] = (*applied_hash_aces)[old_index];
/* update the linkage and hash table if necessary */
applied_hash_ace_entry_t *pae = &((*applied_hash_aces)[old_index]);
- if (pae->prev_applied_entry_index != ~0) {
+ if (!pae->is_first_entry) {
applied_hash_ace_entry_t *prev_pae = &((*applied_hash_aces)[pae->prev_applied_entry_index]);
ASSERT(prev_pae->next_applied_entry_index == old_index);
prev_pae->next_applied_entry_index = new_index;
@@ -344,6 +344,20 @@ move_applied_ace_hash_entry(acl_main_t *am,
applied_hash_ace_entry_t *next_pae = &((*applied_hash_aces)[pae->next_applied_entry_index]);
ASSERT(next_pae->prev_applied_entry_index == old_index);
next_pae->prev_applied_entry_index = new_index;
+ } else {
+ /*
+ * Moving the very last entry, so we need to update the tail pointer in the first one.
+ * find back the first entry. Inefficient so might need to be a bit cleverer
+ * if this proves to be a problem..
+ */
+ u32 an_index = pae->prev_applied_entry_index;
+ applied_hash_ace_entry_t *head_pae = &((*applied_hash_aces)[pae->prev_applied_entry_index]);
+ while(!head_pae->is_first_entry) {
+ an_index = head_pae->prev_applied_entry_index;
+ head_pae = &((*applied_hash_aces)[an_index]);
+ }
+ ASSERT(head_pae->prev_applied_entry_index == old_index);
+ head_pae->prev_applied_entry_index = new_index;
}
/* invalidate the old entry */
pae->prev_applied_entry_index = ~0;
@@ -364,13 +378,20 @@ deactivate_applied_ace_hash_entry(acl_main_t *am,
next_pae->prev_applied_entry_index = pae->prev_applied_entry_index;
}
- if (pae->prev_applied_entry_index != ~0) {
+ if (!pae->is_first_entry) {
applied_hash_ace_entry_t *prev_pae = &((*applied_hash_aces)[pae->prev_applied_entry_index]);
ASSERT(prev_pae->next_applied_entry_index == old_index);
prev_pae->next_applied_entry_index = pae->next_applied_entry_index;
} else {
/* It was the first entry. We need either to reset the hash entry or delete it */
+ pae->is_first_entry = 0;
if (pae->next_applied_entry_index != ~0) {
+ /* a custom case of relinking for the first node, with the tail not forward linked to it */
+ applied_hash_ace_entry_t *next_pae = &((*applied_hash_aces)[pae->next_applied_entry_index]);
+ /* this is the tail the new head should be aware of */
+ next_pae->prev_applied_entry_index = pae->prev_applied_entry_index;
+ next_pae->is_first_entry = 1;
+
add_del_hashtable_entry(am, sw_if_index, is_input,
applied_hash_aces, pae->next_applied_entry_index, 1);
} else {
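
Editor's note: the one place where the list still gets walked is visible in move_applied_ace_hash_entry above. When the tail entry itself is relocated, the code follows "prev" links back until it finds the entry flagged is_first_entry and refreshes that entry's tail pointer. A self-contained sketch of that backward walk, with illustrative names that mirror the sketch near the commit message and are not the plugin's code:

#include <stdint.h>

#define NO_INDEX (~0u)

/* Same illustrative layout as in the earlier sketch. */
typedef struct {
  uint32_t next;    /* NO_INDEX at the tail                    */
  uint32_t prev;    /* on the head, the index of the tail      */
  uint8_t is_first; /* 1 on the entry the hash table points at */
} chain_entry_t;

/* Walk backwards from any entry to the head of its chain. Linear in the
 * chain length, but only needed when the tail itself is being moved. */
uint32_t
chain_find_head (const chain_entry_t *pool, uint32_t index)
{
  while (!pool[index].is_first)
    index = pool[index].prev;
  return index;
}

As the in-diff comment says, this is inefficient but accepted because, per the commit message, touching the tail this way is a relatively infrequent operation.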
diff --git a/src/plugins/acl/hash_lookup_types.h b/src/plugins/acl/hash_lookup_types.h
index 1c04459155c..1848c5ffc6a 100644
--- a/src/plugins/acl/hash_lookup_types.h
+++ b/src/plugins/acl/hash_lookup_types.h
@@ -53,11 +53,15 @@ typedef struct {
*/
u32 next_applied_entry_index;
/*
- * previous entry in the list of the chained ones,
- * if ~0 then this is entry in the hash.
+ * previous entry in the ring list of the chained ones.
*/
u32 prev_applied_entry_index;
/*
+ * 1 if it is the very first entry in the list,
+ * referenced from the hash.
+ */
+ u8 is_first_entry;
+ /*
* Action of this applied ACE
*/
u8 action;
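
Editor's note: the struct change above also shifts the head role when the first entry is retired. In deactivate_applied_ace_hash_entry the successor inherits both the is_first_entry flag and the tail pointer stored in the head's "prev" field. A minimal sketch of that promotion, again with illustrative names; the bihash re-pointing the real code performs via add_del_hashtable_entry is only indicated by a comment:

#include <stdint.h>

#define NO_INDEX (~0u)

/* Same illustrative layout as in the earlier sketches. */
typedef struct {
  uint32_t next;    /* NO_INDEX at the tail                    */
  uint32_t prev;    /* on the head, the index of the tail      */
  uint8_t is_first; /* 1 on the entry the hash table points at */
} chain_entry_t;

/* Retire the head entry; returns the new head's index, or NO_INDEX if the
 * chain is now empty (the caller would then delete the hash-table entry). */
uint32_t
chain_remove_head (chain_entry_t *pool, uint32_t head_index)
{
  uint32_t new_head = pool[head_index].next;

  pool[head_index].is_first = 0;
  if (new_head != NO_INDEX)
    {
      pool[new_head].prev = pool[head_index].prev; /* inherit the tail pointer */
      pool[new_head].is_first = 1;
      /* the real code also re-points the hash-table value at new_head here */
    }
  pool[head_index].next = NO_INDEX;
  pool[head_index].prev = NO_INDEX;
  return new_head;
}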