author    Andrew Yourtchenko <ayourtch@gmail.com>  2017-05-24 13:20:47 +0200
committer Damjan Marion <dmarion.lists@gmail.com>  2017-06-19 11:56:10 +0000
commit    7f4d577d6bc243f53a044d92ca9367b3f1fa170e (patch)
tree      5bdf5a57c97cf6faba644209a52559e572b3db64
parent    029f3d2c1c6b04a6cfef17242cb36b304025fe23 (diff)
acl-plugin: bihash-based ACL lookup
Add a bihash-based ACL lookup mechanism and make it the new default.
This changes the time required to look up a 5-tuple match from
O(total_N_entries) to O(total_N_mask_types), where "mask type" is an
overall mask on the 5-tuple required to represent an ACE.

For testing/comparison there is a temporary debug CLI
"set acl-plugin use-hash-acl-matching {0|1}", which, when set to 0,
makes the plugin use the "old" linear lookup, and when set to 1, makes
it use the hash-based lookup.

Based on the discussions on the vpp-dev mailing list, prevent assigning
an ACL index to an interface when the ACL with that index is not
defined, and also prevent deleting an ACL while that ACL is applied.

Also, for easier debugging of the state, there are new debug CLI
commands to see the ACL plugin state at several layers:

"show acl-plugin acl [index N]" - show a high-level ACL representation,
used for the linear lookup and as a base for building the
hashtable-based lookup. Also shows if a given ACL is applied somewhere.

"show acl-plugin interface [sw_if_index N]" - show which interfaces
have which ACL(s) applied.

"show acl-plugin tables" - a lower-level debug command used to see the
state of all of the related data structures at once. There are
specifiers possible, which make for a more focused and maybe augmented
output:

"show acl-plugin tables acl [index N]" - show the "bitmask-ready"
representations of the ACLs, as well as the mask types and their
associated indices.

"show acl-plugin tables mask" - show the derived mask types and their
indices only.

"show acl-plugin tables applied [sw_if_index N]" - show the table of
all of the ACEs applied for a given sw_if_index or all interfaces.

"show acl-plugin tables hash [verbose N]" - show the 48x8 bihash used
for the ACL lookup.

Change-Id: I89fff051424cb44bcb189e3cee04c1b8f76efc28
Signed-off-by: Andrew Yourtchenko <ayourtch@gmail.com>
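Example invocations of the new debug CLI (an illustrative session; the
commands are the ones introduced by this change, the index values are
made up):

    vpp# set acl-plugin use-hash-acl-matching 1
    vpp# show acl-plugin acl index 5
    vpp# show acl-plugin interface sw_if_index 1
    vpp# show acl-plugin tables mask
    vpp# show acl-plugin tables hash verbose 1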
-rw-r--r--  src/plugins/acl.am                     |    1
-rw-r--r--  src/plugins/acl/acl.c                  |  341
-rw-r--r--  src/plugins/acl/acl.h                  |   30
-rw-r--r--  src/plugins/acl/fa_node.c              |   39
-rw-r--r--  src/plugins/acl/fa_node.h              |    7
-rw-r--r--  src/plugins/acl/hash_lookup.c          |  742
-rw-r--r--  src/plugins/acl/hash_lookup.h          |   60
-rw-r--r--  src/plugins/acl/hash_lookup.md         |  241
-rw-r--r--  src/plugins/acl/hash_lookup_private.h  |   27
-rw-r--r--  src/plugins/acl/hash_lookup_types.h    |   94
-rw-r--r--  test/test_acl_plugin.py                |    3
-rw-r--r--  test/test_acl_plugin_conns.py          |   12
-rw-r--r--  test/test_acl_plugin_l2l3.py           |    3
13 files changed, 1590 insertions, 10 deletions
diff --git a/src/plugins/acl.am b/src/plugins/acl.am
index 01e0197c929..0a414481dbf 100644
--- a/src/plugins/acl.am
+++ b/src/plugins/acl.am
@@ -16,6 +16,7 @@ vppplugins_LTLIBRARIES += acl_plugin.la
acl_plugin_la_SOURCES = \
acl/acl.c \
+ acl/hash_lookup.c \
acl/fa_node.c \
acl/l2sess.h \
acl/manual_fns.h \
diff --git a/src/plugins/acl/acl.c b/src/plugins/acl/acl.c
index e7b8549535d..51929ad9188 100644
--- a/src/plugins/acl/acl.c
+++ b/src/plugins/acl/acl.c
@@ -52,6 +52,7 @@
#undef vl_api_version
#include "fa_node.h"
+#include "hash_lookup.h"
acl_main_t acl_main;
@@ -154,6 +155,7 @@ acl_add_list (u32 count, vl_api_acl_rule_t rules[],
for (i = 0; i < count; i++)
{
r = &acl_new_rules[i];
+ memset(r, 0, sizeof(*r));
r->is_permit = rules[i].is_permit;
r->is_ipv6 = rules[i].is_ipv6;
if (r->is_ipv6)
@@ -188,12 +190,14 @@ acl_add_list (u32 count, vl_api_acl_rule_t rules[],
else
{
a = am->acls + *acl_list_index;
+ hash_acl_delete(am, *acl_list_index);
/* Get rid of the old rules */
clib_mem_free (a->rules);
}
a->rules = acl_new_rules;
a->count = count;
memcpy (a->tag, tag, sizeof (a->tag));
+ hash_acl_add(am, *acl_list_index);
return 0;
}
@@ -209,6 +213,19 @@ acl_del_list (u32 acl_list_index)
return -1;
}
+ if (acl_list_index < vec_len(am->input_sw_if_index_vec_by_acl)) {
+ if (vec_len(am->input_sw_if_index_vec_by_acl[acl_list_index]) > 0) {
+ /* ACL is applied somewhere inbound. Refuse to delete */
+ return -1;
+ }
+ }
+ if (acl_list_index < vec_len(am->output_sw_if_index_vec_by_acl)) {
+ if (vec_len(am->output_sw_if_index_vec_by_acl[acl_list_index]) > 0) {
+ /* ACL is applied somewhere outbound. Refuse to delete */
+ return -1;
+ }
+ }
+
/* delete any references to the ACL */
for (i = 0; i < vec_len (am->output_acl_vec_by_sw_if_index); i++)
{
@@ -240,7 +257,9 @@ acl_del_list (u32 acl_list_index)
}
}
}
+ /* delete the hash table data */
+ hash_acl_delete(am, acl_list_index);
/* now we can delete the ACL itself */
a = &am->acls[acl_list_index];
if (a->rules)
@@ -619,28 +638,62 @@ acl_interface_out_enable_disable (acl_main_t * am, u32 sw_if_index,
return rv;
}
+static int
+acl_is_not_defined(acl_main_t *am, u32 acl_list_index)
+{
+ return (pool_is_free_index (am->acls, acl_list_index));
+}
+
static int
acl_interface_add_inout_acl (u32 sw_if_index, u8 is_input, u32 acl_list_index)
{
acl_main_t *am = &acl_main;
+ if (acl_is_not_defined(am, acl_list_index)) {
+ /* ACL is not defined. Can not apply */
+ return -1;
+ }
+
if (is_input)
{
vec_validate (am->input_acl_vec_by_sw_if_index, sw_if_index);
+
+ u32 index = vec_search(am->input_acl_vec_by_sw_if_index[sw_if_index], acl_list_index);
+ if (index < vec_len(am->input_acl_vec_by_sw_if_index[sw_if_index])) {
+ clib_warning("ACL %d is already applied inbound on sw_if_index %d (index %d)",
+ acl_list_index, sw_if_index, index);
+ /* the entry is already there */
+ return -1;
+ }
vec_add (am->input_acl_vec_by_sw_if_index[sw_if_index], &acl_list_index,
1);
+ vec_validate (am->input_sw_if_index_vec_by_acl, acl_list_index);
+ vec_add (am->input_sw_if_index_vec_by_acl[acl_list_index], &sw_if_index,
+ 1);
acl_interface_in_enable_disable (am, sw_if_index, 1);
}
else
{
vec_validate (am->output_acl_vec_by_sw_if_index, sw_if_index);
+
+ u32 index = vec_search(am->output_acl_vec_by_sw_if_index[sw_if_index], acl_list_index);
+ if (index < vec_len(am->output_acl_vec_by_sw_if_index[sw_if_index])) {
+ clib_warning("ACL %d is already applied outbound on sw_if_index %d (index %d)",
+ acl_list_index, sw_if_index, index);
+ /* the entry is already there */
+ return -1;
+ }
vec_add (am->output_acl_vec_by_sw_if_index[sw_if_index],
&acl_list_index, 1);
+ vec_validate (am->output_sw_if_index_vec_by_acl, acl_list_index);
+ vec_add (am->output_sw_if_index_vec_by_acl[acl_list_index], &sw_if_index,
+ 1);
acl_interface_out_enable_disable (am, sw_if_index, 1);
}
return 0;
}
+
static int
acl_interface_del_inout_acl (u32 sw_if_index, u8 is_input, u32 acl_list_index)
{
@@ -661,6 +714,15 @@ acl_interface_del_inout_acl (u32 sw_if_index, u8 is_input, u32 acl_list_index)
break;
}
}
+
+ if (acl_list_index < vec_len(am->input_sw_if_index_vec_by_acl)) {
+ u32 index = vec_search(am->input_sw_if_index_vec_by_acl[acl_list_index], sw_if_index);
+ if (index < vec_len(am->input_sw_if_index_vec_by_acl[acl_list_index])) {
+ hash_acl_unapply(am, sw_if_index, is_input, acl_list_index);
+ vec_del1 (am->input_sw_if_index_vec_by_acl[acl_list_index], index);
+ }
+ }
+
if (0 == vec_len (am->input_acl_vec_by_sw_if_index[sw_if_index]))
{
acl_interface_in_enable_disable (am, sw_if_index, 0);
@@ -680,6 +742,15 @@ acl_interface_del_inout_acl (u32 sw_if_index, u8 is_input, u32 acl_list_index)
break;
}
}
+
+ if (acl_list_index < vec_len(am->output_sw_if_index_vec_by_acl)) {
+ u32 index = vec_search(am->output_sw_if_index_vec_by_acl[acl_list_index], sw_if_index);
+ if (index < vec_len(am->output_sw_if_index_vec_by_acl[acl_list_index])) {
+ hash_acl_unapply(am, sw_if_index, is_input, acl_list_index);
+ vec_del1 (am->output_sw_if_index_vec_by_acl[acl_list_index], index);
+ }
+ }
+
if (0 == vec_len (am->output_acl_vec_by_sw_if_index[sw_if_index]))
{
acl_interface_out_enable_disable (am, sw_if_index, 0);
@@ -692,16 +763,41 @@ static void
acl_interface_reset_inout_acls (u32 sw_if_index, u8 is_input)
{
acl_main_t *am = &acl_main;
+ int i;
if (is_input)
{
acl_interface_in_enable_disable (am, sw_if_index, 0);
vec_validate (am->input_acl_vec_by_sw_if_index, sw_if_index);
+
+ for(i = vec_len(am->input_acl_vec_by_sw_if_index[sw_if_index])-1; i>=0; i--) {
+ u32 acl_list_index = am->input_acl_vec_by_sw_if_index[sw_if_index][i];
+ hash_acl_unapply(am, sw_if_index, is_input, acl_list_index);
+ if (acl_list_index < vec_len(am->input_sw_if_index_vec_by_acl)) {
+ u32 index = vec_search(am->input_sw_if_index_vec_by_acl[acl_list_index], sw_if_index);
+ if (index < vec_len(am->input_sw_if_index_vec_by_acl[acl_list_index])) {
+ vec_del1 (am->input_sw_if_index_vec_by_acl[acl_list_index], index);
+ }
+ }
+ }
+
vec_reset_length (am->input_acl_vec_by_sw_if_index[sw_if_index]);
}
else
{
acl_interface_out_enable_disable (am, sw_if_index, 0);
vec_validate (am->output_acl_vec_by_sw_if_index, sw_if_index);
+
+ for(i = vec_len(am->output_acl_vec_by_sw_if_index[sw_if_index])-1; i>=0; i--) {
+ u32 acl_list_index = am->output_acl_vec_by_sw_if_index[sw_if_index][i];
+ hash_acl_unapply(am, sw_if_index, is_input, acl_list_index);
+ if (acl_list_index < vec_len(am->output_sw_if_index_vec_by_acl)) {
+ u32 index = vec_search(am->output_sw_if_index_vec_by_acl[acl_list_index], sw_if_index);
+ if (index < vec_len(am->output_sw_if_index_vec_by_acl[acl_list_index])) {
+ vec_del1 (am->output_sw_if_index_vec_by_acl[acl_list_index], index);
+ }
+ }
+ }
+
vec_reset_length (am->output_acl_vec_by_sw_if_index[sw_if_index]);
}
}
@@ -711,13 +807,19 @@ acl_interface_add_del_inout_acl (u32 sw_if_index, u8 is_add, u8 is_input,
u32 acl_list_index)
{
int rv = -1;
+ acl_main_t *am = &acl_main;
if (is_add)
{
rv =
acl_interface_add_inout_acl (sw_if_index, is_input, acl_list_index);
+ if (rv == 0)
+ {
+ hash_acl_apply(am, sw_if_index, is_input, acl_list_index);
+ }
}
else
{
+ hash_acl_unapply(am, sw_if_index, is_input, acl_list_index);
rv =
acl_interface_del_inout_acl (sw_if_index, is_input, acl_list_index);
}
@@ -1198,9 +1300,18 @@ vl_api_acl_interface_set_acl_list_t_handler
for (i = 0; i < mp->count; i++)
{
- acl_interface_add_del_inout_acl (sw_if_index, 1, (i < mp->n_input),
- ntohl (mp->acls[i]));
+ if(acl_is_not_defined(am, ntohl (mp->acls[i]))) {
+ /* ACL does not exist, so we can not apply it */
+ rv = -1;
+ }
}
+ if (0 == rv) {
+ for (i = 0; i < mp->count; i++)
+ {
+ acl_interface_add_del_inout_acl (sw_if_index, 1, (i < mp->n_input),
+ ntohl (mp->acls[i]));
+ }
+ }
}
REPLY_MACRO (VL_API_ACL_INTERFACE_SET_ACL_LIST_REPLY);
@@ -1706,6 +1817,11 @@ acl_set_aclplugin_fn (vlib_main_t * vm,
}
goto done;
}
+ if (unformat (input, "use-hash-acl-matching %u", &val))
+ {
+ am->use_hash_acl_matching = (val !=0);
+ goto done;
+ }
if (unformat (input, "l4-match-nonfirst-fragment %u", &val))
{
am->l4_match_nonfirst_fragment = (val != 0);
@@ -1800,7 +1916,6 @@ done:
return error;
}
-
static clib_error_t *
acl_show_aclplugin_fn (vlib_main_t * vm,
unformat_input_t * input,
@@ -1809,13 +1924,16 @@ acl_show_aclplugin_fn (vlib_main_t * vm,
clib_error_t *error = 0;
acl_main_t *am = &acl_main;
vnet_interface_main_t *im = &am->vnet_main->interface_main;
+ u32 *pj;
vnet_sw_interface_t *swif;
if (unformat (input, "sessions"))
{
- u8 * out0 = 0;
+ u8 * out0 = format(0, "");
u16 wk;
+ u32 show_bihash_verbose = 0;
+ unformat (input, "verbose %u", &show_bihash_verbose);
pool_foreach (swif, im->sw_interfaces,
({
u32 sw_if_index = swif->sw_if_index;
@@ -1856,6 +1974,215 @@ acl_show_aclplugin_fn (vlib_main_t * vm,
am->fa_cleaner_wait_time_increment * 1000.0, ((f64)am->fa_current_cleaner_timer_wait_interval) * 1000.0/(f64)vm->clib_time.clocks_per_second);
vec_free(out0);
+ show_fa_sessions_hash(vm, show_bihash_verbose);
+ }
+ else if (unformat (input, "interface"))
+ {
+ u32 sw_if_index = ~0;
+ u32 swi;
+ u8 * out0 = format(0, "");
+ unformat (input, "sw_if_index %u", &sw_if_index);
+ for(swi = 0; (swi < vec_len(am->input_acl_vec_by_sw_if_index)) ||
+ (swi < vec_len(am->output_acl_vec_by_sw_if_index)); swi++) {
+ out0 = format(out0, "sw_if_index %d:\n", swi);
+
+ if ((swi < vec_len(am->input_acl_vec_by_sw_if_index)) &&
+ (vec_len(am->input_acl_vec_by_sw_if_index[swi]) > 0)) {
+ out0 = format(out0, " input acl(s): ");
+ vec_foreach(pj, am->input_acl_vec_by_sw_if_index[swi]) {
+ out0 = format(out0, "%d ", *pj);
+ }
+ out0 = format(out0, "\n");
+ }
+
+ if ((swi < vec_len(am->output_acl_vec_by_sw_if_index)) &&
+ (vec_len(am->output_acl_vec_by_sw_if_index[swi]) > 0)) {
+ out0 = format(out0, " output acl(s): ");
+ vec_foreach(pj, am->output_acl_vec_by_sw_if_index[swi]) {
+ out0 = format(out0, "%d ", *pj);
+ }
+ out0 = format(out0, "\n");
+ }
+
+ }
+ vlib_cli_output(vm, "\n%s\n", out0);
+ vec_free(out0);
+ }
+ else if (unformat (input, "acl"))
+ {
+ u32 acl_index = ~0;
+ u32 i;
+ u8 * out0 = format(0, "");
+ unformat (input, "index %u", &acl_index);
+ for(i=0; i<vec_len(am->acls); i++) {
+ if (acl_is_not_defined(am, i)) {
+ /* don't attempt to show the ACLs that do not exist */
+ continue;
+ }
+ if ((acl_index != ~0) && (acl_index != i)) {
+ continue;
+ }
+ out0 = format(out0, "acl-index %u count %u tag {%s}\n", i, am->acls[i].count, am->acls[i].tag);
+ acl_rule_t *r;
+ int j;
+ for(j=0; j<am->acls[i].count; j++) {
+ r = &am->acls[i].rules[j];
+ out0 = format(out0, " %4d: %s ", j, r->is_ipv6 ? "ipv6" : "ipv4");
+ out0 = format_acl_action(out0, r->is_permit);
+ out0 = format(out0, " src %U/%d", format_ip46_address, &r->src, IP46_TYPE_ANY, r->src_prefixlen);
+ out0 = format(out0, " dst %U/%d", format_ip46_address, &r->dst, IP46_TYPE_ANY, r->dst_prefixlen);
+ out0 = format(out0, " proto %d", r->proto);
+ out0 = format(out0, " sport %d", r->src_port_or_type_first);
+ if (r->src_port_or_type_first != r->src_port_or_type_last) {
+ out0 = format(out0, "-%d", r->src_port_or_type_last);
+ }
+ out0 = format(out0, " dport %d", r->dst_port_or_code_first);
+ if (r->dst_port_or_code_first != r->dst_port_or_code_last) {
+ out0 = format(out0, "-%d", r->dst_port_or_code_last);
+ }
+ if (r->tcp_flags_mask || r->tcp_flags_value) {
+ out0 = format(out0, " tcpflags %d mask %d", r->tcp_flags_value, r->tcp_flags_mask);
+ }
+ out0 = format(out0, "\n");
+ }
+
+ if (i<vec_len(am->input_sw_if_index_vec_by_acl)) {
+ out0 = format(out0, " applied inbound on sw_if_index: ");
+ vec_foreach(pj, am->input_sw_if_index_vec_by_acl[i]) {
+ out0 = format(out0, "%d ", *pj);
+ }
+ out0 = format(out0, "\n");
+ }
+ if (i<vec_len(am->output_sw_if_index_vec_by_acl)) {
+ out0 = format(out0, " applied outbound on sw_if_index: ");
+ vec_foreach(pj, am->output_sw_if_index_vec_by_acl[i]) {
+ out0 = format(out0, "%d ", *pj);
+ }
+ out0 = format(out0, "\n");
+ }
+ }
+ vlib_cli_output(vm, "\n%s\n", out0);
+ vec_free(out0);
+ }
+ else if (unformat (input, "tables"))
+ {
+ ace_mask_type_entry_t *mte;
+ u32 acl_index = ~0;
+ u32 sw_if_index = ~0;
+ int show_acl_hash_info = 0;
+ int show_applied_info = 0;
+ int show_mask_type = 0;
+ int show_bihash = 0;
+ u32 show_bihash_verbose = 0;
+
+ if (unformat (input, "acl")) {
+ show_mask_type = 1;
+ unformat (input, "index %u", &acl_index);
+ } else if (unformat (input, "applied")) {
+ show_applied_info = 1;
+ unformat (input, "sw_if_index %u", &sw_if_index);
+ } else if (unformat (input, "mask")) {
+ show_mask_type = 1;
+ } else if (unformat (input, "hash")) {
+ show_bihash = 1;
+ unformat (input, "verbose %u", &show_bihash_verbose);
+ }
+
+ if ( ! (show_mask_type || show_acl_hash_info || show_applied_info || show_bihash) ) {
+ /* if no qualifiers specified, show all */
+ show_mask_type = 1;
+ show_acl_hash_info = 1;
+ show_applied_info = 1;
+ show_bihash = 1;
+ }
+
+ if (show_mask_type) {
+ vlib_cli_output(vm, "Mask-type entries:");
+ /* *INDENT-OFF* */
+ pool_foreach(mte, am->ace_mask_type_pool,
+ ({
+ vlib_cli_output(vm, " %3d: %016llx %016llx %016llx %016llx %016llx %016llx refcount %d",
+ mte - am->ace_mask_type_pool,
+ mte->mask.kv.key[0], mte->mask.kv.key[1], mte->mask.kv.key[2],
+ mte->mask.kv.key[3], mte->mask.kv.key[4], mte->mask.kv.value, mte->refcount);
+ }));
+ /* *INDENT-ON* */
+ }
+
+ if (show_acl_hash_info) {
+ u32 i,j;
+ u8 * out0 = format(0, "");
+ u64 *m;
+ out0 = format(out0, "Mask-ready ACL representations\n");
+ for (i=0; i< vec_len(am->hash_acl_infos); i++) {
+ if ((acl_index != ~0) && (acl_index != i)) {
+ continue;
+ }
+ hash_acl_info_t *ha = &am->hash_acl_infos[i];
+ out0 = format(out0, "acl-index %u bitmask-ready layout\n", i);
+ out0 = format(out0, " mask type index bitmap: %U\n", format_bitmap_hex, ha->mask_type_index_bitmap);
+ for(j=0; j<vec_len(ha->rules); j++) {
+ hash_ace_info_t *pa = &ha->rules[j];
+ m = (u64 *)&pa->match;
+ out0 = format(out0, " %4d: %016llx %016llx %016llx %016llx %016llx %016llx mask index %d acl %d rule %d action %d src/dst portrange not ^2: %d,%d\n",
+ j, m[0], m[1], m[2], m[3], m[4], m[5], pa->mask_type_index,
+ pa->acl_index, pa->ace_index, pa->action,
+ pa->src_portrange_not_powerof2, pa->dst_portrange_not_powerof2);
+ }
+ }
+ vlib_cli_output(vm, "\n%s\n", out0);
+ vec_free(out0);
+ }
+
+ if (show_applied_info) {
+ u32 swi, j;
+ u8 * out0 = format(0, "");
+ out0 = format(out0, "Applied lookup entries for interfaces\n");
+
+ for(swi = 0; (swi < vec_len(am->input_applied_hash_acl_info_by_sw_if_index)) ||
+ (swi < vec_len(am->output_applied_hash_acl_info_by_sw_if_index)) ||
+ (swi < vec_len(am->input_hash_entry_vec_by_sw_if_index)) ||
+ (swi < vec_len(am->output_hash_entry_vec_by_sw_if_index)); swi++) {
+ if ((sw_if_index != ~0) && (sw_if_index != swi)) {
+ continue;
+ }
+ out0 = format(out0, "sw_if_index %d:\n", swi);
+ if (swi < vec_len(am->input_applied_hash_acl_info_by_sw_if_index)) {
+ applied_hash_acl_info_t *pal = &am->input_applied_hash_acl_info_by_sw_if_index[swi];
+ out0 = format(out0, " input lookup mask_type_index_bitmap: %U\n", format_bitmap_hex, pal->mask_type_index_bitmap);
+ }
+ if (swi < vec_len(am->input_hash_entry_vec_by_sw_if_index)) {
+ out0 = format(out0, " input lookup applied entries:\n");
+ for(j=0; j<vec_len(am->input_hash_entry_vec_by_sw_if_index[swi]); j++) {
+ applied_hash_ace_entry_t *pae = &am->input_hash_entry_vec_by_sw_if_index[swi][j];
+ out0 = format(out0, " %4d: acl %d rule %d action %d bitmask-ready rule %d next %d prev %d\n",
+ j, pae->acl_index, pae->ace_index, pae->action, pae->hash_ace_info_index,
+ pae->next_applied_entry_index, pae->prev_applied_entry_index);
+ }
+ }
+
+ if (swi < vec_len(am->output_applied_hash_acl_info_by_sw_if_index)) {
+ applied_hash_acl_info_t *pal = &am->output_applied_hash_acl_info_by_sw_if_index[swi];
+ out0 = format(out0, " output lookup mask_type_index_bitmap: %U\n", format_bitmap_hex, pal->mask_type_index_bitmap);
+ }
+ if (swi < vec_len(am->output_hash_entry_vec_by_sw_if_index)) {
+ out0 = format(out0, " output lookup applied entries:\n");
+ for(j=0; j<vec_len(am->output_hash_entry_vec_by_sw_if_index[swi]); j++) {
+ applied_hash_ace_entry_t *pae = &am->output_hash_entry_vec_by_sw_if_index[swi][j];
+ out0 = format(out0, " %4d: acl %d rule %d action %d bitmask-ready rule %d next %d prev %d\n",
+ j, pae->acl_index, pae->ace_index, pae->action, pae->hash_ace_info_index,
+ pae->next_applied_entry_index, pae->prev_applied_entry_index);
+ }
+ }
+
+ }
+ vlib_cli_output(vm, "\n%s\n", out0);
+ vec_free(out0);
+ }
+
+ if (show_bihash) {
+ show_hash_acl_hash(vm, am, show_bihash_verbose);
+ }
}
return error;
}
@@ -1870,7 +2197,7 @@ VLIB_CLI_COMMAND (aclplugin_set_command, static) = {
VLIB_CLI_COMMAND (aclplugin_show_command, static) = {
.path = "show acl-plugin",
- .short_help = "show acl-plugin sessions",
+ .short_help = "show acl-plugin {sessions|acl|interface|tables}",
.function = acl_show_aclplugin_fn,
};
/* *INDENT-ON* */
@@ -1940,6 +2267,10 @@ acl_init (vlib_main_t * vm)
am->l4_match_nonfirst_fragment = 1;
+ /* use the new fancy hash-based matching */
+ am->use_hash_acl_matching = 1;
+
return error;
}
diff --git a/src/plugins/acl/acl.h b/src/plugins/acl/acl.h
index 65bc9e74ead..c9f403e9d1b 100644
--- a/src/plugins/acl/acl.h
+++ b/src/plugins/acl/acl.h
@@ -24,8 +24,10 @@
#include <vppinfra/error.h>
#include <vppinfra/bitmap.h>
#include <vppinfra/elog.h>
+#include <vppinfra/bihash_48_8.h>
#include "bihash_40_8.h"
#include "fa_node.h"
+#include "hash_lookup_types.h"
#define ACL_PLUGIN_VERSION_MAJOR 1
#define ACL_PLUGIN_VERSION_MINOR 3
@@ -109,17 +111,45 @@ typedef struct
u32 l2_table_index;
} macip_acl_list_t;
+/*
+ * An element describing a particular configuration of the mask,
+ * and how many times it has been used.
+ */
+typedef struct
+{
+ fa_5tuple_t mask;
+ u32 refcount;
+} ace_mask_type_entry_t;
+
typedef struct {
/* API message ID base */
u16 msg_id_base;
acl_list_t *acls; /* Pool of ACLs */
+ hash_acl_info_t *hash_acl_infos; /* corresponding hash matching housekeeping info */
+ clib_bihash_48_8_t acl_lookup_hash; /* ACL lookup hash table. */
+ int acl_lookup_hash_initialized;
+ applied_hash_ace_entry_t **input_hash_entry_vec_by_sw_if_index;
+ applied_hash_ace_entry_t **output_hash_entry_vec_by_sw_if_index;
+ applied_hash_acl_info_t *input_applied_hash_acl_info_by_sw_if_index;
+ applied_hash_acl_info_t *output_applied_hash_acl_info_by_sw_if_index;
+
macip_acl_list_t *macip_acls; /* Pool of MAC-IP ACLs */
/* ACLs associated with interfaces */
u32 **input_acl_vec_by_sw_if_index;
u32 **output_acl_vec_by_sw_if_index;
+ /* interfaces on which given ACLs are applied */
+ u32 **input_sw_if_index_vec_by_acl;
+ u32 **output_sw_if_index_vec_by_acl;
+
+ /* Do we use hash-based ACL matching or linear */
+ int use_hash_acl_matching;
+
+ /* a pool of all mask types present in all ACEs */
+ ace_mask_type_entry_t *ace_mask_type_pool;
+
/*
* Classify tables used to grab the packets for the ACL check,
* and serving as the 5-tuple session tables at the same time
diff --git a/src/plugins/acl/fa_node.c b/src/plugins/acl/fa_node.c
index 66621b6ba78..bfb2fc1eaca 100644
--- a/src/plugins/acl/fa_node.c
+++ b/src/plugins/acl/fa_node.c
@@ -26,6 +26,7 @@
#include <vppinfra/bihash_template.c>
#include "fa_node.h"
+#include "hash_lookup.h"
typedef struct
{
@@ -136,7 +137,7 @@ fa_acl_match_port (u16 port, u16 port_first, u16 port_last, int is_ip6)
}
int
-acl_match_5tuple (acl_main_t * am, u32 acl_index, fa_5tuple_t * pkt_5tuple,
+single_acl_match_5tuple (acl_main_t * am, u32 acl_index, fa_5tuple_t * pkt_5tuple,
int is_ip6, u8 * r_action, u32 * r_acl_match_p,
u32 * r_rule_match_p, u32 * trace_bitmap)
{
@@ -259,7 +260,7 @@ acl_match_5tuple (acl_main_t * am, u32 acl_index, fa_5tuple_t * pkt_5tuple,
}
static u8
-full_acl_match_5tuple (u32 sw_if_index, fa_5tuple_t * pkt_5tuple, int is_l2,
+linear_multi_acl_match_5tuple (u32 sw_if_index, fa_5tuple_t * pkt_5tuple, int is_l2,
int is_ip6, int is_input, u32 * acl_match_p,
u32 * rule_match_p, u32 * trace_bitmap)
{
@@ -284,7 +285,7 @@ full_acl_match_5tuple (u32 sw_if_index, fa_5tuple_t * pkt_5tuple, int is_l2,
clib_warning ("ACL_FA_NODE_DBG: Trying to match ACL: %d",
acl_vector[i]);
#endif
- if (acl_match_5tuple
+ if (single_acl_match_5tuple
(am, acl_vector[i], pkt_5tuple, is_ip6, &action,
acl_match_p, rule_match_p, trace_bitmap))
{
@@ -303,6 +304,21 @@ full_acl_match_5tuple (u32 sw_if_index, fa_5tuple_t * pkt_5tuple, int is_l2,
return 0;
}
+static u8
+multi_acl_match_5tuple (u32 sw_if_index, fa_5tuple_t * pkt_5tuple, int is_l2,
+ int is_ip6, int is_input, u32 * acl_match_p,
+ u32 * rule_match_p, u32 * trace_bitmap)
+{
+ acl_main_t *am = &acl_main;
+ if (am->use_hash_acl_matching) {
+ return hash_multi_acl_match_5tuple(sw_if_index, pkt_5tuple, is_l2, is_ip6,
+ is_input, acl_match_p, rule_match_p, trace_bitmap);
+ } else {
+ return linear_multi_acl_match_5tuple(sw_if_index, pkt_5tuple, is_l2, is_ip6,
+ is_input, acl_match_p, rule_match_p, trace_bitmap);
+ }
+}
+
static int
offset_within_packet (vlib_buffer_t * b0, int offset)
{
@@ -973,6 +989,10 @@ acl_fa_node_fn (vlib_main_t * vm,
acl_fill_5tuple (am, b0, is_ip6, is_input, is_l2_path, &fa_5tuple);
fa_5tuple.l4.lsb_of_sw_if_index = sw_if_index0 & 0xffff;
acl_make_5tuple_session_key (is_input, &fa_5tuple, &kv_sess);
+ fa_5tuple.pkt.sw_if_index = sw_if_index0;
+ fa_5tuple.pkt.is_ip6 = is_ip6;
+ fa_5tuple.pkt.is_input = is_input;
+ fa_5tuple.pkt.mask_type_index_lsb = ~0;
#ifdef FA_NODE_VERBOSE_DEBUG
clib_warning
("ACL_FA_NODE_DBG: session 5-tuple %016llx %016llx %016llx %016llx %016llx : %016llx",
@@ -1039,7 +1059,7 @@ acl_fa_node_fn (vlib_main_t * vm,
if (acl_check_needed)
{
action =
- full_acl_match_5tuple (sw_if_index0, &fa_5tuple, is_l2_path,
+ multi_acl_match_5tuple (sw_if_index0, &fa_5tuple, is_l2_path,
is_ip6, is_input, &match_acl_in_index,
&match_rule_index, &trace_bitmap);
error0 = action;
@@ -1590,6 +1610,17 @@ acl_fa_enable_disable (u32 sw_if_index, int is_input, int enable_disable)
}
}
+void
+show_fa_sessions_hash(vlib_main_t * vm, u32 verbose)
+{
+ acl_main_t *am = &acl_main;
+ if (am->fa_sessions_hash_is_initialized) {
+ vlib_cli_output(vm, "\nSession lookup hash table:\n%U\n\n",
+ BV (format_bihash), &am->fa_sessions_hash, verbose);
+ } else {
+ vlib_cli_output(vm, "\nSession lookup hash table is not allocated.\n\n");
+ }
+}
/* *INDENT-OFF* */
diff --git a/src/plugins/acl/fa_node.h b/src/plugins/acl/fa_node.h
index 671593a8c99..e4cd2ab57d8 100644
--- a/src/plugins/acl/fa_node.h
+++ b/src/plugins/acl/fa_node.h
@@ -22,12 +22,15 @@
typedef union {
u64 as_u64;
struct {
+ u32 sw_if_index;
+ u16 mask_type_index_lsb;
u8 tcp_flags;
u8 tcp_flags_valid:1;
u8 is_input:1;
u8 l4_valid:1;
u8 is_nonfirst_fragment:1;
- u8 flags_reserved:4;
+ u8 is_ip6:1;
+ u8 flags_reserved:3;
};
} fa_packet_info_t;
@@ -158,5 +161,7 @@ enum
void acl_fa_enable_disable(u32 sw_if_index, int is_input, int enable_disable);
+void show_fa_sessions_hash(vlib_main_t * vm, u32 verbose);
+
#endif
diff --git a/src/plugins/acl/hash_lookup.c b/src/plugins/acl/hash_lookup.c
new file mode 100644
index 00000000000..8e8c52740ee
--- /dev/null
+++ b/src/plugins/acl/hash_lookup.c
@@ -0,0 +1,742 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <stddef.h>
+#include <netinet/in.h>
+
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+#include <vlibsocket/api.h>
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vppinfra/error.h>
+#include <vnet/plugin/plugin.h>
+#include <acl/acl.h>
+#include <vppinfra/bihash_48_8.h>
+
+#include "hash_lookup.h"
+#include "hash_lookup_private.h"
+
+/*
+ * This returns true if there is indeed a match on the portranges.
+ * With all these levels of indirections, this is not going to be very fast,
+ * so, best use the individual ports or wildcard ports for performance.
+ */
+static int
+match_portranges(acl_main_t *am, fa_5tuple_t *match, u32 index)
+{
+ applied_hash_ace_entry_t **applied_hash_aces = match->pkt.is_input ? &am->input_hash_entry_vec_by_sw_if_index[match->pkt.sw_if_index] :
+ &am->output_hash_entry_vec_by_sw_if_index[match->pkt.sw_if_index];
+ applied_hash_ace_entry_t *pae = &((*applied_hash_aces)[index]);
+
+ // hash_acl_info_t *ha = &am->hash_acl_infos[pae->acl_index];
+ acl_rule_t *r = &(am->acls[pae->acl_index].rules[pae->ace_index]);
+ DBG("PORTMATCH: %d <= %d <= %d && %d <= %d <= %d ?",
+ r->src_port_or_type_first, match->l4.port[0], r->src_port_or_type_last,
+ r->dst_port_or_code_first, match->l4.port[1], r->dst_port_or_code_last);
+
+ return ( ((r->src_port_or_type_first <= match->l4.port[0]) && r->src_port_or_type_last >= match->l4.port[0]) &&
+ ((r->dst_port_or_code_first <= match->l4.port[1]) && r->dst_port_or_code_last >= match->l4.port[1]) );
+}
+
+static u32
+multi_acl_match_get_applied_ace_index(acl_main_t *am, fa_5tuple_t *match)
+{
+ clib_bihash_kv_48_8_t kv;
+ clib_bihash_kv_48_8_t result;
+ fa_5tuple_t *kv_key = (fa_5tuple_t *)kv.key;
+ hash_acl_lookup_value_t *result_val = (hash_acl_lookup_value_t *)&result.value;
+ u64 *pmatch = (u64 *)match;
+ u64 *pmask;
+ u64 *pkey;
+ int mask_type_index;
+ u32 curr_match_index = ~0;
+
+ u32 sw_if_index = match->pkt.sw_if_index;
+ u8 is_input = match->pkt.is_input;
+ applied_hash_ace_entry_t **applied_hash_aces = is_input ? &am->input_hash_entry_vec_by_sw_if_index[sw_if_index] :
+ &am->output_hash_entry_vec_by_sw_if_index[sw_if_index];
+ applied_hash_acl_info_t **applied_hash_acls = is_input ? &am->input_applied_hash_acl_info_by_sw_if_index :
+ &am->output_applied_hash_acl_info_by_sw_if_index;
+
+ DBG("TRYING TO MATCH: %016llx %016llx %016llx %016llx %016llx %016llx",
+ pmatch[0], pmatch[1], pmatch[2], pmatch[3], pmatch[4], pmatch[5]);
+
+ for(mask_type_index=0; mask_type_index < pool_len(am->ace_mask_type_pool); mask_type_index++) {
+ if (!clib_bitmap_get((*applied_hash_acls)[sw_if_index].mask_type_index_bitmap, mask_type_index)) {
+ /* This bit is not set. Avoid trying to match */
+ continue;
+ }
+ ace_mask_type_entry_t *mte = &am->ace_mask_type_pool[mask_type_index];
+ pmatch = (u64 *)match;
+ pmask = (u64 *)&mte->mask;
+ pkey = (u64 *)kv.key;
+ /*
+ * unrolling the below loop results in a noticeable performance increase.
+ int i;
+ for(i=0; i<6; i++) {
+ kv.key[i] = pmatch[i] & pmask[i];
+ }
+ */
+
+ *pkey++ = *pmatch++ & *pmask++;
+ *pkey++ = *pmatch++ & *pmask++;
+ *pkey++ = *pmatch++ & *pmask++;
+ *pkey++ = *pmatch++ & *pmask++;
+ *pkey++ = *pmatch++ & *pmask++;
+ *pkey++ = *pmatch++ & *pmask++;
+
+ kv_key->pkt.mask_type_index_lsb = mask_type_index;
+ DBG(" KEY %3d: %016llx %016llx %016llx %016llx %016llx %016llx", mask_type_index,
+ kv.key[0], kv.key[1], kv.key[2], kv.key[3], kv.key[4], kv.key[5]);
+ int res = BV (clib_bihash_search) (&am->acl_lookup_hash, &kv, &result);
+ if (res == 0) {
+ DBG("ACL-MATCH! result_val: %016llx", result_val->as_u64);
+ if (result_val->applied_entry_index < curr_match_index) {
+ if (PREDICT_FALSE(result_val->need_portrange_check)) {
+ /*
+ * This is going to be slow, since we can have multiple superset
+ * entries for narrow-ish portranges, e.g.:
+ * 0..42 100..400, 230..60000,
+ * so we need to walk linearly and check if they match.
+ */
+
+ u32 curr_index = result_val->applied_entry_index;
+ while ((curr_index != ~0) && !match_portranges(am, match, curr_index)) {
+ /* while no match and there are more entries, walk... */
+ applied_hash_ace_entry_t *pae = &((*applied_hash_aces)[curr_index]);
+ DBG("entry %d did not portmatch, advancing to %d", curr_index, pae->next_applied_entry_index);
+ curr_index = pae->next_applied_entry_index;
+ }
+ if (curr_index < curr_match_index) {
+ DBG("The index %d is the new candidate in portrange matches.", curr_index);
+ curr_match_index = curr_index;
+ if (!result_val->shadowed) {
+ /* new result is known to not be shadowed, so no point to look up further */
+ break;
+ }
+ } else {
+ DBG("Curr portmatch index %d is too big vs. current matched one %d", curr_index, curr_match_index);
+ }
+ } else {
+ /* The usual path is here. Found an entry in front of the current candidate - so it's a new one */
+ DBG("This match is the new candidate");
+ curr_match_index = result_val->applied_entry_index;
+ if (!result_val->shadowed) {
+ /* new result is known to not be shadowed, so no point to look up further */
+ break;
+ }
+ }
+ }
+ }
+ }
+ DBG("MATCH-RESULT: %d", curr_match_index);
+ return curr_match_index;
+}
+
+static void
+hashtable_add_del(acl_main_t *am, clib_bihash_kv_48_8_t *kv, int is_add)
+{
+ DBG("HASH ADD/DEL: %016llx %016llx %016llx %016llx %016llx %016llx add %d",
+ kv->key[0], kv->key[1], kv->key[2],
+ kv->key[3], kv->key[4], kv->key[5], is_add);
+ BV (clib_bihash_add_del) (&am->acl_lookup_hash, kv, is_add);
+}
+
+static void
+fill_applied_hash_ace_kv(acl_main_t *am,
+ applied_hash_ace_entry_t **applied_hash_aces,
+ u32 sw_if_index, u8 is_input,
+ u32 new_index, clib_bihash_kv_48_8_t *kv)
+{
+ fa_5tuple_t *kv_key = (fa_5tuple_t *)kv->key;
+ hash_acl_lookup_value_t *kv_val = (hash_acl_lookup_value_t *)&kv->value;
+ applied_hash_ace_entry_t *pae = &((*applied_hash_aces)[new_index]);
+ hash_acl_info_t *ha = &am->hash_acl_infos[pae->acl_index];
+
+ memcpy(kv_key, &ha->rules[pae->hash_ace_info_index].match, sizeof(*kv_key));
+ /* initialize the sw_if_index and direction */
+ kv_key->pkt.sw_if_index = sw_if_index;
+ kv_key->pkt.is_input = is_input;
+ kv_val->as_u64 = 0;
+ kv_val->applied_entry_index = new_index;
+ kv_val->need_portrange_check = ha->rules[pae->hash_ace_info_index].src_portrange_not_powerof2 ||
+ ha->rules[pae->hash_ace_info_index].dst_portrange_not_powerof2;
+ /* by default assume all values are shadowed -> check all mask types */
+ kv_val->shadowed = 1;
+}
+
+static void
+add_del_hashtable_entry(acl_main_t *am,
+ u32 sw_if_index, u8 is_input,
+ applied_hash_ace_entry_t **applied_hash_aces,
+ u32 index, int is_add)
+{
+ clib_bihash_kv_48_8_t kv;
+
+ fill_applied_hash_ace_kv(am, applied_hash_aces, sw_if_index, is_input, index, &kv);
+ hashtable_add_del(am, &kv, is_add);
+}
+
+
+
+static void
+activate_applied_ace_hash_entry(acl_main_t *am,
+ u32 sw_if_index, u8 is_input,
+ applied_hash_ace_entry_t **applied_hash_aces,
+ u32 new_index)
+{
+ clib_bihash_kv_48_8_t kv;
+ applied_hash_ace_entry_t *pae = &((*applied_hash_aces)[new_index]);
+
+ fill_applied_hash_ace_kv(am, applied_hash_aces, sw_if_index, is_input, new_index, &kv);
+
+ DBG("APPLY ADD KY: %016llx %016llx %016llx %016llx %016llx %016llx",
+ kv.key[0], kv.key[1], kv.key[2],
+ kv.key[3], kv.key[4], kv.key[5]);
+
+ clib_bihash_kv_48_8_t result;
+ hash_acl_lookup_value_t *result_val = (hash_acl_lookup_value_t *)&result.value;
+ int res = BV (clib_bihash_search) (&am->acl_lookup_hash, &kv, &result);
+ if (res == 0) {
+ /* There already exists an entry or more. Append at the end. */
+ u32 existing_index = result_val->applied_entry_index;
+ DBG("A key already exists, with applied entry index: %d", existing_index);
+ ASSERT(existing_index != ~0);
+ applied_hash_ace_entry_t *existing_pae = &((*applied_hash_aces)[existing_index]);
+ /* walk the list to the last element */
+ while (existing_pae->next_applied_entry_index != ~0) {
+ if (existing_index == existing_pae->next_applied_entry_index) {
+ DBG("existing and next index of entry %d are the same, abort", existing_index);
+ ASSERT(existing_index != existing_pae->next_applied_entry_index);
+ }
+ existing_index = existing_pae->next_applied_entry_index;
+ existing_pae = &((*applied_hash_aces)[existing_index]);
+ DBG("...advance to chained entry index: %d", existing_index);
+ }
+ /* link ourselves in */
+ existing_pae->next_applied_entry_index = new_index;
+ pae->prev_applied_entry_index = existing_index;
+ } else {
+ /* It's the very first entry */
+ hashtable_add_del(am, &kv, 1);
+ }
+}
+
+static void
+applied_hash_entries_analyze(acl_main_t *am, applied_hash_ace_entry_t **applied_hash_aces)
+{
+ /*
+ * Go over the rules and check which ones are shadowed and which aren't.
+ * Naive approach: try to match the match value from every ACE as if it
+ * was a live packet, and see if the resulting match happens earlier in the list.
+ * if it does not match or it is later in the ACL - then the entry is not shadowed.
+ *
+ * This approach fails; for example:
+ * deny tcp 2001:db8::/32 2001:db8::/32
+ * permit ip 2001:db8::1/128 2001:db8::2/128
+ */
+}
+
+void
+hash_acl_apply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index)
+{
+ int i;
+
+ DBG("HASH ACL apply: sw_if_index %d is_input %d acl %d", sw_if_index, is_input, acl_index);
+ u32 *acl_vec = is_input ? am->input_acl_vec_by_sw_if_index[sw_if_index] :
+ am->output_acl_vec_by_sw_if_index[sw_if_index];
+ if (is_input) {
+ vec_validate(am->input_hash_entry_vec_by_sw_if_index, sw_if_index);
+ } else {
+ vec_validate(am->output_hash_entry_vec_by_sw_if_index, sw_if_index);
+ }
+ vec_validate(am->hash_acl_infos, acl_index);
+ applied_hash_ace_entry_t **applied_hash_aces = is_input ? &am->input_hash_entry_vec_by_sw_if_index[sw_if_index] :
+ &am->output_hash_entry_vec_by_sw_if_index[sw_if_index];
+ u32 order_index = vec_search(acl_vec, acl_index);
+ hash_acl_info_t *ha = &am->hash_acl_infos[acl_index];
+ ASSERT(order_index != ~0);
+
+ if (!am->acl_lookup_hash_initialized) {
+ BV (clib_bihash_init) (&am->acl_lookup_hash, "ACL plugin rule lookup bihash",
+ 65536, 2 << 25);
+ am->acl_lookup_hash_initialized = 1;
+ }
+ int base_offset = vec_len(*applied_hash_aces);
+
+ /* Update the bitmap of the mask types with which the lookup
+ needs to happen for the ACLs applied to this sw_if_index */
+ applied_hash_acl_info_t **applied_hash_acls = is_input ? &am->input_applied_hash_acl_info_by_sw_if_index :
+ &am->output_applied_hash_acl_info_by_sw_if_index;
+ vec_validate((*applied_hash_acls), sw_if_index);
+ applied_hash_acl_info_t *pal = &(*applied_hash_acls)[sw_if_index];
+ pal->mask_type_index_bitmap = clib_bitmap_or(pal->mask_type_index_bitmap,
+ ha->mask_type_index_bitmap);
+ /*
+ * if the applied ACL is empty, the current code will cause a
+ * different behavior compared to current linear search: an empty ACL will
+ * simply fall through to the next ACL, or to the default deny at the end.
+ *
+ * This is not a problem, because after vpp-dev discussion,
+ * the consensus was it should not be possible to apply the non-existent
+ * ACL, so the change adding this code also takes care of that.
+ */
+
+ /* expand the applied aces vector by the necessary amount */
+ vec_resize((*applied_hash_aces), vec_len(ha->rules));
+
+ /* add the rules from the ACL to the hash table for lookup and append to the vector*/
+ for(i=0; i < vec_len(ha->rules); i++) {
+ u32 new_index = base_offset + i;
+ applied_hash_ace_entry_t *pae = &((*applied_hash_aces)[new_index]);
+ pae->acl_index = acl_index;
+ pae->ace_index = ha->rules[i].ace_index;
+ pae->action = ha->rules[i].action;
+ pae->hash_ace_info_index = i;
+ /* we might link it in later */
+ pae->next_applied_entry_index = ~0;
+ pae->prev_applied_entry_index = ~0;
+ activate_applied_ace_hash_entry(am, sw_if_index, is_input, applied_hash_aces, new_index);
+ }
+ applied_hash_entries_analyze(am, applied_hash_aces);
+}
+
+static void
+move_applied_ace_hash_entry(acl_main_t *am,
+ u32 sw_if_index, u8 is_input,
+ applied_hash_ace_entry_t **applied_hash_aces,
+ u32 old_index, u32 new_index)
+{
+
+ /* move the entry */
+ (*applied_hash_aces)[new_index] = (*applied_hash_aces)[old_index];
+
+ /* update the linkage and hash table if necessary */
+ applied_hash_ace_entry_t *pae = &((*applied_hash_aces)[old_index]);
+
+ if (pae->prev_applied_entry_index != ~0) {
+ applied_hash_ace_entry_t *prev_pae = &((*applied_hash_aces)[pae->prev_applied_entry_index]);
+ ASSERT(prev_pae->next_applied_entry_index == old_index);
+ prev_pae->next_applied_entry_index = new_index;
+ } else {
+ /* first entry - so the hash points to it, update */
+ add_del_hashtable_entry(am, sw_if_index, is_input,
+ applied_hash_aces, new_index, 1);
+ }
+ if (pae->next_applied_entry_index != ~0) {
+ applied_hash_ace_entry_t *next_pae = &((*applied_hash_aces)[pae->next_applied_entry_index]);
+ ASSERT(next_pae->prev_applied_entry_index == old_index);
+ next_pae->prev_applied_entry_index = new_index;
+ }
+ /* invalidate the old entry */
+ pae->prev_applied_entry_index = ~0;
+ pae->next_applied_entry_index = ~0;
+}
+
+static void
+deactivate_applied_ace_hash_entry(acl_main_t *am,
+ u32 sw_if_index, u8 is_input,
+ applied_hash_ace_entry_t **applied_hash_aces,
+ u32 old_index)
+{
+ applied_hash_ace_entry_t *pae = &((*applied_hash_aces)[old_index]);
+
+ if (pae->next_applied_entry_index != ~0) {
+ applied_hash_ace_entry_t *next_pae = &((*applied_hash_aces)[pae->next_applied_entry_index]);
+ ASSERT(next_pae->prev_applied_entry_index == old_index);
+ next_pae->prev_applied_entry_index = pae->prev_applied_entry_index;
+ }
+
+ if (pae->prev_applied_entry_index != ~0) {
+ applied_hash_ace_entry_t *prev_pae = &((*applied_hash_aces)[pae->prev_applied_entry_index]);
+ ASSERT(prev_pae->next_applied_entry_index == old_index);
+ prev_pae->next_applied_entry_index = pae->next_applied_entry_index;
+ } else {
+ /* It was the first entry. We need either to reset the hash entry or delete it */
+ if (pae->next_applied_entry_index != ~0) {
+ add_del_hashtable_entry(am, sw_if_index, is_input,
+ applied_hash_aces, pae->next_applied_entry_index, 1);
+ } else {
+ /* no next entry, so just delete the entry in the hash table */
+ add_del_hashtable_entry(am, sw_if_index, is_input,
+ applied_hash_aces, old_index, 0);
+ }
+ }
+}
+
+
+static void
+hash_acl_build_applied_lookup_bitmap(acl_main_t *am, u32 sw_if_index, u8 is_input)
+{
+ int i;
+ uword *new_lookup_bitmap = 0;
+ u32 **applied_acls = is_input ? &am->input_acl_vec_by_sw_if_index[sw_if_index] :
+ &am->output_acl_vec_by_sw_if_index[sw_if_index];
+ applied_hash_acl_info_t **applied_hash_acls = is_input ? &am->input_applied_hash_acl_info_by_sw_if_index :
+ &am->output_applied_hash_acl_info_by_sw_if_index;
+ applied_hash_acl_info_t *pal = &(*applied_hash_acls)[sw_if_index];
+ for(i=0; i < vec_len(*applied_acls); i++) {
+ u32 a_acl_index = (*applied_acls)[i];
+ hash_acl_info_t *ha = &am->hash_acl_infos[a_acl_index];
+ new_lookup_bitmap = clib_bitmap_or(new_lookup_bitmap,
+ ha->mask_type_index_bitmap);
+ }
+ uword *old_lookup_bitmap = pal->mask_type_index_bitmap;
+ pal->mask_type_index_bitmap = new_lookup_bitmap;
+ clib_bitmap_free(old_lookup_bitmap);
+}
+
+void
+hash_acl_unapply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index)
+{
+ int i;
+
+ DBG("HASH ACL unapply: sw_if_index %d is_input %d acl %d", sw_if_index, is_input, acl_index);
+ hash_acl_info_t *ha = &am->hash_acl_infos[acl_index];
+ applied_hash_ace_entry_t **applied_hash_aces = is_input ? &am->input_hash_entry_vec_by_sw_if_index[sw_if_index] :
+ &am->output_hash_entry_vec_by_sw_if_index[sw_if_index];
+
+ for(i=0; i < vec_len((*applied_hash_aces)); i++) {
+ if ((*applied_hash_aces)[i].acl_index == acl_index) {
+ DBG("Found applied ACL#%d at applied index %d", acl_index, i);
+ break;
+ }
+ }
+ if (vec_len((*applied_hash_aces)) <= i) {
+ DBG("Did not find applied ACL#%d at sw_if_index %d", acl_index, sw_if_index);
+ /* we went all the way without finding any entries. Probably a list was empty. */
+ return;
+ }
+ int base_offset = i;
+ int tail_offset = base_offset + vec_len(ha->rules);
+ int tail_len = vec_len((*applied_hash_aces)) - tail_offset;
+ DBG("base_offset: %d, tail_offset: %d, tail_len: %d", base_offset, tail_offset, tail_len);
+
+ for(i=0; i < vec_len(ha->rules); i ++) {
+ DBG("UNAPPLY DEACTIVATE: sw_if_index %d is_input %d, applied index %d", sw_if_index, is_input, base_offset + i);
+ deactivate_applied_ace_hash_entry(am, sw_if_index, is_input,
+ applied_hash_aces, base_offset + i);
+ }
+ for(i=0; i < tail_len; i ++) {
+ /* move the entry at tail offset to base offset */
+ /* that is, from (tail_offset+i) -> (base_offset+i) */
+ DBG("UNAPPLY MOVE: sw_if_index %d is_input %d, applied index %d ->", sw_if_index, is_input, tail_offset+i, base_offset + i);
+ move_applied_ace_hash_entry(am, sw_if_index, is_input, applied_hash_aces, tail_offset + i, base_offset + i);
+ }
+ /* trim the end of the vector */
+ _vec_len((*applied_hash_aces)) -= vec_len(ha->rules);
+
+ applied_hash_entries_analyze(am, applied_hash_aces);
+
+ /* After deletion we might not need some of the mask-types anymore... */
+ hash_acl_build_applied_lookup_bitmap(am, sw_if_index, is_input);
+}
+
+/*
+ * Create the applied ACEs and update the hash table,
+ * taking into account that the ACL may not be the last
+ * in the vector of applied ACLs.
+ *
+ * For now, walk from the end of the vector and unapply the ACLs,
+ * then apply the one in question and reapply the rest.
+ */
+
+void
+hash_acl_reapply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index)
+{
+ u32 **applied_acls = is_input ? &am->input_acl_vec_by_sw_if_index[sw_if_index] :
+ &am->output_acl_vec_by_sw_if_index[sw_if_index];
+ int i;
+ int start_index = vec_search((*applied_acls), acl_index);
+ /*
+ * This function is called after we find out the sw_if_index where ACL is applied.
+ * If the by-sw_if_index vector does not have the ACL#, then it's a bug.
+ */
+ ASSERT(start_index < vec_len(*applied_acls));
+
+ /* unapply all the ACLs till the current one */
+ for(i = vec_len(*applied_acls) - 1; i >= start_index; i--) {
+ hash_acl_unapply(am, sw_if_index, is_input, (*applied_acls)[i]);
+ }
+ for(i = start_index; i < vec_len(*applied_acls); i++) {
+ hash_acl_apply(am, sw_if_index, is_input, (*applied_acls)[i]);
+ }
+}
+
+static void
+make_address_mask(ip46_address_t *addr, u8 is_ipv6, u8 prefix_len)
+{
+ if (is_ipv6) {
+ ip6_address_mask_from_width(&addr->ip6, prefix_len);
+ } else {
+ /* FIXME: this may not be the correct way */
+ ip6_address_mask_from_width(&addr->ip6, prefix_len + 3*32);
+ ip46_address_mask_ip4(addr);
+ }
+}
+
+static u8
+make_port_mask(u16 *portmask, u16 port_first, u16 port_last)
+{
+ if (port_first == port_last) {
+ *portmask = 0xffff;
+ /* single port is representable by masked value */
+ return 0;
+ }
+ if ((port_first == 0) && (port_last == 65535)) {
+ *portmask = 0;
+ /* wildcard port is representable by a masked value */
+ return 0;
+ }
+
+ /*
+ * For now match all the ports, later
+ * here might be a better optimization which would
+ * pick out bitmaskable portranges.
+ *
+ * However, adding a new mask type potentially
+ * adds a per-packet extra lookup, so the benefit is not clear.
+ */
+ *portmask = 0;
+ /* This port range can't be represented via bitmask exactly. */
+ return 1;
+}
+
+static void
+make_mask_and_match_from_rule(fa_5tuple_t *mask, acl_rule_t *r, hash_ace_info_t *hi, int match_nonfirst_fragment)
+{
+ memset(mask, 0, sizeof(*mask));
+ memset(&hi->match, 0, sizeof(hi->match));
+ hi->action = r->is_permit;
+
+ /* we will need to be matching based on sw_if_index, direction, and mask_type_index when applied */
+ mask->pkt.sw_if_index = ~0;
+ mask->pkt.is_input = 1;
+ /* we will assign the match of mask_type_index later when we find it*/
+ mask->pkt.mask_type_index_lsb = ~0;
+
+ mask->pkt.is_ip6 = 1;
+ hi->match.pkt.is_ip6 = r->is_ipv6;
+
+ make_address_mask(&mask->addr[0], r->is_ipv6, r->src_prefixlen);
+ hi->match.addr[0] = r->src;
+ make_address_mask(&mask->addr[1], r->is_ipv6, r->dst_prefixlen);
+ hi->match.addr[1] = r->dst;
+
+ if (r->proto != 0) {
+ mask->l4.proto = ~0; /* L4 proto needs to be matched */
+ hi->match.l4.proto = r->proto;
+ if (match_nonfirst_fragment) {
+ /* match the non-first fragments only */
+ mask->pkt.is_nonfirst_fragment = 1;
+ hi->match.pkt.is_nonfirst_fragment = 1;
+ } else {
+ /* Calculate the src/dst port masks and make the src/dst port matches accordingly */
+ hi->src_portrange_not_powerof2 = make_port_mask(&mask->l4.port[0], r->src_port_or_type_first, r->src_port_or_type_last);
+ hi->match.l4.port[0] = r->src_port_or_type_first & mask->l4.port[0];
+ hi->dst_portrange_not_powerof2 = make_port_mask(&mask->l4.port[1], r->dst_port_or_code_first, r->dst_port_or_code_last);
+ hi->match.l4.port[1] = r->dst_port_or_code_first & mask->l4.port[1];
+ /* L4 info must be valid in order to match */
+ mask->pkt.l4_valid = 1;
+ hi->match.pkt.l4_valid = 1;
+ /* And we must set the mask to check that it is an initial fragment */
+ mask->pkt.is_nonfirst_fragment = 1;
+ hi->match.pkt.is_nonfirst_fragment = 0;
+ if ((r->proto == IPPROTO_TCP) && (r->tcp_flags_mask != 0)) {
+ /* if we want to match on TCP flags, they must be masked off as well */
+ mask->pkt.tcp_flags = r->tcp_flags_mask;
+ hi->match.pkt.tcp_flags = r->tcp_flags_value;
+ /* and the flags need to be present within the packet being matched */
+ mask->pkt.tcp_flags_valid = 1;
+ hi->match.pkt.tcp_flags_valid = 1;
+ }
+ }
+ }
+ /* Sanitize the mask and the match */
+ u64 *pmask = (u64 *)mask;
+ u64 *pmatch = (u64 *)&hi->match;
+ int j;
+ for(j=0; j<6; j++) {
+ pmatch[j] = pmatch[j] & pmask[j];
+ }
+}
+
+static u32
+find_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
+{
+ ace_mask_type_entry_t *mte;
+ /* *INDENT-OFF* */
+ pool_foreach(mte, am->ace_mask_type_pool,
+ ({
+ if(memcmp(&mte->mask, mask, sizeof(*mask)) == 0)
+ return (mte - am->ace_mask_type_pool);
+ }));
+ /* *INDENT-ON* */
+ return ~0;
+}
+
+static u32
+assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
+{
+ u32 mask_type_index = find_mask_type_index(am, mask);
+ ace_mask_type_entry_t *mte;
+ if(~0 == mask_type_index) {
+ pool_get_aligned (am->ace_mask_type_pool, mte, CLIB_CACHE_LINE_BYTES);
+ mask_type_index = mte - am->ace_mask_type_pool;
+ clib_memcpy(&mte->mask, mask, sizeof(mte->mask));
+ mte->refcount = 0;
+ /*
+ * We can use only 16 bits, since the match carries only a u16 field.
+ * Realistically, once you go to 64K of mask types, it is a huge
+ * problem anyway, so we might as well stop half way.
+ */
+ ASSERT(mask_type_index < 32768);
+ }
+ mte = am->ace_mask_type_pool + mask_type_index;
+ mte->refcount++;
+ return mask_type_index;
+}
+
+static void
+release_mask_type_index(acl_main_t *am, u32 mask_type_index)
+{
+ ace_mask_type_entry_t *mte = &am->ace_mask_type_pool[mask_type_index];
+ mte->refcount--;
+ if (mte->refcount == 0) {
+ /* we are not using this entry anymore */
+ pool_put(am->ace_mask_type_pool, mte);
+ }
+}
+
+void hash_acl_add(acl_main_t *am, int acl_index)
+{
+ DBG("HASH ACL add : %d", acl_index);
+ int i;
+ acl_list_t *a = &am->acls[acl_index];
+ vec_validate(am->hash_acl_infos, acl_index);
+ hash_acl_info_t *ha = &am->hash_acl_infos[acl_index];
+ memset(ha, 0, sizeof(*ha));
+
+ /* walk the newly added ACL entries and ensure that for each of them there
+ is a mask type, increment a reference count for that mask type */
+ for(i=0; i < a->count; i++) {
+ hash_ace_info_t ace_info;
+ fa_5tuple_t mask;
+ memset(&ace_info, 0, sizeof(ace_info));
+ ace_info.acl_index = acl_index;
+ ace_info.ace_index = i;
+
+ make_mask_and_match_from_rule(&mask, &a->rules[i], &ace_info, 0);
+ ace_info.mask_type_index = assign_mask_type_index(am, &mask);
+ /* assign the mask type index for matching itself */
+ ace_info.match.pkt.mask_type_index_lsb = ace_info.mask_type_index;
+ DBG("ACE: %d mask_type_index: %d", i, ace_info.mask_type_index);
+ /* Ensure a given index is set in the mask type index bitmap for this ACL */
+ ha->mask_type_index_bitmap = clib_bitmap_set(ha->mask_type_index_bitmap, ace_info.mask_type_index, 1);
+ vec_add1(ha->rules, ace_info);
+ if (am->l4_match_nonfirst_fragment) {
+ /* add the second rule which matches the noninitial fragments with the respective mask */
+ make_mask_and_match_from_rule(&mask, &a->rules[i], &ace_info, 1);
+ ace_info.mask_type_index = assign_mask_type_index(am, &mask);
+ ace_info.match.pkt.mask_type_index_lsb = ace_info.mask_type_index;
+ DBG("ACE: %d (non-initial frags) mask_type_index: %d", i, ace_info.mask_type_index);
+ /* Ensure a given index is set in the mask type index bitmap for this ACL */
+ ha->mask_type_index_bitmap = clib_bitmap_set(ha->mask_type_index_bitmap, ace_info.mask_type_index, 1);
+ vec_add1(ha->rules, ace_info);
+ }
+ }
+ /*
+ * if an ACL is applied somewhere, fill the corresponding lookup data structures.
+ * We need to take care if the ACL is not the last one in the vector of ACLs applied to the interface.
+ */
+ if (acl_index < vec_len(am->input_sw_if_index_vec_by_acl)) {
+ u32 *sw_if_index;
+ vec_foreach(sw_if_index, am->input_sw_if_index_vec_by_acl[acl_index]) {
+ hash_acl_reapply(am, *sw_if_index, 1, acl_index);
+ }
+ }
+ if (acl_index < vec_len(am->output_sw_if_index_vec_by_acl)) {
+ u32 *sw_if_index;
+ vec_foreach(sw_if_index, am->output_sw_if_index_vec_by_acl[acl_index]) {
+ hash_acl_reapply(am, *sw_if_index, 0, acl_index);
+ }
+ }
+}
+
+void hash_acl_delete(acl_main_t *am, int acl_index)
+{
+ DBG("HASH ACL delete : %d", acl_index);
+ /*
+ * If the ACL is applied somewhere, remove the references to it (call hash_acl_unapply).
+ * This is a different behavior from the linear lookup, where an empty ACL is "deny all".
+ *
+ * However, following vpp-dev discussion the ACL that is referenced elsewhere
+ * should not be possible to delete, and the change adding this also adds
+ * the safeguards to that effect, so this is not a problem.
+ */
+ if (acl_index < vec_len(am->input_sw_if_index_vec_by_acl)) {
+ u32 *sw_if_index;
+ vec_foreach(sw_if_index, am->input_sw_if_index_vec_by_acl[acl_index]) {
+ hash_acl_unapply(am, *sw_if_index, 1, acl_index);
+ }
+ }
+ if (acl_index < vec_len(am->output_sw_if_index_vec_by_acl)) {
+ u32 *sw_if_index;
+ vec_foreach(sw_if_index, am->output_sw_if_index_vec_by_acl[acl_index]) {
+ hash_acl_unapply(am, *sw_if_index, 0, acl_index);
+ }
+ }
+
+ /* walk the mask types for the ACL about-to-be-deleted, and decrease
+ * the reference count, possibly freeing up some of them */
+ int i;
+ hash_acl_info_t *ha = &am->hash_acl_infos[acl_index];
+ for(i=0; i < vec_len(ha->rules); i++) {
+ release_mask_type_index(am, ha->rules[i].mask_type_index);
+ }
+ clib_bitmap_free(ha->mask_type_index_bitmap);
+ vec_free(ha->rules);
+}
+
+u8
+hash_multi_acl_match_5tuple (u32 sw_if_index, fa_5tuple_t * pkt_5tuple, int is_l2,
+ int is_ip6, int is_input, u32 * acl_match_p,
+ u32 * rule_match_p, u32 * trace_bitmap)
+{
+ acl_main_t *am = &acl_main;
+ applied_hash_ace_entry_t **applied_hash_aces = is_input ? &am->input_hash_entry_vec_by_sw_if_index[sw_if_index] :
+ &am->output_hash_entry_vec_by_sw_if_index[sw_if_index];
+ u32 match_index = multi_acl_match_get_applied_ace_index(am, pkt_5tuple);
+ if (match_index < vec_len((*applied_hash_aces))) {
+ applied_hash_ace_entry_t *pae = &((*applied_hash_aces)[match_index]);
+ *acl_match_p = pae->acl_index;
+ *rule_match_p = pae->ace_index;
+ return pae->action;
+ }
+ return 0;
+}
+
+
+void
+show_hash_acl_hash (vlib_main_t * vm, acl_main_t *am, u32 verbose)
+{
+ vlib_cli_output(vm, "\nACL lookup hash table:\n%U\n",
+ BV (format_bihash), &am->acl_lookup_hash, verbose);
+}
diff --git a/src/plugins/acl/hash_lookup.h b/src/plugins/acl/hash_lookup.h
new file mode 100644
index 00000000000..c5913624f81
--- /dev/null
+++ b/src/plugins/acl/hash_lookup.h
@@ -0,0 +1,60 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef _ACL_HASH_LOOKUP_H_
+#define _ACL_HASH_LOOKUP_H_
+
+#include <stddef.h>
+#include "acl.h"
+
+/*
+ * Do the necessary to logically apply the ACL to the existing vector of ACLs looked up
+ * during the packet processing
+ */
+
+void hash_acl_apply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index);
+
+/* Remove the ACL from the packet processing lookups on a given interface */
+
+void hash_acl_unapply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index);
+
+/*
+ * Add an ACL or delete an ACL. ACL may already have been referenced elsewhere,
+ * so potentially we also need to do the work to enable the lookups.
+ */
+
+void hash_acl_add(acl_main_t *am, int acl_index);
+void hash_acl_delete(acl_main_t *am, int acl_index);
+
+/*
+ * Do the work required to match a given 5-tuple from the packet,
+ * and return the action as well as populate the values pointed
+ * to by the *_match_p pointers and maybe trace_bitmap.
+ */
+
+u8
+hash_multi_acl_match_5tuple (u32 sw_if_index, fa_5tuple_t * pkt_5tuple, int is_l2,
+ int is_ip6, int is_input, u32 * acl_match_p,
+ u32 * rule_match_p, u32 * trace_bitmap);
+
+
+/*
+ * The debug function to show the contents of the ACL lookup hash
+ */
+void show_hash_acl_hash(vlib_main_t * vm, acl_main_t *am, u32 verbose);
+
+#endif
diff --git a/src/plugins/acl/hash_lookup.md b/src/plugins/acl/hash_lookup.md
new file mode 100644
index 00000000000..95524643e25
--- /dev/null
+++ b/src/plugins/acl/hash_lookup.md
@@ -0,0 +1,241 @@
+ACL plugin constant-time lookup design
+======================================
+
+The initial implementation of the ACL plugin performs a trivial for() loop,
+going through the assigned ACLs on a per-packet basis. This is not very
+efficient, even though for very short ACLs its simplicity can beat
+more advanced methods.
+
+However, to cover the case of longer ACLs with acceptable performance,
+we need a better way of matching. This write-up proposes a mechanism
+that reduces the lookup cost from O(M), where M is the number of entries,
+to O(N), where N is the number of different mask combinations.
+
+Preparation of ACL(s)
+---------------------
+
+The ACL plugin will maintain a global list of "mask types", i.e. the specific
+configurations of "do not care" bits within the ACEs.
+Upon the creation of a new ACL, a pass will be made through all the
+ACEs, to assign and possibly allocate the "mask type number".
+
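+A minimal sketch of how such an assignment could look, assuming a pool
+`am->ace_mask_type_pool` of mask entries of a type *ace_mask_type_entry_t*,
+and the helper name *assign_mask_type_index* (all illustrative; the actual
+code may differ):
+
+```
+static u32
+assign_mask_type_index (acl_main_t *am, fa_5tuple_t *mask)
+{
+  ace_mask_type_entry_t *mte;
+  /* reuse an existing mask type if an identical mask is already known */
+  pool_foreach (mte, am->ace_mask_type_pool,
+  ({
+    if (memcmp (&mte->mask, mask, sizeof (*mask)) == 0)
+      return (mte - am->ace_mask_type_pool);
+  }));
+  /* otherwise allocate a new mask type entry */
+  pool_get_aligned (am->ace_mask_type_pool, mte, CLIB_CACHE_LINE_BYTES);
+  mte->mask = *mask;
+  mte->refcount = 0;
+  return (mte - am->ace_mask_type_pool);
+}
+```
+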
+Each ACL has a structure *hash_acl_info_t* representing the "hash-based"
+parts of information related to that ACL, primarily the array of
+*hash_ace_info_t* structures - each member of that array corresponds
+to one of the rules (ACEs) in the original ACL; to keep track of this,
+each carries a pair of *(acl_index, ace_index)*, used predominantly
+for debugging.
+
+Why do we need a whole separate structure, rather than adding new fields
+to the existing rule structure? First, encapsulation, to minimize
+the pollution of the main ACL code with the hash-based lookup artifacts.
+
+Second, one rule may correspond to more than one "hash-based" ACE.
+In fact, most of the rules do correspond to two of those. Why?
+
+Consider that the current ACL lookup logic is that if a packet
+is not the initial fragment, and there is an L4 entry acting on the packet,
+the comparison will be made only on the L4 protocol field value rather
+than on the protocol and port values. This behaviour is governed by
+*l4_match_nonfirst_fragment* flag in the *acl_main*, and was needed to
+maintain the compatibility with the existing software switch implementation.
+
+While for the sequential check in *single_acl_match_5tuple()*
+it is very easy to implement by just breaking out at the right moment,
+in case of hash-based matching this costs us two checks:
+one on full 5-tuple and the flag *pkt.is_nonfirst_fragment* being zero,
+the second on 3-tuple and the flag *pkt.is_nonfirst_fragment* being one,
+with the second check triggered by the *acl_main.l4_match_nonfirst_fragment*
+setting being the default 1. This dictates the necessity of having a "match"
+field in a given *hash_ace_info_t* element, which would reflect the value
+we are supposed to match after applying the mask.
+
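+A sketch of that expansion, where *make_hash_ace* and *rule_has_l4_info*
+are hypothetical names standing in for the actual rule-conversion details
+(the *pkt.is_nonfirst_fragment* flag is the one described above):
+
+```
+/* one ACE with L4 info typically yields two hash-based entries */
+if (rule_has_l4_info && am->l4_match_nonfirst_fragment)
+  {
+    hash_ace_info_t e1 = make_hash_ace (rule, 1 /* match full 5-tuple */);
+    e1.match.pkt.is_nonfirst_fragment = 0;   /* ports + proto check */
+    vec_add1 (ha->rules, e1);
+
+    hash_ace_info_t e2 = make_hash_ace (rule, 0 /* match 3-tuple only */);
+    e2.match.pkt.is_nonfirst_fragment = 1;   /* proto-only check */
+    vec_add1 (ha->rules, e2);
+  }
+```
+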
+There can be other circumstances when it might be beneficial to expand
+the given rule in the original ACL into multiple entries - for example, as an
+optimization within the port range handling for small port ranges
+(this is not done as of the time of writing).
+
+Assigning ACLs to an interface
+------------------------------
+
+Once the ACL list is assigned to an interface, or, rather, a new ACL
+is added to the list of the existing ACLs applied to the interface,
+we need to update the bihash accelerating the lookup.
+
+All the entries for the lookups are stored within a single *48_8* bihash,
+which captures the 5-tuple from the packet as well as the miscellaneous
+per-packet information flags, e.g. *l4_valid*, *is_nonfirst_fragment*,
+and so on. To facilitate the use of the single bihash by all the interfaces,
+the *is_ip6*, *is_input*, *sw_if_index* are part of the key,
+as well as *mask_type_index* - the latter being necessary because
+there can be entries with the same value but different masks, e.g.:
+`permit ::/0, permit ::/128`.
+
+At the moment of an ACL being applied to an interface, we need to
+walk the list of *hash_ace_info_t* entries corresponding to that ACL,
+and update the bihash with the keys corresponding to the match
+values in these entries.
+
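+A sketch of that walk, assuming a vector `am->hash_acl_infos` of per-ACL
+hash info, with hypothetical helpers (*fill_applied_hash_key*,
+*activate_applied_ace_hash_entry*) standing in for the details:
+
+```
+void
+hash_acl_apply (acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index)
+{
+  hash_acl_info_t *ha = vec_elt_at_index (am->hash_acl_infos, acl_index);
+  int i;
+  for (i = 0; i < vec_len (ha->rules); i++)
+    {
+      clib_bihash_kv_48_8_t kv;
+      /* AND the ACE match value with its mask, fold in sw_if_index,
+         is_input, is_ip6 and the mask type index to form the key */
+      fill_applied_hash_key (&kv, am, &ha->rules[i], sw_if_index, is_input);
+      /* insert the key, or chain onto an existing entry with the same key */
+      activate_applied_ace_hash_entry (am, sw_if_index, is_input, &kv, i);
+    }
+}
+```
+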
+The value of the hash match contains the index into a per-*sw_if_index* vector
+of *applied_hash_ace_entry_t* elements, as well as a couple of flags:
+*shadowed* (optimization: if this flag on a matched entry is zero, it means
+we can stop the lookup early and declare a match - see below),
+and *need_portrange_check* - meaning that what matched was a superset
+of the actual match, and we need to perform an extra check.
+
+Also, upon insertion, we must keep in mind there can be
+multiple *applied_hash_ace_entry_t* entries for the same key and must keep
+a list of those. This is necessary to incrementally apply/unapply
+the ACLs as part of the ACL vector: say, two ACLs have
+"permit 2001:db8::1/128 any" - we should be able to retain the entry
+for the second ACL even if we have deleted the first one.
+Also, in case there are two entries with the same key but
+different port ranges, say 0..42 and 142..65535 - we need
+to be able to sequentially match on those if we decide not
+to expand them into individual port-specific entries.
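+
+A sketch of the insertion logic handling such duplicates, using the field
+names from *hash_lookup_types.h*; `kv` is the key prepared for the new
+applied entry at index `new_index` (error handling omitted):
+
+```
+clib_bihash_kv_48_8_t result;
+/* the new entry always starts as the tail of any chain */
+(*applied_hash_aces)[new_index].next_applied_entry_index = ~0;
+if (BV (clib_bihash_search) (&am->acl_lookup_hash, &kv, &result) == 0)
+  {
+    /* the key already exists: append the new entry to the tail of the chain */
+    u32 last = ((hash_acl_lookup_value_t *) &result.value)->applied_entry_index;
+    while ((*applied_hash_aces)[last].next_applied_entry_index != ~0)
+      last = (*applied_hash_aces)[last].next_applied_entry_index;
+    (*applied_hash_aces)[last].next_applied_entry_index = new_index;
+    (*applied_hash_aces)[new_index].prev_applied_entry_index = last;
+  }
+else
+  {
+    /* first entry for this key: point the bihash value at it */
+    ((hash_acl_lookup_value_t *) &kv.value)->applied_entry_index = new_index;
+    (*applied_hash_aces)[new_index].prev_applied_entry_index = ~0;
+    BV (clib_bihash_add_del) (&am->acl_lookup_hash, &kv, 1 /* is_add */);
+  }
+```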
+
+Per-packet lookup
+-----------------
+
+The simple single-packet lookup is defined in
+*multi_acl_match_get_applied_ace_index*, which returns the index
+of the applied hash ACE if there was a match, or ~0 if there wasn't.
+
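+A simplified sketch of that lookup, iterating over the mask types applied
+on the interface; it omits the shadow-flag early exit and the portrange
+list walk described above, takes the per-interface *applied_hash_acl_info_t*
+as an explicit parameter, and assumes the mask-type pool names from the
+preparation sketch earlier:
+
+```
+static u32
+multi_acl_match_get_applied_ace_index_sketch (acl_main_t * am,
+                                              applied_hash_acl_info_t * pal,
+                                              fa_5tuple_t * match)
+{
+  clib_bihash_kv_48_8_t kv, result;
+  fa_5tuple_t *kv_key = (fa_5tuple_t *) kv.key;
+  u64 *pmatch = (u64 *) match;
+  u32 mask_type_index, best_index = ~0;
+  int i;
+
+  clib_bitmap_foreach (mask_type_index, pal->mask_type_index_bitmap,
+  ({
+    ace_mask_type_entry_t *mte =
+      pool_elt_at_index (am->ace_mask_type_pool, mask_type_index);
+    u64 *pmask = (u64 *) &mte->mask;
+    /* form the key: packet 5-tuple ANDed with this mask type */
+    for (i = 0; i < 6; i++)
+      kv.key[i] = pmatch[i] & pmask[i];
+    /* the mask type index is itself part of the key */
+    kv_key->pkt.mask_type_index_lsb = mask_type_index;
+    if (BV (clib_bihash_search) (&am->acl_lookup_hash, &kv, &result) == 0)
+      {
+        hash_acl_lookup_value_t *rv = (hash_acl_lookup_value_t *) &result.value;
+        /* the lowest applied index wins: it corresponds to the earliest rule */
+        if (rv->applied_entry_index < best_index)
+          best_index = rv->applied_entry_index;
+      }
+  }));
+  return best_index;
+}
+```
+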
+The future optimized per-packet lookup may be batched in three phases:
+
+1. Prepare the keys in the per-worker vector by doing logical AND of
+ original 5-tuple record with the elements of the mask vector.
+2. Lookup the keys in the bihash in a batch manner, collecting the
+ result with lowest u64 (acl index within vector, ACE index) from
+ the hash lookup value, and performing the list walk if necessary
+ (for portranges)
+3. Take the action from the ACL record as defined by (ACL#, ACE#) from the
+   resulting lookup winner, or, if no match is found, perform the default deny.
+
+Shadowed/independent/redundant ACEs
+------------------------------------
+
+During the phase of combining multiple ACLs into one rulebase, when they
+are applied to an interface, we can also perform several optimizations.
+
+If a given ACE is a strict subset of another ACE located higher up in the linear
+search order, we can ignore this ACE completely - because by definition
+it will never match. We will call such an ACE *redundant*. Here is an example:
+
+```
+permit 2001:db8:1::/48 2001:db8:2::/48 (B)
+deny 2001:db8:1:1::/64 2001:db8:2:1::/64 (A)
+```
+
+A bit more formally, we can define this relationship of an ACE A to ACE B as:
+
+```
+redundant(aceA, aceB) := (contains(protoB, protoA) && contains(srcB, srcA)
+ && contains(dstB, dstA) && is_after(A, B))
+```
+
+Here as "contains" we define an operation operating on the sets defined by
+the protocol, (srcIP, srcPortDefinition) and (dstIP, dstPortDefinition)
+respectively, and returning true if all the elements represented by
+the second argument are represented by the first argument. The "is_after"
+is true if A is located below B in the ruleset.
+
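+As an illustration, a self-contained check of "contains" for the address
+prefix component (not part of this change):
+
+```
+/* returns 1 if prefix B (addr_b/len_b) contains prefix A (addr_a/len_a) */
+static int
+prefix_contains (const u8 *addr_b, int len_b, const u8 *addr_a, int len_a)
+{
+  int i;
+  if (len_b > len_a)
+    return 0;             /* a longer prefix cannot contain a shorter one */
+  for (i = 0; i < len_b / 8; i++)
+    if (addr_a[i] != addr_b[i])
+      return 0;
+  if (len_b % 8)
+    {
+      u8 m = (u8) (0xff << (8 - (len_b % 8)));
+      if ((addr_a[i] & m) != (addr_b[i] & m))
+        return 0;
+    }
+  return 1;
+}
+```
+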
+If a given ACE does not intersect at all with any other ACE
+in front of it, we can mark it as such.
+
+Then, during the sequence of lookups, a successful hit on this ACE means
+we do not need to look up other mask combinations - thus potentially
+significantly speeding up the match process. Here is an example,
+assuming we have the following ACL:
+
+```
+permit 2001:db8:1::/48 2001:db8:2::/48 (B)
+deny 2001:db8:3::/48 2001:db8:2:1::/64 (A)
+```
+
+In this case if we match the second entry, we do not need to check whether
+we have matched the first one - the source addresses are completely
+different. We call such an ACE *independent* from another.
+
+We can define this as
+
+```
+independent(aceA, aceB) := (!intersect(protoA, protoB) ||
+ !intersect(srcA, srcB) ||
+ !intersect(dstA, dstB))
+```
+
+where intersect is defined as operation returning true if there are
+elements belonging to the sets of both arguments.
+
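+For the port-range component, "intersect" reduces to a one-line check on
+two inclusive ranges (illustrative):
+
+```
+/* two inclusive ranges intersect iff neither lies entirely below the other */
+static int
+port_ranges_intersect (u16 lo_a, u16 hi_a, u16 lo_b, u16 hi_b)
+{
+  return !(hi_a < lo_b || hi_b < lo_a);
+}
+```
+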
+If the entry A is neither redundant nor independent from B, and is below
+B in the ruleset, we call such an entry *shadowed* by B. Here is an example:
+
+```
+deny tcp 2001:db8:1::/48 2001:db8:2::/48 (B)
+permit 2001:db8:1:1::/64 2001:db8:2:1::/64 (A)
+```
+
+This means the earlier rule "carves out" a subset of A, thus leaving
+a "shadow". (Evidently, the action needs to be different for the shadow
+to have an effect, but for the sake of terminology we do not care.)
+
+The more formal definition:
+
+```
+shadowed(aceA, aceB) := !redundant(aceA, aceB) &&
+ !independent(aceA, aceB) &&
+ is_after(aceA, aceB)
+```
+
+Using this terminology, any ruleset can be represented as
+a DAG (Directed Acyclic Graph), with the bottom being the implicit
+"deny any", pointing to the set of rules shadowing it or the ones
+it is redundant for.
+
+These rules may in turn be shadowing each other. There are no cycles in
+this graph because of the natural order of the rules - a rule located
+closer to the end of the ruleset can never shadow or make redundant a rule
+higher up.
+
+The optimization this enables is skipping the matching of certain
+masks on a per-lookup basis - if a given rule has matched,
+the only adjustment that can still happen is a match with one of
+the rules shadowing it.
+
+Another avenue for optimization is to start the lookup process
+with the mask type that maximizes the chances of an independent ACE match,
+thus making the ACE lookup a single hash table hit.
+
+
+Plumbing
+--------
+
+All the new routines are located in a separate file,
+so we can cleanly experiment with a different approach if this
+does not fit all of the use cases.
+
+The constant-time lookup within the data path exposes an API with
+the same signature as:
+
+```
+u8
+multi_acl_match_5tuple (u32 sw_if_index, fa_5tuple_t * pkt_5tuple, int is_l2,
+ int is_ip6, int is_input, u32 * acl_match_p,
+ u32 * rule_match_p, u32 * trace_bitmap)
+```
+
+There should be a new upper-level function with the same signature, which
+decides whether to use the linear lookup, the constant-time lookup
+implemented by this work, or some other optimization
+(e.g. keeping a cache of the last N lookups).
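+
+A possible sketch of that dispatcher; *use_hash_acl_matching* matches the
+debug CLI added by this change, while *linear_multi_acl_match_5tuple* is an
+assumed name for the existing linear path:
+
+```
+u8
+full_acl_match_5tuple (u32 sw_if_index, fa_5tuple_t *pkt_5tuple, int is_l2,
+                       int is_ip6, int is_input, u32 *acl_match_p,
+                       u32 *rule_match_p, u32 *trace_bitmap)
+{
+  acl_main_t *am = &acl_main;
+  if (am->use_hash_acl_matching)
+    return hash_multi_acl_match_5tuple (sw_if_index, pkt_5tuple, is_l2,
+                                        is_ip6, is_input, acl_match_p,
+                                        rule_match_p, trace_bitmap);
+  return linear_multi_acl_match_5tuple (sw_if_index, pkt_5tuple, is_l2,
+                                        is_ip6, is_input, acl_match_p,
+                                        rule_match_p, trace_bitmap);
+}
+```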
+
+The routine doing the preparatory work should be called
+in `acl_add_list()` after creating the linear-lookup structures,
+and the routine populating the hashtable
+should be called from `acl_interface_add_del_inout_acl()` or its callees.
+
+The initial implementation will be geared towards looking up a single
+match at a time, with subsequent optimizations possible to perform
+the lookup for more than one packet at a time.
+
diff --git a/src/plugins/acl/hash_lookup_private.h b/src/plugins/acl/hash_lookup_private.h
new file mode 100644
index 00000000000..7b6449cd6ad
--- /dev/null
+++ b/src/plugins/acl/hash_lookup_private.h
@@ -0,0 +1,27 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#define ACL_HASH_LOOKUP_DEBUG 0
+
+#if ACL_HASH_LOOKUP_DEBUG == 1
+#define DBG(...) clib_warning(__VA_ARGS__)
+#define DBG_UNIX_LOG(...) clib_unix_warning(__VA_ARGS__)
+#else
+#define DBG(...)
+#define DBG_UNIX_LOG(...)
+#endif
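+
+/*
+ * Usage example (illustrative):
+ *   DBG ("activate_applied_ace_hash_entry %d", new_index);
+ * expands to clib_warning() when ACL_HASH_LOOKUP_DEBUG is 1,
+ * and to nothing otherwise.
+ */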
+
diff --git a/src/plugins/acl/hash_lookup_types.h b/src/plugins/acl/hash_lookup_types.h
new file mode 100644
index 00000000000..1c04459155c
--- /dev/null
+++ b/src/plugins/acl/hash_lookup_types.h
@@ -0,0 +1,94 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef _ACL_HASH_LOOKUP_TYPES_H_
+#define _ACL_HASH_LOOKUP_TYPES_H_
+
+/* The structure representing the single entry with hash representation */
+typedef struct {
+ /* these two entries refer to the original ACL# and rule# within that ACL */
+ u32 acl_index;
+ u32 ace_index;
+
+ u32 mask_type_index;
+ u8 src_portrange_not_powerof2;
+ u8 dst_portrange_not_powerof2;
+
+ fa_5tuple_t match;
+ u8 action;
+} hash_ace_info_t;
+
+/*
+ * The structure holding the information necessary for the hash-based ACL operation
+ */
+typedef struct {
+ /* The mask types present in this ACL */
+ uword *mask_type_index_bitmap;
+ hash_ace_info_t *rules;
+} hash_acl_info_t;
+
+typedef struct {
+ /* original non-compiled ACL */
+ u32 acl_index;
+ u32 ace_index;
+ /* the index of the hash_ace_info_t */
+ u32 hash_ace_info_index;
+ /*
+ * in case of the same key having multiple entries,
+ * this holds the index of the next entry.
+ */
+ u32 next_applied_entry_index;
+ /*
+ * previous entry in the list of the chained ones,
+ * if ~0 then this is entry in the hash.
+ */
+ u32 prev_applied_entry_index;
+ /*
+ * Action of this applied ACE
+ */
+ u8 action;
+} applied_hash_ace_entry_t;
+
+typedef struct {
+ /*
+   * A logical OR of all the applied_hash_ace_entry_t=>
+ * hash_ace_info_t=>mask_type_index bits set
+ */
+ uword *mask_type_index_bitmap;
+} applied_hash_acl_info_t;
+
+
+typedef union {
+ u64 as_u64;
+ struct {
+ u32 applied_entry_index;
+ u16 reserved_u16;
+ u8 reserved_u8;
+ /* means there is some other entry in front intersecting with this one */
+ u8 shadowed:1;
+ u8 need_portrange_check:1;
+ u8 reserved_flags:6;
+ };
+} hash_acl_lookup_value_t;
+
+#define CT_ASSERT_EQUAL(name, x,y) typedef int assert_ ## name ## _compile_time_assertion_failed[((x) == (y))-1]
+
+CT_ASSERT_EQUAL(hash_acl_lookup_value_t_is_u64, sizeof(hash_acl_lookup_value_t), sizeof(u64));
+
+#undef CT_ASSERT_EQUAL
+
+#endif
diff --git a/test/test_acl_plugin.py b/test/test_acl_plugin.py
index 5267cd27d98..5f830ea2b50 100644
--- a/test/test_acl_plugin.py
+++ b/test/test_acl_plugin.py
@@ -121,6 +121,9 @@ class TestACLplugin(VppTestCase):
super(TestACLplugin, self).tearDown()
if not self.vpp_dead:
self.logger.info(self.vapi.ppcli("show l2fib verbose"))
+ self.logger.info(self.vapi.ppcli("show acl-plugin acl"))
+ self.logger.info(self.vapi.ppcli("show acl-plugin interface"))
+ self.logger.info(self.vapi.ppcli("show acl-plugin tables"))
self.logger.info(self.vapi.ppcli("show bridge-domain %s detail"
% self.bd_id))
diff --git a/test/test_acl_plugin_conns.py b/test/test_acl_plugin_conns.py
index be016d91c3c..705ffbc6191 100644
--- a/test/test_acl_plugin_conns.py
+++ b/test/test_acl_plugin_conns.py
@@ -185,6 +185,18 @@ class ACLPluginConnTestCase(VppTestCase):
i.resolve_arp()
i.resolve_ndp()
+ def tearDown(self):
+ """Run standard test teardown and log various show commands
+ """
+ super(ACLPluginConnTestCase, self).tearDown()
+ if not self.vpp_dead:
+ self.logger.info(self.vapi.cli("show ip arp"))
+ self.logger.info(self.vapi.cli("show ip6 neighbors"))
+ self.logger.info(self.vapi.cli("show acl-plugin sessions"))
+ self.logger.info(self.vapi.cli("show acl-plugin acl"))
+ self.logger.info(self.vapi.cli("show acl-plugin interface"))
+ self.logger.info(self.vapi.cli("show acl-plugin tables"))
+
def api_acl_add_replace(self, acl_index, r, count=-1, tag="",
expected_retval=0):
"""Add/replace an ACL
diff --git a/test/test_acl_plugin_l2l3.py b/test/test_acl_plugin_l2l3.py
index c7f1068aa06..f383a482019 100644
--- a/test/test_acl_plugin_l2l3.py
+++ b/test/test_acl_plugin_l2l3.py
@@ -115,6 +115,9 @@ class TestIpIrb(VppTestCase):
self.logger.info(self.vapi.cli("show ip arp"))
self.logger.info(self.vapi.cli("show ip6 neighbors"))
self.logger.info(self.vapi.cli("show acl-plugin sessions"))
+ self.logger.info(self.vapi.cli("show acl-plugin acl"))
+ self.logger.info(self.vapi.cli("show acl-plugin interface"))
+ self.logger.info(self.vapi.cli("show acl-plugin tables"))
def api_acl_add_replace(self, acl_index, r, count, tag="",
expected_retval=0):