summaryrefslogtreecommitdiffstats
path: root/src/plugins
diff options
context:
space:
mode:
Diffstat (limited to 'src/plugins')
-rw-r--r--src/plugins/acl.am1
-rw-r--r--src/plugins/acl/acl.c883
-rw-r--r--src/plugins/acl/acl.h24
-rw-r--r--src/plugins/acl/acl_lookup_context.md113
-rw-r--r--src/plugins/acl/elog_acl_trace.h137
-rw-r--r--src/plugins/acl/exports.h31
-rw-r--r--src/plugins/acl/fa_node.c480
-rw-r--r--src/plugins/acl/fa_node.h11
-rw-r--r--src/plugins/acl/hash_lookup.c451
-rw-r--r--src/plugins/acl/hash_lookup.h29
-rw-r--r--src/plugins/acl/hash_lookup_types.h11
-rw-r--r--src/plugins/acl/lookup_context.c304
-rw-r--r--src/plugins/acl/lookup_context.h60
-rw-r--r--src/plugins/acl/public_inlines.h731
14 files changed, 2118 insertions, 1148 deletions
diff --git a/src/plugins/acl.am b/src/plugins/acl.am
index 0a414481dbf..b67191ec938 100644
--- a/src/plugins/acl.am
+++ b/src/plugins/acl.am
@@ -17,6 +17,7 @@ vppplugins_LTLIBRARIES += acl_plugin.la
acl_plugin_la_SOURCES = \
acl/acl.c \
acl/hash_lookup.c \
+ acl/lookup_context.c \
acl/fa_node.c \
acl/l2sess.h \
acl/manual_fns.h \
diff --git a/src/plugins/acl/acl.c b/src/plugins/acl/acl.c
index 8543ac6eef2..314380bcf8f 100644
--- a/src/plugins/acl/acl.c
+++ b/src/plugins/acl/acl.c
@@ -51,7 +51,7 @@
#undef vl_api_version
#include "fa_node.h"
-#include "hash_lookup.h"
+#include "public_inlines.h"
acl_main_t acl_main;
@@ -105,6 +105,16 @@ format_vec16 (u8 * s, va_list * va)
+u8
+acl_plugin_acl_exists (u32 acl_index)
+{
+ acl_main_t *am = &acl_main;
+
+ if (pool_is_free_index (am->acls, acl_index))
+ return 0;
+
+ return 1;
+}
static void *
acl_set_heap (acl_main_t * am)
@@ -191,6 +201,77 @@ vl_api_acl_plugin_control_ping_t_handler (vl_api_acl_plugin_control_ping_t *
/* *INDENT-ON* */
}
+static void
+print_clib_warning_and_reset (vlib_main_t * vm, u8 * out0)
+{
+ clib_warning ("%v", out0);
+ vec_reset_length (out0);
+}
+
+static void
+print_cli_and_reset (vlib_main_t * vm, u8 * out0)
+{
+ vlib_cli_output (vm, "%v", out0);
+ vec_reset_length (out0);
+}
+
+typedef void (*acl_vector_print_func_t) (vlib_main_t * vm, u8 * out0);
+
+static void
+acl_print_acl_x (acl_vector_print_func_t vpr, vlib_main_t * vm,
+ acl_main_t * am, int acl_index)
+{
+ acl_rule_t *r;
+ u8 *out0 = format (0, "acl-index %u count %u tag {%s}\n", acl_index,
+ am->acls[acl_index].count, am->acls[acl_index].tag);
+ int j;
+ vpr (vm, out0);
+ for (j = 0; j < am->acls[acl_index].count; j++)
+ {
+ r = &am->acls[acl_index].rules[j];
+ out0 = format (out0, " %4d: %s ", j, r->is_ipv6 ? "ipv6" : "ipv4");
+ out0 = format_acl_action (out0, r->is_permit);
+ out0 = format (out0, " src %U/%d", format_ip46_address, &r->src,
+ r->is_ipv6 ? IP46_TYPE_IP6 : IP46_TYPE_IP4,
+ r->src_prefixlen);
+ out0 =
+ format (out0, " dst %U/%d", format_ip46_address, &r->dst,
+ r->is_ipv6 ? IP46_TYPE_IP6 : IP46_TYPE_IP4, r->dst_prefixlen);
+ out0 = format (out0, " proto %d", r->proto);
+ out0 = format (out0, " sport %d", r->src_port_or_type_first);
+ if (r->src_port_or_type_first != r->src_port_or_type_last)
+ {
+ out0 = format (out0, "-%d", r->src_port_or_type_last);
+ }
+ out0 = format (out0, " dport %d", r->dst_port_or_code_first);
+ if (r->dst_port_or_code_first != r->dst_port_or_code_last)
+ {
+ out0 = format (out0, "-%d", r->dst_port_or_code_last);
+ }
+ if (r->tcp_flags_mask || r->tcp_flags_value)
+ {
+ out0 =
+ format (out0, " tcpflags %d mask %d", r->tcp_flags_value,
+ r->tcp_flags_mask);
+ }
+ out0 = format (out0, "\n");
+ vpr (vm, out0);
+ }
+}
+
+static void
+acl_print_acl (vlib_main_t * vm, acl_main_t * am, int acl_index)
+{
+ acl_print_acl_x (print_cli_and_reset, vm, am, acl_index);
+}
+
+static void
+warning_acl_print_acl (vlib_main_t * vm, acl_main_t * am, int acl_index)
+{
+ acl_print_acl_x (print_clib_warning_and_reset, vm, am, acl_index);
+}
+
+
static int
acl_add_list (u32 count, vl_api_acl_rule_t rules[],
u32 * acl_list_index, u8 * tag)
@@ -201,6 +282,10 @@ acl_add_list (u32 count, vl_api_acl_rule_t rules[],
acl_rule_t *acl_new_rules = 0;
int i;
+ if (am->trace_acl > 255)
+ clib_warning ("API dbg: acl_add_list index %d tag %s", *acl_list_index,
+ tag);
+
if (*acl_list_index != ~0)
{
/* They supplied some number, let's see if this ACL exists */
@@ -264,7 +349,6 @@ acl_add_list (u32 count, vl_api_acl_rule_t rules[],
else
{
a = am->acls + *acl_list_index;
- hash_acl_delete (am, *acl_list_index);
/* Get rid of the old rules */
if (a->rules)
vec_free (a->rules);
@@ -272,82 +356,54 @@ acl_add_list (u32 count, vl_api_acl_rule_t rules[],
a->rules = acl_new_rules;
a->count = count;
memcpy (a->tag, tag, sizeof (a->tag));
- hash_acl_add (am, *acl_list_index);
+ if (am->trace_acl > 255)
+ warning_acl_print_acl (am->vlib_main, am, *acl_list_index);
+ /* notify the lookup contexts about the ACL changes */
+ acl_plugin_lookup_context_notify_acl_change (*acl_list_index);
clib_mem_set_heap (oldheap);
return 0;
}
static int
+acl_is_used_by (u32 acl_index, u32 ** foo_index_vec_by_acl)
+{
+ if (acl_index < vec_len (foo_index_vec_by_acl))
+ {
+ if (vec_len (vec_elt (foo_index_vec_by_acl, acl_index)) > 0)
+ {
+ /* ACL is applied somewhere. */
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int
acl_del_list (u32 acl_list_index)
{
acl_main_t *am = &acl_main;
acl_list_t *a;
- int i, ii;
if (pool_is_free_index (am->acls, acl_list_index))
{
return VNET_API_ERROR_NO_SUCH_ENTRY;
}
-
- if (acl_list_index < vec_len (am->input_sw_if_index_vec_by_acl))
- {
- if (vec_len (vec_elt (am->input_sw_if_index_vec_by_acl, acl_list_index))
- > 0)
- {
- /* ACL is applied somewhere inbound. Refuse to delete */
- return VNET_API_ERROR_ACL_IN_USE_INBOUND;
- }
- }
- if (acl_list_index < vec_len (am->output_sw_if_index_vec_by_acl))
- {
- if (vec_len
- (vec_elt (am->output_sw_if_index_vec_by_acl, acl_list_index)) > 0)
- {
- /* ACL is applied somewhere outbound. Refuse to delete */
- return VNET_API_ERROR_ACL_IN_USE_OUTBOUND;
- }
- }
+ if (acl_is_used_by (acl_list_index, am->input_sw_if_index_vec_by_acl))
+ return VNET_API_ERROR_ACL_IN_USE_INBOUND;
+ if (acl_is_used_by (acl_list_index, am->output_sw_if_index_vec_by_acl))
+ return VNET_API_ERROR_ACL_IN_USE_OUTBOUND;
+  /* lookup contexts cover other cases, not just inbound/outbound, so check that */
+ if (acl_is_used_by (acl_list_index, am->lc_index_vec_by_acl))
+ return VNET_API_ERROR_ACL_IN_USE_BY_LOOKUP_CONTEXT;
void *oldheap = acl_set_heap (am);
- /* delete any references to the ACL */
- for (i = 0; i < vec_len (am->output_acl_vec_by_sw_if_index); i++)
- {
- for (ii = 0; ii < vec_len (am->output_acl_vec_by_sw_if_index[i]);
- /* see body */ )
- {
- if (acl_list_index == am->output_acl_vec_by_sw_if_index[i][ii])
- {
- vec_del1 (am->output_acl_vec_by_sw_if_index[i], ii);
- }
- else
- {
- ii++;
- }
- }
- }
- for (i = 0; i < vec_len (am->input_acl_vec_by_sw_if_index); i++)
- {
- for (ii = 0; ii < vec_len (am->input_acl_vec_by_sw_if_index[i]);
- /* see body */ )
- {
- if (acl_list_index == am->input_acl_vec_by_sw_if_index[i][ii])
- {
- vec_del1 (am->input_acl_vec_by_sw_if_index[i], ii);
- }
- else
- {
- ii++;
- }
- }
- }
- /* delete the hash table data */
- hash_acl_delete (am, acl_list_index);
/* now we can delete the ACL itself */
a = pool_elt_at_index (am->acls, acl_list_index);
if (a->rules)
vec_free (a->rules);
-
pool_put (am->acls, a);
+ /* acl_list_index is now free, notify the lookup contexts */
+ acl_plugin_lookup_context_notify_acl_change (acl_list_index);
clib_mem_set_heap (oldheap);
return 0;
}
@@ -954,10 +1010,11 @@ acl_hook_l2_output_classify (acl_main_t * am, u32 sw_if_index)
rv =
vnet_l2_output_classify_set_tables (sw_if_index, ip4_table_index,
ip6_table_index, dot1q_table_index);
-
+/*
clib_warning
("ACL enabling on interface sw_if_index %d, setting tables to the following: ip4: %d ip6: %d\n",
sw_if_index, ip4_table_index, ip6_table_index);
+*/
if (rv)
{
acl_classify_add_del_table_tiny (cm, ip6_5tuple_mask,
@@ -1000,17 +1057,32 @@ done:
return rv;
}
-int
+static void
+acl_clear_sessions (acl_main_t * am, u32 sw_if_index)
+{
+ void *oldheap = clib_mem_set_heap (am->vlib_main->heap_base);
+ vlib_process_signal_event (am->vlib_main, am->fa_cleaner_node_index,
+ ACL_FA_CLEANER_DELETE_BY_SW_IF_INDEX,
+ sw_if_index);
+ clib_mem_set_heap (oldheap);
+}
+
+
+static int
acl_interface_in_enable_disable (acl_main_t * am, u32 sw_if_index,
int enable_disable)
{
- int rv;
+ int rv = 0;
/* Utterly wrong? */
if (pool_is_free_index (am->vnet_main->interface_main.sw_interfaces,
sw_if_index))
return VNET_API_ERROR_INVALID_SW_IF_INDEX;
+ if (clib_bitmap_get (am->in_acl_on_sw_if_index, sw_if_index) ==
+ enable_disable)
+ return 0;
+
acl_fa_enable_disable (sw_if_index, 1, enable_disable);
if (enable_disable)
@@ -1022,10 +1094,13 @@ acl_interface_in_enable_disable (acl_main_t * am, u32 sw_if_index,
rv = acl_unhook_l2_input_classify (am, sw_if_index);
}
+ am->in_acl_on_sw_if_index =
+ clib_bitmap_set (am->in_acl_on_sw_if_index, sw_if_index, enable_disable);
+
return rv;
}
-int
+static int
acl_interface_out_enable_disable (acl_main_t * am, u32 sw_if_index,
int enable_disable)
{
@@ -1036,6 +1111,10 @@ acl_interface_out_enable_disable (acl_main_t * am, u32 sw_if_index,
sw_if_index))
return VNET_API_ERROR_INVALID_SW_IF_INDEX;
+ if (clib_bitmap_get (am->out_acl_on_sw_if_index, sw_if_index) ==
+ enable_disable)
+ return 0;
+
acl_fa_enable_disable (sw_if_index, 0, enable_disable);
if (enable_disable)
@@ -1047,231 +1126,168 @@ acl_interface_out_enable_disable (acl_main_t * am, u32 sw_if_index,
rv = acl_unhook_l2_output_classify (am, sw_if_index);
}
+ am->out_acl_on_sw_if_index =
+ clib_bitmap_set (am->out_acl_on_sw_if_index, sw_if_index, enable_disable);
+
return rv;
}
static int
+acl_interface_inout_enable_disable (acl_main_t * am, u32 sw_if_index,
+ int is_input, int enable_disable)
+{
+ if (is_input)
+ return acl_interface_in_enable_disable (am, sw_if_index, enable_disable);
+ else
+ return acl_interface_out_enable_disable (am, sw_if_index, enable_disable);
+}
+
+static int
acl_is_not_defined (acl_main_t * am, u32 acl_list_index)
{
return (pool_is_free_index (am->acls, acl_list_index));
}
-
static int
-acl_interface_add_inout_acl (u32 sw_if_index, u8 is_input, u32 acl_list_index)
-{
- acl_main_t *am = &acl_main;
- if (acl_is_not_defined (am, acl_list_index))
- {
- /* ACL is not defined. Can not apply */
- return VNET_API_ERROR_NO_SUCH_ENTRY;
- }
- void *oldheap = acl_set_heap (am);
+acl_interface_set_inout_acl_list (acl_main_t * am, u32 sw_if_index,
+ u8 is_input, u32 * vec_acl_list_index,
+ int *may_clear_sessions)
+{
+ u32 *pacln;
+ uword *seen_acl_bitmap = 0;
+ uword *old_seen_acl_bitmap = 0;
+ uword *change_acl_bitmap = 0;
+ int acln;
+ int rv = 0;
- if (is_input)
- {
- vec_validate (am->input_acl_vec_by_sw_if_index, sw_if_index);
- u32 index = vec_search (am->input_acl_vec_by_sw_if_index[sw_if_index],
- acl_list_index);
- if (index < vec_len (am->input_acl_vec_by_sw_if_index[sw_if_index]))
- {
- clib_warning
- ("ACL %d is already applied inbound on sw_if_index %d (index %d)",
- acl_list_index, sw_if_index, index);
- /* the entry is already there */
- clib_mem_set_heap (oldheap);
- return VNET_API_ERROR_ACL_IN_USE_INBOUND;
- }
- /* if there was no ACL applied before, enable the ACL processing */
- if (vec_len (am->input_acl_vec_by_sw_if_index[sw_if_index]) == 0)
- {
- acl_interface_in_enable_disable (am, sw_if_index, 1);
- }
- vec_add (am->input_acl_vec_by_sw_if_index[sw_if_index], &acl_list_index,
- 1);
- vec_validate (am->input_sw_if_index_vec_by_acl, acl_list_index);
- vec_add (am->input_sw_if_index_vec_by_acl[acl_list_index], &sw_if_index,
- 1);
- }
- else
- {
- vec_validate (am->output_acl_vec_by_sw_if_index, sw_if_index);
+ if (am->trace_acl > 255)
+ clib_warning
+ ("API dbg: acl_interface_set_inout_acl_list: sw_if_index %d is_input %d acl_vec: [%U]",
+ sw_if_index, is_input, format_vec32, vec_acl_list_index, "%d");
- u32 index = vec_search (am->output_acl_vec_by_sw_if_index[sw_if_index],
- acl_list_index);
- if (index < vec_len (am->output_acl_vec_by_sw_if_index[sw_if_index]))
- {
- clib_warning
- ("ACL %d is already applied outbound on sw_if_index %d (index %d)",
- acl_list_index, sw_if_index, index);
- /* the entry is already there */
- clib_mem_set_heap (oldheap);
- return VNET_API_ERROR_ACL_IN_USE_OUTBOUND;
- }
- /* if there was no ACL applied before, enable the ACL processing */
- if (vec_len (am->output_acl_vec_by_sw_if_index[sw_if_index]) == 0)
- {
- acl_interface_out_enable_disable (am, sw_if_index, 1);
- }
- vec_add (am->output_acl_vec_by_sw_if_index[sw_if_index],
- &acl_list_index, 1);
- vec_validate (am->output_sw_if_index_vec_by_acl, acl_list_index);
- vec_add (am->output_sw_if_index_vec_by_acl[acl_list_index],
- &sw_if_index, 1);
+ vec_foreach (pacln, vec_acl_list_index)
+ {
+ if (acl_is_not_defined (am, *pacln))
+ {
+ /* ACL is not defined. Can not apply */
+ clib_warning ("ERROR: ACL %d not defined", *pacln);
+ rv = VNET_API_ERROR_NO_SUCH_ENTRY;
+ goto done;
+ }
+ if (clib_bitmap_get (seen_acl_bitmap, *pacln))
+ {
+ /* ACL being applied twice within the list. error. */
+ clib_warning ("ERROR: ACL %d being applied twice", *pacln);
+ rv = VNET_API_ERROR_ENTRY_ALREADY_EXISTS;
+ goto done;
+ }
+ seen_acl_bitmap = clib_bitmap_set (seen_acl_bitmap, *pacln, 1);
+ }
+
+
+ u32 **pinout_lc_index_by_sw_if_index =
+ is_input ? &am->
+ input_lc_index_by_sw_if_index : &am->output_lc_index_by_sw_if_index;
+
+ u32 ***pinout_acl_vec_by_sw_if_index =
+ is_input ? &am->
+ input_acl_vec_by_sw_if_index : &am->output_acl_vec_by_sw_if_index;
+
+ u32 ***pinout_sw_if_index_vec_by_acl =
+ is_input ? &am->
+ input_sw_if_index_vec_by_acl : &am->output_sw_if_index_vec_by_acl;
+
+ vec_validate ((*pinout_acl_vec_by_sw_if_index), sw_if_index);
+
+ clib_bitmap_validate (old_seen_acl_bitmap, 1);
+
+ vec_foreach (pacln, (*pinout_acl_vec_by_sw_if_index)[sw_if_index])
+ {
+ old_seen_acl_bitmap = clib_bitmap_set (old_seen_acl_bitmap, *pacln, 1);
+ }
+ change_acl_bitmap =
+ clib_bitmap_dup_xor (old_seen_acl_bitmap, seen_acl_bitmap);
+
+ if (am->trace_acl > 255)
+ clib_warning ("bitmaps: old seen %U new seen %U changed %U",
+ format_bitmap_hex, old_seen_acl_bitmap, format_bitmap_hex,
+ seen_acl_bitmap, format_bitmap_hex, change_acl_bitmap);
+
+/* *INDENT-OFF* */
+ clib_bitmap_foreach(acln, change_acl_bitmap, ({
+ if (clib_bitmap_get(old_seen_acl_bitmap, acln)) {
+ /* ACL is being removed. */
+ if (acln < vec_len((*pinout_sw_if_index_vec_by_acl))) {
+ int index = vec_search((*pinout_sw_if_index_vec_by_acl)[acln], sw_if_index);
+ vec_del1((*pinout_sw_if_index_vec_by_acl)[acln], index);
+ }
+ } else {
+ /* ACL is being added. */
+ vec_validate((*pinout_sw_if_index_vec_by_acl), acln);
+ vec_add1((*pinout_sw_if_index_vec_by_acl)[acln], sw_if_index);
}
- clib_mem_set_heap (oldheap);
- return 0;
-}
+ }));
+/* *INDENT-ON* */
+ vec_free ((*pinout_acl_vec_by_sw_if_index)[sw_if_index]);
+ (*pinout_acl_vec_by_sw_if_index)[sw_if_index] =
+ vec_dup (vec_acl_list_index);
-static int
-acl_interface_del_inout_acl (u32 sw_if_index, u8 is_input, u32 acl_list_index)
-{
- acl_main_t *am = &acl_main;
- int i;
- int rv = VNET_API_ERROR_NO_SUCH_ENTRY;
- void *oldheap = acl_set_heap (am);
- if (is_input)
+ /* if no commonalities between the ACL# - then we should definitely clear the sessions */
+ if (may_clear_sessions && *may_clear_sessions
+ && !clib_bitmap_is_zero (change_acl_bitmap))
{
- vec_validate (am->input_acl_vec_by_sw_if_index, sw_if_index);
- for (i = 0; i < vec_len (am->input_acl_vec_by_sw_if_index[sw_if_index]);
- i++)
- {
- if (acl_list_index ==
- am->input_acl_vec_by_sw_if_index[sw_if_index][i])
- {
- vec_del1 (am->input_acl_vec_by_sw_if_index[sw_if_index], i);
- rv = 0;
- break;
- }
- }
-
- if (acl_list_index < vec_len (am->input_sw_if_index_vec_by_acl))
- {
- u32 index =
- vec_search (am->input_sw_if_index_vec_by_acl[acl_list_index],
- sw_if_index);
- if (index <
- vec_len (am->input_sw_if_index_vec_by_acl[acl_list_index]))
- {
- hash_acl_unapply (am, sw_if_index, is_input, acl_list_index);
- vec_del1 (am->input_sw_if_index_vec_by_acl[acl_list_index],
- index);
- }
- }
+ acl_clear_sessions (am, sw_if_index);
+ *may_clear_sessions = 0;
+ }
- /* If there is no more ACLs applied on an interface, disable ACL processing */
- if (0 == vec_len (am->input_acl_vec_by_sw_if_index[sw_if_index]))
+ /*
+ * prepare or delete the lookup context if necessary, and if context exists, set ACL list
+ */
+ vec_validate_init_empty ((*pinout_lc_index_by_sw_if_index), sw_if_index,
+ ~0);
+ if (vec_len (vec_acl_list_index) > 0)
+ {
+ u32 lc_index = (*pinout_lc_index_by_sw_if_index)[sw_if_index];
+ if (~0 == lc_index)
{
- acl_interface_in_enable_disable (am, sw_if_index, 0);
+ lc_index =
+ acl_plugin_get_lookup_context_index (am->interface_acl_user_id,
+ sw_if_index, is_input);
+ ASSERT (lc_index >= 0);
+ (*pinout_lc_index_by_sw_if_index)[sw_if_index] = lc_index;
}
+ acl_plugin_set_acl_vec_for_context (lc_index, vec_acl_list_index);
}
else
{
- vec_validate (am->output_acl_vec_by_sw_if_index, sw_if_index);
- for (i = 0;
- i < vec_len (am->output_acl_vec_by_sw_if_index[sw_if_index]); i++)
+ if (~0 != (*pinout_lc_index_by_sw_if_index)[sw_if_index])
{
- if (acl_list_index ==
- am->output_acl_vec_by_sw_if_index[sw_if_index][i])
- {
- vec_del1 (am->output_acl_vec_by_sw_if_index[sw_if_index], i);
- rv = 0;
- break;
- }
+ acl_plugin_put_lookup_context_index ((*pinout_lc_index_by_sw_if_index)[sw_if_index]);
+ (*pinout_lc_index_by_sw_if_index)[sw_if_index] = ~0;
}
+ }
- if (acl_list_index < vec_len (am->output_sw_if_index_vec_by_acl))
- {
- u32 index =
- vec_search (am->output_sw_if_index_vec_by_acl[acl_list_index],
- sw_if_index);
- if (index <
- vec_len (am->output_sw_if_index_vec_by_acl[acl_list_index]))
- {
- hash_acl_unapply (am, sw_if_index, is_input, acl_list_index);
- vec_del1 (am->output_sw_if_index_vec_by_acl[acl_list_index],
- index);
- }
- }
+ /* ensure ACL processing is enabled/disabled as needed */
+ acl_interface_inout_enable_disable (am, sw_if_index, is_input,
+ vec_len (vec_acl_list_index) > 0);
- /* If there is no more ACLs applied on an interface, disable ACL processing */
- if (0 == vec_len (am->output_acl_vec_by_sw_if_index[sw_if_index]))
- {
- acl_interface_out_enable_disable (am, sw_if_index, 0);
- }
- }
- clib_mem_set_heap (oldheap);
+done:
+ clib_bitmap_free (change_acl_bitmap);
+ clib_bitmap_free (seen_acl_bitmap);
+ clib_bitmap_free (old_seen_acl_bitmap);
return rv;
}
static void
-acl_interface_reset_inout_acls (u32 sw_if_index, u8 is_input)
+acl_interface_reset_inout_acls (u32 sw_if_index, u8 is_input,
+ int *may_clear_sessions)
{
acl_main_t *am = &acl_main;
- int i;
void *oldheap = acl_set_heap (am);
- if (is_input)
- {
- vec_validate (am->input_acl_vec_by_sw_if_index, sw_if_index);
- if (vec_len (am->input_acl_vec_by_sw_if_index[sw_if_index]) > 0)
- {
- acl_interface_in_enable_disable (am, sw_if_index, 0);
- }
-
- for (i = vec_len (am->input_acl_vec_by_sw_if_index[sw_if_index]) - 1;
- i >= 0; i--)
- {
- u32 acl_list_index =
- am->input_acl_vec_by_sw_if_index[sw_if_index][i];
- hash_acl_unapply (am, sw_if_index, is_input, acl_list_index);
- if (acl_list_index < vec_len (am->input_sw_if_index_vec_by_acl))
- {
- u32 index =
- vec_search (am->input_sw_if_index_vec_by_acl[acl_list_index],
- sw_if_index);
- if (index <
- vec_len (am->input_sw_if_index_vec_by_acl[acl_list_index]))
- {
- vec_del1 (am->input_sw_if_index_vec_by_acl[acl_list_index],
- index);
- }
- }
- }
-
- vec_reset_length (am->input_acl_vec_by_sw_if_index[sw_if_index]);
- }
- else
- {
- vec_validate (am->output_acl_vec_by_sw_if_index, sw_if_index);
- if (vec_len (am->output_acl_vec_by_sw_if_index[sw_if_index]) > 0)
- {
- acl_interface_out_enable_disable (am, sw_if_index, 0);
- }
-
- for (i = vec_len (am->output_acl_vec_by_sw_if_index[sw_if_index]) - 1;
- i >= 0; i--)
- {
- u32 acl_list_index =
- am->output_acl_vec_by_sw_if_index[sw_if_index][i];
- hash_acl_unapply (am, sw_if_index, is_input, acl_list_index);
- if (acl_list_index < vec_len (am->output_sw_if_index_vec_by_acl))
- {
- u32 index =
- vec_search (am->output_sw_if_index_vec_by_acl[acl_list_index],
- sw_if_index);
- if (index <
- vec_len (am->output_sw_if_index_vec_by_acl[acl_list_index]))
- {
- vec_del1 (am->output_sw_if_index_vec_by_acl[acl_list_index],
- index);
- }
- }
- }
-
- vec_reset_length (am->output_acl_vec_by_sw_if_index[sw_if_index]);
- }
+ acl_interface_set_inout_acl_list (am, sw_if_index, is_input, 0,
+ may_clear_sessions);
clib_mem_set_heap (oldheap);
}
@@ -1279,23 +1295,61 @@ static int
acl_interface_add_del_inout_acl (u32 sw_if_index, u8 is_add, u8 is_input,
u32 acl_list_index)
{
- int rv = VNET_API_ERROR_NO_SUCH_ENTRY;
+
acl_main_t *am = &acl_main;
+ u32 *acl_vec = 0;
+ int may_clear_sessions = 1;
+
+ int error_already_applied = is_input ? VNET_API_ERROR_ACL_IN_USE_INBOUND
+ : VNET_API_ERROR_ACL_IN_USE_OUTBOUND;
+
+ u32 ***pinout_acl_vec_by_sw_if_index =
+ is_input ? &am->
+ input_acl_vec_by_sw_if_index : &am->output_acl_vec_by_sw_if_index;
+ int rv = 0;
+ void *oldheap = acl_set_heap (am);
+
if (is_add)
{
- rv =
- acl_interface_add_inout_acl (sw_if_index, is_input, acl_list_index);
- if (rv == 0)
+ vec_validate ((*pinout_acl_vec_by_sw_if_index), sw_if_index);
+ u32 index = vec_search ((*pinout_acl_vec_by_sw_if_index)[sw_if_index],
+ acl_list_index);
+
+ if (~0 != index)
{
- hash_acl_apply (am, sw_if_index, is_input, acl_list_index);
+ rv = error_already_applied;
+ goto done;
}
+
+ acl_vec = vec_dup ((*pinout_acl_vec_by_sw_if_index)[sw_if_index]);
+ vec_add1 (acl_vec, acl_list_index);
}
else
{
- hash_acl_unapply (am, sw_if_index, is_input, acl_list_index);
- rv =
- acl_interface_del_inout_acl (sw_if_index, is_input, acl_list_index);
+ if (sw_if_index > vec_len (*pinout_acl_vec_by_sw_if_index))
+ {
+ rv = VNET_API_ERROR_NO_SUCH_ENTRY;
+ goto done;
+ }
+
+ u32 index = vec_search ((*pinout_acl_vec_by_sw_if_index)[sw_if_index],
+ acl_list_index);
+
+ if (~0 == index)
+ {
+ rv = VNET_API_ERROR_NO_SUCH_ENTRY;
+ goto done;
+ }
+
+ acl_vec = vec_dup ((*pinout_acl_vec_by_sw_if_index)[sw_if_index]);
+ vec_del1 (acl_vec, index);
}
+
+ rv = acl_interface_set_inout_acl_list (am, sw_if_index, is_input, acl_vec,
+ &may_clear_sessions);
+done:
+ vec_free (acl_vec);
+ clib_mem_set_heap (oldheap);
return rv;
}
@@ -2331,9 +2385,7 @@ static void
rv = VNET_API_ERROR_INVALID_SW_IF_INDEX;
else
{
- acl_interface_reset_inout_acls (sw_if_index, 0);
- acl_interface_reset_inout_acls (sw_if_index, 1);
-
+ int may_clear_sessions = 1;
for (i = 0; i < mp->count; i++)
{
if (acl_is_not_defined (am, ntohl (mp->acls[i])))
@@ -2344,12 +2396,26 @@ static void
}
if (0 == rv)
{
+ void *oldheap = acl_set_heap (am);
+
+ u32 *in_acl_vec = 0;
+ u32 *out_acl_vec = 0;
for (i = 0; i < mp->count; i++)
- {
- acl_interface_add_del_inout_acl (sw_if_index, 1,
- (i < mp->n_input),
- ntohl (mp->acls[i]));
- }
+ if (i < mp->n_input)
+ vec_add1 (in_acl_vec, clib_net_to_host_u32 (mp->acls[i]));
+ else
+ vec_add1 (out_acl_vec, clib_net_to_host_u32 (mp->acls[i]));
+
+ rv =
+ acl_interface_set_inout_acl_list (am, sw_if_index, 0, out_acl_vec,
+ &may_clear_sessions);
+ rv = rv
+ || acl_interface_set_inout_acl_list (am, sw_if_index, 1,
+ in_acl_vec,
+ &may_clear_sessions);
+ vec_free (in_acl_vec);
+ vec_free (out_acl_vec);
+ clib_mem_set_heap (oldheap);
}
}
@@ -2469,6 +2535,8 @@ send_acl_interface_list_details (acl_main_t * am,
vec_validate (am->input_acl_vec_by_sw_if_index, sw_if_index);
vec_validate (am->output_acl_vec_by_sw_if_index, sw_if_index);
+ clib_mem_set_heap (oldheap);
+
n_input = vec_len (am->input_acl_vec_by_sw_if_index[sw_if_index]);
n_output = vec_len (am->output_acl_vec_by_sw_if_index[sw_if_index]);
count = n_input + n_output;
@@ -2495,7 +2563,6 @@ send_acl_interface_list_details (acl_main_t * am,
mp->acls[n_input + i] =
htonl (am->output_acl_vec_by_sw_if_index[sw_if_index][i]);
}
- clib_mem_set_heap (oldheap);
vl_api_send_msg (reg, (u8 *) mp);
}
@@ -3046,13 +3113,14 @@ acl_sw_interface_add_del (vnet_main_t * vnm, u32 sw_if_index, u32 is_add)
}
if (0 == is_add)
{
+ int may_clear_sessions = 1;
vlib_process_signal_event (am->vlib_main, am->fa_cleaner_node_index,
ACL_FA_CLEANER_DELETE_BY_SW_IF_INDEX,
sw_if_index);
/* also unapply any ACLs in case the users did not do so. */
macip_acl_interface_del_acl (am, sw_if_index);
- acl_interface_reset_inout_acls (sw_if_index, 0);
- acl_interface_reset_inout_acls (sw_if_index, 1);
+ acl_interface_reset_inout_acls (sw_if_index, 0, &may_clear_sessions);
+ acl_interface_reset_inout_acls (sw_if_index, 1, &may_clear_sessions);
}
return 0;
}
@@ -3090,6 +3158,21 @@ acl_set_aclplugin_fn (vlib_main_t * vm,
am->l4_match_nonfirst_fragment = (val != 0);
goto done;
}
+ if (unformat (input, "event-trace"))
+ {
+ if (!unformat (input, "%u", &val))
+ {
+ error = clib_error_return (0,
+ "expecting trace level, got `%U`",
+ format_unformat_error, input);
+ goto done;
+ }
+ else
+ {
+ am->trace_acl = val;
+ goto done;
+ }
+ }
if (unformat (input, "heap"))
{
if (unformat (input, "main"))
@@ -3103,9 +3186,9 @@ acl_set_aclplugin_fn (vlib_main_t * vm,
else if (unformat (input, "hash"))
{
if (unformat (input, "validate %u", &val))
- acl_plugin_hash_acl_set_validate_heap (am, val);
+ acl_plugin_hash_acl_set_validate_heap (val);
else if (unformat (input, "trace %u", &val))
- acl_plugin_hash_acl_set_trace_heap (am, val);
+ acl_plugin_hash_acl_set_trace_heap (val);
goto done;
}
goto done;
@@ -3345,50 +3428,6 @@ acl_show_aclplugin_macip_interface_fn (vlib_main_t * vm,
return error;
}
-#define PRINT_AND_RESET(vm, out0) do { vlib_cli_output(vm, "%v", out0); vec_reset_length(out0); } while(0)
-static void
-acl_print_acl (vlib_main_t * vm, acl_main_t * am, int acl_index)
-{
- acl_rule_t *r;
- u8 *out0 = format (0, "acl-index %u count %u tag {%s}\n", acl_index,
- am->acls[acl_index].count, am->acls[acl_index].tag);
- int j;
- PRINT_AND_RESET (vm, out0);
- for (j = 0; j < am->acls[acl_index].count; j++)
- {
- r = &am->acls[acl_index].rules[j];
- out0 = format (out0, " %4d: %s ", j, r->is_ipv6 ? "ipv6" : "ipv4");
- out0 = format_acl_action (out0, r->is_permit);
- out0 = format (out0, " src %U/%d", format_ip46_address, &r->src,
- r->is_ipv6 ? IP46_TYPE_IP6 : IP46_TYPE_IP4,
- r->src_prefixlen);
- out0 =
- format (out0, " dst %U/%d", format_ip46_address, &r->dst,
- r->is_ipv6 ? IP46_TYPE_IP6 : IP46_TYPE_IP4, r->dst_prefixlen);
- out0 = format (out0, " proto %d", r->proto);
- out0 = format (out0, " sport %d", r->src_port_or_type_first);
- if (r->src_port_or_type_first != r->src_port_or_type_last)
- {
- out0 = format (out0, "-%d", r->src_port_or_type_last);
- }
- out0 = format (out0, " dport %d", r->dst_port_or_code_first);
- if (r->dst_port_or_code_first != r->dst_port_or_code_last)
- {
- out0 = format (out0, "-%d", r->dst_port_or_code_last);
- }
- if (r->tcp_flags_mask || r->tcp_flags_value)
- {
- out0 =
- format (out0, " tcpflags %d mask %d", r->tcp_flags_value,
- r->tcp_flags_mask);
- }
- out0 = format (out0, "\n");
- PRINT_AND_RESET (vm, out0);
- }
-}
-
-#undef PRINT_AND_RESET
-
static void
acl_plugin_show_acl (acl_main_t * am, u32 acl_index)
{
@@ -3420,6 +3459,11 @@ acl_plugin_show_acl (acl_main_t * am, u32 acl_index)
format_vec32, am->output_sw_if_index_vec_by_acl[i],
"%d");
}
+ if (i < vec_len (am->lc_index_vec_by_acl))
+ {
+ vlib_cli_output (vm, " used in lookup context index: %U\n",
+ format_vec32, am->lc_index_vec_by_acl[i], "%d");
+ }
}
}
@@ -3437,8 +3481,38 @@ acl_show_aclplugin_acl_fn (vlib_main_t * vm,
return error;
}
+static clib_error_t *
+acl_show_aclplugin_lookup_context_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ clib_error_t *error = 0;
+
+ u32 lc_index = ~0;
+ (void) unformat (input, "index %u", &lc_index);
+
+ acl_plugin_show_lookup_context (lc_index);
+ return error;
+}
+
+static clib_error_t *
+acl_show_aclplugin_lookup_user_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ clib_error_t *error = 0;
+
+ u32 lc_index = ~0;
+ (void) unformat (input, "index %u", &lc_index);
+
+ acl_plugin_show_lookup_user (lc_index);
+ return error;
+}
+
+
static void
-acl_plugin_show_interface (acl_main_t * am, u32 sw_if_index, int show_acl)
+acl_plugin_show_interface (acl_main_t * am, u32 sw_if_index, int show_acl,
+ int detail)
{
vlib_main_t *vm = am->vlib_main;
u32 swi;
@@ -3496,6 +3570,16 @@ acl_plugin_show_interface (acl_main_t * am, u32 sw_if_index, int show_acl)
vlib_cli_output (vm, "\n");
}
}
+ if (detail && (swi < vec_len (am->input_lc_index_by_sw_if_index)))
+ {
+ vlib_cli_output (vm, " input lookup context index: %d",
+ am->input_lc_index_by_sw_if_index[swi]);
+ }
+ if (detail && (swi < vec_len (am->output_lc_index_by_sw_if_index)))
+ {
+ vlib_cli_output (vm, " output lookup context index: %d",
+ am->output_lc_index_by_sw_if_index[swi]);
+ }
}
}
@@ -3531,8 +3615,9 @@ acl_show_aclplugin_interface_fn (vlib_main_t * vm,
u32 sw_if_index = ~0;
(void) unformat (input, "sw_if_index %u", &sw_if_index);
int show_acl = unformat (input, "acl");
+ int detail = unformat (input, "detail");
- acl_plugin_show_interface (am, sw_if_index, show_acl);
+ acl_plugin_show_interface (am, sw_if_index, show_acl, detail);
return error;
}
@@ -3721,148 +3806,12 @@ acl_show_aclplugin_sessions_fn (vlib_main_t * vm,
return error;
}
-static void
-acl_plugin_show_tables_mask_type (acl_main_t * am)
-{
- vlib_main_t *vm = am->vlib_main;
- ace_mask_type_entry_t *mte;
-
- vlib_cli_output (vm, "Mask-type entries:");
- /* *INDENT-OFF* */
- pool_foreach(mte, am->ace_mask_type_pool,
- ({
- vlib_cli_output(vm, " %3d: %016llx %016llx %016llx %016llx %016llx %016llx refcount %d",
- mte - am->ace_mask_type_pool,
- mte->mask.kv.key[0], mte->mask.kv.key[1], mte->mask.kv.key[2],
- mte->mask.kv.key[3], mte->mask.kv.key[4], mte->mask.kv.value, mte->refcount);
- }));
- /* *INDENT-ON* */
-}
-
-static void
-acl_plugin_show_tables_acl_hash_info (acl_main_t * am, u32 acl_index)
-{
- vlib_main_t *vm = am->vlib_main;
- u32 i, j;
- u64 *m;
- vlib_cli_output (vm, "Mask-ready ACL representations\n");
- for (i = 0; i < vec_len (am->hash_acl_infos); i++)
- {
- if ((acl_index != ~0) && (acl_index != i))
- {
- continue;
- }
- hash_acl_info_t *ha = &am->hash_acl_infos[i];
- vlib_cli_output (vm, "acl-index %u bitmask-ready layout\n", i);
- vlib_cli_output (vm, " applied inbound on sw_if_index list: %U\n",
- format_vec32, ha->inbound_sw_if_index_list, "%d");
- vlib_cli_output (vm, " applied outbound on sw_if_index list: %U\n",
- format_vec32, ha->outbound_sw_if_index_list, "%d");
- vlib_cli_output (vm, " mask type index bitmap: %U\n",
- format_bitmap_hex, ha->mask_type_index_bitmap);
- for (j = 0; j < vec_len (ha->rules); j++)
- {
- hash_ace_info_t *pa = &ha->rules[j];
- m = (u64 *) & pa->match;
- vlib_cli_output (vm,
- " %4d: %016llx %016llx %016llx %016llx %016llx %016llx mask index %d acl %d rule %d action %d src/dst portrange not ^2: %d,%d\n",
- j, m[0], m[1], m[2], m[3], m[4], m[5],
- pa->mask_type_index, pa->acl_index, pa->ace_index,
- pa->action, pa->src_portrange_not_powerof2,
- pa->dst_portrange_not_powerof2);
- }
- }
-}
-
-static void
-acl_plugin_print_pae (vlib_main_t * vm, int j, applied_hash_ace_entry_t * pae)
-{
- vlib_cli_output (vm,
- " %4d: acl %d rule %d action %d bitmask-ready rule %d next %d prev %d tail %d hitcount %lld",
- j, pae->acl_index, pae->ace_index, pae->action,
- pae->hash_ace_info_index, pae->next_applied_entry_index,
- pae->prev_applied_entry_index,
- pae->tail_applied_entry_index, pae->hitcount);
-}
-
-static void
-acl_plugin_show_tables_applied_info (acl_main_t * am, u32 sw_if_index)
-{
- vlib_main_t *vm = am->vlib_main;
- u32 swi, j;
- vlib_cli_output (vm, "Applied lookup entries for interfaces");
-
- for (swi = 0;
- (swi < vec_len (am->input_applied_hash_acl_info_by_sw_if_index))
- || (swi < vec_len (am->output_applied_hash_acl_info_by_sw_if_index))
- || (swi < vec_len (am->input_hash_entry_vec_by_sw_if_index))
- || (swi < vec_len (am->output_hash_entry_vec_by_sw_if_index)); swi++)
- {
- if ((sw_if_index != ~0) && (sw_if_index != swi))
- {
- continue;
- }
- vlib_cli_output (vm, "sw_if_index %d:", swi);
- if (swi < vec_len (am->input_applied_hash_acl_info_by_sw_if_index))
- {
- applied_hash_acl_info_t *pal =
- &am->input_applied_hash_acl_info_by_sw_if_index[swi];
- vlib_cli_output (vm, " input lookup mask_type_index_bitmap: %U",
- format_bitmap_hex, pal->mask_type_index_bitmap);
- vlib_cli_output (vm, " input applied acls: %U", format_vec32,
- pal->applied_acls, "%d");
- }
- if (swi < vec_len (am->input_hash_entry_vec_by_sw_if_index))
- {
- vlib_cli_output (vm, " input lookup applied entries:");
- for (j = 0;
- j < vec_len (am->input_hash_entry_vec_by_sw_if_index[swi]);
- j++)
- {
- acl_plugin_print_pae (vm, j,
- &am->input_hash_entry_vec_by_sw_if_index
- [swi][j]);
- }
- }
-
- if (swi < vec_len (am->output_applied_hash_acl_info_by_sw_if_index))
- {
- applied_hash_acl_info_t *pal =
- &am->output_applied_hash_acl_info_by_sw_if_index[swi];
- vlib_cli_output (vm, " output lookup mask_type_index_bitmap: %U",
- format_bitmap_hex, pal->mask_type_index_bitmap);
- vlib_cli_output (vm, " output applied acls: %U", format_vec32,
- pal->applied_acls, "%d");
- }
- if (swi < vec_len (am->output_hash_entry_vec_by_sw_if_index))
- {
- vlib_cli_output (vm, " output lookup applied entries:");
- for (j = 0;
- j < vec_len (am->output_hash_entry_vec_by_sw_if_index[swi]);
- j++)
- {
- acl_plugin_print_pae (vm, j,
- &am->output_hash_entry_vec_by_sw_if_index
- [swi][j]);
- }
- }
- }
-}
-
-static void
-acl_plugin_show_tables_bihash (acl_main_t * am, u32 show_bihash_verbose)
-{
- vlib_main_t *vm = am->vlib_main;
- show_hash_acl_hash (vm, am, show_bihash_verbose);
-}
-
static clib_error_t *
acl_show_aclplugin_tables_fn (vlib_main_t * vm,
unformat_input_t * input,
vlib_cli_command_t * cmd)
{
clib_error_t *error = 0;
- acl_main_t *am = &acl_main;
u32 acl_index = ~0;
u32 sw_if_index = ~0;
@@ -3905,13 +3854,13 @@ acl_show_aclplugin_tables_fn (vlib_main_t * vm,
show_bihash = 1;
}
if (show_mask_type)
- acl_plugin_show_tables_mask_type (am);
+ acl_plugin_show_tables_mask_type ();
if (show_acl_hash_info)
- acl_plugin_show_tables_acl_hash_info (am, acl_index);
+ acl_plugin_show_tables_acl_hash_info (acl_index);
if (show_applied_info)
- acl_plugin_show_tables_applied_info (am, sw_if_index);
+ acl_plugin_show_tables_applied_info (sw_if_index);
if (show_bihash)
- acl_plugin_show_tables_bihash (am, show_bihash_verbose);
+ acl_plugin_show_tables_bihash (show_bihash_verbose);
return error;
}
@@ -3940,6 +3889,18 @@ VLIB_CLI_COMMAND (aclplugin_show_acl_command, static) = {
.function = acl_show_aclplugin_acl_fn,
};
+VLIB_CLI_COMMAND (aclplugin_show_lookup_context_command, static) = {
+ .path = "show acl-plugin lookup context",
+ .short_help = "show acl-plugin lookup context [index N]",
+ .function = acl_show_aclplugin_lookup_context_fn,
+};
+
+VLIB_CLI_COMMAND (aclplugin_show_lookup_user_command, static) = {
+ .path = "show acl-plugin lookup user",
+ .short_help = "show acl-plugin lookup user [index N]",
+ .function = acl_show_aclplugin_lookup_user_fn,
+};
+
VLIB_CLI_COMMAND (aclplugin_show_decode_5tuple_command, static) = {
.path = "show acl-plugin decode 5tuple",
.short_help = "show acl-plugin decode 5tuple XXXX XXXX XXXX XXXX XXXX XXXX",
@@ -4113,6 +4074,10 @@ acl_init (vlib_main_t * vm)
/* use the new fancy hash-based matching */
am->use_hash_acl_matching = 1;
+ am->interface_acl_user_id =
+ acl_plugin_register_user_module ("interface ACL", "sw_if_index",
+ "is_input");
+
return error;
}
diff --git a/src/plugins/acl/acl.h b/src/plugins/acl/acl.h
index 555358c4f66..9d66b7f1721 100644
--- a/src/plugins/acl/acl.h
+++ b/src/plugins/acl/acl.h
@@ -29,6 +29,7 @@
#include "fa_node.h"
#include "hash_lookup_types.h"
+#include "lookup_context.h"
#define ACL_PLUGIN_VERSION_MAJOR 1
#define ACL_PLUGIN_VERSION_MINOR 3
@@ -140,6 +141,11 @@ typedef struct {
/* API message ID base */
u16 msg_id_base;
+ /* The pool of users of ACL lookup contexts */
+ acl_lookup_context_user_t *acl_users;
+ /* The pool of ACL lookup contexts */
+ acl_lookup_context_t *acl_lookup_contexts;
+
acl_list_t *acls; /* Pool of ACLs */
hash_acl_info_t *hash_acl_infos; /* corresponding hash matching housekeeping info */
clib_bihash_48_8_t acl_lookup_hash; /* ACL lookup hash table. */
@@ -150,10 +156,20 @@ typedef struct {
void *hash_lookup_mheap;
u32 hash_lookup_mheap_size;
int acl_lookup_hash_initialized;
+/*
applied_hash_ace_entry_t **input_hash_entry_vec_by_sw_if_index;
applied_hash_ace_entry_t **output_hash_entry_vec_by_sw_if_index;
applied_hash_acl_info_t *input_applied_hash_acl_info_by_sw_if_index;
applied_hash_acl_info_t *output_applied_hash_acl_info_by_sw_if_index;
+*/
+ applied_hash_ace_entry_t **hash_entry_vec_by_lc_index;
+ applied_hash_acl_info_t *applied_hash_acl_info_by_lc_index;
+
+ /* Corresponding lookup context indices for in/out lookups per sw_if_index */
+ u32 *input_lc_index_by_sw_if_index;
+ u32 *output_lc_index_by_sw_if_index;
+ /* context user id for interface ACLs */
+ u32 interface_acl_user_id;
macip_acl_list_t *macip_acls; /* Pool of MAC-IP ACLs */
@@ -165,6 +181,13 @@ typedef struct {
u32 **input_sw_if_index_vec_by_acl;
u32 **output_sw_if_index_vec_by_acl;
+ /* bitmaps 1=sw_if_index has in/out ACL processing enabled */
+ uword *in_acl_on_sw_if_index;
+ uword *out_acl_on_sw_if_index;
+
+ /* lookup contexts where a given ACL is used */
+ u32 **lc_index_vec_by_acl;
+
/* Total count of interface+direction pairs enabled */
u32 fa_total_enabled_count;
@@ -239,6 +262,7 @@ typedef struct {
u64 fa_conn_table_max_entries;
int trace_sessions;
+ int trace_acl;
/*
* If the cleaner has to delete more than this number
diff --git a/src/plugins/acl/acl_lookup_context.md b/src/plugins/acl/acl_lookup_context.md
new file mode 100644
index 00000000000..c049aae5f19
--- /dev/null
+++ b/src/plugins/acl/acl_lookup_context.md
@@ -0,0 +1,113 @@
+Lookup contexts aka "ACL as a service" {#acl_lookup_context}
+======================================
+
+The initial implementation of the ACL plugin had tightly tied the policy (L3-L4) ACLs
+to ingress/egress processing on an interface.
+
+However, some uses outside of pure traffic control have appeared, for example,
+ACL-based forwarding, etc. Also, improved algorithms of the ACL lookup
+could benefit from the more abstract representation, not coupled to the interfaces.
+
+This describes a way to accommodate these use cases by generalizing the ACL
+lookups into "ACL lookup contexts", not tied to specific interfaces, usable
+by other portions of the code by utilizing the exports.h header file,
+which provides the necessary interface.
+
+
+Why "lookup contexts" and not "match me an ACL#" ?
+================================================
+
+The first reason is the logical grouping of multiple ACLs.
+
+The interface matching code currently allows for matching multiple ACLs
+in a 'first-match' fashion. Some other use cases also fall into a similar
+pattern: they attempt to match a sequence of ACLs, and the first matched ACL
+determines what the outcome is, e.g. where to forward traffic. Thus,
+a match never happens on an ACL in isolation, but always on a group of
+ACLs.
+
+The second reason is potential optimizations in matching.
+
+A naive match on series of ACLs each represented as a vector of ACEs
+does not care about the API level - it could be "match one ACL", or
+"match the set of ACLs" - there will be just a simple loop iterating over
+the ACLs to match, returning the first match. Be it in the ACL code or
+in the user code.
+
+However, for more involved lookup methods, providing a more high-level
+interface of matching over the entire group of ACLs allows for future
+improvements in the algorithms, delivered at once to all the users
+of the API.
+
+What is a "lookup context" ?
+============================
+
+An ACL lookup context is an entity that groups the set of ACL#s
+together for the purposes of a first-match lookup, and may store
+additional internal information needed to optimize the lookups
+for that particular vector of ACLs.
+
+Using ACL contexts in your code
+===============================
+
+In order to use the ACL lookup contexts, you need to include
+plugins/acl/exports.h into your code. This header includes
+all the necessary dependencies required, as well as
+the actual "meat" include file containing the necessary
+definitions - plugins/acl/public_inlines.h
+
+As you probably will invoke this code from another plugin,
+the non-inline function calls are implemented via function pointers,
+which you need to initialize by calling acl_plugin_exports_init(), which,
+if everything succeeds, returns 0 - else it will return clib_error_t with
+more information about what went wrong.
+
+When you have initialized the symbols, you also need to register yourself
+as a user of the ACL lookups - this allows to track the ACL lookup context
+ownership, as well as make the debug show outputs more user-friendly.
+
+To do that, call acl_plugin_register_user_module(caller_module_string, val1_label, val2_label) -
+and record the returned value. This will be the first parameter that you pass to create a new
+lookup context. The passed strings must be static, and are used as descriptions for the ACL
+contexts themselves, as well as labels for up to two user-supplied u32 labels, used to
+differentiate the lookup contexts for the debugging purposes.
+
+Creating a new context is done by calling acl_plugin_get_lookup_context_index(user_id, val1, val2).
+The first argument is your "user" ID obtained in a registration call earlier, the other two
+arguments are u32s with semantics that you designate. They are used purely for debugging purposes
+in the "show acl lookup context" command.
+
+To set the vector of ACL numbers to be looked up within the context, use the function
+acl_plugin_set_acl_vec_for_context(lc_index, acl_list). The first parameter specifies the context
+that you have created, the second parameter is a vector of u32s, each u32 being the index of the ACL
+which we should be looking up within this context. The command is idempotent, i.e.
+it unapplies the previously applied list of ACLs, and then sets the new list of ACLs.
+
+Subsequent ACL updates for the already applied ACLs will cause the re-application
+on an as-needed basis. Note, that the ACL application is potentially a relatively costly operation,
+so it is only expected that these changes will be done in the control plane, NOT in the datapath.
+
+The matching within the context is done using two functions - acl_plugin_fill_5tuple() and
+acl_plugin_match_5tuple() and their corresponding inline versions, named acl_plugin_fill_5tuple_inline()
+and acl_plugin_match_5tuple_inline(). The inline and non-inline versions have the equivalent functionality,
+in that the non-inline version calls the inline version. These two variants are provided
+for debugging/maintenance reasons.
+
+When you no longer need a particular context, you can return the allocated resources by calling
+acl_plugin_put_lookup_context_index() to mark it as free. The lookup structures associated with
+the vector of ACLs set for the lookup are cleaned up automatically. However, the ACLs themselves
+are not deleted and are available for subsequent reuse by other lookup contexts if needed.
+
+Debug CLIs
+==========
+
+To see the state of the ACL lookup contexts, you can issue "show acl-plugin lookup user" to see
+all of the users which registered for the usage of the ACL plugin lookup contexts,
+and "show acl-plugin lookup context" to show the actual contexts created. You will notice
+that the latter command uses the values supplied during the module registration in order to
+make the output more friendly.
+
+The "show acl-plugin acl" and "show acl-plugin interface" commands have also acquired the
+notion of lookup context, but there it is used from the client perspective, since
+with this change the interface ACL lookup itself is a user of ACL lookup contexts.
+
diff --git a/src/plugins/acl/elog_acl_trace.h b/src/plugins/acl/elog_acl_trace.h
new file mode 100644
index 00000000000..0c4f68f7b0f
--- /dev/null
+++ b/src/plugins/acl/elog_acl_trace.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _ELOG_ACL_TRACE_H_
+#define _ELOG_ACL_TRACE_H_
+
+
+/* use like: elog_acl_cond_trace_X1(am, (x < 0), "foobar: %d", "i4", int32_value); */
+
+#define elog_acl_cond_trace_X1(am, trace_cond, acl_elog_trace_format_label, acl_elog_trace_format_args, acl_elog_val1) \
+do { \
+ if (trace_cond) { \
+ CLIB_UNUSED(struct { u8 available_space[18 - sizeof(acl_elog_val1)]; } *static_check); \
+ u16 thread_index = os_get_thread_index (); \
+ vlib_worker_thread_t * w = vlib_worker_threads + thread_index; \
+ ELOG_TYPE_DECLARE (e) = \
+ { \
+ .format = "(%02d) " acl_elog_trace_format_label, \
+ .format_args = "i2" acl_elog_trace_format_args, \
+ }; \
+ CLIB_PACKED(struct \
+ { \
+ u16 thread; \
+ typeof(acl_elog_val1) val1; \
+ }) *ed; \
+ ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
+ ed->thread = thread_index; \
+ ed->val1 = acl_elog_val1; \
+ } \
+} while (0)
+
+
+/* use like: elog_acl_cond_trace_X2(am, (x<0), "foobar: %d some u64: %lu", "i4i8", int32_value, int64_value); */
+
+#define elog_acl_cond_trace_X2(am, trace_cond, acl_elog_trace_format_label, acl_elog_trace_format_args, \
+ acl_elog_val1, acl_elog_val2) \
+do { \
+ if (trace_cond) { \
+ CLIB_UNUSED(struct { u8 available_space[18 - sizeof(acl_elog_val1) - sizeof(acl_elog_val2)]; } *static_check); \
+ u16 thread_index = os_get_thread_index (); \
+ vlib_worker_thread_t * w = vlib_worker_threads + thread_index; \
+ ELOG_TYPE_DECLARE (e) = \
+ { \
+ .format = "(%02d) " acl_elog_trace_format_label, \
+ .format_args = "i2" acl_elog_trace_format_args, \
+ }; \
+ CLIB_PACKED(struct \
+ { \
+ u16 thread; \
+ typeof(acl_elog_val1) val1; \
+ typeof(acl_elog_val2) val2; \
+ }) *ed; \
+ ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
+ ed->thread = thread_index; \
+ ed->val1 = acl_elog_val1; \
+ ed->val2 = acl_elog_val2; \
+ } \
+} while (0)
+
+
+/* use like: elog_acl_cond_trace_X3(am, (x<0), "foobar: %d some u64 %lu baz: %d", "i4i8i4", int32_value, u64_value, int_value); */
+
+#define elog_acl_cond_trace_X3(am, trace_cond, acl_elog_trace_format_label, acl_elog_trace_format_args, acl_elog_val1, \
+ acl_elog_val2, acl_elog_val3) \
+do { \
+ if (trace_cond) { \
+ CLIB_UNUSED(struct { u8 available_space[18 - sizeof(acl_elog_val1) - sizeof(acl_elog_val2) \
+ - sizeof(acl_elog_val3)]; } *static_check); \
+ u16 thread_index = os_get_thread_index (); \
+ vlib_worker_thread_t * w = vlib_worker_threads + thread_index; \
+ ELOG_TYPE_DECLARE (e) = \
+ { \
+ .format = "(%02d) " acl_elog_trace_format_label, \
+ .format_args = "i2" acl_elog_trace_format_args, \
+ }; \
+ CLIB_PACKED(struct \
+ { \
+ u16 thread; \
+ typeof(acl_elog_val1) val1; \
+ typeof(acl_elog_val2) val2; \
+ typeof(acl_elog_val3) val3; \
+ }) *ed; \
+ ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
+ ed->thread = thread_index; \
+ ed->val1 = acl_elog_val1; \
+ ed->val2 = acl_elog_val2; \
+ ed->val3 = acl_elog_val3; \
+ } \
+} while (0)
+
+
+/* use like: elog_acl_cond_trace_X4(am, (x<0), "foobar: %d some int %d baz: %d bar: %d", "i4i4i4i4", int32_value, int32_value2, int_value, int_value); */
+
+#define elog_acl_cond_trace_X4(am, trace_cond, acl_elog_trace_format_label, acl_elog_trace_format_args, acl_elog_val1, \
+ acl_elog_val2, acl_elog_val3, acl_elog_val4) \
+do { \
+ if (trace_cond) { \
+ CLIB_UNUSED(struct { u8 available_space[18 - sizeof(acl_elog_val1) - sizeof(acl_elog_val2) \
+ - sizeof(acl_elog_val3) -sizeof(acl_elog_val4)]; } *static_check); \
+ u16 thread_index = os_get_thread_index (); \
+ vlib_worker_thread_t * w = vlib_worker_threads + thread_index; \
+ ELOG_TYPE_DECLARE (e) = \
+ { \
+ .format = "(%02d) " acl_elog_trace_format_label, \
+ .format_args = "i2" acl_elog_trace_format_args, \
+ }; \
+ CLIB_PACKED(struct \
+ { \
+ u16 thread; \
+ typeof(acl_elog_val1) val1; \
+ typeof(acl_elog_val2) val2; \
+ typeof(acl_elog_val3) val3; \
+ typeof(acl_elog_val4) val4; \
+ }) *ed; \
+ ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
+ ed->thread = thread_index; \
+ ed->val1 = acl_elog_val1; \
+ ed->val2 = acl_elog_val2; \
+ ed->val3 = acl_elog_val3; \
+ ed->val4 = acl_elog_val4; \
+ } \
+} while (0)
+
+
+#endif
diff --git a/src/plugins/acl/exports.h b/src/plugins/acl/exports.h
new file mode 100644
index 00000000000..d904ad3bbae
--- /dev/null
+++ b/src/plugins/acl/exports.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef included_acl_exports_h
+#define included_acl_exports_h
+
+/*
+ * This file contains the declarations for external consumption,
+ * along with the necessary dependent includes.
+ */
+
+#define ACL_PLUGIN_EXTERNAL_EXPORTS
+
+#include <vlib/unix/plugin.h>
+
+#include <plugins/acl/acl.h>
+#include <plugins/acl/fa_node.h>
+#include <plugins/acl/public_inlines.h>
+
+#endif /* included_acl_exports_h */
diff --git a/src/plugins/acl/fa_node.c b/src/plugins/acl/fa_node.c
index c8e3d2d3a3e..d29576a4bce 100644
--- a/src/plugins/acl/fa_node.c
+++ b/src/plugins/acl/fa_node.c
@@ -19,20 +19,25 @@
#include <vnet/vnet.h>
#include <vnet/pg/pg.h>
#include <vppinfra/error.h>
+
+
#include <acl/acl.h>
-#include <vppinfra/bihash_40_8.h>
+#include <vnet/ip/icmp46_packet.h>
+
+#include <plugins/acl/fa_node.h>
+#include <plugins/acl/acl.h>
+#include <plugins/acl/lookup_context.h>
+#include <plugins/acl/public_inlines.h>
+#include <vppinfra/bihash_40_8.h>
#include <vppinfra/bihash_template.h>
#include <vppinfra/bihash_template.c>
-#include <vnet/ip/icmp46_packet.h>
-
-#include "fa_node.h"
-#include "hash_lookup.h"
typedef struct
{
u32 next_index;
u32 sw_if_index;
+ u32 lc_index;
u32 match_acl_in_index;
u32 match_rule_index;
u64 packet_info[6];
@@ -76,10 +81,9 @@ format_fa_5tuple (u8 * s, va_list * args)
{
fa_5tuple_t *p5t = va_arg (*args, fa_5tuple_t *);
- return format(s, "%s sw_if_index %d (lsb16 %d) l3 %s%s %U -> %U"
+ return format(s, "lc_index %d (lsb16 of sw_if_index %d) l3 %s%s %U -> %U"
" l4 proto %d l4_valid %d port %d -> %d tcp flags (%s) %02x rsvd %x",
- p5t->pkt.is_input ? "input" : "output",
- p5t->pkt.sw_if_index, p5t->l4.lsb_of_sw_if_index, p5t->pkt.is_ip6 ? "ip6" : "ip4",
+ p5t->pkt.lc_index, p5t->l4.lsb_of_sw_if_index, p5t->pkt.is_ip6 ? "ip6" : "ip4",
p5t->pkt.is_nonfirst_fragment ? " non-initial fragment" : "",
format_ip46_address, &p5t->addr[0], p5t->pkt.is_ip6 ? IP46_TYPE_IP6 : IP46_TYPE_IP4,
format_ip46_address, &p5t->addr[1], p5t->pkt.is_ip6 ? IP46_TYPE_IP6 : IP46_TYPE_IP4,
@@ -106,9 +110,9 @@ format_acl_fa_trace (u8 * s, va_list * args)
s =
format (s,
- "acl-plugin: sw_if_index %d, next index %d, action: %d, match: acl %d rule %d trace_bits %08x\n"
+ "acl-plugin: lc_index: %d, sw_if_index %d, next index %d, action: %d, match: acl %d rule %d trace_bits %08x\n"
" pkt info %016llx %016llx %016llx %016llx %016llx %016llx",
- t->sw_if_index, t->next_index, t->action, t->match_acl_in_index,
+ t->lc_index, t->sw_if_index, t->next_index, t->action, t->match_acl_in_index,
t->match_rule_index, t->trace_bitmap,
t->packet_info[0], t->packet_info[1], t->packet_info[2],
t->packet_info[3], t->packet_info[4], t->packet_info[5]);
@@ -144,420 +148,6 @@ static char *acl_fa_error_strings[] = {
};
/* *INDENT-ON* */
-static void *
-get_ptr_to_offset (vlib_buffer_t * b0, int offset)
-{
- u8 *p = vlib_buffer_get_current (b0) + offset;
- return p;
-}
-
-
-static int
-fa_acl_match_addr (ip46_address_t * addr1, ip46_address_t * addr2,
- int prefixlen, int is_ip6)
-{
- if (prefixlen == 0)
- {
- /* match any always succeeds */
- return 1;
- }
- if (is_ip6)
- {
- if (memcmp (addr1, addr2, prefixlen / 8))
- {
- /* If the starting full bytes do not match, no point in bittwidling the thumbs further */
- return 0;
- }
- if (prefixlen % 8)
- {
- u8 b1 = *((u8 *) addr1 + 1 + prefixlen / 8);
- u8 b2 = *((u8 *) addr2 + 1 + prefixlen / 8);
- u8 mask0 = (0xff - ((1 << (8 - (prefixlen % 8))) - 1));
- return (b1 & mask0) == b2;
- }
- else
- {
- /* The prefix fits into integer number of bytes, so nothing left to do */
- return 1;
- }
- }
- else
- {
- uint32_t a1 = ntohl (addr1->ip4.as_u32);
- uint32_t a2 = ntohl (addr2->ip4.as_u32);
- uint32_t mask0 = 0xffffffff - ((1 << (32 - prefixlen)) - 1);
- return (a1 & mask0) == a2;
- }
-}
-
-static int
-fa_acl_match_port (u16 port, u16 port_first, u16 port_last, int is_ip6)
-{
- return ((port >= port_first) && (port <= port_last));
-}
-
-int
-single_acl_match_5tuple (acl_main_t * am, u32 acl_index, fa_5tuple_t * pkt_5tuple,
- int is_ip6, u8 * r_action, u32 * r_acl_match_p,
- u32 * r_rule_match_p, u32 * trace_bitmap)
-{
- int i;
- acl_list_t *a;
- acl_rule_t *r;
-
- if (pool_is_free_index (am->acls, acl_index))
- {
- if (r_acl_match_p)
- *r_acl_match_p = acl_index;
- if (r_rule_match_p)
- *r_rule_match_p = -1;
- /* the ACL does not exist but is used for policy. Block traffic. */
- return 0;
- }
- a = am->acls + acl_index;
- for (i = 0; i < a->count; i++)
- {
- r = a->rules + i;
- if (is_ip6 != r->is_ipv6)
- {
- continue;
- }
- if (!fa_acl_match_addr
- (&pkt_5tuple->addr[1], &r->dst, r->dst_prefixlen, is_ip6))
- continue;
-
-#ifdef FA_NODE_VERBOSE_DEBUG
- clib_warning
- ("ACL_FA_NODE_DBG acl %d rule %d pkt dst addr %U match rule addr %U/%d",
- acl_index, i, format_ip46_address, &pkt_5tuple->addr[1],
- r->is_ipv6 ? IP46_TYPE_IP6: IP46_TYPE_IP4, format_ip46_address,
- &r->dst, r->is_ipv6 ? IP46_TYPE_IP6: IP46_TYPE_IP4,
- r->dst_prefixlen);
-#endif
-
- if (!fa_acl_match_addr
- (&pkt_5tuple->addr[0], &r->src, r->src_prefixlen, is_ip6))
- continue;
-
-#ifdef FA_NODE_VERBOSE_DEBUG
- clib_warning
- ("ACL_FA_NODE_DBG acl %d rule %d pkt src addr %U match rule addr %U/%d",
- acl_index, i, format_ip46_address, &pkt_5tuple->addr[0],
- r->is_ipv6 ? IP46_TYPE_IP6: IP46_TYPE_IP4, format_ip46_address,
- &r->src, r->is_ipv6 ? IP46_TYPE_IP6: IP46_TYPE_IP4,
- r->src_prefixlen);
- clib_warning
- ("ACL_FA_NODE_DBG acl %d rule %d trying to match pkt proto %d with rule %d",
- acl_index, i, pkt_5tuple->l4.proto, r->proto);
-#endif
- if (r->proto)
- {
- if (pkt_5tuple->l4.proto != r->proto)
- continue;
-
- if (PREDICT_FALSE (pkt_5tuple->pkt.is_nonfirst_fragment &&
- am->l4_match_nonfirst_fragment))
- {
- /* non-initial fragment with frag match configured - match this rule */
- *trace_bitmap |= 0x80000000;
- *r_action = r->is_permit;
- if (r_acl_match_p)
- *r_acl_match_p = acl_index;
- if (r_rule_match_p)
- *r_rule_match_p = i;
- return 1;
- }
-
- /* A sanity check just to ensure we are about to match the ports extracted from the packet */
- if (PREDICT_FALSE (!pkt_5tuple->pkt.l4_valid))
- continue;
-
-#ifdef FA_NODE_VERBOSE_DEBUG
- clib_warning
- ("ACL_FA_NODE_DBG acl %d rule %d pkt proto %d match rule %d",
- acl_index, i, pkt_5tuple->l4.proto, r->proto);
-#endif
-
- if (!fa_acl_match_port
- (pkt_5tuple->l4.port[0], r->src_port_or_type_first,
- r->src_port_or_type_last, is_ip6))
- continue;
-
-#ifdef FA_NODE_VERBOSE_DEBUG
- clib_warning
- ("ACL_FA_NODE_DBG acl %d rule %d pkt sport %d match rule [%d..%d]",
- acl_index, i, pkt_5tuple->l4.port[0], r->src_port_or_type_first,
- r->src_port_or_type_last);
-#endif
-
- if (!fa_acl_match_port
- (pkt_5tuple->l4.port[1], r->dst_port_or_code_first,
- r->dst_port_or_code_last, is_ip6))
- continue;
-
-#ifdef FA_NODE_VERBOSE_DEBUG
- clib_warning
- ("ACL_FA_NODE_DBG acl %d rule %d pkt dport %d match rule [%d..%d]",
- acl_index, i, pkt_5tuple->l4.port[1], r->dst_port_or_code_first,
- r->dst_port_or_code_last);
-#endif
- if (pkt_5tuple->pkt.tcp_flags_valid
- && ((pkt_5tuple->pkt.tcp_flags & r->tcp_flags_mask) !=
- r->tcp_flags_value))
- continue;
- }
- /* everything matches! */
-#ifdef FA_NODE_VERBOSE_DEBUG
- clib_warning ("ACL_FA_NODE_DBG acl %d rule %d FULL-MATCH, action %d",
- acl_index, i, r->is_permit);
-#endif
- *r_action = r->is_permit;
- if (r_acl_match_p)
- *r_acl_match_p = acl_index;
- if (r_rule_match_p)
- *r_rule_match_p = i;
- return 1;
- }
- return 0;
-}
-
-static u8
-linear_multi_acl_match_5tuple (u32 sw_if_index, fa_5tuple_t * pkt_5tuple, int is_l2,
- int is_ip6, int is_input, u32 * acl_match_p,
- u32 * rule_match_p, u32 * trace_bitmap)
-{
- acl_main_t *am = &acl_main;
- int i;
- u32 *acl_vector;
- u8 action = 0;
-
- if (is_input)
- {
- vec_validate (am->input_acl_vec_by_sw_if_index, sw_if_index);
- acl_vector = am->input_acl_vec_by_sw_if_index[sw_if_index];
- }
- else
- {
- vec_validate (am->output_acl_vec_by_sw_if_index, sw_if_index);
- acl_vector = am->output_acl_vec_by_sw_if_index[sw_if_index];
- }
- for (i = 0; i < vec_len (acl_vector); i++)
- {
-#ifdef FA_NODE_VERBOSE_DEBUG
- clib_warning ("ACL_FA_NODE_DBG: Trying to match ACL: %d",
- acl_vector[i]);
-#endif
- if (single_acl_match_5tuple
- (am, acl_vector[i], pkt_5tuple, is_ip6, &action,
- acl_match_p, rule_match_p, trace_bitmap))
- {
- return action;
- }
- }
- if (vec_len (acl_vector) > 0)
- {
- /* If there are ACLs and none matched, deny by default */
- return 0;
- }
-#ifdef FA_NODE_VERBOSE_DEBUG
- clib_warning ("ACL_FA_NODE_DBG: No ACL on sw_if_index %d", sw_if_index);
-#endif
- /* Deny by default. If there are no ACLs defined we should not be here. */
- return 0;
-}
-
-static u8
-multi_acl_match_5tuple (u32 sw_if_index, fa_5tuple_t * pkt_5tuple, int is_l2,
- int is_ip6, int is_input, u32 * acl_match_p,
- u32 * rule_match_p, u32 * trace_bitmap)
-{
- acl_main_t *am = &acl_main;
- if (am->use_hash_acl_matching) {
- return hash_multi_acl_match_5tuple(sw_if_index, pkt_5tuple, is_l2, is_ip6,
- is_input, acl_match_p, rule_match_p, trace_bitmap);
- } else {
- return linear_multi_acl_match_5tuple(sw_if_index, pkt_5tuple, is_l2, is_ip6,
- is_input, acl_match_p, rule_match_p, trace_bitmap);
- }
-}
-
-static int
-offset_within_packet (vlib_buffer_t * b0, int offset)
-{
- /* For the purposes of this code, "within" means we have at least 8 bytes after it */
- return (offset <= (b0->current_length - 8));
-}
-
-static void
-acl_fill_5tuple (acl_main_t * am, vlib_buffer_t * b0, int is_ip6,
- int is_input, int is_l2_path, fa_5tuple_t * p5tuple_pkt)
-{
- int l3_offset;
- int l4_offset;
- u16 ports[2];
- u16 proto;
-
- if (is_l2_path)
- {
- l3_offset = ethernet_buffer_header_size(b0);
- }
- else
- {
- if (is_input)
- l3_offset = 0;
- else
- l3_offset = vnet_buffer(b0)->ip.save_rewrite_length;
- }
-
- /* key[0..3] contains src/dst address and is cleared/set below */
- /* Remainder of the key and per-packet non-key data */
- p5tuple_pkt->kv.key[4] = 0;
- p5tuple_pkt->kv.value = 0;
-
- if (is_ip6)
- {
- clib_memcpy (&p5tuple_pkt->addr,
- get_ptr_to_offset (b0,
- offsetof (ip6_header_t,
- src_address) + l3_offset),
- sizeof (p5tuple_pkt->addr));
- proto =
- *(u8 *) get_ptr_to_offset (b0,
- offsetof (ip6_header_t,
- protocol) + l3_offset);
- l4_offset = l3_offset + sizeof (ip6_header_t);
-#ifdef FA_NODE_VERBOSE_DEBUG
- clib_warning ("ACL_FA_NODE_DBG: proto: %d, l4_offset: %d", proto,
- l4_offset);
-#endif
- /* IP6 EH handling is here, increment l4_offset if needs to, update the proto */
- int need_skip_eh = clib_bitmap_get (am->fa_ipv6_known_eh_bitmap, proto);
- if (PREDICT_FALSE (need_skip_eh))
- {
- while (need_skip_eh && offset_within_packet (b0, l4_offset))
- {
- /* Fragment header needs special handling */
- if (PREDICT_FALSE(ACL_EH_FRAGMENT == proto))
- {
- proto = *(u8 *) get_ptr_to_offset (b0, l4_offset);
- u16 frag_offset;
- clib_memcpy (&frag_offset, get_ptr_to_offset (b0, 2 + l4_offset), sizeof(frag_offset));
- frag_offset = ntohs(frag_offset) >> 3;
- if (frag_offset)
- {
- p5tuple_pkt->pkt.is_nonfirst_fragment = 1;
- /* invalidate L4 offset so we don't try to find L4 info */
- l4_offset += b0->current_length;
- }
- else
- {
- /* First fragment: skip the frag header and move on. */
- l4_offset += 8;
- }
- }
- else
- {
- u8 nwords = *(u8 *) get_ptr_to_offset (b0, 1 + l4_offset);
- proto = *(u8 *) get_ptr_to_offset (b0, l4_offset);
- l4_offset += 8 * (1 + (u16) nwords);
- }
-#ifdef FA_NODE_VERBOSE_DEBUG
- clib_warning ("ACL_FA_NODE_DBG: new proto: %d, new offset: %d",
- proto, l4_offset);
-#endif
- need_skip_eh =
- clib_bitmap_get (am->fa_ipv6_known_eh_bitmap, proto);
- }
- }
- }
- else
- {
- p5tuple_pkt->kv.key[0] = 0;
- p5tuple_pkt->kv.key[1] = 0;
- p5tuple_pkt->kv.key[2] = 0;
- p5tuple_pkt->kv.key[3] = 0;
- clib_memcpy (&p5tuple_pkt->addr[0].ip4,
- get_ptr_to_offset (b0,
- offsetof (ip4_header_t,
- src_address) + l3_offset),
- sizeof (p5tuple_pkt->addr[0].ip4));
- clib_memcpy (&p5tuple_pkt->addr[1].ip4,
- get_ptr_to_offset (b0,
- offsetof (ip4_header_t,
- dst_address) + l3_offset),
- sizeof (p5tuple_pkt->addr[1].ip4));
- proto =
- *(u8 *) get_ptr_to_offset (b0,
- offsetof (ip4_header_t,
- protocol) + l3_offset);
- l4_offset = l3_offset + sizeof (ip4_header_t);
- u16 flags_and_fragment_offset;
- clib_memcpy (&flags_and_fragment_offset,
- get_ptr_to_offset (b0,
- offsetof (ip4_header_t,
- flags_and_fragment_offset)) + l3_offset,
- sizeof(flags_and_fragment_offset));
- flags_and_fragment_offset = ntohs (flags_and_fragment_offset);
-
- /* non-initial fragments have non-zero offset */
- if ((PREDICT_FALSE(0xfff & flags_and_fragment_offset)))
- {
- p5tuple_pkt->pkt.is_nonfirst_fragment = 1;
- /* invalidate L4 offset so we don't try to find L4 info */
- l4_offset += b0->current_length;
- }
-
- }
- p5tuple_pkt->l4.proto = proto;
- if (PREDICT_TRUE (offset_within_packet (b0, l4_offset)))
- {
- p5tuple_pkt->pkt.l4_valid = 1;
- if (icmp_protos[is_ip6] == proto)
- {
- /* type */
- p5tuple_pkt->l4.port[0] =
- *(u8 *) get_ptr_to_offset (b0,
- l4_offset + offsetof (icmp46_header_t,
- type));
- /* code */
- p5tuple_pkt->l4.port[1] =
- *(u8 *) get_ptr_to_offset (b0,
- l4_offset + offsetof (icmp46_header_t,
- code));
- }
- else if ((IPPROTO_TCP == proto) || (IPPROTO_UDP == proto))
- {
- clib_memcpy (&ports,
- get_ptr_to_offset (b0,
- l4_offset + offsetof (tcp_header_t,
- src_port)),
- sizeof (ports));
- p5tuple_pkt->l4.port[0] = ntohs (ports[0]);
- p5tuple_pkt->l4.port[1] = ntohs (ports[1]);
-
- p5tuple_pkt->pkt.tcp_flags =
- *(u8 *) get_ptr_to_offset (b0,
- l4_offset + offsetof (tcp_header_t,
- flags));
- p5tuple_pkt->pkt.tcp_flags_valid = (proto == IPPROTO_TCP);
- }
- /*
- * FIXME: rather than the above conditional, here could
- * be a nice generic mechanism to extract two L4 values:
- *
- * have a per-protocol array of 4 elements like this:
- * u8 offset; to take the byte from, off L4 header
- * u8 mask; to mask it with, before storing
- *
- * this way we can describe UDP, TCP and ICMP[46] semantics,
- * and add a sort of FPM-type behavior for other protocols.
- *
- * Of course, is it faster ? and is it needed ?
- *
- */
- }
-}
-
static int
acl_fa_ifc_has_sessions (acl_main_t * am, int sw_if_index0)
{
@@ -774,6 +364,10 @@ acl_fa_conn_list_add_session (acl_main_t * am, fa_full_session_id_t sess_id, u64
ASSERT(prev_sess->thread_index == sess->thread_index);
}
pw->fa_conn_list_tail[list_id] = sess_id.session_index;
+
+#ifdef FA_NODE_VERBOSE_DEBUG
+ clib_warning("FA-SESSION-DEBUG: add session id %d on thread %d sw_if_index %d", sess_id.session_index, thread_index, sess->sw_if_index);
+#endif
pw->serviced_sw_if_index_bitmap = clib_bitmap_set(pw->serviced_sw_if_index_bitmap, sess->sw_if_index, 1);
if (~0 == pw->fa_conn_list_head[list_id]) {
@@ -942,8 +536,8 @@ acl_fa_check_idle_sessions(acl_main_t *am, u16 thread_index, u64 now)
if ((now < sess_timeout_time) && (0 == clib_bitmap_get(pw->pending_clear_sw_if_index_bitmap, sw_if_index)))
{
#ifdef FA_NODE_VERBOSE_DEBUG
- clib_warning ("ACL_FA_NODE_CLEAN: Restarting timer for session %d",
- (int) session_index);
+ clib_warning ("ACL_FA_NODE_CLEAN: Restarting timer for session %d, sw_if_index %d",
+ (int) fsid.session_index, sess->sw_if_index);
#endif
/* There was activity on the session, so the idle timeout
has not passed. Enqueue for another time period. */
@@ -954,8 +548,8 @@ acl_fa_check_idle_sessions(acl_main_t *am, u16 thread_index, u64 now)
else
{
#ifdef FA_NODE_VERBOSE_DEBUG
- clib_warning ("ACL_FA_NODE_CLEAN: Deleting session %d",
- (int) session_index);
+ clib_warning ("ACL_FA_NODE_CLEAN: Deleting session %d, sw_if_index %d",
+ (int) fsid.session_index, sess->sw_if_index);
#endif
acl_fa_delete_session (am, sw_if_index, fsid);
pw->cnt_deleted_sessions++;
@@ -1044,9 +638,7 @@ static int
acl_fa_find_session (acl_main_t * am, u32 sw_if_index0, fa_5tuple_t * p5tuple,
clib_bihash_kv_40_8_t * pvalue_sess)
{
- return (BV (clib_bihash_search)
- (&am->fa_sessions_hash, &p5tuple->kv,
- pvalue_sess) == 0);
+ return (clib_bihash_search_40_8 (&am->fa_sessions_hash, &p5tuple->kv, pvalue_sess) == 0);
}
@@ -1090,8 +682,10 @@ acl_fa_node_fn (vlib_main_t * vm,
u32 next0 = 0;
u8 action = 0;
u32 sw_if_index0;
+ u32 lc_index0;
int acl_check_needed = 1;
u32 match_acl_in_index = ~0;
+ u32 match_acl_pos = ~0;
u32 match_rule_index = ~0;
u8 error0 = 0;
u32 valid_new_sess;
@@ -1111,26 +705,30 @@ acl_fa_node_fn (vlib_main_t * vm,
else
sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+ if (is_input)
+ lc_index0 = am->input_lc_index_by_sw_if_index[sw_if_index0];
+ else
+ lc_index0 = am->output_lc_index_by_sw_if_index[sw_if_index0];
/*
* Extract the L3/L4 matching info into a 5-tuple structure,
* then create a session key whose layout is independent on forward or reverse
* direction of the packet.
*/
- acl_fill_5tuple (am, b0, is_ip6, is_input, is_l2_path, &fa_5tuple);
+ acl_plugin_fill_5tuple_inline (lc_index0, b0, is_ip6, is_input, is_l2_path, (fa_5tuple_opaque_t *)&fa_5tuple);
fa_5tuple.l4.lsb_of_sw_if_index = sw_if_index0 & 0xffff;
+ fa_5tuple.pkt.lc_index = lc_index0;
valid_new_sess = acl_make_5tuple_session_key (am, is_input, is_ip6, sw_if_index0, &fa_5tuple, &kv_sess);
- fa_5tuple.pkt.sw_if_index = sw_if_index0;
fa_5tuple.pkt.is_ip6 = is_ip6;
- fa_5tuple.pkt.is_input = is_input;
+ // XXDEL fa_5tuple.pkt.is_input = is_input;
fa_5tuple.pkt.mask_type_index_lsb = ~0;
#ifdef FA_NODE_VERBOSE_DEBUG
clib_warning
- ("ACL_FA_NODE_DBG: session 5-tuple %016llx %016llx %016llx %016llx %016llx : %016llx",
+ ("ACL_FA_NODE_DBG: session 5-tuple %016llx %016llx %016llx %016llx %016llx %016llx",
kv_sess.kv.key[0], kv_sess.kv.key[1], kv_sess.kv.key[2],
kv_sess.kv.key[3], kv_sess.kv.key[4], kv_sess.kv.value);
clib_warning
- ("ACL_FA_NODE_DBG: packet 5-tuple %016llx %016llx %016llx %016llx %016llx : %016llx",
+ ("ACL_FA_NODE_DBG: packet 5-tuple %016llx %016llx %016llx %016llx %016llx %016llx",
fa_5tuple.kv.key[0], fa_5tuple.kv.key[1], fa_5tuple.kv.key[2],
fa_5tuple.kv.key[3], fa_5tuple.kv.key[4], fa_5tuple.kv.value);
#endif
@@ -1189,9 +787,9 @@ acl_fa_node_fn (vlib_main_t * vm,
if (acl_check_needed)
{
- action =
- multi_acl_match_5tuple (sw_if_index0, &fa_5tuple, is_l2_path,
- is_ip6, is_input, &match_acl_in_index,
+ action = 0; /* deny by default */
+ acl_plugin_match_5tuple_inline (lc_index0, (fa_5tuple_opaque_t *)&fa_5tuple,
+ is_ip6, &action, &match_acl_pos, &match_acl_in_index,
&match_rule_index, &trace_bitmap);
error0 = action;
if (1 == action)
@@ -1236,12 +834,16 @@ acl_fa_node_fn (vlib_main_t * vm,
else
vnet_feature_next (sw_if_index0, &next0, b0);
}
+#ifdef FA_NODE_VERBOSE_DEBUG
+ clib_warning("ACL_FA_NODE_DBG: sw_if_index %d lc_index %d action %d acl_index %d rule_index %d", sw_if_index0, lc_index0, action, match_acl_in_index, match_rule_index);
+#endif
if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
&& (b0->flags & VLIB_BUFFER_IS_TRACED)))
{
acl_fa_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
t->sw_if_index = sw_if_index0;
+ t->lc_index = lc_index0;
t->next_index = next0;
t->match_acl_in_index = match_acl_in_index;
t->match_rule_index = match_rule_index;
diff --git a/src/plugins/acl/fa_node.h b/src/plugins/acl/fa_node.h
index 7ef558e17ea..dc4f87f0eb1 100644
--- a/src/plugins/acl/fa_node.h
+++ b/src/plugins/acl/fa_node.h
@@ -4,6 +4,8 @@
#include <stddef.h>
#include <vppinfra/bihash_40_8.h>
+// #define FA_NODE_VERBOSE_DEBUG 3
+
#define TCP_FLAG_FIN 0x01
#define TCP_FLAG_SYN 0x02
#define TCP_FLAG_RST 0x04
@@ -22,15 +24,14 @@
typedef union {
u64 as_u64;
struct {
- u32 sw_if_index;
+ u32 lc_index;
u16 mask_type_index_lsb;
u8 tcp_flags;
u8 tcp_flags_valid:1;
- u8 is_input:1;
u8 l4_valid:1;
u8 is_nonfirst_fragment:1;
u8 is_ip6:1;
- u8 flags_reserved:3;
+ u8 flags_reserved:4;
};
} fa_packet_info_t;
@@ -53,6 +54,10 @@ typedef union {
clib_bihash_kv_40_8_t kv;
} fa_5tuple_t;
+typedef struct {
+ u8 opaque[sizeof(fa_5tuple_t)];
+} fa_5tuple_opaque_t;
+
typedef struct {
fa_5tuple_t info; /* (5+1)*8 = 48 bytes */
diff --git a/src/plugins/acl/hash_lookup.c b/src/plugins/acl/hash_lookup.c
index 2262402d52f..ad55054c3e3 100644
--- a/src/plugins/acl/hash_lookup.c
+++ b/src/plugins/acl/hash_lookup.c
@@ -33,126 +33,17 @@
#include "hash_lookup_private.h"
-static inline applied_hash_ace_entry_t **get_applied_hash_aces(acl_main_t *am, int is_input, u32 sw_if_index)
+always_inline applied_hash_ace_entry_t **get_applied_hash_aces(acl_main_t *am, u32 lc_index)
{
- applied_hash_ace_entry_t **applied_hash_aces = is_input ? vec_elt_at_index(am->input_hash_entry_vec_by_sw_if_index, sw_if_index)
+ applied_hash_ace_entry_t **applied_hash_aces = vec_elt_at_index(am->hash_entry_vec_by_lc_index, lc_index);
+
+/*is_input ? vec_elt_at_index(am->input_hash_entry_vec_by_sw_if_index, sw_if_index)
: vec_elt_at_index(am->output_hash_entry_vec_by_sw_if_index, sw_if_index);
+*/
return applied_hash_aces;
}
-
-/*
- * This returns true if there is indeed a match on the portranges.
- * With all these levels of indirections, this is not going to be very fast,
- * so, best use the individual ports or wildcard ports for performance.
- */
-static int
-match_portranges(acl_main_t *am, fa_5tuple_t *match, u32 index)
-{
-
- applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, match->pkt.is_input, match->pkt.sw_if_index);
- applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), index);
-
- acl_rule_t *r = &(am->acls[pae->acl_index].rules[pae->ace_index]);
- DBG("PORTMATCH: %d <= %d <= %d && %d <= %d <= %d ?",
- r->src_port_or_type_first, match->l4.port[0], r->src_port_or_type_last,
- r->dst_port_or_code_first, match->l4.port[1], r->dst_port_or_code_last);
-
- return ( ((r->src_port_or_type_first <= match->l4.port[0]) && r->src_port_or_type_last >= match->l4.port[0]) &&
- ((r->dst_port_or_code_first <= match->l4.port[1]) && r->dst_port_or_code_last >= match->l4.port[1]) );
-}
-
-static u32
-multi_acl_match_get_applied_ace_index(acl_main_t *am, fa_5tuple_t *match)
-{
- clib_bihash_kv_48_8_t kv;
- clib_bihash_kv_48_8_t result;
- fa_5tuple_t *kv_key = (fa_5tuple_t *)kv.key;
- hash_acl_lookup_value_t *result_val = (hash_acl_lookup_value_t *)&result.value;
- u64 *pmatch = (u64 *)match;
- u64 *pmask;
- u64 *pkey;
- int mask_type_index;
- u32 curr_match_index = ~0;
-
- u32 sw_if_index = match->pkt.sw_if_index;
- u8 is_input = match->pkt.is_input;
- applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, is_input, sw_if_index);
- applied_hash_acl_info_t **applied_hash_acls = is_input ? &am->input_applied_hash_acl_info_by_sw_if_index :
- &am->output_applied_hash_acl_info_by_sw_if_index;
-
- DBG("TRYING TO MATCH: %016llx %016llx %016llx %016llx %016llx %016llx",
- pmatch[0], pmatch[1], pmatch[2], pmatch[3], pmatch[4], pmatch[5]);
-
- for(mask_type_index=0; mask_type_index < pool_len(am->ace_mask_type_pool); mask_type_index++) {
- if (!clib_bitmap_get(vec_elt_at_index((*applied_hash_acls), sw_if_index)->mask_type_index_bitmap, mask_type_index)) {
- /* This bit is not set. Avoid trying to match */
- continue;
- }
- ace_mask_type_entry_t *mte = vec_elt_at_index(am->ace_mask_type_pool, mask_type_index);
- pmatch = (u64 *)match;
- pmask = (u64 *)&mte->mask;
- pkey = (u64 *)kv.key;
- /*
- * unrolling the below loop results in a noticeable performance increase.
- int i;
- for(i=0; i<6; i++) {
- kv.key[i] = pmatch[i] & pmask[i];
- }
- */
-
- *pkey++ = *pmatch++ & *pmask++;
- *pkey++ = *pmatch++ & *pmask++;
- *pkey++ = *pmatch++ & *pmask++;
- *pkey++ = *pmatch++ & *pmask++;
- *pkey++ = *pmatch++ & *pmask++;
- *pkey++ = *pmatch++ & *pmask++;
-
- kv_key->pkt.mask_type_index_lsb = mask_type_index;
- DBG(" KEY %3d: %016llx %016llx %016llx %016llx %016llx %016llx", mask_type_index,
- kv.key[0], kv.key[1], kv.key[2], kv.key[3], kv.key[4], kv.key[5]);
- int res = BV (clib_bihash_search) (&am->acl_lookup_hash, &kv, &result);
- if (res == 0) {
- DBG("ACL-MATCH! result_val: %016llx", result_val->as_u64);
- if (result_val->applied_entry_index < curr_match_index) {
- if (PREDICT_FALSE(result_val->need_portrange_check)) {
- /*
- * This is going to be slow, since we can have multiple superset
- * entries for narrow-ish portranges, e.g.:
- * 0..42 100..400, 230..60000,
- * so we need to walk linearly and check if they match.
- */
-
- u32 curr_index = result_val->applied_entry_index;
- while ((curr_index != ~0) && !match_portranges(am, match, curr_index)) {
- /* while no match and there are more entries, walk... */
- applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces),curr_index);
- DBG("entry %d did not portmatch, advancing to %d", curr_index, pae->next_applied_entry_index);
- curr_index = pae->next_applied_entry_index;
- }
- if (curr_index < curr_match_index) {
- DBG("The index %d is the new candidate in portrange matches.", curr_index);
- curr_match_index = curr_index;
- } else {
- DBG("Curr portmatch index %d is too big vs. current matched one %d", curr_index, curr_match_index);
- }
- } else {
- /* The usual path is here. Found an entry in front of the current candiate - so it's a new one */
- DBG("This match is the new candidate");
- curr_match_index = result_val->applied_entry_index;
- if (!result_val->shadowed) {
- /* new result is known to not be shadowed, so no point to look up further */
- break;
- }
- }
- }
- }
- }
- DBG("MATCH-RESULT: %d", curr_match_index);
- return curr_match_index;
-}
-
static void
hashtable_add_del(acl_main_t *am, clib_bihash_kv_48_8_t *kv, int is_add)
{
@@ -165,7 +56,7 @@ hashtable_add_del(acl_main_t *am, clib_bihash_kv_48_8_t *kv, int is_add)
static void
fill_applied_hash_ace_kv(acl_main_t *am,
applied_hash_ace_entry_t **applied_hash_aces,
- u32 sw_if_index, u8 is_input,
+ u32 lc_index,
u32 new_index, clib_bihash_kv_48_8_t *kv)
{
fa_5tuple_t *kv_key = (fa_5tuple_t *)kv->key;
@@ -175,8 +66,7 @@ fill_applied_hash_ace_kv(acl_main_t *am,
memcpy(kv_key, &(vec_elt_at_index(ha->rules, pae->hash_ace_info_index)->match), sizeof(*kv_key));
/* initialize the sw_if_index and direction */
- kv_key->pkt.sw_if_index = sw_if_index;
- kv_key->pkt.is_input = is_input;
+ kv_key->pkt.lc_index = lc_index;
kv_val->as_u64 = 0;
kv_val->applied_entry_index = new_index;
kv_val->need_portrange_check = vec_elt_at_index(ha->rules, pae->hash_ace_info_index)->src_portrange_not_powerof2 ||
@@ -187,13 +77,13 @@ fill_applied_hash_ace_kv(acl_main_t *am,
static void
add_del_hashtable_entry(acl_main_t *am,
- u32 sw_if_index, u8 is_input,
+ u32 lc_index,
applied_hash_ace_entry_t **applied_hash_aces,
u32 index, int is_add)
{
clib_bihash_kv_48_8_t kv;
- fill_applied_hash_ace_kv(am, applied_hash_aces, sw_if_index, is_input, index, &kv);
+ fill_applied_hash_ace_kv(am, applied_hash_aces, lc_index, index, &kv);
hashtable_add_del(am, &kv, is_add);
}
@@ -201,16 +91,16 @@ add_del_hashtable_entry(acl_main_t *am,
static void
activate_applied_ace_hash_entry(acl_main_t *am,
- u32 sw_if_index, u8 is_input,
+ u32 lc_index,
applied_hash_ace_entry_t **applied_hash_aces,
u32 new_index)
{
clib_bihash_kv_48_8_t kv;
ASSERT(new_index != ~0);
applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), new_index);
- DBG("activate_applied_ace_hash_entry sw_if_index %d is_input %d new_index %d", sw_if_index, is_input, new_index);
+ DBG("activate_applied_ace_hash_entry lc_index %d new_index %d", lc_index, new_index);
- fill_applied_hash_ace_kv(am, applied_hash_aces, sw_if_index, is_input, new_index, &kv);
+ fill_applied_hash_ace_kv(am, applied_hash_aces, lc_index, new_index, &kv);
DBG("APPLY ADD KY: %016llx %016llx %016llx %016llx %016llx %016llx",
kv.key[0], kv.key[1], kv.key[2],
@@ -272,8 +162,9 @@ hash_acl_set_heap(acl_main_t *am)
}
void
-acl_plugin_hash_acl_set_validate_heap(acl_main_t *am, int on)
+acl_plugin_hash_acl_set_validate_heap(int on)
{
+ acl_main_t *am = &acl_main;
clib_mem_set_heap(hash_acl_set_heap(am));
mheap_t *h = mheap_header (am->hash_lookup_mheap);
if (on) {
@@ -287,8 +178,9 @@ acl_plugin_hash_acl_set_validate_heap(acl_main_t *am, int on)
}
void
-acl_plugin_hash_acl_set_trace_heap(acl_main_t *am, int on)
+acl_plugin_hash_acl_set_trace_heap(int on)
{
+ acl_main_t *am = &acl_main;
clib_mem_set_heap(hash_acl_set_heap(am));
mheap_t *h = mheap_header (am->hash_lookup_mheap);
if (on) {
@@ -299,11 +191,11 @@ acl_plugin_hash_acl_set_trace_heap(acl_main_t *am, int on)
}
void
-hash_acl_apply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index)
+hash_acl_apply(acl_main_t *am, u32 lc_index, int acl_index, u32 acl_position)
{
int i;
- DBG0("HASH ACL apply: sw_if_index %d is_input %d acl %d", sw_if_index, is_input, acl_index);
+ DBG0("HASH ACL apply: lc_index %d acl %d", lc_index, acl_index);
if (!am->acl_lookup_hash_initialized) {
BV (clib_bihash_init) (&am->acl_lookup_hash, "ACL plugin rule lookup bihash",
am->hash_lookup_hash_buckets, am->hash_lookup_hash_memory);
@@ -311,42 +203,36 @@ hash_acl_apply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index)
}
void *oldheap = hash_acl_set_heap(am);
- if (is_input) {
- vec_validate(am->input_hash_entry_vec_by_sw_if_index, sw_if_index);
- } else {
- vec_validate(am->output_hash_entry_vec_by_sw_if_index, sw_if_index);
- }
+ vec_validate(am->hash_entry_vec_by_lc_index, lc_index);
vec_validate(am->hash_acl_infos, acl_index);
- applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, is_input, sw_if_index);
+ applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, lc_index);
hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
- u32 **hash_acl_applied_sw_if_index = is_input ? &ha->inbound_sw_if_index_list
- : &ha->outbound_sw_if_index_list;
+ u32 **hash_acl_applied_lc_index = &ha->lc_index_list;
int base_offset = vec_len(*applied_hash_aces);
/* Update the bitmap of the mask types with which the lookup
- needs to happen for the ACLs applied to this sw_if_index */
- applied_hash_acl_info_t **applied_hash_acls = is_input ? &am->input_applied_hash_acl_info_by_sw_if_index :
- &am->output_applied_hash_acl_info_by_sw_if_index;
- vec_validate((*applied_hash_acls), sw_if_index);
- applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), sw_if_index);
+ needs to happen for the ACLs applied to this lc_index */
+ applied_hash_acl_info_t **applied_hash_acls = &am->applied_hash_acl_info_by_lc_index;
+ vec_validate((*applied_hash_acls), lc_index);
+ applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), lc_index);
/* ensure the list of applied hash acls is initialized and add this acl# to it */
u32 index = vec_search(pal->applied_acls, acl_index);
if (index != ~0) {
- clib_warning("BUG: trying to apply twice acl_index %d on sw_if_index %d is_input %d",
- acl_index, sw_if_index, is_input);
+ clib_warning("BUG: trying to apply twice acl_index %d on lc_index %d, according to lc",
+ acl_index, lc_index);
goto done;
}
vec_add1(pal->applied_acls, acl_index);
- u32 index2 = vec_search((*hash_acl_applied_sw_if_index), sw_if_index);
+ u32 index2 = vec_search((*hash_acl_applied_lc_index), lc_index);
if (index2 != ~0) {
- clib_warning("BUG: trying to apply twice acl_index %d on (sw_if_index %d) is_input %d",
- acl_index, sw_if_index, is_input);
+ clib_warning("BUG: trying to apply twice acl_index %d on lc_index %d, according to hash h-acl info",
+ acl_index, lc_index);
goto done;
}
- vec_add1((*hash_acl_applied_sw_if_index), sw_if_index);
+ vec_add1((*hash_acl_applied_lc_index), lc_index);
pal->mask_type_index_bitmap = clib_bitmap_or(pal->mask_type_index_bitmap,
ha->mask_type_index_bitmap);
@@ -369,6 +255,7 @@ hash_acl_apply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index)
applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), new_index);
pae->acl_index = acl_index;
pae->ace_index = ha->rules[i].ace_index;
+ pae->acl_position = acl_position;
pae->action = ha->rules[i].action;
pae->hitcount = 0;
pae->hash_ace_info_index = i;
@@ -376,7 +263,7 @@ hash_acl_apply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index)
pae->next_applied_entry_index = ~0;
pae->prev_applied_entry_index = ~0;
pae->tail_applied_entry_index = ~0;
- activate_applied_ace_hash_entry(am, sw_if_index, is_input, applied_hash_aces, new_index);
+ activate_applied_ace_hash_entry(am, lc_index, applied_hash_aces, new_index);
}
applied_hash_entries_analyze(am, applied_hash_aces);
done:
@@ -403,7 +290,7 @@ find_head_applied_ace_index(applied_hash_ace_entry_t **applied_hash_aces, u32 cu
static void
move_applied_ace_hash_entry(acl_main_t *am,
- u32 sw_if_index, u8 is_input,
+ u32 lc_index,
applied_hash_ace_entry_t **applied_hash_aces,
u32 old_index, u32 new_index)
{
@@ -421,7 +308,7 @@ move_applied_ace_hash_entry(acl_main_t *am,
prev_pae->next_applied_entry_index = new_index;
} else {
/* first entry - so the hash points to it, update */
- add_del_hashtable_entry(am, sw_if_index, is_input,
+ add_del_hashtable_entry(am, lc_index,
applied_hash_aces, new_index, 1);
ASSERT(pae->tail_applied_entry_index != ~0);
}
@@ -448,12 +335,12 @@ move_applied_ace_hash_entry(acl_main_t *am,
static void
deactivate_applied_ace_hash_entry(acl_main_t *am,
- u32 sw_if_index, u8 is_input,
+ u32 lc_index,
applied_hash_ace_entry_t **applied_hash_aces,
u32 old_index)
{
applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), old_index);
- DBG("UNAPPLY DEACTIVATE: sw_if_index %d is_input %d, applied index %d", sw_if_index, is_input, old_index);
+ DBG("UNAPPLY DEACTIVATE: lc_index %d applied index %d", lc_index, old_index);
if (pae->prev_applied_entry_index != ~0) {
DBG("UNAPPLY = index %d has prev_applied_entry_index %d", old_index, pae->prev_applied_entry_index);
@@ -483,11 +370,11 @@ deactivate_applied_ace_hash_entry(acl_main_t *am,
DBG("Resetting the hash table entry from %d to %d, setting tail index to %d", old_index, pae->next_applied_entry_index, pae->tail_applied_entry_index);
/* unlink from the next element */
next_pae->prev_applied_entry_index = ~0;
- add_del_hashtable_entry(am, sw_if_index, is_input,
+ add_del_hashtable_entry(am, lc_index,
applied_hash_aces, pae->next_applied_entry_index, 1);
} else {
/* no next entry, so just delete the entry in the hash table */
- add_del_hashtable_entry(am, sw_if_index, is_input,
+ add_del_hashtable_entry(am, lc_index,
applied_hash_aces, old_index, 0);
}
}
@@ -499,13 +386,15 @@ deactivate_applied_ace_hash_entry(acl_main_t *am,
static void
-hash_acl_build_applied_lookup_bitmap(acl_main_t *am, u32 sw_if_index, u8 is_input)
+hash_acl_build_applied_lookup_bitmap(acl_main_t *am, u32 lc_index)
{
int i;
uword *new_lookup_bitmap = 0;
- applied_hash_acl_info_t **applied_hash_acls = is_input ? &am->input_applied_hash_acl_info_by_sw_if_index
- : &am->output_applied_hash_acl_info_by_sw_if_index;
- applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), sw_if_index);
+
+ applied_hash_acl_info_t **applied_hash_acls = &am->applied_hash_acl_info_by_lc_index;
+ vec_validate((*applied_hash_acls), lc_index);
+ applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), lc_index);
+
for(i=0; i < vec_len(pal->applied_acls); i++) {
u32 a_acl_index = *vec_elt_at_index((pal->applied_acls), i);
hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, a_acl_index);
@@ -520,37 +409,35 @@ hash_acl_build_applied_lookup_bitmap(acl_main_t *am, u32 sw_if_index, u8 is_inpu
}
void
-hash_acl_unapply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index)
+hash_acl_unapply(acl_main_t *am, u32 lc_index, int acl_index)
{
int i;
- DBG0("HASH ACL unapply: sw_if_index %d is_input %d acl %d", sw_if_index, is_input, acl_index);
- applied_hash_acl_info_t **applied_hash_acls = is_input ? &am->input_applied_hash_acl_info_by_sw_if_index
- : &am->output_applied_hash_acl_info_by_sw_if_index;
- applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), sw_if_index);
+ DBG0("HASH ACL unapply: lc_index %d acl %d", lc_index, acl_index);
+ applied_hash_acl_info_t **applied_hash_acls = &am->applied_hash_acl_info_by_lc_index;
+ applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), lc_index);
hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
- u32 **hash_acl_applied_sw_if_index = is_input ? &ha->inbound_sw_if_index_list
- : &ha->outbound_sw_if_index_list;
+ u32 **hash_acl_applied_lc_index = &ha->lc_index_list;
/* remove this acl# from the list of applied hash acls */
u32 index = vec_search(pal->applied_acls, acl_index);
if (index == ~0) {
- clib_warning("BUG: trying to unapply unapplied acl_index %d on sw_if_index %d is_input %d",
- acl_index, sw_if_index, is_input);
+ clib_warning("BUG: trying to unapply unapplied acl_index %d on lc_index %d, according to lc",
+ acl_index, lc_index);
return;
}
vec_del1(pal->applied_acls, index);
- u32 index2 = vec_search((*hash_acl_applied_sw_if_index), sw_if_index);
+ u32 index2 = vec_search((*hash_acl_applied_lc_index), lc_index);
if (index2 == ~0) {
- clib_warning("BUG: trying to unapply twice acl_index %d on (sw_if_index %d) is_input %d",
- acl_index, sw_if_index, is_input);
+ clib_warning("BUG: trying to unapply twice acl_index %d on lc_index %d, according to h-acl info",
+ acl_index, lc_index);
return;
}
- vec_del1((*hash_acl_applied_sw_if_index), index2);
+ vec_del1((*hash_acl_applied_lc_index), index2);
- applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, is_input, sw_if_index);
+ applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, lc_index);
for(i=0; i < vec_len((*applied_hash_aces)); i++) {
if (vec_elt_at_index(*applied_hash_aces,i)->acl_index == acl_index) {
@@ -559,7 +446,7 @@ hash_acl_unapply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index)
}
}
if (vec_len((*applied_hash_aces)) <= i) {
- DBG("Did not find applied ACL#%d at sw_if_index %d", acl_index, sw_if_index);
+ DBG("Did not find applied ACL#%d at lc_index %d", acl_index, lc_index);
/* we went all the way without finding any entries. Probably a list was empty. */
return;
}
@@ -571,14 +458,14 @@ hash_acl_unapply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index)
DBG("base_offset: %d, tail_offset: %d, tail_len: %d", base_offset, tail_offset, tail_len);
for(i=0; i < vec_len(ha->rules); i ++) {
- deactivate_applied_ace_hash_entry(am, sw_if_index, is_input,
+ deactivate_applied_ace_hash_entry(am, lc_index,
applied_hash_aces, base_offset + i);
}
for(i=0; i < tail_len; i ++) {
/* move the entry at tail offset to base offset */
/* that is, from (tail_offset+i) -> (base_offset+i) */
- DBG("UNAPPLY MOVE: sw_if_index %d is_input %d, applied index %d ->", sw_if_index, is_input, tail_offset+i, base_offset + i);
- move_applied_ace_hash_entry(am, sw_if_index, is_input, applied_hash_aces, tail_offset + i, base_offset + i);
+ DBG("UNAPPLY MOVE: lc_index %d, applied index %d -> %d", lc_index, tail_offset+i, base_offset + i);
+ move_applied_ace_hash_entry(am, lc_index, applied_hash_aces, tail_offset + i, base_offset + i);
}
/* trim the end of the vector */
_vec_len((*applied_hash_aces)) -= vec_len(ha->rules);
@@ -586,7 +473,7 @@ hash_acl_unapply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index)
applied_hash_entries_analyze(am, applied_hash_aces);
/* After deletion we might not need some of the mask-types anymore... */
- hash_acl_build_applied_lookup_bitmap(am, sw_if_index, is_input);
+ hash_acl_build_applied_lookup_bitmap(am, lc_index);
clib_mem_set_heap (oldheap);
}
@@ -600,24 +487,26 @@ hash_acl_unapply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index)
*/
void
-hash_acl_reapply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index)
+hash_acl_reapply(acl_main_t *am, u32 lc_index, int acl_index)
{
- u32 **applied_acls = is_input ? vec_elt_at_index(am->input_acl_vec_by_sw_if_index, sw_if_index)
- : vec_elt_at_index(am->output_acl_vec_by_sw_if_index, sw_if_index);
+ acl_lookup_context_t *acontext = pool_elt_at_index(am->acl_lookup_contexts, lc_index);
+ u32 **applied_acls = &acontext->acl_indices;
int i;
int start_index = vec_search((*applied_acls), acl_index);
+
+ DBG0("Start index for acl %d in lc_index %d is %d", acl_index, lc_index, start_index);
/*
* This function is called after we find out the sw_if_index where ACL is applied.
* If the by-sw_if_index vector does not have the ACL#, then it's a bug.
*/
ASSERT(start_index < vec_len(*applied_acls));
- /* unapply all the ACLs till the current one */
+ /* unapply all the ACLs at the tail side, up to the current one */
for(i = vec_len(*applied_acls) - 1; i > start_index; i--) {
- hash_acl_unapply(am, sw_if_index, is_input, *vec_elt_at_index(*applied_acls, i));
+ hash_acl_unapply(am, lc_index, *vec_elt_at_index(*applied_acls, i));
}
for(i = start_index; i < vec_len(*applied_acls); i++) {
- hash_acl_apply(am, sw_if_index, is_input, *vec_elt_at_index(*applied_acls, i));
+ hash_acl_apply(am, lc_index, *vec_elt_at_index(*applied_acls, i), i);
}
}
@@ -667,9 +556,8 @@ make_mask_and_match_from_rule(fa_5tuple_t *mask, acl_rule_t *r, hash_ace_info_t
memset(&hi->match, 0, sizeof(hi->match));
hi->action = r->is_permit;
- /* we will need to be matching based on sw_if_index, direction, and mask_type_index when applied */
- mask->pkt.sw_if_index = ~0;
- mask->pkt.is_input = 1;
+ /* we will need to be matching based on lc_index and mask_type_index when applied */
+ mask->pkt.lc_index = ~0;
/* we will assign the match of mask_type_index later when we find it*/
mask->pkt.mask_type_index_lsb = ~0;
@@ -766,6 +654,15 @@ release_mask_type_index(acl_main_t *am, u32 mask_type_index)
}
}
+int hash_acl_exists(acl_main_t *am, int acl_index)
+{
+ if (acl_index >= vec_len(am->hash_acl_infos))
+ return 0;
+
+ hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
+ return ha->hash_acl_exists;
+}
+
void hash_acl_add(acl_main_t *am, int acl_index)
{
void *oldheap = hash_acl_set_heap(am);
@@ -775,6 +672,7 @@ void hash_acl_add(acl_main_t *am, int acl_index)
vec_validate(am->hash_acl_infos, acl_index);
hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
memset(ha, 0, sizeof(*ha));
+ ha->hash_acl_exists = 1;
/* walk the newly added ACL entries and ensure that for each of them there
is a mask type, increment a reference count for that mask type */
@@ -808,16 +706,10 @@ void hash_acl_add(acl_main_t *am, int acl_index)
* if an ACL is applied somewhere, fill the corresponding lookup data structures.
* We need to take care if the ACL is not the last one in the vector of ACLs applied to the interface.
*/
- if (acl_index < vec_len(am->input_sw_if_index_vec_by_acl)) {
- u32 *sw_if_index;
- vec_foreach(sw_if_index, am->input_sw_if_index_vec_by_acl[acl_index]) {
- hash_acl_reapply(am, *sw_if_index, 1, acl_index);
- }
- }
- if (acl_index < vec_len(am->output_sw_if_index_vec_by_acl)) {
- u32 *sw_if_index;
- vec_foreach(sw_if_index, am->output_sw_if_index_vec_by_acl[acl_index]) {
- hash_acl_reapply(am, *sw_if_index, 0, acl_index);
+ if (acl_index < vec_len(am->lc_index_vec_by_acl)) {
+ u32 *lc_index;
+ vec_foreach(lc_index, am->lc_index_vec_by_acl[acl_index]) {
+ hash_acl_reapply(am, *lc_index, acl_index);
}
}
clib_mem_set_heap (oldheap);
@@ -841,18 +733,14 @@ void hash_acl_delete(acl_main_t *am, int acl_index)
* has to be handled.
*/
hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
- u32 *interface_list_copy = 0;
+ u32 *lc_list_copy = 0;
{
- u32 *sw_if_index;
- interface_list_copy = vec_dup(ha->inbound_sw_if_index_list);
- vec_foreach(sw_if_index, interface_list_copy) {
- hash_acl_unapply(am, *sw_if_index, 1, acl_index);
- }
- vec_free(interface_list_copy);
- interface_list_copy = vec_dup(ha->outbound_sw_if_index_list);
- vec_foreach(sw_if_index, interface_list_copy) {
- hash_acl_unapply(am, *sw_if_index, 0, acl_index);
+ u32 *lc_index;
+ lc_list_copy = vec_dup(ha->lc_index_list);
+ vec_foreach(lc_index, lc_list_copy) {
+ hash_acl_unapply(am, *lc_index, acl_index);
}
+ vec_free(lc_list_copy);
}
/* walk the mask types for the ACL about-to-be-deleted, and decrease
@@ -862,32 +750,153 @@ void hash_acl_delete(acl_main_t *am, int acl_index)
release_mask_type_index(am, ha->rules[i].mask_type_index);
}
clib_bitmap_free(ha->mask_type_index_bitmap);
+ ha->hash_acl_exists = 0;
vec_free(ha->rules);
clib_mem_set_heap (oldheap);
}
-u8
-hash_multi_acl_match_5tuple (u32 sw_if_index, fa_5tuple_t * pkt_5tuple, int is_l2,
- int is_ip6, int is_input, u32 * acl_match_p,
- u32 * rule_match_p, u32 * trace_bitmap)
+
+void
+show_hash_acl_hash (vlib_main_t * vm, acl_main_t *am, u32 verbose)
+{
+ vlib_cli_output(vm, "\nACL lookup hash table:\n%U\n",
+ BV (format_bihash), &am->acl_lookup_hash, verbose);
+}
+
+void
+acl_plugin_show_tables_mask_type (void)
{
acl_main_t *am = &acl_main;
- applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, is_input, sw_if_index);
- u32 match_index = multi_acl_match_get_applied_ace_index(am, pkt_5tuple);
- if (match_index < vec_len((*applied_hash_aces))) {
- applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), match_index);
- pae->hitcount++;
- *acl_match_p = pae->acl_index;
- *rule_match_p = pae->ace_index;
- return pae->action;
- }
- return 0;
+ vlib_main_t *vm = am->vlib_main;
+ ace_mask_type_entry_t *mte;
+
+ vlib_cli_output (vm, "Mask-type entries:");
+ /* *INDENT-OFF* */
+ pool_foreach(mte, am->ace_mask_type_pool,
+ ({
+ vlib_cli_output(vm, " %3d: %016llx %016llx %016llx %016llx %016llx %016llx refcount %d",
+ mte - am->ace_mask_type_pool,
+ mte->mask.kv.key[0], mte->mask.kv.key[1], mte->mask.kv.key[2],
+ mte->mask.kv.key[3], mte->mask.kv.key[4], mte->mask.kv.value, mte->refcount);
+ }));
+ /* *INDENT-ON* */
}
+void
+acl_plugin_show_tables_acl_hash_info (u32 acl_index)
+{
+ acl_main_t *am = &acl_main;
+ vlib_main_t *vm = am->vlib_main;
+ u32 i, j;
+ u64 *m;
+ vlib_cli_output (vm, "Mask-ready ACL representations\n");
+ for (i = 0; i < vec_len (am->hash_acl_infos); i++)
+ {
+ if ((acl_index != ~0) && (acl_index != i))
+ {
+ continue;
+ }
+ hash_acl_info_t *ha = &am->hash_acl_infos[i];
+ vlib_cli_output (vm, "acl-index %u bitmask-ready layout\n", i);
+ vlib_cli_output (vm, " applied lc_index list: %U\n",
+ format_vec32, ha->lc_index_list, "%d");
+ vlib_cli_output (vm, " mask type index bitmap: %U\n",
+ format_bitmap_hex, ha->mask_type_index_bitmap);
+ for (j = 0; j < vec_len (ha->rules); j++)
+ {
+ hash_ace_info_t *pa = &ha->rules[j];
+ m = (u64 *) & pa->match;
+ vlib_cli_output (vm,
+ " %4d: %016llx %016llx %016llx %016llx %016llx %016llx mask index %d acl %d rule %d action %d src/dst portrange not ^2: %d,%d\n",
+ j, m[0], m[1], m[2], m[3], m[4], m[5],
+ pa->mask_type_index, pa->acl_index, pa->ace_index,
+ pa->action, pa->src_portrange_not_powerof2,
+ pa->dst_portrange_not_powerof2);
+ }
+ }
+}
void
-show_hash_acl_hash (vlib_main_t * vm, acl_main_t *am, u32 verbose)
+acl_plugin_print_pae (vlib_main_t * vm, int j, applied_hash_ace_entry_t * pae)
{
- vlib_cli_output(vm, "\nACL lookup hash table:\n%U\n",
- BV (format_bihash), &am->acl_lookup_hash, verbose);
+ vlib_cli_output (vm,
+ " %4d: acl %d rule %d action %d bitmask-ready rule %d next %d prev %d tail %d hitcount %lld",
+ j, pae->acl_index, pae->ace_index, pae->action,
+ pae->hash_ace_info_index, pae->next_applied_entry_index,
+ pae->prev_applied_entry_index,
+ pae->tail_applied_entry_index, pae->hitcount);
}
+
+void
+acl_plugin_show_tables_applied_info (u32 sw_if_index)
+{
+ acl_main_t *am = &acl_main;
+ vlib_main_t *vm = am->vlib_main;
+ u32 swi; //, j;
+ vlib_cli_output (vm, "Applied lookup entries for interfaces");
+
+ for (swi = 0;
+ (swi < vec_len (am->input_lc_index_by_sw_if_index))
+ || (swi < vec_len (am->output_lc_index_by_sw_if_index)); swi++)
+ {
+ if ((sw_if_index != ~0) && (sw_if_index != swi))
+ {
+ continue;
+ }
+/*
+ vlib_cli_output (vm, "sw_if_index %d:", swi);
+ if (swi < vec_len (am->input_applied_hash_acl_info_by_sw_if_index))
+ {
+ applied_hash_acl_info_t *pal =
+ &am->input_applied_hash_acl_info_by_sw_if_index[swi];
+ vlib_cli_output (vm, " input lookup mask_type_index_bitmap: %U",
+ format_bitmap_hex, pal->mask_type_index_bitmap);
+ vlib_cli_output (vm, " input applied acls: %U", format_vec32,
+ pal->applied_acls, "%d");
+ }
+ if (swi < vec_len (am->input_hash_entry_vec_by_sw_if_index))
+ {
+ vlib_cli_output (vm, " input lookup applied entries:");
+ for (j = 0;
+ j < vec_len (am->input_hash_entry_vec_by_sw_if_index[swi]);
+ j++)
+ {
+ acl_plugin_print_pae (vm, j,
+ &am->input_hash_entry_vec_by_sw_if_index
+ [swi][j]);
+ }
+ }
+
+ if (swi < vec_len (am->output_applied_hash_acl_info_by_sw_if_index))
+ {
+ applied_hash_acl_info_t *pal =
+ &am->output_applied_hash_acl_info_by_sw_if_index[swi];
+ vlib_cli_output (vm, " output lookup mask_type_index_bitmap: %U",
+ format_bitmap_hex, pal->mask_type_index_bitmap);
+ vlib_cli_output (vm, " output applied acls: %U", format_vec32,
+ pal->applied_acls, "%d");
+ }
+ if (swi < vec_len (am->output_hash_entry_vec_by_sw_if_index))
+ {
+ vlib_cli_output (vm, " output lookup applied entries:");
+ for (j = 0;
+ j < vec_len (am->output_hash_entry_vec_by_sw_if_index[swi]);
+ j++)
+ {
+ acl_plugin_print_pae (vm, j,
+ &am->output_hash_entry_vec_by_sw_if_index
+ [swi][j]);
+ }
+ }
+*/
+ }
+}
+
+void
+acl_plugin_show_tables_bihash (u32 show_bihash_verbose)
+{
+ acl_main_t *am = &acl_main;
+ vlib_main_t *vm = am->vlib_main;
+ show_hash_acl_hash (vm, am, show_bihash_verbose);
+}
+
diff --git a/src/plugins/acl/hash_lookup.h b/src/plugins/acl/hash_lookup.h
index 2d7058e80ee..e401d3cbc8b 100644
--- a/src/plugins/acl/hash_lookup.h
+++ b/src/plugins/acl/hash_lookup.h
@@ -19,6 +19,7 @@
#define _ACL_HASH_LOOKUP_H_
#include <stddef.h>
+#include "lookup_context.h"
#include "acl.h"
/*
@@ -26,11 +27,11 @@
* during the packet processing
*/
-void hash_acl_apply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index);
+void hash_acl_apply(acl_main_t *am, u32 lc_index, int acl_index, u32 acl_position);
-/* Remove the ACL from the packet processing lookups on a given interface */
+/* Remove the ACL from the packet processing in a given lookup context */
-void hash_acl_unapply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_index);
+void hash_acl_unapply(acl_main_t *am, u32 lc_index, int acl_index);
/*
* Add an ACL or delete an ACL. ACL may already have been referenced elsewhere,
@@ -40,25 +41,7 @@ void hash_acl_unapply(acl_main_t *am, u32 sw_if_index, u8 is_input, int acl_inde
void hash_acl_add(acl_main_t *am, int acl_index);
void hash_acl_delete(acl_main_t *am, int acl_index);
-/*
- * Do the work required to match a given 5-tuple from the packet,
- * and return the action as well as populate the values pointed
- * to by the *_match_p pointers and maybe trace_bitmap.
- */
-
-u8
-hash_multi_acl_match_5tuple (u32 sw_if_index, fa_5tuple_t * pkt_5tuple, int is_l2,
- int is_ip6, int is_input, u32 * acl_match_p,
- u32 * rule_match_p, u32 * trace_bitmap);
-
-
-/*
- * The debug function to show the contents of the ACL lookup hash
- */
-void show_hash_acl_hash(vlib_main_t * vm, acl_main_t *am, u32 verbose);
-
-/* Debug functions to turn validate/trace on and off */
-void acl_plugin_hash_acl_set_validate_heap(acl_main_t *am, int on);
-void acl_plugin_hash_acl_set_trace_heap(acl_main_t *am, int on);
+/* return if there is already a filled-in hash acl info */
+int hash_acl_exists(acl_main_t *am, int acl_index);
#endif
diff --git a/src/plugins/acl/hash_lookup_types.h b/src/plugins/acl/hash_lookup_types.h
index 1fa197ec978..1a20ebff8f8 100644
--- a/src/plugins/acl/hash_lookup_types.h
+++ b/src/plugins/acl/hash_lookup_types.h
@@ -38,10 +38,11 @@ typedef struct {
typedef struct {
/* The mask types present in this ACL */
uword *mask_type_index_bitmap;
- /* hash ACL applied on these interfaces */
- u32 *inbound_sw_if_index_list;
- u32 *outbound_sw_if_index_list;
+ /* hash ACL applied on these lookup contexts */
+ u32 *lc_index_list;
hash_ace_info_t *rules;
+ /* a boolean flag set when the hash acl info is initialized */
+ int hash_acl_exists;
} hash_acl_info_t;
typedef struct {
@@ -69,6 +70,10 @@ typedef struct {
*/
u64 hitcount;
/*
+ * acl position in vector of ACLs within lookup context
+ */
+ u32 acl_position;
+ /*
* Action of this applied ACE
*/
u8 action;
diff --git a/src/plugins/acl/lookup_context.c b/src/plugins/acl/lookup_context.c
new file mode 100644
index 00000000000..a4c9647776a
--- /dev/null
+++ b/src/plugins/acl/lookup_context.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/acl/acl.h>
+#include <plugins/acl/fa_node.h>
+#include <plugins/acl/public_inlines.h>
+#include <vlib/unix/plugin.h>
+#include "hash_lookup.h"
+#include "elog_acl_trace.h"
+
+/* check if a given ACL exists */
+u8 acl_plugin_acl_exists (u32 acl_index);
+
+/* Look up the registered user by module name; register a new one if absent.
+ * Returns the pool index of the user record.
+ * NOTE(review): the name/label pointers are stored as-is (no copy), so the
+ * caller-supplied strings must outlive the registration - confirm callers
+ * pass string literals / static storage. */
+static u32 get_acl_user_id(acl_main_t *am, char *user_module_name, char *val1_label, char *val2_label)
+{
+  acl_lookup_context_user_t *auser;
+
+  pool_foreach (auser, am->acl_users,
+  ({
+    if (0 == strcmp(auser->user_module_name, user_module_name)) {
+      return (auser - am->acl_users);
+    }
+  }));
+
+  pool_get(am->acl_users, auser);
+  auser->user_module_name = user_module_name;
+  auser->val1_label = val1_label;
+  auser->val2_label = val2_label;
+  return (auser - am->acl_users);
+}
+
+/* Return 1 if acl_user_id refers to a live entry in the users pool, else 0. */
+static int acl_user_id_valid(acl_main_t *am, u32 acl_user_id)
+{
+
+  if (pool_is_free_index (am->acl_users, acl_user_id))
+    return 0;
+
+  return 1;
+}
+
+/* Return 1 if lc_index refers to a live lookup context in the pool, else 0. */
+static int acl_lc_index_valid(acl_main_t *am, u32 lc_index)
+{
+
+  if (pool_is_free_index (am->acl_lookup_contexts, lc_index))
+    return 0;
+
+  return 1;
+}
+
+/*
+ * If you are using ACL plugin, get this unique ID first,
+ * so you can identify yourself when creating the lookup contexts.
+ * Registration is idempotent per module name: calling again with the same
+ * name returns the existing user id (the labels are then left unchanged).
+ */
+
+u32 acl_plugin_register_user_module (char *user_module_name, char *val1_label, char *val2_label)
+{
+  acl_main_t *am = &acl_main;
+  u32 user_id = get_acl_user_id(am, user_module_name, val1_label, val2_label);
+  return user_id;
+}
+
+/*
+ * Allocate a new lookup context index.
+ * Supply the id assigned to your module during registration,
+ * and two values of your choice identifying instances
+ * of use within your module. They are useful for debugging.
+ * If >= 0 - context id. If < 0 - error code
+ * (VNET_API_ERROR_INVALID_REGISTRATION if acl_user_id is unknown).
+ */
+
+int acl_plugin_get_lookup_context_index (u32 acl_user_id, u32 val1, u32 val2)
+{
+  acl_main_t *am = &acl_main;
+  acl_lookup_context_t *acontext;
+
+  if (!acl_user_id_valid(am, acl_user_id))
+    return VNET_API_ERROR_INVALID_REGISTRATION;
+
+  /* pool_get() returns uninitialized memory - set every field we use */
+  pool_get(am->acl_lookup_contexts, acontext);
+  acontext->acl_indices = 0;
+  acontext->context_user_id = acl_user_id;
+  acontext->user_val1 = val1;
+  acontext->user_val2 = val2;
+
+  /* remember the context in the owning user's list for debug/show output */
+  u32 new_context_id = acontext - am->acl_lookup_contexts;
+  vec_add1(am->acl_users[acl_user_id].lookup_contexts, new_context_id);
+  return new_context_id;
+}
+
+/* Record that lookup context lc_index now references ACL 'acl'.
+ * NOTE(review): vec_add1 is unconditional, so locking twice leaves a
+ * duplicate entry; unlock_acl() removes only one occurrence per call. */
+static void
+lock_acl(acl_main_t *am, u32 acl, u32 lc_index)
+{
+  vec_validate(am->lc_index_vec_by_acl, acl);
+  elog_acl_cond_trace_X2(am, (am->trace_acl), "lock acl %d in lc_index %d", "i4i4", acl, lc_index);
+  vec_add1(am->lc_index_vec_by_acl[acl], lc_index);
+}
+
+/* Lock every ACL in the vector 'acls' on behalf of context lc_index. */
+static void
+lock_acl_vec(u32 lc_index, u32 *acls)
+{
+  int i;
+  acl_main_t *am = &acl_main;
+  for(i=0; i<vec_len(acls); i++) {
+    lock_acl(am, acls[i], lc_index);
+  }
+}
+
+/* Remove one reference of lc_index from ACL 'acl's back-pointer list;
+ * warn if none was present (indicates a lock/unlock imbalance). */
+static void
+unlock_acl(acl_main_t *am, u32 acl, u32 lc_index)
+{
+  vec_validate(am->lc_index_vec_by_acl, acl);
+  elog_acl_cond_trace_X2(am, (am->trace_acl), "unlock acl %d in lc_index %d", "i4i4", acl, lc_index);
+  u32 index = vec_search(am->lc_index_vec_by_acl[acl], lc_index);
+  if (index != ~0)
+    vec_del1(am->lc_index_vec_by_acl[acl], index);
+  else
+    clib_warning("BUG: can not unlock acl %d lc_index %d", acl, lc_index);
+}
+
+/* Unlock every ACL in the vector 'acls' previously locked for lc_index. */
+static void
+unlock_acl_vec(u32 lc_index, u32 *acls)
+{
+  int i;
+  acl_main_t *am = &acl_main;
+  for(i=0; i<vec_len(acls); i++)
+    unlock_acl(am, acls[i], lc_index);
+}
+
+
+/* Apply each ACL of the vector to lc_index, passing its position i so the
+ * hash lookup can preserve the ACLs' relative precedence. */
+static void
+apply_acl_vec(u32 lc_index, u32 *acls)
+{
+  int i;
+  acl_main_t *am = &acl_main;
+
+  for(i=0; i<vec_len(acls); i++)
+    hash_acl_apply(am, lc_index, acls[i], i);
+}
+
+
+/* Unapply all ACLs of the vector from lc_index, in reverse order of how
+ * they were applied. */
+static void
+unapply_acl_vec(u32 lc_index, u32 *acls)
+{
+  int i;
+  acl_main_t *am = &acl_main;
+  if (vec_len(acls) == 0)
+    return;
+  for(i=vec_len(acls); i > 0; i--)
+    hash_acl_unapply(am, lc_index, acls[i-1]);
+}
+
+/*
+ * Release the lookup context index and destroy
+ * any associated data structures.
+ * Sequence: detach the context from its owning user's list, unapply and
+ * unlock all its ACLs, free the ACL vector, then return the pool entry.
+ * An invalid lc_index is logged as a bug and ignored.
+ */
+void acl_plugin_put_lookup_context_index (u32 lc_index)
+{
+  acl_main_t *am = &acl_main;
+  elog_acl_cond_trace_X1(am, (am->trace_acl), "LOOKUP-CONTEXT: put-context lc_index %d", "i4", lc_index);
+  if (!acl_lc_index_valid(am, lc_index)) {
+    clib_warning("BUG: lc_index %d is not valid", lc_index);
+    return;
+  }
+  acl_lookup_context_t *acontext = pool_elt_at_index(am->acl_lookup_contexts, lc_index);
+
+  u32 index = vec_search(am->acl_users[acontext->context_user_id].lookup_contexts, lc_index);
+  ASSERT(index != ~0);
+
+  vec_del1(am->acl_users[acontext->context_user_id].lookup_contexts, index);
+  unapply_acl_vec(lc_index, acontext->acl_indices);
+  unlock_acl_vec(lc_index, acontext->acl_indices);
+  vec_free(acontext->acl_indices);
+  pool_put(am->acl_lookup_contexts, acontext);
+}
+
+/*
+ * Prepare the sequential vector of ACL#s to lookup within a given context.
+ * Any existing list will be overwritten. acl_list is a vector.
+ * Returns 0 on success, -1 if lc_index is invalid.
+ * The old list is unapplied/unlocked before the new one is locked/applied,
+ * so the context briefly has no ACLs applied during the swap.
+ */
+int acl_plugin_set_acl_vec_for_context (u32 lc_index, u32 *acl_list)
+{
+  acl_main_t *am = &acl_main;
+  acl_lookup_context_t *acontext;
+  if (am->trace_acl) {
+    u32 i;
+    elog_acl_cond_trace_X1(am, (1), "LOOKUP-CONTEXT: set-acl-list lc_index %d", "i4", lc_index);
+    for(i=0; i<vec_len(acl_list); i++) {
+      elog_acl_cond_trace_X2(am, (1), "   acl-list[%d]: %d", "i4i4", i, acl_list[i]);
+    }
+  }
+  if (!acl_lc_index_valid(am, lc_index)) {
+    clib_warning("BUG: lc_index %d is not valid", lc_index);
+    return -1;
+  }
+  acontext = pool_elt_at_index(am->acl_lookup_contexts, lc_index);
+  /* install a private copy of the caller's vector, retire the old one */
+  u32 *old_acl_vector = acontext->acl_indices;
+  acontext->acl_indices = vec_dup(acl_list);
+
+  unapply_acl_vec(lc_index, old_acl_vector);
+  unlock_acl_vec(lc_index, old_acl_vector);
+  lock_acl_vec(lc_index, acontext->acl_indices);
+  apply_acl_vec(lc_index, acontext->acl_indices);
+
+  vec_free(old_acl_vector);
+  return 0;
+}
+
+
+/* Keep the hash lookup structures in sync with ACL acl_num:
+ * on add/modify rebuild the hash ACL info, on deletion remove it. */
+void acl_plugin_lookup_context_notify_acl_change(u32 acl_num)
+{
+  acl_main_t *am = &acl_main;
+  if (acl_plugin_acl_exists(acl_num)) {
+    if (hash_acl_exists(am, acl_num)) {
+        /* this is a modification, clean up the older entries */
+        hash_acl_delete(am, acl_num);
+    }
+    hash_acl_add(am, acl_num);
+  } else {
+    /* this is a deletion notification */
+    hash_acl_delete(am, acl_num);
+  }
+}
+
+
+/* Fill the 5-tuple from the packet */
+
+/* Out-of-line wrapper around the inline implementation, so external
+ * consumers can resolve this as a plugin symbol. */
+void acl_plugin_fill_5tuple (u32 lc_index, vlib_buffer_t * b0, int is_ip6, int is_input,
+                   int is_l2_path, fa_5tuple_opaque_t * p5tuple_pkt)
+{
+  acl_plugin_fill_5tuple_inline(lc_index, b0, is_ip6, is_input, is_l2_path, p5tuple_pkt);
+}
+
+/* Out-of-line wrapper around the inline matcher, so external consumers can
+ * resolve this as a plugin symbol. */
+int acl_plugin_match_5tuple (u32 lc_index,
+                                           fa_5tuple_opaque_t * pkt_5tuple,
+                                           int is_ip6, u8 * r_action,
+                                           u32 * r_acl_pos_p,
+                                           u32 * r_acl_match_p,
+                                           u32 * r_rule_match_p,
+                                           u32 * trace_bitmap)
+{
+  return acl_plugin_match_5tuple_inline (lc_index, pkt_5tuple, is_ip6, r_action, r_acl_pos_p, r_acl_match_p, r_rule_match_p, trace_bitmap);
+}
+
+
+/* CLI debug: list registered lookup users; user_index == ~0 prints all. */
+void
+acl_plugin_show_lookup_user (u32 user_index)
+{
+  acl_main_t *am = &acl_main;
+  vlib_main_t *vm = am->vlib_main;
+  acl_lookup_context_user_t *auser;
+
+  pool_foreach (auser, am->acl_users,
+  ({
+    u32 curr_user_index = (auser - am->acl_users);
+    if (user_index == ~0 || (curr_user_index == user_index)) {
+      vlib_cli_output (vm, "index %d:%s:%s:%s", curr_user_index, auser->user_module_name, auser->val1_label, auser->val2_label);
+    }
+  }));
+}
+
+
+/* CLI debug: print one lookup context (or all if lc_index == ~0), resolving
+ * the owning user's labels when the user record is still valid. */
+void
+acl_plugin_show_lookup_context (u32 lc_index)
+{
+  acl_main_t *am = &acl_main;
+  vlib_main_t *vm = am->vlib_main;
+  acl_lookup_context_t *acontext;
+  // clib_warning("LOOKUP-CONTEXT: lc_index %d acl_list [ %U ]", lc_index, format_vec32, acl_list, "%d");
+  if (!am->acl_lookup_contexts)
+  {
+    vlib_cli_output(vm, "ACL lookup contexts are not initialized");
+    return;
+  }
+
+  pool_foreach (acontext, am->acl_lookup_contexts,
+  ({
+    u32 curr_lc_index = (acontext - am->acl_lookup_contexts);
+    if ((lc_index == ~0) || (curr_lc_index == lc_index)) {
+      if (acl_user_id_valid(am, acontext->context_user_id)) {
+        acl_lookup_context_user_t *auser = pool_elt_at_index(am->acl_users, acontext->context_user_id);
+        vlib_cli_output (vm, "index %d:%s %s: %d %s: %d, acl_indices: %U",
+                         curr_lc_index, auser->user_module_name, auser->val1_label,
+                         acontext->user_val1, auser->val2_label, acontext->user_val2,
+                         format_vec32, acontext->acl_indices, "%d");
+      } else {
+        vlib_cli_output (vm, "index %d: user_id: %d user_val1: %d user_val2: %d, acl_indices: %U",
+                         curr_lc_index, acontext->context_user_id,
+                         acontext->user_val1, acontext->user_val2,
+                         format_vec32, acontext->acl_indices, "%d");
+      }
+    }
+  }));
+}
diff --git a/src/plugins/acl/lookup_context.h b/src/plugins/acl/lookup_context.h
new file mode 100644
index 00000000000..f5888a9c6fb
--- /dev/null
+++ b/src/plugins/acl/lookup_context.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_acl_lookup_context_h
+#define included_acl_lookup_context_h
+
+/* Registration record for one client ("user") of the ACL lookup machinery. */
+typedef struct {
+  /* A name of the portion of the code using the ACL infra */
+  char *user_module_name;
+  /* text label for the first u32 user value assigned to context */
+  char *val1_label;
+  /* text label for the second u32 user value assigned to context */
+  char *val2_label;
+  /* vector of lookup contexts of this user */
+  u32 *lookup_contexts;
+} acl_lookup_context_user_t;
+
+/* A lookup context: an ordered vector of ACL indices, owned by one
+ * registered user, that is looked up as a unit. */
+typedef struct {
+  /* vector of acl #s within this context */
+  u32 *acl_indices;
+  /* index of corresponding acl_lookup_context_user_t */
+  u32 context_user_id;
+  /* per-instance user value 1 */
+  u32 user_val1;
+  /* per-instance user value 2 */
+  u32 user_val2;
+} acl_lookup_context_t;
+
+void acl_plugin_lookup_context_notify_acl_change(u32 acl_num);
+
+void acl_plugin_show_lookup_context (u32 lc_index);
+void acl_plugin_show_lookup_user (u32 user_index);
+
+
+/* These are in the hash matching for now */
+void acl_plugin_show_tables_mask_type (void);
+void acl_plugin_show_tables_acl_hash_info (u32 acl_index);
+void acl_plugin_show_tables_applied_info (u32 sw_if_index);
+void acl_plugin_show_tables_bihash (u32 show_bihash_verbose);
+
+/* Debug functions to turn validate/trace on and off */
+void acl_plugin_hash_acl_set_validate_heap(int on);
+void acl_plugin_hash_acl_set_trace_heap(int on);
+
+
+
+#endif
+
diff --git a/src/plugins/acl/public_inlines.h b/src/plugins/acl/public_inlines.h
new file mode 100644
index 00000000000..4878d15a95c
--- /dev/null
+++ b/src/plugins/acl/public_inlines.h
@@ -0,0 +1,731 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_acl_inlines_h
+#define included_acl_inlines_h
+
+
+#include <plugins/acl/acl.h>
+#include <plugins/acl/fa_node.h>
+#include <plugins/acl/hash_lookup_private.h>
+
+
+/* check if a given ACL exists */
+
+#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
+u8 (*acl_plugin_acl_exists) (u32 acl_index);
+#else
+u8 acl_plugin_acl_exists (u32 acl_index);
+#endif
+
+
+/*
+ * If you are using ACL plugin, get this unique ID first,
+ * so you can identify yourself when creating the lookup contexts.
+ */
+
+#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
+u32 (*acl_plugin_register_user_module) (char *caller_module_string, char *val1_label, char *val2_label);
+#else
+u32 acl_plugin_register_user_module (char *caller_module_string, char *val1_label, char *val2_label);
+#endif
+
+/*
+ * Allocate a new lookup context index.
+ * Supply the id assigned to your module during registration,
+ * and two values of your choice identifying instances
+ * of use within your module. They are useful for debugging.
+ */
+#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
+int (*acl_plugin_get_lookup_context_index) (u32 acl_user_id, u32 val1, u32 val2);
+#else
+int acl_plugin_get_lookup_context_index (u32 acl_user_id, u32 val1, u32 val2);
+#endif
+
+/*
+ * Release the lookup context index and destroy
+ * any associated data structures.
+ */
+#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
+void (*acl_plugin_put_lookup_context_index) (u32 lc_index);
+#else
+void acl_plugin_put_lookup_context_index (u32 lc_index);
+#endif
+
+/*
+ * Prepare the sequential vector of ACL#s to lookup within a given context.
+ * Any existing list will be overwritten. acl_list is a vector.
+ */
+#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
+int (*acl_plugin_set_acl_vec_for_context) (u32 lc_index, u32 *acl_list);
+#else
+int acl_plugin_set_acl_vec_for_context (u32 lc_index, u32 *acl_list);
+#endif
+
+/* Fill the 5-tuple from the packet */
+
+#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
+void (*acl_plugin_fill_5tuple) (u32 lc_index, vlib_buffer_t * b0, int is_ip6, int is_input,
+ int is_l2_path, fa_5tuple_opaque_t * p5tuple_pkt);
+#else
+void acl_plugin_fill_5tuple (u32 lc_index, vlib_buffer_t * b0, int is_ip6, int is_input,
+ int is_l2_path, fa_5tuple_opaque_t * p5tuple_pkt);
+#endif
+
+#ifdef ACL_PLUGIN_DEFINED_BELOW_IN_FILE
+/* Placeholder compiled only when ACL_PLUGIN_DEFINED_BELOW_IN_FILE is set.
+ * NOTE(review): if that macro were ever defined, this would clash with the
+ * unconditional always_inline definition later in this file - confirm the
+ * macro is intentionally never defined. */
+static inline
+void acl_plugin_fill_5tuple_inline (u32 lc_index, vlib_buffer_t * b0, int is_ip6, int is_input,
+                   int is_l2_path, fa_5tuple_opaque_t * p5tuple_pkt) {
+  /* FIXME: normally the inlined version of filling in the 5-tuple. But for now just call the non-inlined version */
+  acl_plugin_fill_5tuple(lc_index, b0, is_ip6, is_input, is_l2_path, p5tuple_pkt);
+}
+#endif
+
+
+#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
+int (*acl_plugin_match_5tuple) (u32 lc_index,
+ fa_5tuple_opaque_t * pkt_5tuple,
+ int is_ip6, u8 * r_action,
+ u32 * r_acl_pos_p,
+ u32 * r_acl_match_p,
+ u32 * r_rule_match_p,
+ u32 * trace_bitmap);
+#else
+int acl_plugin_match_5tuple (u32 lc_index,
+ fa_5tuple_opaque_t * pkt_5tuple,
+ int is_ip6, u8 * r_action,
+ u32 * r_acl_pos_p,
+ u32 * r_acl_match_p,
+ u32 * r_rule_match_p,
+ u32 * trace_bitmap);
+#endif
+
+#ifdef ACL_PLUGIN_DEFINED_BELOW_IN_FILE
+/* Placeholder compiled only when ACL_PLUGIN_DEFINED_BELOW_IN_FILE is set;
+ * simply forwards to the (possibly plugin-resolved) non-inline matcher. */
+static inline int
+acl_plugin_match_5tuple_inline (u32 lc_index,
+                                           fa_5tuple_opaque_t * pkt_5tuple,
+                                           int is_ip6, u8 * r_action,
+                                           u32 * r_acl_pos_p,
+                                           u32 * r_acl_match_p,
+                                           u32 * r_rule_match_p,
+                                           u32 * trace_bitmap) {
+  return acl_plugin_match_5tuple(lc_index, pkt_5tuple, is_ip6, r_action, r_acl_pos_p, r_acl_match_p, r_rule_match_p, trace_bitmap);
+}
+#endif
+
+#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
+
+/* Resolve symbol 's' from plugin 'p' at runtime into the like-named
+ * function pointer. NOTE: on failure this macro executes "return" from the
+ * *enclosing* function with a clib_error_t (hidden control flow) - only use
+ * it inside functions returning clib_error_t *. */
+#define LOAD_SYMBOL_FROM_PLUGIN(p, s)                                     \
+({                                                                        \
+    s = vlib_get_plugin_symbol(p, #s);                                    \
+    if (!s)                                                               \
+        return clib_error_return(0,                                       \
+                "Plugin %s and/or symbol %s not found.", p, #s);          \
+})
+
+#define LOAD_SYMBOL(s) LOAD_SYMBOL_FROM_PLUGIN("acl_plugin.so", s)
+
+/* Populate the exported-function pointers from the loaded acl plugin.
+ * Returns NULL on success, or a clib error if the plugin or any symbol
+ * is missing. */
+static inline clib_error_t * acl_plugin_exports_init (void)
+{
+    LOAD_SYMBOL(acl_plugin_acl_exists);
+    LOAD_SYMBOL(acl_plugin_register_user_module);
+    LOAD_SYMBOL(acl_plugin_get_lookup_context_index);
+    LOAD_SYMBOL(acl_plugin_put_lookup_context_index);
+    LOAD_SYMBOL(acl_plugin_set_acl_vec_for_context);
+    LOAD_SYMBOL(acl_plugin_fill_5tuple);
+    LOAD_SYMBOL(acl_plugin_match_5tuple);
+    return 0;
+}
+
+#endif
+
+
+
+/* Return a raw pointer 'offset' bytes into the current data of b0.
+ * No bounds checking - callers gate on offset_within_packet() first. */
+always_inline void *
+get_ptr_to_offset (vlib_buffer_t * b0, int offset)
+{
+  u8 *p = vlib_buffer_get_current (b0) + offset;
+  return p;
+}
+
+/* True if 'offset' plus an 8-byte read stays within the buffer's current
+ * data. (current_length - 8 is promoted to int, so short packets yield a
+ * negative value and the comparison correctly fails.) */
+always_inline int
+offset_within_packet (vlib_buffer_t * b0, int offset)
+{
+  /* For the purposes of this code, "within" means we have at least 8 bytes after it */
+  return (offset <= (b0->current_length - 8));
+}
+
+/*
+ * Extract the 5-tuple (src/dst address, protocol, ports or ICMP type/code)
+ * from the packet in b0 into p5tuple_pkt, along with the per-packet flags
+ * (l4_valid, is_nonfirst_fragment, tcp_flags/tcp_flags_valid).
+ * Walks IPv6 extension headers and handles IPv4/IPv6 fragments; for
+ * non-first fragments the L4 offset is pushed past the buffer so no L4
+ * data is extracted.
+ */
+always_inline void
+acl_fill_5tuple (acl_main_t * am, vlib_buffer_t * b0, int is_ip6,
+		 int is_input, int is_l2_path, fa_5tuple_t * p5tuple_pkt)
+{
+  /* IP4 and IP6 protocol numbers of ICMP */
+  static u8 icmp_protos_v4v6[] = { IP_PROTOCOL_ICMP, IP_PROTOCOL_ICMP6 };
+
+  int l3_offset;
+  int l4_offset;
+  u16 ports[2];
+  u16 proto;
+
+  if (is_l2_path)
+    {
+      l3_offset = ethernet_buffer_header_size(b0);
+    }
+  else
+    {
+      if (is_input)
+        l3_offset = 0;
+      else
+        l3_offset = vnet_buffer(b0)->ip.save_rewrite_length;
+    }
+
+  /* key[0..3] contains src/dst address and is cleared/set below */
+  /* Remainder of the key and per-packet non-key data */
+  p5tuple_pkt->kv.key[4] = 0;
+  p5tuple_pkt->kv.value = 0;
+
+  if (is_ip6)
+    {
+      clib_memcpy (&p5tuple_pkt->addr,
+		   get_ptr_to_offset (b0,
+				      offsetof (ip6_header_t,
+						src_address) + l3_offset),
+		   sizeof (p5tuple_pkt->addr));
+      proto =
+	*(u8 *) get_ptr_to_offset (b0,
+				   offsetof (ip6_header_t,
+					     protocol) + l3_offset);
+      l4_offset = l3_offset + sizeof (ip6_header_t);
+#ifdef FA_NODE_VERBOSE_DEBUG
+      clib_warning ("ACL_FA_NODE_DBG: proto: %d, l4_offset: %d", proto,
+		    l4_offset);
+#endif
+      /* IP6 EH handling is here, increment l4_offset if needs to, update the proto */
+      int need_skip_eh = clib_bitmap_get (am->fa_ipv6_known_eh_bitmap, proto);
+      if (PREDICT_FALSE (need_skip_eh))
+	{
+	  while (need_skip_eh && offset_within_packet (b0, l4_offset))
+	    {
+	      /* Fragment header needs special handling */
+	      if (PREDICT_FALSE(ACL_EH_FRAGMENT == proto))
+	        {
+	          proto = *(u8 *) get_ptr_to_offset (b0, l4_offset);
+		  u16 frag_offset;
+		  clib_memcpy (&frag_offset, get_ptr_to_offset (b0, 2 + l4_offset), sizeof(frag_offset));
+		  /* upper 13 bits of the field hold the fragment offset */
+		  frag_offset = clib_net_to_host_u16(frag_offset) >> 3;
+		  if (frag_offset)
+		    {
+                      p5tuple_pkt->pkt.is_nonfirst_fragment = 1;
+                      /* invalidate L4 offset so we don't try to find L4 info */
+                      l4_offset += b0->current_length;
+		    }
+		  else
+		    {
+		      /* First fragment: skip the frag header and move on. */
+		      l4_offset += 8;
+		    }
+		}
+	      else
+		{
+	          u8 nwords = *(u8 *) get_ptr_to_offset (b0, 1 + l4_offset);
+	          proto = *(u8 *) get_ptr_to_offset (b0, l4_offset);
+	          l4_offset += 8 * (1 + (u16) nwords);
+		}
+#ifdef FA_NODE_VERBOSE_DEBUG
+	      clib_warning ("ACL_FA_NODE_DBG: new proto: %d, new offset: %d",
+			    proto, l4_offset);
+#endif
+	      need_skip_eh =
+		clib_bitmap_get (am->fa_ipv6_known_eh_bitmap, proto);
+	    }
+	}
+    }
+  else
+    {
+      p5tuple_pkt->kv.key[0] = 0;
+      p5tuple_pkt->kv.key[1] = 0;
+      p5tuple_pkt->kv.key[2] = 0;
+      p5tuple_pkt->kv.key[3] = 0;
+      clib_memcpy (&p5tuple_pkt->addr[0].ip4,
+		   get_ptr_to_offset (b0,
+				      offsetof (ip4_header_t,
+						src_address) + l3_offset),
+		   sizeof (p5tuple_pkt->addr[0].ip4));
+      clib_memcpy (&p5tuple_pkt->addr[1].ip4,
+		   get_ptr_to_offset (b0,
+				      offsetof (ip4_header_t,
+						dst_address) + l3_offset),
+		   sizeof (p5tuple_pkt->addr[1].ip4));
+      proto =
+	*(u8 *) get_ptr_to_offset (b0,
+				   offsetof (ip4_header_t,
+					     protocol) + l3_offset);
+      l4_offset = l3_offset + sizeof (ip4_header_t);
+      u16 flags_and_fragment_offset;
+      /* keep l3_offset inside the offset expression, consistent with the
+       * other header-field reads (the result address is the same) */
+      clib_memcpy (&flags_and_fragment_offset,
+                   get_ptr_to_offset (b0,
+                                      offsetof (ip4_header_t,
+                                                flags_and_fragment_offset) + l3_offset),
+                   sizeof(flags_and_fragment_offset));
+      flags_and_fragment_offset = clib_net_to_host_u16 (flags_and_fragment_offset);
+
+      /* non-initial fragments have non-zero offset.
+       * BUGFIX: the fragment offset field is 13 bits wide (RFC 791), so the
+       * mask must be 0x1fff; the previous 0xfff missed fragments with
+       * offsets >= 4096 (byte offsets >= 32768). */
+      if ((PREDICT_FALSE(0x1fff & flags_and_fragment_offset)))
+        {
+          p5tuple_pkt->pkt.is_nonfirst_fragment = 1;
+          /* invalidate L4 offset so we don't try to find L4 info */
+          l4_offset += b0->current_length;
+        }
+
+    }
+  p5tuple_pkt->l4.proto = proto;
+  if (PREDICT_TRUE (offset_within_packet (b0, l4_offset)))
+    {
+      p5tuple_pkt->pkt.l4_valid = 1;
+      if (icmp_protos_v4v6[is_ip6] == proto)
+	{
+	  /* type */
+	  p5tuple_pkt->l4.port[0] =
+	    *(u8 *) get_ptr_to_offset (b0,
+				       l4_offset + offsetof (icmp46_header_t,
+							     type));
+	  /* code */
+	  p5tuple_pkt->l4.port[1] =
+	    *(u8 *) get_ptr_to_offset (b0,
+				       l4_offset + offsetof (icmp46_header_t,
+							     code));
+	}
+      else if ((IP_PROTOCOL_TCP == proto) || (IP_PROTOCOL_UDP == proto))
+	{
+	  /* src_port/dst_port are adjacent in both TCP and UDP headers,
+	   * so one copy fetches both */
+	  clib_memcpy (&ports,
+		       get_ptr_to_offset (b0,
+					  l4_offset + offsetof (tcp_header_t,
+								src_port)),
+		       sizeof (ports));
+	  p5tuple_pkt->l4.port[0] = clib_net_to_host_u16 (ports[0]);
+	  p5tuple_pkt->l4.port[1] = clib_net_to_host_u16 (ports[1]);
+
+	  p5tuple_pkt->pkt.tcp_flags =
+	    *(u8 *) get_ptr_to_offset (b0,
+				       l4_offset + offsetof (tcp_header_t,
+							     flags));
+	  p5tuple_pkt->pkt.tcp_flags_valid = (proto == IP_PROTOCOL_TCP);
+	}
+      /*
+       * FIXME: rather than the above conditional, here could
+       * be a nice generic mechanism to extract two L4 values:
+       *
+       * have a per-protocol array of 4 elements like this:
+       *   u8 offset; to take the byte from, off L4 header
+       *   u8 mask; to mask it with, before storing
+       *
+       * this way we can describe UDP, TCP and ICMP[46] semantics,
+       * and add a sort of FPM-type behavior for other protocols.
+       *
+       * Of course, is it faster ? and is it needed ?
+       *
+       */
+    }
+}
+
+/* Inline entry point used by the data-plane nodes: delegates to
+ * acl_fill_5tuple(), treating the opaque 5-tuple as the concrete type. */
+always_inline void
+acl_plugin_fill_5tuple_inline (u32 lc_index, vlib_buffer_t * b0, int is_ip6,
+                 int is_input, int is_l2_path, fa_5tuple_opaque_t * p5tuple_pkt)
+{
+  acl_main_t *am = &acl_main;
+  acl_fill_5tuple(am, b0, is_ip6, is_input, is_l2_path, (fa_5tuple_t *)p5tuple_pkt);
+}
+
+
+
+/*
+ * Compare two addresses under a prefix length of 'prefixlen' bits.
+ * Returns 1 on match, 0 otherwise; prefixlen 0 matches anything.
+ * NOTE(review): addr2 (the rule address) is compared unmasked in the
+ * partial-byte / IPv4 paths, i.e. it is assumed to be pre-masked to its
+ * prefix length by the control plane - confirm against rule setup.
+ */
+always_inline int
+fa_acl_match_addr (ip46_address_t * addr1, ip46_address_t * addr2,
+		   int prefixlen, int is_ip6)
+{
+  if (prefixlen == 0)
+    {
+      /* match any always succeeds */
+      return 1;
+    }
+  if (is_ip6)
+    {
+      if (memcmp (addr1, addr2, prefixlen / 8))
+	{
+	  /* If the starting full bytes do not match, no point in bit-twiddling further */
+	  return 0;
+	}
+      if (prefixlen % 8)
+	{
+	  /* BUGFIX: the memcmp above covered bytes [0, prefixlen/8), so the
+	   * partially-significant byte is at index prefixlen/8; the previous
+	   * "+ 1 + prefixlen / 8" skipped one byte too many and compared the
+	   * wrong octet for non-multiple-of-8 IPv6 prefix lengths. */
+	  u8 b1 = *((u8 *) addr1 + prefixlen / 8);
+	  u8 b2 = *((u8 *) addr2 + prefixlen / 8);
+	  u8 mask0 = (0xff - ((1 << (8 - (prefixlen % 8))) - 1));
+	  return (b1 & mask0) == b2;
+	}
+      else
+	{
+	  /* The prefix fits into integer number of bytes, so nothing left to do */
+	  return 1;
+	}
+    }
+  else
+    {
+      uint32_t a1 = clib_net_to_host_u32 (addr1->ip4.as_u32);
+      uint32_t a2 = clib_net_to_host_u32 (addr2->ip4.as_u32);
+      /* 1u avoids signed-overflow UB for prefixlen == 1 (1 << 31) */
+      uint32_t mask0 = 0xffffffff - ((1u << (32 - prefixlen)) - 1);
+      return (a1 & mask0) == a2;
+    }
+}
+
+/* Inclusive port / ICMP type-code range check. is_ip6 is unused and kept
+ * only to mirror the fa_acl_match_addr() signature. */
+always_inline int
+fa_acl_match_port (u16 port, u16 port_first, u16 port_last, int is_ip6)
+{
+  return ((port >= port_first) && (port <= port_last));
+}
+
+/*
+ * Linear scan of one ACL's rules against the extracted 5-tuple.
+ * On the first matching rule: *r_action is set to the rule's is_permit,
+ * the ACL/rule indices are stored via the optional out-pointers, and 1 is
+ * returned. Returns 0 when no rule matches, or when acl_index does not
+ * exist (deny-by-default for dangling policy references).
+ * NOTE(review): r_action and trace_bitmap are written without NULL checks,
+ * unlike r_acl_match_p / r_rule_match_p - confirm callers always pass them.
+ */
+always_inline int
+single_acl_match_5tuple (acl_main_t * am, u32 acl_index, fa_5tuple_t * pkt_5tuple,
+			 int is_ip6, u8 * r_action, u32 * r_acl_match_p,
+			 u32 * r_rule_match_p, u32 * trace_bitmap)
+{
+  int i;
+  acl_list_t *a;
+  acl_rule_t *r;
+
+  if (pool_is_free_index (am->acls, acl_index))
+    {
+      if (r_acl_match_p)
+	*r_acl_match_p = acl_index;
+      if (r_rule_match_p)
+	*r_rule_match_p = -1;
+      /* the ACL does not exist but is used for policy. Block traffic. */
+      return 0;
+    }
+  a = am->acls + acl_index;
+  for (i = 0; i < a->count; i++)
+    {
+      r = a->rules + i;
+#ifdef FA_NODE_VERBOSE_DEBUG
+      clib_warning("ACL_FA_NODE_DBG acl %d rule %d tag %s", acl_index, i, a->tag);
+#endif
+      /* rule and packet must be the same address family */
+      if (is_ip6 != r->is_ipv6)
+	{
+	  continue;
+	}
+      if (!fa_acl_match_addr
+	  (&pkt_5tuple->addr[1], &r->dst, r->dst_prefixlen, is_ip6))
+	continue;
+
+#ifdef FA_NODE_VERBOSE_DEBUG
+      clib_warning
+	("ACL_FA_NODE_DBG acl %d rule %d pkt dst addr %U match rule addr %U/%d",
+	 acl_index, i, format_ip46_address, &pkt_5tuple->addr[1],
+	 r->is_ipv6 ? IP46_TYPE_IP6: IP46_TYPE_IP4, format_ip46_address,
+	 &r->dst, r->is_ipv6 ? IP46_TYPE_IP6: IP46_TYPE_IP4,
+	 r->dst_prefixlen);
+#endif
+
+      if (!fa_acl_match_addr
+	  (&pkt_5tuple->addr[0], &r->src, r->src_prefixlen, is_ip6))
+	continue;
+
+#ifdef FA_NODE_VERBOSE_DEBUG
+      clib_warning
+	("ACL_FA_NODE_DBG acl %d rule %d pkt src addr %U match rule addr %U/%d",
+	 acl_index, i, format_ip46_address, &pkt_5tuple->addr[0],
+	 r->is_ipv6 ? IP46_TYPE_IP6: IP46_TYPE_IP4, format_ip46_address,
+	 &r->src, r->is_ipv6 ? IP46_TYPE_IP6: IP46_TYPE_IP4,
+	 r->src_prefixlen);
+      clib_warning
+	("ACL_FA_NODE_DBG acl %d rule %d trying to match pkt proto %d with rule %d",
+	 acl_index, i, pkt_5tuple->l4.proto, r->proto);
+#endif
+      /* proto == 0 in the rule means "any protocol": skip all L4 checks */
+      if (r->proto)
+	{
+	  if (pkt_5tuple->l4.proto != r->proto)
+	    continue;
+
+	  if (PREDICT_FALSE (pkt_5tuple->pkt.is_nonfirst_fragment &&
+		     am->l4_match_nonfirst_fragment))
+	  {
+	    /* non-initial fragment with frag match configured - match this rule */
+	    *trace_bitmap |= 0x80000000;
+	    *r_action = r->is_permit;
+	    if (r_acl_match_p)
+	      *r_acl_match_p = acl_index;
+	    if (r_rule_match_p)
+	      *r_rule_match_p = i;
+	    return 1;
+	  }
+
+	  /* A sanity check just to ensure we are about to match the ports extracted from the packet */
+	  if (PREDICT_FALSE (!pkt_5tuple->pkt.l4_valid))
+	    continue;
+
+#ifdef FA_NODE_VERBOSE_DEBUG
+	  clib_warning
+	    ("ACL_FA_NODE_DBG acl %d rule %d pkt proto %d match rule %d",
+	     acl_index, i, pkt_5tuple->l4.proto, r->proto);
+#endif
+
+	  if (!fa_acl_match_port
+	      (pkt_5tuple->l4.port[0], r->src_port_or_type_first,
+	       r->src_port_or_type_last, is_ip6))
+	    continue;
+
+#ifdef FA_NODE_VERBOSE_DEBUG
+	  clib_warning
+	    ("ACL_FA_NODE_DBG acl %d rule %d pkt sport %d match rule [%d..%d]",
+	     acl_index, i, pkt_5tuple->l4.port[0], r->src_port_or_type_first,
+	     r->src_port_or_type_last);
+#endif
+
+	  if (!fa_acl_match_port
+	      (pkt_5tuple->l4.port[1], r->dst_port_or_code_first,
+	       r->dst_port_or_code_last, is_ip6))
+	    continue;
+
+#ifdef FA_NODE_VERBOSE_DEBUG
+	  clib_warning
+	    ("ACL_FA_NODE_DBG acl %d rule %d pkt dport %d match rule [%d..%d]",
+	     acl_index, i, pkt_5tuple->l4.port[1], r->dst_port_or_code_first,
+	     r->dst_port_or_code_last);
+#endif
+	  /* TCP flags are only checked when valid (i.e. packet was TCP) */
+	  if (pkt_5tuple->pkt.tcp_flags_valid
+	      && ((pkt_5tuple->pkt.tcp_flags & r->tcp_flags_mask) !=
+		  r->tcp_flags_value))
+	    continue;
+	}
+      /* everything matches! */
+#ifdef FA_NODE_VERBOSE_DEBUG
+      clib_warning ("ACL_FA_NODE_DBG acl %d rule %d FULL-MATCH, action %d",
+		    acl_index, i, r->is_permit);
+#endif
+      *r_action = r->is_permit;
+      if (r_acl_match_p)
+	*r_acl_match_p = acl_index;
+      if (r_rule_match_p)
+	*r_rule_match_p = i;
+      return 1;
+    }
+  return 0;
+}
+
+/* Convenience wrapper: match a single ACL using the global acl_main. */
+always_inline int
+acl_plugin_single_acl_match_5tuple (u32 acl_index, fa_5tuple_t * pkt_5tuple,
+		       int is_ip6, u8 * r_action, u32 * r_acl_match_p,
+		       u32 * r_rule_match_p, u32 * trace_bitmap)
+{
+  acl_main_t * am = &acl_main;
+  return single_acl_match_5tuple(am, acl_index, pkt_5tuple, is_ip6, r_action,
+                                 r_acl_match_p, r_rule_match_p, trace_bitmap);
+}
+
+/*
+ * First-match-wins walk over all ACLs applied to lookup context lc_index.
+ * On a hit, stores the winning ACL's position within the context vector in
+ * *acl_pos_p (plus action/acl/rule via single_acl_match_5tuple) and
+ * returns 1. Returns 0 on no match; the two separate return-0 paths exist
+ * only so a debug build can log the "no ACLs applied" case distinctly.
+ */
+always_inline int
+linear_multi_acl_match_5tuple (u32 lc_index, fa_5tuple_t * pkt_5tuple,
+		       int is_ip6, u8 *r_action, u32 *acl_pos_p, u32 * acl_match_p,
+		       u32 * rule_match_p, u32 * trace_bitmap)
+{
+  acl_main_t *am = &acl_main;
+  int i;
+  u32 *acl_vector;
+  u8 action = 0;
+  acl_lookup_context_t *acontext = pool_elt_at_index(am->acl_lookup_contexts, lc_index);
+
+  acl_vector = acontext->acl_indices;
+
+  for (i = 0; i < vec_len (acl_vector); i++)
+    {
+#ifdef FA_NODE_VERBOSE_DEBUG
+      clib_warning ("ACL_FA_NODE_DBG: Trying to match ACL: %d",
+		    acl_vector[i]);
+#endif
+      if (single_acl_match_5tuple
+	  (am, acl_vector[i], pkt_5tuple, is_ip6, &action,
+	   acl_match_p, rule_match_p, trace_bitmap))
+	{
+	  *r_action = action;
+	  *acl_pos_p = i;
+	  return 1;
+	}
+    }
+  if (vec_len (acl_vector) > 0)
+    {
+      return 0;
+    }
+#ifdef FA_NODE_VERBOSE_DEBUG
+  clib_warning ("ACL_FA_NODE_DBG: No ACL on lc_index %d", lc_index);
+#endif
+  /* If there are no ACLs defined we should not be here. */
+  return 0;
+}
+
+
+
+/*
+ * This returns true if there is indeed a match on the portranges.
+ * With all these levels of indirections, this is not going to be very fast,
+ * so, best use the individual ports or wildcard ports for performance.
+ * 'index' is the applied-hash-ACE entry index within the lookup context
+ * identified by match->pkt.lc_index; the entry is chased back to the
+ * original acl/rule to fetch the exact port bounds.
+ */
+always_inline int
+match_portranges(acl_main_t *am, fa_5tuple_t *match, u32 index)
+{
+
+  applied_hash_ace_entry_t **applied_hash_aces = vec_elt_at_index(am->hash_entry_vec_by_lc_index, match->pkt.lc_index);
+  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), index);
+
+  acl_rule_t *r = &(am->acls[pae->acl_index].rules[pae->ace_index]);
+
+#ifdef FA_NODE_VERBOSE_DEBUG
+  clib_warning("PORTMATCH: %d <= %d <= %d && %d <= %d <= %d ?",
+		r->src_port_or_type_first, match->l4.port[0], r->src_port_or_type_last,
+		r->dst_port_or_code_first, match->l4.port[1], r->dst_port_or_code_last);
+#endif
+
+  return ( ((r->src_port_or_type_first <= match->l4.port[0]) && r->src_port_or_type_last >= match->l4.port[0]) &&
+	   ((r->dst_port_or_code_first <= match->l4.port[1]) && r->dst_port_or_code_last >= match->l4.port[1]) );
+}
+
+/*
+ * Look up the best-matching applied ACE for the 5-tuple via the 48_8 bihash.
+ * "Best" is the lowest applied-entry index (earlier position == higher
+ * priority). Returns the applied entry index, or ~0 when nothing matches.
+ */
+always_inline u32
+multi_acl_match_get_applied_ace_index(acl_main_t *am, fa_5tuple_t *match)
+{
+ clib_bihash_kv_48_8_t kv;
+ clib_bihash_kv_48_8_t result;
+ /* View kv.key as a 5-tuple so the mask type index LSB can be set in it. */
+ fa_5tuple_t *kv_key = (fa_5tuple_t *)kv.key;
+ hash_acl_lookup_value_t *result_val = (hash_acl_lookup_value_t *)&result.value;
+ u64 *pmatch = (u64 *)match;
+ u64 *pmask;
+ u64 *pkey;
+ int mask_type_index;
+ /* ~0 == "no candidate yet"; any found entry index compares lower. */
+ u32 curr_match_index = ~0;
+
+ u32 lc_index = match->pkt.lc_index;
+ applied_hash_ace_entry_t **applied_hash_aces = vec_elt_at_index(am->hash_entry_vec_by_lc_index, match->pkt.lc_index);
+ applied_hash_acl_info_t **applied_hash_acls = &am->applied_hash_acl_info_by_lc_index;
+
+ DBG("TRYING TO MATCH: %016llx %016llx %016llx %016llx %016llx %016llx",
+ pmatch[0], pmatch[1], pmatch[2], pmatch[3], pmatch[4], pmatch[5]);
+
+ /* One bihash probe per mask type; skip mask types whose bit is clear
+ * in this lookup context's bitmap (not used by any applied ACL here). */
+ for(mask_type_index=0; mask_type_index < pool_len(am->ace_mask_type_pool); mask_type_index++) {
+ if (!clib_bitmap_get(vec_elt_at_index((*applied_hash_acls), lc_index)->mask_type_index_bitmap, mask_type_index)) {
+ /* This bit is not set. Avoid trying to match */
+ continue;
+ }
+ ace_mask_type_entry_t *mte = vec_elt_at_index(am->ace_mask_type_pool, mask_type_index);
+ pmatch = (u64 *)match;
+ pmask = (u64 *)&mte->mask;
+ pkey = (u64 *)kv.key;
+ /*
+ * unrolling the below loop results in a noticeable performance increase.
+ int i;
+ for(i=0; i<6; i++) {
+ kv.key[i] = pmatch[i] & pmask[i];
+ }
+ */
+
+ /* Build the 48-byte search key: 5-tuple AND'ed with this mask type. */
+ *pkey++ = *pmatch++ & *pmask++;
+ *pkey++ = *pmatch++ & *pmask++;
+ *pkey++ = *pmatch++ & *pmask++;
+ *pkey++ = *pmatch++ & *pmask++;
+ *pkey++ = *pmatch++ & *pmask++;
+ *pkey++ = *pmatch++ & *pmask++;
+
+ /* Disambiguate otherwise-identical masked keys of different mask types. */
+ kv_key->pkt.mask_type_index_lsb = mask_type_index;
+ DBG(" KEY %3d: %016llx %016llx %016llx %016llx %016llx %016llx", mask_type_index,
+ kv.key[0], kv.key[1], kv.key[2], kv.key[3], kv.key[4], kv.key[5]);
+ int res = clib_bihash_search_48_8 (&am->acl_lookup_hash, &kv, &result);
+ if (res == 0) {
+ DBG("ACL-MATCH! result_val: %016llx", result_val->as_u64);
+ /* Only interesting if it beats (is earlier than) the current candidate. */
+ if (result_val->applied_entry_index < curr_match_index) {
+ if (PREDICT_FALSE(result_val->need_portrange_check)) {
+ /*
+ * This is going to be slow, since we can have multiple superset
+ * entries for narrow-ish portranges, e.g.:
+ * 0..42 100..400, 230..60000,
+ * so we need to walk linearly and check if they match.
+ */
+
+ u32 curr_index = result_val->applied_entry_index;
+ while ((curr_index != ~0) && !match_portranges(am, match, curr_index)) {
+ /* while no match and there are more entries, walk... */
+ applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces),curr_index);
+ DBG("entry %d did not portmatch, advancing to %d", curr_index, pae->next_applied_entry_index);
+ curr_index = pae->next_applied_entry_index;
+ }
+ /* A walk that found nothing ends at ~0, which never beats a candidate. */
+ if (curr_index < curr_match_index) {
+ DBG("The index %d is the new candidate in portrange matches.", curr_index);
+ curr_match_index = curr_index;
+ } else {
+ DBG("Curr portmatch index %d is too big vs. current matched one %d", curr_index, curr_match_index);
+ }
+ } else {
+ /* The usual path is here. Found an entry in front of the current candiate - so it's a new one */
+ DBG("This match is the new candidate");
+ curr_match_index = result_val->applied_entry_index;
+ if (!result_val->shadowed) {
+ /* new result is known to not be shadowed, so no point to look up further */
+ break;
+ }
+ }
+ }
+ }
+ }
+ DBG("MATCH-RESULT: %d", curr_match_index);
+ return curr_match_index;
+}
+
+/*
+ * Hash-based multi-ACL match: resolve the applied ACE for the 5-tuple on
+ * this lookup context and, on a hit, fill in the action and the
+ * ACL position / ACL index / rule index outputs. Returns 1 on match, 0 otherwise.
+ */
+always_inline int
+hash_multi_acl_match_5tuple (u32 lc_index, fa_5tuple_t * pkt_5tuple,
+ int is_ip6, u8 *action, u32 *acl_pos_p, u32 * acl_match_p,
+ u32 * rule_match_p, u32 * trace_bitmap)
+{
+ acl_main_t *am = &acl_main;
+ applied_hash_ace_entry_t **applied_hash_aces = vec_elt_at_index(am->hash_entry_vec_by_lc_index, lc_index);
+ u32 match_index = multi_acl_match_get_applied_ace_index(am, pkt_5tuple);
+
+ /* ~0 (or any out-of-range index) means no entry matched. */
+ if (match_index >= vec_len((*applied_hash_aces)))
+ return 0;
+
+ applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), match_index);
+ pae->hitcount++;
+ *acl_pos_p = pae->acl_position;
+ *acl_match_p = pae->acl_index;
+ *rule_match_p = pae->ace_index;
+ *action = pae->action;
+ return 1;
+}
+
+
+
+/*
+ * Public inline entry point for 5-tuple ACL matching on a lookup context.
+ * Dispatches to the hash-based matcher when enabled, otherwise to the
+ * linear matcher. Returns 1 on match (outputs filled in), 0 otherwise.
+ */
+always_inline int
+acl_plugin_match_5tuple_inline (u32 lc_index,
+ fa_5tuple_opaque_t * pkt_5tuple,
+ int is_ip6, u8 * r_action,
+ u32 * r_acl_pos_p,
+ u32 * r_acl_match_p,
+ u32 * r_rule_match_p,
+ u32 * trace_bitmap)
+{
+ acl_main_t *am = &acl_main;
+ fa_5tuple_t *pkt = (fa_5tuple_t *) pkt_5tuple;
+
+ if (am->use_hash_acl_matching)
+ return hash_multi_acl_match_5tuple (lc_index, pkt, is_ip6, r_action,
+ r_acl_pos_p, r_acl_match_p,
+ r_rule_match_p, trace_bitmap);
+
+ return linear_multi_acl_match_5tuple (lc_index, pkt, is_ip6, r_action,
+ r_acl_pos_p, r_acl_match_p,
+ r_rule_match_p, trace_bitmap);
+}
+
+
+
+#endif