summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--src/plugins/acl.am1
-rw-r--r--src/plugins/acl/acl.c351
-rw-r--r--src/plugins/acl/acl.h122
-rw-r--r--src/plugins/acl/bihash_40_8.h89
-rw-r--r--src/plugins/acl/fa_node.c1444
-rw-r--r--src/plugins/acl/fa_node.h99
-rw-r--r--test/test_acl_plugin_l2l3.py722
7 files changed, 2809 insertions, 19 deletions
diff --git a/src/plugins/acl.am b/src/plugins/acl.am
index efed31c2912..03328f6d007 100644
--- a/src/plugins/acl.am
+++ b/src/plugins/acl.am
@@ -18,6 +18,7 @@ acl_plugin_la_SOURCES = \
acl/acl.c \
acl/node_in.c \
acl/node_out.c \
+ acl/fa_node.c \
acl/l2sess.c \
acl/l2sess_node.c \
acl/l2sess.h \
diff --git a/src/plugins/acl/acl.c b/src/plugins/acl/acl.c
index 0d06531d6f4..476fbc33eae 100644
--- a/src/plugins/acl/acl.c
+++ b/src/plugins/acl/acl.c
@@ -54,6 +54,7 @@
#include "node_in.h"
#include "node_out.h"
+#include "fa_node.h"
acl_main_t acl_main;
@@ -382,7 +383,7 @@ acl_unhook_l2_input_classify (acl_main_t * am, u32 sw_if_index)
am->acl_ip4_input_classify_table_by_sw_if_index[sw_if_index] = ~0;
acl_classify_add_del_table_big (cm, ip4_5tuple_mask,
sizeof (ip4_5tuple_mask) - 1, ~0,
- am->l2_input_classify_next_acl,
+ am->l2_input_classify_next_acl_ip4,
&ip4_table_index, 0);
}
if (am->acl_ip6_input_classify_table_by_sw_if_index[sw_if_index] != ~0)
@@ -392,7 +393,7 @@ acl_unhook_l2_input_classify (acl_main_t * am, u32 sw_if_index)
am->acl_ip6_input_classify_table_by_sw_if_index[sw_if_index] = ~0;
acl_classify_add_del_table_big (cm, ip6_5tuple_mask,
sizeof (ip6_5tuple_mask) - 1, ~0,
- am->l2_input_classify_next_acl,
+ am->l2_input_classify_next_acl_ip6,
&ip6_table_index, 0);
}
@@ -420,7 +421,7 @@ acl_unhook_l2_output_classify (acl_main_t * am, u32 sw_if_index)
am->acl_ip4_output_classify_table_by_sw_if_index[sw_if_index] = ~0;
acl_classify_add_del_table_big (cm, ip4_5tuple_mask,
sizeof (ip4_5tuple_mask) - 1, ~0,
- am->l2_output_classify_next_acl,
+ am->l2_output_classify_next_acl_ip4,
&ip4_table_index, 0);
}
if (am->acl_ip6_output_classify_table_by_sw_if_index[sw_if_index] != ~0)
@@ -430,7 +431,7 @@ acl_unhook_l2_output_classify (acl_main_t * am, u32 sw_if_index)
am->acl_ip6_output_classify_table_by_sw_if_index[sw_if_index] = ~0;
acl_classify_add_del_table_big (cm, ip6_5tuple_mask,
sizeof (ip6_5tuple_mask) - 1, ~0,
- am->l2_output_classify_next_acl,
+ am->l2_output_classify_next_acl_ip6,
&ip6_table_index, 0);
}
@@ -450,20 +451,20 @@ acl_hook_l2_input_classify (acl_main_t * am, u32 sw_if_index)
rv =
acl_classify_add_del_table_big (cm, ip4_5tuple_mask,
sizeof (ip4_5tuple_mask) - 1, ~0,
- am->l2_input_classify_next_acl,
+ am->l2_input_classify_next_acl_ip4,
&ip4_table_index, 1);
if (rv)
return rv;
rv =
acl_classify_add_del_table_big (cm, ip6_5tuple_mask,
sizeof (ip6_5tuple_mask) - 1, ~0,
- am->l2_input_classify_next_acl,
+ am->l2_input_classify_next_acl_ip6,
&ip6_table_index, 1);
if (rv)
{
acl_classify_add_del_table_big (cm, ip4_5tuple_mask,
sizeof (ip4_5tuple_mask) - 1, ~0,
- am->l2_input_classify_next_acl,
+ am->l2_input_classify_next_acl_ip4,
&ip4_table_index, 0);
return rv;
}
@@ -477,11 +478,11 @@ acl_hook_l2_input_classify (acl_main_t * am, u32 sw_if_index)
{
acl_classify_add_del_table_big (cm, ip6_5tuple_mask,
sizeof (ip6_5tuple_mask) - 1, ~0,
- am->l2_input_classify_next_acl,
+ am->l2_input_classify_next_acl_ip6,
&ip6_table_index, 0);
acl_classify_add_del_table_big (cm, ip4_5tuple_mask,
sizeof (ip4_5tuple_mask) - 1, ~0,
- am->l2_input_classify_next_acl,
+ am->l2_input_classify_next_acl_ip4,
&ip4_table_index, 0);
return rv;
}
@@ -508,20 +509,20 @@ acl_hook_l2_output_classify (acl_main_t * am, u32 sw_if_index)
rv =
acl_classify_add_del_table_big (cm, ip4_5tuple_mask,
sizeof (ip4_5tuple_mask) - 1, ~0,
- am->l2_output_classify_next_acl,
+ am->l2_output_classify_next_acl_ip4,
&ip4_table_index, 1);
if (rv)
return rv;
rv =
acl_classify_add_del_table_big (cm, ip6_5tuple_mask,
sizeof (ip6_5tuple_mask) - 1, ~0,
- am->l2_output_classify_next_acl,
+ am->l2_output_classify_next_acl_ip6,
&ip6_table_index, 1);
if (rv)
{
acl_classify_add_del_table_big (cm, ip4_5tuple_mask,
sizeof (ip4_5tuple_mask) - 1, ~0,
- am->l2_output_classify_next_acl,
+ am->l2_output_classify_next_acl_ip4,
&ip4_table_index, 0);
return rv;
}
@@ -535,11 +536,11 @@ acl_hook_l2_output_classify (acl_main_t * am, u32 sw_if_index)
{
acl_classify_add_del_table_big (cm, ip6_5tuple_mask,
sizeof (ip6_5tuple_mask) - 1, ~0,
- am->l2_output_classify_next_acl,
+ am->l2_output_classify_next_acl_ip6,
&ip6_table_index, 0);
acl_classify_add_del_table_big (cm, ip4_5tuple_mask,
sizeof (ip4_5tuple_mask) - 1, ~0,
- am->l2_output_classify_next_acl,
+ am->l2_output_classify_next_acl_ip4,
&ip4_table_index, 0);
return rv;
}
@@ -554,6 +555,7 @@ acl_hook_l2_output_classify (acl_main_t * am, u32 sw_if_index)
}
+
int
acl_interface_in_enable_disable (acl_main_t * am, u32 sw_if_index,
int enable_disable)
@@ -565,6 +567,8 @@ acl_interface_in_enable_disable (acl_main_t * am, u32 sw_if_index,
sw_if_index))
return VNET_API_ERROR_INVALID_SW_IF_INDEX;
+ acl_fa_enable_disable(sw_if_index, 1, enable_disable);
+
if (enable_disable)
{
rv = acl_hook_l2_input_classify (am, sw_if_index);
@@ -588,6 +592,8 @@ acl_interface_out_enable_disable (acl_main_t * am, u32 sw_if_index,
sw_if_index))
return VNET_API_ERROR_INVALID_SW_IF_INDEX;
+ acl_fa_enable_disable(sw_if_index, 0, enable_disable);
+
if (enable_disable)
{
rv = acl_hook_l2_output_classify (am, sw_if_index);
@@ -1820,10 +1826,10 @@ acl_setup_nodes (void)
vlib_node_t *n;
n = vlib_get_node_by_name (vm, (u8 *) "l2-input-classify");
- am->l2_input_classify_next_acl =
+ am->l2_input_classify_next_acl_old =
vlib_node_add_next_with_slot (vm, n->index, acl_in_node.index, ~0);
n = vlib_get_node_by_name (vm, (u8 *) "l2-output-classify");
- am->l2_output_classify_next_acl =
+ am->l2_output_classify_next_acl_old =
vlib_node_add_next_with_slot (vm, n->index, acl_out_node.index, ~0);
feat_bitmap_init_next_nodes (vm, acl_in_node.index, L2INPUT_N_FEAT,
@@ -1844,11 +1850,299 @@ acl_setup_nodes (void)
sizeof (am->acl_out_ip6_match_next));
am->n_match_actions = 0;
+ am->l2_input_classify_next_acl_ip4 = am->l2_input_classify_next_acl_old;
+ am->l2_input_classify_next_acl_ip6 = am->l2_input_classify_next_acl_old;
+ am->l2_output_classify_next_acl_ip4 = am->l2_output_classify_next_acl_old;
+ am->l2_output_classify_next_acl_ip6 = am->l2_output_classify_next_acl_old;
+
register_match_action_nexts (0, 0, 0, 0); /* drop */
register_match_action_nexts (~0, ~0, ~0, ~0); /* permit */
register_match_action_nexts (ACL_IN_L2S_INPUT_IP4_ADD, ACL_IN_L2S_INPUT_IP6_ADD, ACL_OUT_L2S_OUTPUT_IP4_ADD, ACL_OUT_L2S_OUTPUT_IP6_ADD); /* permit + create session */
}
+void
+acl_setup_fa_nodes (void)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ acl_main_t *am = &acl_main;
+ vlib_node_t *n, *n4, *n6;
+
+ n = vlib_get_node_by_name (vm, (u8 *) "l2-input-classify");
+ n4 = vlib_get_node_by_name (vm, (u8 *) "acl-plugin-in-ip4-l2");
+ n6 = vlib_get_node_by_name (vm, (u8 *) "acl-plugin-in-ip6-l2");
+
+
+ am->fa_l2_input_classify_next_acl_ip4 =
+ vlib_node_add_next_with_slot (vm, n->index, n4->index, ~0);
+ am->fa_l2_input_classify_next_acl_ip6 =
+ vlib_node_add_next_with_slot (vm, n->index, n6->index, ~0);
+
+ feat_bitmap_init_next_nodes (vm, n4->index, L2INPUT_N_FEAT,
+ l2input_get_feat_names (),
+ am->fa_acl_in_ip4_l2_node_feat_next_node_index);
+
+ feat_bitmap_init_next_nodes (vm, n6->index, L2INPUT_N_FEAT,
+ l2input_get_feat_names (),
+ am->fa_acl_in_ip6_l2_node_feat_next_node_index);
+
+
+ n = vlib_get_node_by_name (vm, (u8 *) "l2-output-classify");
+ n4 = vlib_get_node_by_name (vm, (u8 *) "acl-plugin-out-ip4-l2");
+ n6 = vlib_get_node_by_name (vm, (u8 *) "acl-plugin-out-ip6-l2");
+
+ am->fa_l2_output_classify_next_acl_ip4 =
+ vlib_node_add_next_with_slot (vm, n->index, n4->index, ~0);
+ am->fa_l2_output_classify_next_acl_ip6 =
+ vlib_node_add_next_with_slot (vm, n->index, n6->index, ~0);
+
+ feat_bitmap_init_next_nodes (vm, n4->index, L2OUTPUT_N_FEAT,
+ l2output_get_feat_names (),
+ am->fa_acl_out_ip4_l2_node_feat_next_node_index);
+
+ feat_bitmap_init_next_nodes (vm, n6->index, L2OUTPUT_N_FEAT,
+ l2output_get_feat_names (),
+ am->fa_acl_out_ip6_l2_node_feat_next_node_index);
+
+ am->l2_input_classify_next_acl_ip4 = am->fa_l2_input_classify_next_acl_ip4;
+ am->l2_input_classify_next_acl_ip6 = am->fa_l2_input_classify_next_acl_ip6;
+ am->l2_output_classify_next_acl_ip4 = am->fa_l2_output_classify_next_acl_ip4;
+ am->l2_output_classify_next_acl_ip6 = am->fa_l2_output_classify_next_acl_ip6;
+
+}
+
+void
+acl_set_timeout_sec(int timeout_type, u32 value)
+{
+ acl_main_t *am = &acl_main;
+ l2sess_main_t *sm = &l2sess_main;
+ clib_time_t *ct = &am->vlib_main->clib_time;
+
+ if (timeout_type < ACL_N_TIMEOUTS) {
+ am->session_timeout_sec[timeout_type] = value;
+ } else {
+ clib_warning("Unknown timeout type %d", timeout_type);
+ return;
+ }
+
+ switch(timeout_type) {
+ case ACL_TIMEOUT_UDP_IDLE:
+ sm->udp_session_idle_timeout = (u64)(((f64)value)/ct->seconds_per_clock);
+ break;
+ case ACL_TIMEOUT_TCP_IDLE:
+ sm->tcp_session_idle_timeout = (u64)(((f64)value)/ct->seconds_per_clock);
+ break;
+ case ACL_TIMEOUT_TCP_TRANSIENT:
+ sm->tcp_session_transient_timeout = (u64)(((f64)value)/ct->seconds_per_clock);
+ break;
+ default:
+ clib_warning("Unknown timeout type %d", timeout_type);
+ }
+}
+
+void
+acl_set_session_max_entries(u32 value)
+{
+ acl_main_t *am = &acl_main;
+ am->fa_conn_table_max_entries = value;
+}
+
+int
+acl_set_skip_ipv6_eh(u32 eh, u32 value)
+{
+ acl_main_t *am = &acl_main;
+ if ((eh < 256) && (value < 2))
+ {
+ am->fa_ipv6_known_eh_bitmap = clib_bitmap_set(am->fa_ipv6_known_eh_bitmap, eh, value);
+ return 1;
+ }
+ else
+ return 0;
+}
+
+
+static clib_error_t *
+acl_sw_interface_add_del (vnet_main_t * vnm, u32 sw_if_index, u32 is_add)
+{
+ acl_main_t *am = &acl_main;
+ if (0 == is_add) {
+ vlib_process_signal_event (am->vlib_main, am->fa_cleaner_node_index,
+ ACL_FA_CLEANER_DELETE_BY_SW_IF_INDEX, sw_if_index);
+ }
+ return 0;
+}
+
+VNET_SW_INTERFACE_ADD_DEL_FUNCTION (acl_sw_interface_add_del);
+
+static clib_error_t *
+acl_set_aclplugin_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ clib_error_t *error = 0;
+ u32 timeout = 0;
+ u32 val = 0;
+ u32 eh_val = 0;
+ uword memory_size = 0;
+ acl_main_t *am = &acl_main;
+
+ /* The new datapath is the default. This command exists out of precaution and for comparing the two */
+ if (unformat (input, "l2-datapath")) {
+ if (unformat(input, "old")) {
+ am->l2_input_classify_next_acl_ip4 = am->l2_input_classify_next_acl_old;
+ am->l2_input_classify_next_acl_ip6 = am->l2_input_classify_next_acl_old;
+ am->l2_output_classify_next_acl_ip4 = am->l2_output_classify_next_acl_old;
+ am->l2_output_classify_next_acl_ip6 = am->l2_output_classify_next_acl_old;
+ goto done;
+ }
+ if (unformat(input, "new")) {
+ am->l2_input_classify_next_acl_ip4 = am->fa_l2_input_classify_next_acl_ip4;
+ am->l2_input_classify_next_acl_ip6 = am->fa_l2_input_classify_next_acl_ip6;
+ am->l2_output_classify_next_acl_ip4 = am->fa_l2_output_classify_next_acl_ip4;
+ am->l2_output_classify_next_acl_ip6 = am->fa_l2_output_classify_next_acl_ip6;
+ goto done;
+ }
+ goto done;
+ }
+ if (unformat (input, "skip-ipv6-extension-header %u %u", &eh_val, &val)) {
+ if(!acl_set_skip_ipv6_eh(eh_val, val)) {
+ error = clib_error_return(0, "expecting eh=0..255, value=0..1");
+ }
+ goto done;
+ }
+ if (unformat (input, "session")) {
+ if (unformat (input, "clear")) {
+ acl_main_t *am = &acl_main;
+ vlib_process_signal_event (am->vlib_main, am->fa_cleaner_node_index,
+ ACL_FA_CLEANER_DELETE_BY_SW_IF_INDEX, ~0);
+ goto done;
+ }
+ if (unformat (input, "table")) {
+ /* The commands here are for tuning/testing. No user-serviceable parts inside */
+ if (unformat (input, "max-entries")) {
+ if (!unformat(input, "%u", &val)) {
+ error = clib_error_return(0,
+ "expecting maximum number of entries, got `%U`",
+ format_unformat_error, input);
+ goto done;
+ } else {
+ acl_set_session_max_entries(val);
+ goto done;
+ }
+ }
+ if (unformat (input, "hash-table-buckets")) {
+ if (!unformat(input, "%u", &val)) {
+ error = clib_error_return(0,
+ "expecting maximum number of hash table buckets, got `%U`",
+ format_unformat_error, input);
+ goto done;
+ } else {
+ am->fa_conn_table_hash_num_buckets = val;
+ goto done;
+ }
+ }
+ if (unformat (input, "hash-table-memory")) {
+ if (!unformat(input, "%U", unformat_memory_size, &memory_size)) {
+ error = clib_error_return(0,
+ "expecting maximum amount of hash table memory, got `%U`",
+ format_unformat_error, input);
+ goto done;
+ } else {
+ am->fa_conn_table_hash_memory_size = memory_size;
+ goto done;
+ }
+ }
+ goto done;
+ }
+ if (unformat (input, "timeout")) {
+ if (unformat(input, "udp")) {
+ if(unformat(input, "idle")) {
+ if (!unformat(input, "%u", &timeout)) {
+ error = clib_error_return(0,
+ "expecting timeout value in seconds, got `%U`",
+ format_unformat_error, input);
+ goto done;
+ } else {
+ acl_set_timeout_sec(ACL_TIMEOUT_UDP_IDLE, timeout);
+ goto done;
+ }
+ }
+ }
+ if (unformat(input, "tcp")) {
+ if(unformat(input, "idle")) {
+ if (!unformat(input, "%u", &timeout)) {
+ error = clib_error_return(0,
+ "expecting timeout value in seconds, got `%U`",
+ format_unformat_error, input);
+ goto done;
+ } else {
+ acl_set_timeout_sec(ACL_TIMEOUT_TCP_IDLE, timeout);
+ goto done;
+ }
+ }
+ if(unformat(input, "transient")) {
+ if (!unformat(input, "%u", &timeout)) {
+ error = clib_error_return(0,
+ "expecting timeout value in seconds, got `%U`",
+ format_unformat_error, input);
+ goto done;
+ } else {
+ acl_set_timeout_sec(ACL_TIMEOUT_TCP_TRANSIENT, timeout);
+ goto done;
+ }
+ }
+ }
+ goto done;
+ }
+ }
+done:
+ return error;
+}
+
+static clib_error_t *
+acl_show_aclplugin_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ clib_error_t *error = 0;
+ acl_main_t *am = &acl_main;
+ vnet_interface_main_t *im = &am->vnet_main->interface_main;
+
+ vnet_sw_interface_t *swif;
+
+ if (unformat (input, "sessions"))
+ {
+ u8 * out0 = 0;
+ pool_foreach (swif, im->sw_interfaces,
+ ({
+ u32 sw_if_index = swif->sw_if_index;
+ u64 n_adds = sw_if_index < vec_len(am->fa_session_adds_by_sw_if_index) ? am->fa_session_adds_by_sw_if_index[sw_if_index] : 0;
+ u64 n_dels = sw_if_index < vec_len(am->fa_session_dels_by_sw_if_index) ? am->fa_session_dels_by_sw_if_index[sw_if_index] : 0;
+ out0 = format(out0, "sw_if_index %d: add %lu - del %lu = %lu\n", sw_if_index, n_adds, n_dels, n_adds - n_dels);
+ }));
+ vlib_cli_output(vm, "\n\n%s\n\n", out0);
+ vlib_cli_output(vm, "Sessions per interval: min %lu max %lu increment: %f ms current: %f ms",
+ am->fa_min_deleted_sessions_per_interval, am->fa_max_deleted_sessions_per_interval,
+ am->fa_cleaner_wait_time_increment * 1000.0, ((f64)am->fa_current_cleaner_timer_wait_interval) * 1000.0/(f64)vm->clib_time.clocks_per_second);
+ vec_free(out0);
+ }
+ return error;
+}
+
+
+ /* *INDENT-OFF* */
+VLIB_CLI_COMMAND (aclplugin_set_command, static) = {
+ .path = "set acl-plugin",
+ .short_help = "set acl-plugin session timeout {{udp idle}|tcp {idle|transient}} <seconds>",
+ .function = acl_set_aclplugin_fn,
+};
+
+VLIB_CLI_COMMAND (aclplugin_show_command, static) = {
+ .path = "show acl-plugin",
+ .short_help = "show acl-plugin sessions",
+ .function = acl_show_aclplugin_fn,
+};
+/* *INDENT-ON* */
+
static clib_error_t *
@@ -1874,6 +2168,31 @@ acl_init (vlib_main_t * vm)
vec_free (name);
+ acl_setup_fa_nodes();
+ am->session_timeout_sec[ACL_TIMEOUT_TCP_TRANSIENT] = TCP_SESSION_TRANSIENT_TIMEOUT_SEC;
+ am->session_timeout_sec[ACL_TIMEOUT_TCP_IDLE] = TCP_SESSION_IDLE_TIMEOUT_SEC;
+ am->session_timeout_sec[ACL_TIMEOUT_UDP_IDLE] = UDP_SESSION_IDLE_TIMEOUT_SEC;
+
+ am->fa_conn_table_hash_num_buckets = ACL_FA_CONN_TABLE_DEFAULT_HASH_NUM_BUCKETS;
+ am->fa_conn_table_hash_memory_size = ACL_FA_CONN_TABLE_DEFAULT_HASH_MEMORY_SIZE;
+ am->fa_conn_table_max_entries = ACL_FA_CONN_TABLE_DEFAULT_MAX_ENTRIES;
+
+ {
+ u8 tt;
+ for(tt = 0; tt < ACL_N_TIMEOUTS; tt++) {
+ am->fa_conn_list_head[tt] = ~0;
+ am->fa_conn_list_tail[tt] = ~0;
+ }
+ }
+
+ am->fa_min_deleted_sessions_per_interval = ACL_FA_DEFAULT_MIN_DELETED_SESSIONS_PER_INTERVAL;
+ am->fa_max_deleted_sessions_per_interval = ACL_FA_DEFAULT_MAX_DELETED_SESSIONS_PER_INTERVAL;
+ am->fa_cleaner_wait_time_increment = ACL_FA_DEFAULT_CLEANER_WAIT_TIME_INCREMENT;
+
+#define _(N, v, s) am->fa_ipv6_known_eh_bitmap = clib_bitmap_set(am->fa_ipv6_known_eh_bitmap, v, 1);
+ foreach_acl_eh
+#undef _
+
return error;
}
diff --git a/src/plugins/acl/acl.h b/src/plugins/acl/acl.h
index 0252ff388d8..47523636d48 100644
--- a/src/plugins/acl/acl.h
+++ b/src/plugins/acl/acl.h
@@ -22,10 +22,13 @@
#include <vppinfra/hash.h>
#include <vppinfra/error.h>
+#include <vppinfra/bitmap.h>
#include <vppinfra/elog.h>
+#include "bihash_40_8.h"
+#include "fa_node.h"
#define ACL_PLUGIN_VERSION_MAJOR 1
-#define ACL_PLUGIN_VERSION_MINOR 1
+#define ACL_PLUGIN_VERSION_MINOR 2
extern vlib_node_registration_t acl_in_node;
extern vlib_node_registration_t acl_out_node;
@@ -33,6 +36,14 @@ extern vlib_node_registration_t acl_out_node;
void input_acl_packet_match(u32 sw_if_index, vlib_buffer_t * b0, u32 *nextp, u32 *acl_match_p, u32 *rule_match_p, u32 *trace_bitmap);
void output_acl_packet_match(u32 sw_if_index, vlib_buffer_t * b0, u32 *nextp, u32 *acl_match_p, u32 *rule_match_p, u32 *trace_bitmap);
+enum acl_timeout_e {
+ ACL_TIMEOUT_UDP_IDLE = 0,
+ ACL_TIMEOUT_TCP_IDLE,
+ ACL_TIMEOUT_TCP_TRANSIENT,
+ ACL_N_TIMEOUTS
+};
+
+
enum address_e { IP4, IP6 };
typedef struct
{
@@ -118,8 +129,8 @@ typedef struct {
u32 *macip_acl_by_sw_if_index;
/* next indices for our nodes in the l2-classify tables */
- u32 l2_input_classify_next_acl;
- u32 l2_output_classify_next_acl;
+ u32 l2_input_classify_next_acl_old;
+ u32 l2_output_classify_next_acl_old;
/* next node indices for feature bitmap */
u32 acl_in_node_feat_next_node_index[32];
@@ -133,12 +144,117 @@ typedef struct {
u32 acl_out_ip6_match_next[256];
u32 n_match_actions;
+ /* bitmaps when set the processing is enabled on the interface */
+ uword *fa_in_acl_on_sw_if_index;
+ uword *fa_out_acl_on_sw_if_index;
+ /* bitmap, when set the hash is initialized */
+ uword *fa_sessions_on_sw_if_index;
+ clib_bihash_40_8_t *fa_sessions_by_sw_if_index;
+ /* pool for FA session data. See fa_node.h */
+ fa_session_t *fa_sessions_pool;
+ /* The process node which is responsible to deleting the sessions */
+ u32 fa_cleaner_node_index;
+ /* FA session timeouts, in seconds */
+ u32 session_timeout_sec[ACL_N_TIMEOUTS];
+ /* session add/delete counters */
+ u64 *fa_session_adds_by_sw_if_index;
+ u64 *fa_session_dels_by_sw_if_index;
+
+ /* L2 datapath glue */
+
+ /* active next indices within L2 classifiers - switch old/new path */
+ u32 l2_input_classify_next_acl_ip4;
+ u32 l2_input_classify_next_acl_ip6;
+ u32 l2_output_classify_next_acl_ip4;
+ u32 l2_output_classify_next_acl_ip6;
+ /* saved next indices within L2 classifiers for ip4/ip6 fa L2 nodes */
+ u32 fa_l2_input_classify_next_acl_ip4;
+ u32 fa_l2_input_classify_next_acl_ip6;
+ u32 fa_l2_output_classify_next_acl_ip4;
+ u32 fa_l2_output_classify_next_acl_ip6;
+ /* next node indices for L2 dispatch */
+ u32 fa_acl_in_ip4_l2_node_feat_next_node_index[32];
+ u32 fa_acl_in_ip6_l2_node_feat_next_node_index[32];
+ u32 fa_acl_out_ip4_l2_node_feat_next_node_index[32];
+ u32 fa_acl_out_ip6_l2_node_feat_next_node_index[32];
+
+ /* EH values that we can skip over */
+ uword *fa_ipv6_known_eh_bitmap;
+
+ /* conn table per-interface conn table parameters */
+ u32 fa_conn_table_hash_num_buckets;
+ uword fa_conn_table_hash_memory_size;
+ u64 fa_conn_table_max_entries;
+
+ /*
+ * If the cleaner has to delete more than this number
+ * of connections, it halves the sleep time.
+ */
+
+#define ACL_FA_DEFAULT_MAX_DELETED_SESSIONS_PER_INTERVAL 100
+ u64 fa_max_deleted_sessions_per_interval;
+
+ /*
+ * If the cleaner deletes less than these connections,
+ * it increases the wait time by the "increment"
+ */
+
+#define ACL_FA_DEFAULT_MIN_DELETED_SESSIONS_PER_INTERVAL 1
+ u64 fa_min_deleted_sessions_per_interval;
+
+#define ACL_FA_DEFAULT_CLEANER_WAIT_TIME_INCREMENT 0.1
+ f64 fa_cleaner_wait_time_increment;
+
+ u64 fa_current_cleaner_timer_wait_interval;
+ u32 fa_conn_list_head[ACL_N_TIMEOUTS];
+ u32 fa_conn_list_tail[ACL_N_TIMEOUTS];
+
/* convenience */
vlib_main_t * vlib_main;
vnet_main_t * vnet_main;
} acl_main_t;
+#define foreach_acl_eh \
+ _(HOPBYHOP , 0 , "IPv6ExtHdrHopByHop") \
+ _(ROUTING , 43 , "IPv6ExtHdrRouting") \
+ _(DESTOPT , 60 , "IPv6ExtHdrDestOpt") \
+ _(MOBILITY , 135, "Mobility Header") \
+ _(HIP , 139, "Experimental use Host Identity Protocol") \
+ _(SHIM6 , 140, "Shim6 Protocol") \
+ _(EXP1 , 253, "Use for experimentation and testing") \
+ _(EXP2 , 254, "Use for experimentation and testing")
+
+/*
+
+ "No Next Header" is not a header.
+ Also, Fragment header needs special processing.
+
+ _(NONEXT , 59 , "NoNextHdr") \
+ _(FRAGMENT , 44 , "IPv6ExtHdrFragment") \
+
+
+ESP is hiding its internal format, so no point in trying to go past it.
+
+ _(ESP , 50 , "EncapsulatingSecurityPayload") \
+
+
+AH has a special treatment of its length, it is in 32-bit words, not 64-bit words like the rest.
+
+ _(AUTH , 51 , "Authentication Header") \
+
+
+*/
+
+
+ typedef enum {
+ #define _(N, v, s) ACL_EH_##N = v,
+ foreach_acl_eh
+ #undef _
+ } acl_eh_t;
+
+
+
extern acl_main_t acl_main;
diff --git a/src/plugins/acl/bihash_40_8.h b/src/plugins/acl/bihash_40_8.h
new file mode 100644
index 00000000000..ba3dfbeaaf0
--- /dev/null
+++ b/src/plugins/acl/bihash_40_8.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#undef BIHASH_TYPE
+
+#define BIHASH_TYPE _40_8
+#define BIHASH_KVP_PER_PAGE 4
+
+#ifndef __included_bihash_40_8_h__
+#define __included_bihash_40_8_h__
+
+#include <vppinfra/heap.h>
+#include <vppinfra/format.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/xxhash.h>
+
+typedef struct
+{
+ u64 key[5];
+ u64 value;
+} clib_bihash_kv_40_8_t;
+
+static inline int
+clib_bihash_is_free_40_8 (const clib_bihash_kv_40_8_t * v)
+{
+ /* Free values are memset to 0xff, check a bit... */
+ if (v->key[0] == ~0ULL && v->value == ~0ULL)
+ return 1;
+ return 0;
+}
+
+static inline u64
+clib_bihash_hash_40_8 (const clib_bihash_kv_40_8_t * v)
+{
+#if __SSE4_2__
+ u32 value = 0;
+ value = _mm_crc32_u64 (value, v->key[0]);
+ value = _mm_crc32_u64 (value, v->key[1]);
+ value = _mm_crc32_u64 (value, v->key[2]);
+ value = _mm_crc32_u64 (value, v->key[3]);
+ value = _mm_crc32_u64 (value, v->key[4]);
+ return value;
+#else
+ u64 tmp = v->key[0] ^ v->key[1] ^ v->key[2] ^ v->key[3] ^ v->key[4];
+ return clib_xxhash (tmp);
+#endif
+}
+
+static inline u8 *
+format_bihash_kvp_40_8 (u8 * s, va_list * args)
+{
+ clib_bihash_kv_40_8_t *v = va_arg (*args, clib_bihash_kv_40_8_t *);
+
+ s = format (s, "key %llu %llu %llu %llu %llu value %llu",
+ v->key[0], v->key[1], v->key[2], v->key[3], v->key[4],
+ v->value);
+ return s;
+}
+
+static inline int
+clib_bihash_key_compare_40_8 (const u64 * a, const u64 * b)
+{
+ return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2]) | (a[3] ^ b[3]) |
+ (a[4] ^ b[4])) == 0;
+}
+
+#undef __included_bihash_template_h__
+#include <vppinfra/bihash_template.h>
+
+#endif /* __included_bihash_40_8_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/acl/fa_node.c b/src/plugins/acl/fa_node.c
new file mode 100644
index 00000000000..ac619a7298b
--- /dev/null
+++ b/src/plugins/acl/fa_node.c
@@ -0,0 +1,1444 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <stddef.h>
+#include <netinet/in.h>
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vppinfra/error.h>
+#include <acl/acl.h>
+#include "bihash_40_8.h"
+
+#include <vppinfra/bihash_template.h>
+#include <vppinfra/bihash_template.c>
+
+#include "fa_node.h"
+
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+ u32 match_acl_in_index;
+ u32 match_rule_index;
+ u64 packet_info[6];
+ u32 trace_bitmap;
+ u8 action;
+} acl_fa_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_acl_fa_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ acl_fa_trace_t *t = va_arg (*args, acl_fa_trace_t *);
+
+ s =
+ format (s,
+ "acl-plugin: sw_if_index %d, next index %d, action: %d, match: acl %d rule %d trace_bits %08x\n"
+ " pkt info %016llx %016llx %016llx %016llx %016llx %016llx",
+ t->sw_if_index, t->next_index, t->action, t->match_acl_in_index,
+ t->match_rule_index, t->trace_bitmap,
+ t->packet_info[0], t->packet_info[1], t->packet_info[2],
+ t->packet_info[3], t->packet_info[4], t->packet_info[5]);
+ return s;
+}
+
+/* *INDENT-OFF* */
+#define foreach_acl_fa_error \
+_(ACL_DROP, "ACL deny packets") \
+_(ACL_PERMIT, "ACL permit packets") \
+_(ACL_NEW_SESSION, "new sessions added") \
+_(ACL_EXIST_SESSION, "existing session packets") \
+_(ACL_CHECK, "checked packets") \
+_(ACL_RESTART_SESSION_TIMER, "restart session timer") \
+_(ACL_TOO_MANY_SESSIONS, "too many sessions to add new") \
+/* end of errors */
+
+typedef enum
+{
+#define _(sym,str) ACL_FA_ERROR_##sym,
+ foreach_acl_fa_error
+#undef _
+ ACL_FA_N_ERROR,
+} acl_fa_error_t;
+
+static char *acl_fa_error_strings[] = {
+#define _(sym,string) string,
+ foreach_acl_fa_error
+#undef _
+};
+/* *INDENT-ON* */
+
+static void *
+get_ptr_to_offset (vlib_buffer_t * b0, int offset)
+{
+ u8 *p = vlib_buffer_get_current (b0) + offset;
+ return p;
+}
+
+
+static int
+fa_acl_match_addr (ip46_address_t * addr1, ip46_address_t * addr2,
+ int prefixlen, int is_ip6)
+{
+ if (prefixlen == 0)
+ {
+ /* match any always succeeds */
+ return 1;
+ }
+ if (is_ip6)
+ {
+ if (memcmp (addr1, addr2, prefixlen / 8))
+ {
+ /* If the starting full bytes do not match, no point in bit-twiddling further */
+ return 0;
+ }
+ if (prefixlen % 8)
+ {
+ u8 b1 = *((u8 *) addr1 + 1 + prefixlen / 8);
+ u8 b2 = *((u8 *) addr2 + 1 + prefixlen / 8);
+ u8 mask0 = (0xff - ((1 << (8 - (prefixlen % 8))) - 1));
+ return (b1 & mask0) == b2;
+ }
+ else
+ {
+ /* The prefix fits into integer number of bytes, so nothing left to do */
+ return 1;
+ }
+ }
+ else
+ {
+ uint32_t a1 = ntohl (addr1->ip4.as_u32);
+ uint32_t a2 = ntohl (addr2->ip4.as_u32);
+ uint32_t mask0 = 0xffffffff - ((1 << (32 - prefixlen)) - 1);
+ return (a1 & mask0) == a2;
+ }
+}
+
+static int
+fa_acl_match_port (u16 port, u16 port_first, u16 port_last, int is_ip6)
+{
+ return ((port >= port_first) && (port <= port_last));
+}
+
+int
+acl_match_5tuple (acl_main_t * am, u32 acl_index, fa_5tuple_t * pkt_5tuple,
+ int is_ip6, u8 * r_action, u32 * r_acl_match_p,
+ u32 * r_rule_match_p, u32 * trace_bitmap)
+{
+ int i;
+ acl_list_t *a;
+ acl_rule_t *r;
+
+ if (pool_is_free_index (am->acls, acl_index))
+ {
+ if (r_acl_match_p)
+ *r_acl_match_p = acl_index;
+ if (r_rule_match_p)
+ *r_rule_match_p = -1;
+ /* the ACL does not exist but is used for policy. Block traffic. */
+ return 0;
+ }
+ a = am->acls + acl_index;
+ for (i = 0; i < a->count; i++)
+ {
+ r = a->rules + i;
+ if (is_ip6 != r->is_ipv6)
+ {
+ continue;
+ }
+ if (!fa_acl_match_addr
+ (&pkt_5tuple->addr[1], &r->dst, r->dst_prefixlen, is_ip6))
+ continue;
+
+#ifdef FA_NODE_VERBOSE_DEBUG
+ clib_warning
+ ("ACL_FA_NODE_DBG acl %d rule %d pkt dst addr %U match rule addr %U/%d",
+ acl_index, i, format_ip46_address, &pkt_5tuple->addr[1],
+ IP46_TYPE_ANY, format_ip46_address, &r->dst, IP46_TYPE_ANY,
+ r->dst_prefixlen);
+#endif
+
+ if (!fa_acl_match_addr
+ (&pkt_5tuple->addr[0], &r->src, r->src_prefixlen, is_ip6))
+ continue;
+
+#ifdef FA_NODE_VERBOSE_DEBUG
+ clib_warning
+ ("ACL_FA_NODE_DBG acl %d rule %d pkt src addr %U match rule addr %U/%d",
+ acl_index, i, format_ip46_address, &pkt_5tuple->addr[0],
+ IP46_TYPE_ANY, format_ip46_address, &r->src, IP46_TYPE_ANY,
+ r->src_prefixlen);
+ clib_warning
+ ("ACL_FA_NODE_DBG acl %d rule %d trying to match pkt proto %d with rule %d",
+ acl_index, i, pkt_5tuple->l4.proto, r->proto);
+#endif
+ if (r->proto)
+ {
+ if (pkt_5tuple->l4.proto != r->proto)
+ continue;
+ /* A sanity check just to ensure what we have just matched was a valid L4 extracted from the packet */
+ if (PREDICT_FALSE (!pkt_5tuple->pkt.l4_valid))
+ continue;
+
+#ifdef FA_NODE_VERBOSE_DEBUG
+ clib_warning
+ ("ACL_FA_NODE_DBG acl %d rule %d pkt proto %d match rule %d",
+ acl_index, i, pkt_5tuple->l4.proto, r->proto);
+#endif
+
+ if (!fa_acl_match_port
+ (pkt_5tuple->l4.port[0], r->src_port_or_type_first,
+ r->src_port_or_type_last, is_ip6))
+ continue;
+
+#ifdef FA_NODE_VERBOSE_DEBUG
+ clib_warning
+ ("ACL_FA_NODE_DBG acl %d rule %d pkt sport %d match rule [%d..%d]",
+ acl_index, i, pkt_5tuple->l4.port[0], r->src_port_or_type_first,
+ r->src_port_or_type_last);
+#endif
+
+ if (!fa_acl_match_port
+ (pkt_5tuple->l4.port[1], r->dst_port_or_code_first,
+ r->dst_port_or_code_last, is_ip6))
+ continue;
+
+#ifdef FA_NODE_VERBOSE_DEBUG
+ clib_warning
+ ("ACL_FA_NODE_DBG acl %d rule %d pkt dport %d match rule [%d..%d]",
+ acl_index, i, pkt_5tuple->l4.port[1], r->dst_port_or_code_first,
+ r->dst_port_or_code_last);
+#endif
+ if (pkt_5tuple->pkt.tcp_flags_valid
+ && ((pkt_5tuple->pkt.tcp_flags & r->tcp_flags_mask) !=
+ r->tcp_flags_value))
+ continue;
+ }
+ /* everything matches! */
+#ifdef FA_NODE_VERBOSE_DEBUG
+ clib_warning ("ACL_FA_NODE_DBG acl %d rule %d FULL-MATCH, action %d",
+ acl_index, i, r->is_permit);
+#endif
+ *r_action = r->is_permit;
+ if (r_acl_match_p)
+ *r_acl_match_p = acl_index;
+ if (r_rule_match_p)
+ *r_rule_match_p = i;
+ return 1;
+ }
+ return 0;
+}
+
+/* Walk all ACLs applied to sw_if_index in the given direction and return
+ * the action of the first ACL whose rules match the packet 5-tuple.
+ * When nothing matches (including the no-ACLs-applied case) the default
+ * action is deny (0). */
+static u8
+full_acl_match_5tuple (u32 sw_if_index, fa_5tuple_t * pkt_5tuple, int is_l2,
+		       int is_ip6, int is_input, u32 * acl_match_p,
+		       u32 * rule_match_p, u32 * trace_bitmap)
+{
+  acl_main_t *am = &acl_main;
+  u32 *applied_acls;
+  int idx;
+
+  if (is_input)
+    vec_validate (am->input_acl_vec_by_sw_if_index, sw_if_index);
+  else
+    vec_validate (am->output_acl_vec_by_sw_if_index, sw_if_index);
+
+  applied_acls = is_input ? am->input_acl_vec_by_sw_if_index[sw_if_index]
+    : am->output_acl_vec_by_sw_if_index[sw_if_index];
+
+  for (idx = 0; idx < vec_len (applied_acls); idx++)
+    {
+      u8 action = 0;
+#ifdef FA_NODE_VERBOSE_DEBUG
+      clib_warning ("ACL_FA_NODE_DBG: Trying to match ACL: %d",
+		    applied_acls[idx]);
+#endif
+      if (acl_match_5tuple
+	  (am, applied_acls[idx], pkt_5tuple, is_ip6, &action,
+	   acl_match_p, rule_match_p, trace_bitmap))
+	{
+	  return action;
+	}
+    }
+#ifdef FA_NODE_VERBOSE_DEBUG
+  if (0 == vec_len (applied_acls))
+    clib_warning ("ACL_FA_NODE_DBG: No ACL on sw_if_index %d", sw_if_index);
+#endif
+  /* Deny by default. If there are no ACLs defined we should not be here. */
+  return 0;
+}
+
+static int
+offset_within_packet (vlib_buffer_t * b0, int offset)
+{
+  /* "within" for this code means at least 8 more bytes exist past the offset */
+  int packet_len = b0->current_length;
+  return (offset < packet_len - 8);
+}
+
+/* Extract the matching 5-tuple (addresses, protocol, L4 ports or ICMP
+ * type/code) and per-packet metadata from buffer b0 into *p5tuple_pkt.
+ * Known IPv6 extension headers are skipped to find the real L4 protocol.
+ * NOTE(review): l3_offset defaults to 14 (Ethernet header) except on the
+ * L3 input path; confirm this is also correct for the L3 output path. */
+static void
+acl_fill_5tuple (acl_main_t * am, vlib_buffer_t * b0, int is_ip6,
+		 int is_input, int is_l2_path, fa_5tuple_t * p5tuple_pkt)
+{
+  int l3_offset = 14;
+  int l4_offset;
+  u16 ports[2];
+  u16 proto;
+  /* IP4 and IP6 protocol numbers of ICMP */
+  static u8 icmp_protos[] = { IP_PROTOCOL_ICMP, IP_PROTOCOL_ICMP6 };
+
+  if (is_input && !(is_l2_path))
+    {
+      l3_offset = 0;
+    }
+
+
+  if (is_ip6)
+    {
+      /* src and dst addresses are contiguous in the IPv6 header, so a
+       * single copy fills both addr[0] and addr[1] (key words 0..3) */
+      clib_memcpy (&p5tuple_pkt->addr,
+		   get_ptr_to_offset (b0,
+				      offsetof (ip6_header_t,
+						src_address) + l3_offset),
+		   sizeof (p5tuple_pkt->addr));
+      proto =
+	*(u8 *) get_ptr_to_offset (b0,
+				   offsetof (ip6_header_t,
+					     protocol) + l3_offset);
+      l4_offset = l3_offset + sizeof (ip6_header_t);
+#ifdef FA_NODE_VERBOSE_DEBUG
+      clib_warning ("ACL_FA_NODE_DBG: proto: %d, l4_offset: %d", proto,
+		    l4_offset);
+#endif
+      /* IP6 EH handling is here, increment l4_offset if needs to, update the proto */
+      int need_skip_eh = clib_bitmap_get (am->fa_ipv6_known_eh_bitmap, proto);
+      if (PREDICT_FALSE (need_skip_eh))
+	{
+	  /* FIXME: add fragment header special handling. Currently causes treated as unknown header. */
+	  while (need_skip_eh && offset_within_packet (b0, l4_offset))
+	    {
+	      /* EH layout: byte 0 = next header, byte 1 = length in
+	       * 8-octet units, not counting the first 8 octets */
+	      u8 nwords = *(u8 *) get_ptr_to_offset (b0, 1 + l4_offset);
+	      proto = *(u8 *) get_ptr_to_offset (b0, l4_offset);
+	      l4_offset += 8 * (1 + (u16) nwords);
+#ifdef FA_NODE_VERBOSE_DEBUG
+	      clib_warning ("ACL_FA_NODE_DBG: new proto: %d, new offset: %d",
+			    proto, l4_offset);
+#endif
+	      need_skip_eh =
+		clib_bitmap_get (am->fa_ipv6_known_eh_bitmap, proto);
+	    }
+	}
+    }
+  else
+    {
+      /* IPv4 addresses fill only 4 of the 16 bytes per side, so clear the
+       * 32-byte address area of the key first */
+      p5tuple_pkt->kv.key[0] = 0;
+      p5tuple_pkt->kv.key[1] = 0;
+      p5tuple_pkt->kv.key[2] = 0;
+      p5tuple_pkt->kv.key[3] = 0;
+      clib_memcpy (&p5tuple_pkt->addr[0].ip4,
+		   get_ptr_to_offset (b0,
+				      offsetof (ip4_header_t,
+						src_address) + l3_offset),
+		   sizeof (p5tuple_pkt->addr[0].ip4));
+      clib_memcpy (&p5tuple_pkt->addr[1].ip4,
+		   get_ptr_to_offset (b0,
+				      offsetof (ip4_header_t,
+						dst_address) + l3_offset),
+		   sizeof (p5tuple_pkt->addr[1].ip4));
+      proto =
+	*(u8 *) get_ptr_to_offset (b0,
+				   offsetof (ip4_header_t,
+					     protocol) + l3_offset);
+      l4_offset = l3_offset + sizeof (ip4_header_t);
+    }
+  /* Remainder of the key and per-packet non-key data */
+  p5tuple_pkt->kv.key[4] = 0;
+  p5tuple_pkt->kv.value = 0;
+  if (PREDICT_TRUE (offset_within_packet (b0, l4_offset)))
+    {
+      p5tuple_pkt->l4.proto = proto;
+      p5tuple_pkt->pkt.l4_valid = 1;
+      if (icmp_protos[is_ip6] == proto)
+	{
+	  /* type */
+	  p5tuple_pkt->l4.port[0] =
+	    *(u8 *) get_ptr_to_offset (b0,
+				       l4_offset + offsetof (icmp46_header_t,
+							     type));
+	  /* code */
+	  p5tuple_pkt->l4.port[1] =
+	    *(u8 *) get_ptr_to_offset (b0,
+				       l4_offset + offsetof (icmp46_header_t,
+							     code));
+	}
+      else if ((IPPROTO_TCP == proto) || (IPPROTO_UDP == proto))
+	{
+	  clib_memcpy (&ports,
+		       get_ptr_to_offset (b0,
+					  l4_offset + offsetof (tcp_header_t,
+								src_port)),
+		       sizeof (ports));
+	  p5tuple_pkt->l4.port[0] = ntohs (ports[0]);
+	  p5tuple_pkt->l4.port[1] = ntohs (ports[1]);
+
+	  /* The flags byte is read for UDP too, but only marked valid for TCP */
+	  p5tuple_pkt->pkt.tcp_flags =
+	    *(u8 *) get_ptr_to_offset (b0,
+				       l4_offset + offsetof (tcp_header_t,
+							     flags));
+	  p5tuple_pkt->pkt.tcp_flags_valid = (proto == IPPROTO_TCP);
+	}
+      /*
+       * FIXME: rather than the above conditional, here could
+       * be a nice generic mechanism to extract two L4 values:
+       *
+       * have a per-protocol array of 4 elements like this:
+       *   u8 offset; to take the byte from, off L4 header
+       *   u8 mask; to mask it with, before storing
+       *
+       * this way we can describe UDP, TCP and ICMP[46] semantics,
+       * and add a sort of FPM-type behavior for other protocols.
+       *
+       * Of course, is it faster ? and is it needed ?
+       *
+       */
+    }
+}
+
+
+/* Session keys match the packets received, and mirror the packets sent:
+ * on input the packet tuple maps straight in, on output addresses and
+ * ports are swapped so both flow directions hit the same session entry. */
+static void
+acl_make_5tuple_session_key (int is_input, fa_5tuple_t * p5tuple_pkt,
+			     fa_5tuple_t * p5tuple_sess)
+{
+  int side = is_input ? 0 : 1;
+  int other_side = 1 - side;
+  p5tuple_sess->addr[side] = p5tuple_pkt->addr[0];
+  p5tuple_sess->addr[other_side] = p5tuple_pkt->addr[1];
+  p5tuple_sess->l4.as_u64 = p5tuple_pkt->l4.as_u64;
+  p5tuple_sess->l4.port[side] = p5tuple_pkt->l4.port[0];
+  p5tuple_sess->l4.port[other_side] = p5tuple_pkt->l4.port[1];
+}
+
+
+static int
+acl_fa_ifc_has_sessions (acl_main_t * am, int sw_if_index0)
+{
+  /* The bit is set once the per-interface session bihash is initialized */
+  return clib_bitmap_get (am->fa_sessions_on_sw_if_index, sw_if_index0);
+}
+
+static int
+acl_fa_ifc_has_in_acl (acl_main_t * am, int sw_if_index0)
+{
+  /* Set/cleared by acl_fa_enable_disable() for the input direction */
+  return clib_bitmap_get (am->fa_in_acl_on_sw_if_index, sw_if_index0);
+}
+
+static int
+acl_fa_ifc_has_out_acl (acl_main_t * am, int sw_if_index0)
+{
+  /* Set/cleared by acl_fa_enable_disable() for the output direction */
+  return clib_bitmap_get (am->fa_out_acl_on_sw_if_index, sw_if_index0);
+}
+
+
+static int
+fa_session_get_timeout_type (acl_main_t * am, fa_session_t * sess)
+{
+  /* A TCP session that has seen SYN and ACK (and no RST/FIN) in both
+   * directions is established and gets the long idle timeout; any other
+   * TCP flag combination is transient.  UDP and unknown protocols use
+   * the UDP idle timeout. */
+  u16 masked_flags =
+    sess->tcp_flags_seen.as_u16 & ((TCP_FLAGS_RSTFINACKSYN << 8) +
+				   TCP_FLAGS_RSTFINACKSYN);
+  if (IPPROTO_TCP == sess->info.l4.proto)
+    {
+      if (((TCP_FLAGS_ACKSYN << 8) + TCP_FLAGS_ACKSYN) == masked_flags)
+	return ACL_TIMEOUT_TCP_IDLE;
+      return ACL_TIMEOUT_TCP_TRANSIENT;
+    }
+  return ACL_TIMEOUT_UDP_IDLE;
+}
+
+
+static u64
+fa_session_get_timeout (acl_main_t * am, fa_session_t * sess)
+{
+  /* Convert the configured seconds for this session's timeout type
+   * into CPU clock ticks */
+  u64 clocks = am->vlib_main->clib_time.clocks_per_second;
+  return clocks * am->session_timeout_sec[fa_session_get_timeout_type (am, sess)];
+}
+
+/* Lazily create the per-interface session bihash on first use and mark
+ * the interface as having a session table (see acl_fa_ifc_has_sessions). */
+static void
+acl_fa_ifc_init_sessions (acl_main_t * am, int sw_if_index0)
+{
+#ifdef FA_NODE_VERBOSE_DEBUG
+  clib_warning
+    ("Initializing bihash for sw_if_index %d num buckets %lu memory size %llu",
+     sw_if_index0, am->fa_conn_table_hash_num_buckets,
+     am->fa_conn_table_hash_memory_size);
+#endif
+  vec_validate (am->fa_sessions_by_sw_if_index, sw_if_index0);
+  BV (clib_bihash_init) (&am->fa_sessions_by_sw_if_index
+			 [sw_if_index0], "ACL plugin FA session bihash",
+			 am->fa_conn_table_hash_num_buckets,
+			 am->fa_conn_table_hash_memory_size);
+  am->fa_sessions_on_sw_if_index =
+    clib_bitmap_set (am->fa_sessions_on_sw_if_index, sw_if_index0, 1);
+}
+
+static void
+acl_fa_conn_list_add_session (acl_main_t * am, u32 sess_id)
+{
+  /* Append the session to the tail of the doubly-linked LRU list that
+   * corresponds to its current timeout type. */
+  fa_session_t *sess = am->fa_sessions_pool + sess_id;
+  u8 list_id = fa_session_get_timeout_type (am, sess);
+  u32 old_tail = am->fa_conn_list_tail[list_id];
+
+  sess->link_list_id = list_id;
+  sess->link_next_idx = ~0;
+  sess->link_prev_idx = old_tail;
+
+  if (old_tail != ~0)
+    {
+      fa_session_t *tail_sess = am->fa_sessions_pool + old_tail;
+      tail_sess->link_next_idx = sess_id;
+    }
+  am->fa_conn_list_tail[list_id] = sess_id;
+
+  /* First element of an empty list is both head and tail */
+  if (am->fa_conn_list_head[list_id] == ~0)
+    am->fa_conn_list_head[list_id] = sess_id;
+}
+
+/* Unlink a session from its per-timeout-type doubly-linked LRU list,
+ * fixing up the neighbor links and the list head/tail as needed.
+ * The list-id consistency warnings flag corruption but do not repair it. */
+static void
+acl_fa_conn_list_delete_session (acl_main_t *am, u32 sess_id)
+{
+  fa_session_t *sess = am->fa_sessions_pool + sess_id;
+  if (~0 != sess->link_prev_idx) {
+    fa_session_t *prev_sess = am->fa_sessions_pool + sess->link_prev_idx;
+    prev_sess->link_next_idx = sess->link_next_idx;
+    if (prev_sess->link_list_id != sess->link_list_id)
+      clib_warning("(prev_sess->link_list_id != sess->link_list_id)");
+  }
+  if (~0 != sess->link_next_idx) {
+    fa_session_t *next_sess = am->fa_sessions_pool + sess->link_next_idx;
+    next_sess->link_prev_idx = sess->link_prev_idx;
+    if (next_sess->link_list_id != sess->link_list_id)
+      clib_warning("(next_sess->link_list_id != sess->link_list_id)");
+  }
+  if (am->fa_conn_list_head[sess->link_list_id] == sess_id) {
+    am->fa_conn_list_head[sess->link_list_id] = sess->link_next_idx;
+  }
+  if (am->fa_conn_list_tail[sess->link_list_id] == sess_id) {
+    /* BUGFIX: the new tail is the *previous* element. The tail's
+     * link_next_idx is always ~0, so the old code emptied the tail
+     * while the head still pointed into the list, corrupting it. */
+    am->fa_conn_list_tail[sess->link_list_id] = sess->link_prev_idx;
+  }
+}
+
+
+/* Stub: always reports the session as alive. Actual expiry is decided by
+ * the cleaner process via acl_fa_conn_has_timed_out(). */
+int
+acl_fa_session_is_dead (acl_main_t * am, u32 sw_if_index, u64 now,
+			u32 sess_id)
+{
+  return 0;
+}
+
+static void
+acl_fa_restart_timer_for_session (acl_main_t * am, u64 now, u32 sess_id)
+{
+  /* Re-adding moves the session to the tail of its list and also
+   * recomputes its (possibly changed) timeout type. */
+  acl_fa_conn_list_delete_session (am, sess_id);
+  acl_fa_conn_list_add_session (am, sess_id);
+}
+
+
+static u8
+acl_fa_track_session (acl_main_t * am, int is_input, u32 sw_if_index, u64 now,
+		      fa_session_t * sess, fa_5tuple_t * pkt_5tuple)
+{
+  /* Refresh the activity timestamp and accumulate the TCP flags seen
+   * for this direction (tcp_flags_seen is indexed by is_input). */
+  sess->last_active_time = now;
+  if (pkt_5tuple->pkt.tcp_flags_valid)
+    sess->tcp_flags_seen.as_u8[is_input] |= pkt_5tuple->pkt.tcp_flags;
+  /* Action 3: the packet belongs to a tracked session */
+  return 3;
+}
+
+
+static void
+acl_fa_delete_session (acl_main_t * am, u32 sw_if_index, u32 sess_id)
+{
+  /* Remove the entry from the per-interface bihash, return the session
+   * to the pool, and bump the delete counter used for table sizing. */
+  fa_session_t *sess = am->fa_sessions_pool + sess_id;
+  BV (clib_bihash_add_del) (&am->fa_sessions_by_sw_if_index[sw_if_index],
+			    &sess->info.kv, 0);
+  pool_put_index (am->fa_sessions_pool, sess_id);
+  /* Deleting from timer wheel not needed, as the cleaner deals with the timers. */
+  vec_validate (am->fa_session_dels_by_sw_if_index, sw_if_index);
+  am->fa_session_dels_by_sw_if_index[sw_if_index]++;
+}
+
+static int
+acl_fa_can_add_session (acl_main_t * am, int is_input, u32 sw_if_index)
+{
+  /* Current session count = lifetime adds minus lifetime deletes */
+  vec_validate (am->fa_session_adds_by_sw_if_index, sw_if_index);
+  vec_validate (am->fa_session_dels_by_sw_if_index, sw_if_index);
+  u64 curr_sess = am->fa_session_adds_by_sw_if_index[sw_if_index]
+    - am->fa_session_dels_by_sw_if_index[sw_if_index];
+  return (curr_sess < am->fa_conn_table_max_entries);
+}
+
+always_inline void
+acl_fa_try_recycle_session (acl_main_t * am, int is_input, u32 sw_if_index)
+{
+  /* When the table is full, reclaim the oldest TCP transient session
+   * (head of that LRU list), if any exists. */
+  u32 head_id = am->fa_conn_list_head[ACL_TIMEOUT_TCP_TRANSIENT];
+  if (head_id != ~0)
+    {
+      acl_fa_conn_list_delete_session (am, head_id);
+      acl_fa_delete_session (am, sw_if_index, head_id);
+    }
+}
+
+/* Allocate a session from the pool, key it by the direction-normalized
+ * 5-tuple, insert it into the per-interface bihash (value = pool index),
+ * and link it onto the LRU list for its timeout type. */
+static void
+acl_fa_add_session (acl_main_t * am, int is_input, u32 sw_if_index, u64 now,
+		    fa_5tuple_t * p5tuple)
+{
+  clib_bihash_kv_40_8_t *pkv = &p5tuple->kv;
+  clib_bihash_kv_40_8_t kv;
+  u32 sess_id;
+  fa_session_t *sess;
+
+  pool_get (am->fa_sessions_pool, sess);
+  sess_id = sess - am->fa_sessions_pool;
+
+
+  kv.key[0] = pkv->key[0];
+  kv.key[1] = pkv->key[1];
+  kv.key[2] = pkv->key[2];
+  kv.key[3] = pkv->key[3];
+  kv.key[4] = pkv->key[4];
+  kv.value = sess_id;
+
+  /* Only the 40-byte key part is copied into sess->info; the value/pkt
+   * word is left untouched (the bihash lookup uses only the key). */
+  memcpy (sess, pkv, sizeof (pkv->key));
+  sess->last_active_time = now;
+  sess->sw_if_index = sw_if_index;
+  sess->tcp_flags_seen.as_u16 = 0;
+  sess->reserved1 = 0;
+  sess->link_list_id = ~0;
+  sess->link_prev_idx = ~0;
+  sess->link_next_idx = ~0;
+
+
+
+  if (!acl_fa_ifc_has_sessions (am, sw_if_index))
+    {
+      acl_fa_ifc_init_sessions (am, sw_if_index);
+    }
+
+  BV (clib_bihash_add_del) (&am->fa_sessions_by_sw_if_index[sw_if_index],
+			    &kv, 1);
+  acl_fa_conn_list_add_session (am, sess_id);
+
+  vec_validate (am->fa_session_adds_by_sw_if_index, sw_if_index);
+  am->fa_session_adds_by_sw_if_index[sw_if_index]++;
+}
+
+static int
+acl_fa_find_session (acl_main_t * am, u32 sw_if_index0, fa_5tuple_t * p5tuple,
+		     clib_bihash_kv_40_8_t * pvalue_sess)
+{
+  /* Returns 1 (and fills *pvalue_sess) when the 5-tuple is present */
+  int rv = BV (clib_bihash_search)
+    (&am->fa_sessions_by_sw_if_index[sw_if_index0], &p5tuple->kv,
+     pvalue_sess);
+  return (0 == rv);
+}
+
+
+/* Shared worker for all eight ACL FA graph nodes.  The is_ip6 / is_input /
+ * is_l2_path flags are compile-time constants at each call site, so the
+ * compiler specializes this body per node.  Per packet: build the 5-tuple,
+ * try to match and track an existing session; otherwise run the full ACL
+ * match, creating a session when the action asks for one (action == 2).
+ * action == 0 means drop (next0 stays 0 -> error-drop). */
+always_inline uword
+acl_fa_node_fn (vlib_main_t * vm,
+		vlib_node_runtime_t * node, vlib_frame_t * frame, int is_ip6,
+		int is_input, int is_l2_path, u32 * l2_feat_next_node_index,
+		vlib_node_registration_t * acl_fa_node)
+{
+  u32 n_left_from, *from, *to_next;
+  acl_fa_next_t next_index;
+  u32 pkts_acl_checked = 0;
+  u32 pkts_new_session = 0;
+  u32 pkts_exist_session = 0;
+  u32 pkts_acl_permit = 0;
+  u32 pkts_restart_session_timer = 0;
+  u32 trace_bitmap = 0;
+  u32 feature_bitmap0;
+  acl_main_t *am = &acl_main;
+  fa_5tuple_t fa_5tuple, kv_sess;
+  clib_bihash_kv_40_8_t value_sess;
+  vlib_node_runtime_t *error_node;
+  u64 now = clib_cpu_time_now ();
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  error_node = vlib_node_get_runtime (vm, acl_fa_node->index);
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  u32 bi0;
+	  vlib_buffer_t *b0;
+	  u32 next0 = 0;
+	  u8 action = 0;
+	  u32 sw_if_index0;
+	  int acl_check_needed = 1;
+	  u32 match_acl_in_index = ~0;
+	  u32 match_rule_index = ~0;
+	  u8 error0 = 0;
+
+	  /* speculatively enqueue b0 to the current next frame */
+	  bi0 = from[0];
+	  to_next[0] = bi0;
+	  from += 1;
+	  to_next += 1;
+	  n_left_from -= 1;
+	  n_left_to_next -= 1;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+
+	  if (is_input)
+	    sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+	  else
+	    sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+	  /* feature_bitmap0 is only read below when is_l2_path is set */
+	  if (is_l2_path)
+	    feature_bitmap0 = vnet_buffer (b0)->l2.feature_bitmap;
+
+	  /*
+	   * Extract the L3/L4 matching info into a 5-tuple structure,
+	   * then create a session key whose layout is independent on forward or reverse
+	   * direction of the packet.
+	   */
+
+	  acl_fill_5tuple (am, b0, is_ip6, is_input, is_l2_path, &fa_5tuple);
+	  acl_make_5tuple_session_key (is_input, &fa_5tuple, &kv_sess);
+#ifdef FA_NODE_VERBOSE_DEBUG
+	  clib_warning
+	    ("ACL_FA_NODE_DBG: session 5-tuple %016llx %016llx %016llx %016llx %016llx : %016llx",
+	     kv_sess.kv.key[0], kv_sess.kv.key[1], kv_sess.kv.key[2],
+	     kv_sess.kv.key[3], kv_sess.kv.key[4], kv_sess.kv.value);
+	  clib_warning
+	    ("ACL_FA_NODE_DBG: packet 5-tuple %016llx %016llx %016llx %016llx %016llx : %016llx",
+	     fa_5tuple.kv.key[0], fa_5tuple.kv.key[1], fa_5tuple.kv.key[2],
+	     fa_5tuple.kv.key[3], fa_5tuple.kv.key[4], fa_5tuple.kv.value);
+#endif
+
+	  /* Try to match an existing session first */
+
+	  if (acl_fa_ifc_has_sessions (am, sw_if_index0))
+	    {
+	      if (acl_fa_find_session
+		  (am, sw_if_index0, &kv_sess, &value_sess))
+		{
+		  trace_bitmap |= 0x80000000;
+		  error0 = ACL_FA_ERROR_ACL_EXIST_SESSION;
+		  // FIXME assert(value_sess.value == (0xffffffff & value_sess.value));
+		  u32 sess_id = value_sess.value;
+		  fa_session_t *sess = am->fa_sessions_pool + sess_id;
+		  int old_timeout_type =
+		    fa_session_get_timeout_type (am, sess);
+		  action =
+		    acl_fa_track_session (am, is_input, sw_if_index0, now,
+					  sess, &fa_5tuple);
+		  /* expose the session id to the tracer */
+		  match_rule_index = sess_id;
+		  int new_timeout_type =
+		    fa_session_get_timeout_type (am, sess);
+		  acl_check_needed = 0;
+		  pkts_exist_session += 1;
+		  /* Tracking might have changed the session timeout type, e.g. from transient to established */
+		  if (PREDICT_FALSE (old_timeout_type != new_timeout_type))
+		    {
+		      acl_fa_restart_timer_for_session (am, now, sess_id);
+		      pkts_restart_session_timer++;
+		      trace_bitmap |=
+			0x00010000 + ((0xff & old_timeout_type) << 8) +
+			(0xff & new_timeout_type);
+		    }
+		}
+	    }
+
+	  if (acl_check_needed)
+	    {
+	      action =
+		full_acl_match_5tuple (sw_if_index0, &fa_5tuple, is_l2_path,
+				       is_ip6, is_input, &match_acl_in_index,
+				       &match_rule_index, &trace_bitmap);
+	      error0 = action;
+	      if (1 == action)
+		pkts_acl_permit += 1;
+	      /* action 2 = permit+reflect: create a session, recycling a
+	       * transient one if the table is at capacity */
+	      if (2 == action)
+		{
+		  if (!acl_fa_can_add_session (am, is_input, sw_if_index0))
+		    acl_fa_try_recycle_session (am, is_input, sw_if_index0);
+
+		  if (acl_fa_can_add_session (am, is_input, sw_if_index0))
+		    {
+		      acl_fa_add_session (am, is_input, sw_if_index0, now,
+					  &kv_sess);
+		      pkts_new_session += 1;
+		    }
+		  else
+		    {
+		      action = 0;
+		      error0 = ACL_FA_ERROR_ACL_TOO_MANY_SESSIONS;
+		    }
+		}
+	    }
+
+
+
+	  if (action > 0)
+	    {
+	      if (is_l2_path)
+		next0 =
+		  feat_bitmap_get_next_node_index (l2_feat_next_node_index,
+						   feature_bitmap0);
+	      else
+		vnet_feature_next (sw_if_index0, &next0, b0);
+	    }
+
+	  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+			     && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+	    {
+	      acl_fa_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
+	      t->sw_if_index = sw_if_index0;
+	      t->next_index = next0;
+	      t->match_acl_in_index = match_acl_in_index;
+	      t->match_rule_index = match_rule_index;
+	      t->packet_info[0] = fa_5tuple.kv.key[0];
+	      t->packet_info[1] = fa_5tuple.kv.key[1];
+	      t->packet_info[2] = fa_5tuple.kv.key[2];
+	      t->packet_info[3] = fa_5tuple.kv.key[3];
+	      t->packet_info[4] = fa_5tuple.kv.key[4];
+	      t->packet_info[5] = fa_5tuple.kv.value;
+	      t->action = action;
+	      t->trace_bitmap = trace_bitmap;
+	    }
+
+	  /* next0 == 0 is the drop disposition; record the reason */
+	  next0 = next0 < node->n_next_nodes ? next0 : 0;
+	  if (0 == next0)
+	    b0->error = error_node->errors[error0];
+
+	  pkts_acl_checked += 1;
+
+	  /* verify speculative enqueue, maybe switch current next frame */
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+					   to_next, n_left_to_next, bi0,
+					   next0);
+	}
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  vlib_node_increment_counter (vm, acl_fa_node->index,
+			       ACL_FA_ERROR_ACL_CHECK, pkts_acl_checked);
+  vlib_node_increment_counter (vm, acl_fa_node->index,
+			       ACL_FA_ERROR_ACL_PERMIT, pkts_acl_permit);
+  vlib_node_increment_counter (vm, acl_fa_node->index,
+			       ACL_FA_ERROR_ACL_NEW_SESSION,
+			       pkts_new_session);
+  vlib_node_increment_counter (vm, acl_fa_node->index,
+			       ACL_FA_ERROR_ACL_EXIST_SESSION,
+			       pkts_exist_session);
+  vlib_node_increment_counter (vm, acl_fa_node->index,
+			       ACL_FA_ERROR_ACL_RESTART_SESSION_TIMER,
+			       pkts_restart_session_timer);
+  return frame->n_vectors;
+}
+
+
+vlib_node_registration_t acl_in_l2_ip6_node;
+/* L2 path, input, IPv6: wrapper binding acl_fa_node_fn(is_ip6=1, is_input=1, is_l2_path=1) */
+static uword
+acl_in_ip6_l2_node_fn (vlib_main_t * vm,
+		       vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+  acl_main_t *am = &acl_main;
+  return acl_fa_node_fn (vm, node, frame, 1, 1, 1,
+			 am->fa_acl_in_ip6_l2_node_feat_next_node_index,
+			 &acl_in_l2_ip6_node);
+}
+
+vlib_node_registration_t acl_in_l2_ip4_node;
+/* L2 path, input, IPv4: wrapper binding acl_fa_node_fn(is_ip6=0, is_input=1, is_l2_path=1) */
+static uword
+acl_in_ip4_l2_node_fn (vlib_main_t * vm,
+		       vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+  acl_main_t *am = &acl_main;
+  return acl_fa_node_fn (vm, node, frame, 0, 1, 1,
+			 am->fa_acl_in_ip4_l2_node_feat_next_node_index,
+			 &acl_in_l2_ip4_node);
+}
+
+vlib_node_registration_t acl_out_l2_ip6_node;
+/* L2 path, output, IPv6: wrapper binding acl_fa_node_fn(is_ip6=1, is_input=0, is_l2_path=1) */
+static uword
+acl_out_ip6_l2_node_fn (vlib_main_t * vm,
+			vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+  acl_main_t *am = &acl_main;
+  return acl_fa_node_fn (vm, node, frame, 1, 0, 1,
+			 am->fa_acl_out_ip6_l2_node_feat_next_node_index,
+			 &acl_out_l2_ip6_node);
+}
+
+vlib_node_registration_t acl_out_l2_ip4_node;
+/* L2 path, output, IPv4: wrapper binding acl_fa_node_fn(is_ip6=0, is_input=0, is_l2_path=1) */
+static uword
+acl_out_ip4_l2_node_fn (vlib_main_t * vm,
+			vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+  acl_main_t *am = &acl_main;
+  return acl_fa_node_fn (vm, node, frame, 0, 0, 1,
+			 am->fa_acl_out_ip4_l2_node_feat_next_node_index,
+			 &acl_out_l2_ip4_node);
+}
+
+
+/**** L3 processing path nodes ****/
+
+
+vlib_node_registration_t acl_in_fa_ip6_node;
+/* L3 path, input, IPv6: wrapper binding acl_fa_node_fn(is_ip6=1, is_input=1, is_l2_path=0) */
+static uword
+acl_in_ip6_fa_node_fn (vlib_main_t * vm,
+		       vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+  return acl_fa_node_fn (vm, node, frame, 1, 1, 0, 0, &acl_in_fa_ip6_node);
+}
+
+vlib_node_registration_t acl_in_fa_ip4_node;
+/* L3 path, input, IPv4: wrapper binding acl_fa_node_fn(is_ip6=0, is_input=1, is_l2_path=0) */
+static uword
+acl_in_ip4_fa_node_fn (vlib_main_t * vm,
+		       vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+  return acl_fa_node_fn (vm, node, frame, 0, 1, 0, 0, &acl_in_fa_ip4_node);
+}
+
+vlib_node_registration_t acl_out_fa_ip6_node;
+/* L3 path, output, IPv6: wrapper binding acl_fa_node_fn(is_ip6=1, is_input=0, is_l2_path=0) */
+static uword
+acl_out_ip6_fa_node_fn (vlib_main_t * vm,
+			vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+  return acl_fa_node_fn (vm, node, frame, 1, 0, 0, 0, &acl_out_fa_ip6_node);
+}
+
+vlib_node_registration_t acl_out_fa_ip4_node;
+/* L3 path, output, IPv4: wrapper binding acl_fa_node_fn(is_ip6=0, is_input=0, is_l2_path=0) */
+static uword
+acl_out_ip4_fa_node_fn (vlib_main_t * vm,
+			vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+  return acl_fa_node_fn (vm, node, frame, 0, 0, 0, 0, &acl_out_fa_ip4_node);
+}
+
+/*
+ * This process performs all the connection clean up - both for idle connections,
+ * as well as receiving the signals to clean up the connections in case of sw_if_index deletion,
+ * or (maybe in the future) the connection deletion due to policy reasons.
+ *
+ * The previous iteration (l2sess) attempted to clean up the connections in small increments,
+ * in-band, but the problem it tried to preemptively address (process starvation) is yet to be seen.
+ *
+ * The approach with a single thread deleting the connections is simpler, thus we use it until
+ * there is a real starvation problem to solve.
+ *
+ */
+
+
+/* *INDENT-OFF* */
+/* Counters exposed by the cleaner process through the node error mechanism */
+#define foreach_acl_fa_cleaner_error \
+_(EVENT_CYCLE, "event processing cycle") \
+_(TIMER_RESTARTED, "restarted session timers") \
+_(DELETED_SESSIONS, "deleted sessions") \
+_(ALREADY_DELETED, "timer event for already deleted session") \
+_(DELETE_BY_SW_IF_INDEX, "delete by sw_if_index event") \
+_(DELETE_BY_SW_IF_INDEX_OK, "delete by sw_if_index completed ok") \
+_(WAIT_WITHOUT_TIMEOUT, "process waits without timeout") \
+_(WAIT_WITH_TIMEOUT, "process waits with timeout") \
+_(UNKNOWN_EVENT, "unknown event received") \
+/* end of errors */
+
+typedef enum
+{
+#define _(sym,str) ACL_FA_CLEANER_ERROR_##sym,
+  foreach_acl_fa_cleaner_error
+#undef _
+  ACL_FA_CLEANER_N_ERROR,
+} acl_fa_cleaner_error_t;
+
+/* Human-readable strings, index-aligned with acl_fa_cleaner_error_t */
+static char *acl_fa_cleaner_error_strings[] = {
+#define _(sym,string) string,
+  foreach_acl_fa_cleaner_error
+#undef _
+};
+
+/* Delete every session on sw_if_index (or all sessions when sw_if_index
+ * is ~0).  The delete happens in two passes: collect indices first, since
+ * deleting inside pool_foreach would invalidate the iteration, then delete.
+ * Returns 1 when no matching sessions remain afterwards. */
+static int
+acl_fa_clean_sessions_by_sw_if_index (acl_main_t *am, u32 sw_if_index, u32 *count)
+{
+
+  int undeleted = 0;
+  fa_session_t *sess;
+  uword *dv = NULL;
+  uword *ii;
+
+  pool_foreach(sess, am->fa_sessions_pool, ({
+    if ( (~0 == sw_if_index) || (sw_if_index == sess->sw_if_index) )
+      vec_add1(dv, sess-am->fa_sessions_pool);
+  }));
+  vec_foreach(ii, dv)
+  {
+    sess =  pool_elt_at_index(am->fa_sessions_pool, *ii);
+    acl_fa_delete_session(am, sess->sw_if_index, *ii);
+    (*count)++;
+  }
+
+  /* Verification pass: anything still matching was not deleted */
+  pool_foreach(sess, am->fa_sessions_pool, ({
+    if ( (~0 == sw_if_index) || (sw_if_index == sess->sw_if_index) )
+      undeleted++;
+  }));
+  if (undeleted == 0)
+    {
+      if (~0 == sw_if_index)
+        {
+          /* FIXME: clean-up tables ? */
+        }
+      else
+        {
+          /* FIXME: clean-up tables ? */
+        }
+    }
+  return (undeleted == 0);
+}
+/* *INDENT-ON* */
+
+static vlib_node_registration_t acl_fa_session_cleaner_process_node;
+
+static int
+acl_fa_conn_has_timed_out (acl_main_t *am, u64 now, u32 session_index)
+{
+  /* A session has expired once its last activity plus the timeout for
+   * its (type-dependent) idle interval lies in the past. */
+  fa_session_t *sess = am->fa_sessions_pool + session_index;
+  u64 deadline = sess->last_active_time + fa_session_get_timeout (am, sess);
+  return (deadline < now);
+}
+
+
+/* Dedicated process node that ages out idle sessions and handles
+ * delete-by-sw_if_index requests.  Each cycle it waits (with an adaptive
+ * timeout), handles any events, harvests expired sessions off the heads
+ * of the per-timeout-type LRU lists, and deletes (or keeps) them. */
+static uword
+acl_fa_session_cleaner_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
+				vlib_frame_t * f)
+{
+  acl_main_t *am = &acl_main;
+  u64 now = clib_cpu_time_now ();
+  f64 cpu_cps = vm->clib_time.clocks_per_second;
+  u64 next_expire;
+  /* We should call timer wheel at least twice a second */
+  u64 max_timer_wait_interval = cpu_cps / 2;
+  am->fa_current_cleaner_timer_wait_interval = max_timer_wait_interval;
+
+  u32 *expired = NULL;
+  uword event_type, *event_data = 0;
+
+  /* Publish our node index so the datapath can signal us events */
+  am->fa_cleaner_node_index = acl_fa_session_cleaner_process_node.index;
+
+  while (1)
+    {
+      u32 count_deleted_sessions = 0;
+      u32 count_already_deleted = 0;
+      u32 count_timer_restarted = 0;
+      now = clib_cpu_time_now ();
+      next_expire = now + am->fa_current_cleaner_timer_wait_interval;
+
+      {
+	f64 timeout = ((i64) next_expire - (i64) now) / cpu_cps;
+	if (timeout <= 0)
+	  {
+	    /* skip waiting altogether */
+	    event_type = ~0;
+	  }
+	else
+	  {
+	    /* Timing wheel code is happier if it is called regularly */
+	    if (timeout > 0.5)
+	      timeout = 0.5;
+	    vlib_node_increment_counter (vm,
+					 acl_fa_session_cleaner_process_node.
+					 index,
+					 ACL_FA_CLEANER_ERROR_WAIT_WITH_TIMEOUT,
+					 1);
+	    (void) vlib_process_wait_for_event_or_clock (vm, timeout);
+	    event_type = vlib_process_get_events (vm, &event_data);
+	  }
+      }
+
+      now = clib_cpu_time_now ();
+      switch (event_type)
+	{
+	case ~0:
+	  /* nothing to do */
+	  break;
+	case ACL_FA_CLEANER_RESCHEDULE:
+	  /* Nothing to do. */
+	  break;
+	case ACL_FA_CLEANER_DELETE_BY_SW_IF_INDEX:
+	  {
+	    /* Each event datum is the sw_if_index whose sessions to purge */
+	    uword *sw_if_index0;
+	    vec_foreach (sw_if_index0, event_data)
+	    {
+	      vlib_node_increment_counter (vm,
+					   acl_fa_session_cleaner_process_node.
+					   index,
+					   ACL_FA_CLEANER_ERROR_DELETE_BY_SW_IF_INDEX,
+					   1);
+#ifdef FA_NODE_VERBOSE_DEBUG
+	      clib_warning
+		("ACL_FA_NODE_CLEAN: ACL_FA_CLEANER_DELETE_BY_SW_IF_INDEX: %d",
+		 *sw_if_index0);
+#endif
+	      u32 count = 0;
+	      int result =
+		acl_fa_clean_sessions_by_sw_if_index (am, *sw_if_index0,
+						      &count);
+	      count_deleted_sessions += count;
+	      vlib_node_increment_counter (vm,
+					   acl_fa_session_cleaner_process_node.
+					   index,
+					   ACL_FA_CLEANER_ERROR_DELETE_BY_SW_IF_INDEX_OK,
+					   result);
+	    }
+	  }
+	  break;
+	default:
+#ifdef FA_NODE_VERBOSE_DEBUG
+	  clib_warning ("ACL plugin connection cleaner: unknown event %u",
+			event_type);
+#endif
+	  vlib_node_increment_counter (vm,
+				       acl_fa_session_cleaner_process_node.
+				       index,
+				       ACL_FA_CLEANER_ERROR_UNKNOWN_EVENT, 1);
+	  break;
+	}
+
+      {
+	/* Harvest timed-out sessions from the head of each LRU list,
+	 * bounded per cycle to avoid monopolizing the main loop */
+	u8 tt = 0;
+	for(tt = 0; tt < ACL_N_TIMEOUTS; tt++) {
+	  while((vec_len(expired) < 2*am->fa_max_deleted_sessions_per_interval) && (~0 != am->fa_conn_list_head[tt]) && (acl_fa_conn_has_timed_out(am, now, am->fa_conn_list_head[tt]))) {
+	    u32 sess_id = am->fa_conn_list_head[tt];
+	    vec_add1(expired, sess_id);
+	    acl_fa_conn_list_delete_session(am, sess_id);
+	  }
+	}
+      }
+
+
+      u32 *psid = NULL;
+      vec_foreach (psid, expired)
+      {
+	u32 session_index = *psid;
+	if (!pool_is_free_index (am->fa_sessions_pool, session_index))
+	  {
+	    fa_session_t *sess = am->fa_sessions_pool + session_index;
+	    u32 sw_if_index = sess->sw_if_index;
+	    u64 sess_timeout_time =
+	      sess->last_active_time + fa_session_get_timeout (am, sess);
+	    if (now < sess_timeout_time)
+	      {
+		/* clib_warning ("ACL_FA_NODE_CLEAN: Restarting timer for session %d",
+		   (int) session_index); */
+
+		/* Pretend we did this in the past, at last_active moment */
+		count_timer_restarted++;
+	      }
+	    else
+	      {
+		/* clib_warning ("ACL_FA_NODE_CLEAN: Deleting session %d",
+		   (int) session_index); */
+		acl_fa_delete_session (am, sw_if_index, session_index);
+		count_deleted_sessions++;
+	      }
+	  }
+	else
+	  {
+	    count_already_deleted++;
+	  }
+      }
+      /* Reuse the vectors next cycle without reallocating */
+      if (expired)
+	_vec_len (expired) = 0;
+      if (event_data)
+	_vec_len (event_data) = 0;
+
+      /* Adapt the wait interval to the recent deletion rate */
+      if (count_deleted_sessions > am->fa_max_deleted_sessions_per_interval) {
+        /* if there was too many sessions to delete, do less waiting around next time */
+        am->fa_current_cleaner_timer_wait_interval /= 2;
+      } else if (count_deleted_sessions < am->fa_min_deleted_sessions_per_interval) {
+        /* Too few deleted sessions, slowly increase the amount of sleep up to a limit */
+        if (am->fa_current_cleaner_timer_wait_interval < max_timer_wait_interval)
+          am->fa_current_cleaner_timer_wait_interval += cpu_cps * am->fa_cleaner_wait_time_increment;
+      }
+
+      vlib_node_increment_counter (vm,
+				   acl_fa_session_cleaner_process_node.index,
+				   ACL_FA_CLEANER_ERROR_EVENT_CYCLE, 1);
+      vlib_node_increment_counter (vm,
+				   acl_fa_session_cleaner_process_node.index,
+				   ACL_FA_CLEANER_ERROR_TIMER_RESTARTED,
+				   count_timer_restarted);
+      vlib_node_increment_counter (vm,
+				   acl_fa_session_cleaner_process_node.index,
+				   ACL_FA_CLEANER_ERROR_DELETED_SESSIONS,
+				   count_deleted_sessions);
+      vlib_node_increment_counter (vm,
+				   acl_fa_session_cleaner_process_node.index,
+				   ACL_FA_CLEANER_ERROR_ALREADY_DELETED,
+				   count_already_deleted);
+    }
+  /* NOT REACHED */
+  return 0;
+}
+
+
+/* Enable or disable the FA feature nodes for one direction on an
+ * interface, track the state in the per-direction bitmaps, and - when
+ * the last direction is disabled - ask the cleaner process to purge
+ * that interface's sessions. */
+void
+acl_fa_enable_disable (u32 sw_if_index, int is_input, int enable_disable)
+{
+  acl_main_t *am = &acl_main;
+  if (is_input)
+    {
+      vnet_feature_enable_disable ("ip4-unicast", "acl-plugin-in-ip4-fa",
+				   sw_if_index, enable_disable, 0, 0);
+      vnet_feature_enable_disable ("ip6-unicast", "acl-plugin-in-ip6-fa",
+				   sw_if_index, enable_disable, 0, 0);
+      am->fa_in_acl_on_sw_if_index =
+	clib_bitmap_set (am->fa_in_acl_on_sw_if_index, sw_if_index,
+			 enable_disable);
+    }
+  else
+    {
+      vnet_feature_enable_disable ("ip4-output", "acl-plugin-out-ip4-fa",
+				   sw_if_index, enable_disable, 0, 0);
+      vnet_feature_enable_disable ("ip6-output", "acl-plugin-out-ip6-fa",
+				   sw_if_index, enable_disable, 0, 0);
+      am->fa_out_acl_on_sw_if_index =
+	clib_bitmap_set (am->fa_out_acl_on_sw_if_index, sw_if_index,
+			 enable_disable);
+    }
+  if ((!enable_disable) && (!acl_fa_ifc_has_in_acl (am, sw_if_index))
+      && (!acl_fa_ifc_has_out_acl (am, sw_if_index)))
+    {
+      vlib_process_signal_event (am->vlib_main, am->fa_cleaner_node_index,
+				 ACL_FA_CLEANER_DELETE_BY_SW_IF_INDEX,
+				 sw_if_index);
+    }
+}
+
+
+
+/* *INDENT-OFF* */
+
+/* Registration of the session-cleaner process node */
+VLIB_REGISTER_NODE (acl_fa_session_cleaner_process_node, static) = {
+  .function = acl_fa_session_cleaner_process,
+  .type = VLIB_NODE_TYPE_PROCESS,
+  .name = "acl-plugin-fa-cleaner-process",
+  .n_errors = ARRAY_LEN (acl_fa_cleaner_error_strings),
+  .error_strings = acl_fa_cleaner_error_strings,
+  .n_next_nodes = 0,
+  .next_nodes = {},
+};
+
+
+/* Datapath node and feature-arc registrations, one per
+ * {L2,L3} x {input,output} x {ip4,ip6} combination */
+VLIB_REGISTER_NODE (acl_in_l2_ip6_node) =
+{
+  .function = acl_in_ip6_l2_node_fn,
+  .name = "acl-plugin-in-ip6-l2",
+  .vector_size = sizeof (u32),
+  .format_trace = format_acl_fa_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN (acl_fa_error_strings),
+  .error_strings = acl_fa_error_strings,
+  .n_next_nodes = ACL_FA_N_NEXT,
+  .next_nodes =
+  {
+    [ACL_FA_ERROR_DROP] = "error-drop",
+  }
+};
+
+VLIB_REGISTER_NODE (acl_in_l2_ip4_node) =
+{
+  .function = acl_in_ip4_l2_node_fn,
+  .name = "acl-plugin-in-ip4-l2",
+  .vector_size = sizeof (u32),
+  .format_trace = format_acl_fa_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN (acl_fa_error_strings),
+  .error_strings = acl_fa_error_strings,
+  .n_next_nodes = ACL_FA_N_NEXT,
+  .next_nodes =
+  {
+    [ACL_FA_ERROR_DROP] = "error-drop",
+  }
+};
+
+VLIB_REGISTER_NODE (acl_out_l2_ip6_node) =
+{
+  .function = acl_out_ip6_l2_node_fn,
+  .name = "acl-plugin-out-ip6-l2",
+  .vector_size = sizeof (u32),
+  .format_trace = format_acl_fa_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN (acl_fa_error_strings),
+  .error_strings = acl_fa_error_strings,
+  .n_next_nodes = ACL_FA_N_NEXT,
+  .next_nodes =
+  {
+    [ACL_FA_ERROR_DROP] = "error-drop",
+  }
+};
+
+VLIB_REGISTER_NODE (acl_out_l2_ip4_node) =
+{
+  .function = acl_out_ip4_l2_node_fn,
+  .name = "acl-plugin-out-ip4-l2",
+  .vector_size = sizeof (u32),
+  .format_trace = format_acl_fa_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN (acl_fa_error_strings),
+  .error_strings = acl_fa_error_strings,
+  .n_next_nodes = ACL_FA_N_NEXT,
+  .next_nodes =
+  {
+    [ACL_FA_ERROR_DROP] = "error-drop",
+  }
+};
+
+
+VLIB_REGISTER_NODE (acl_in_fa_ip6_node) =
+{
+  .function = acl_in_ip6_fa_node_fn,
+  .name = "acl-plugin-in-ip6-fa",
+  .vector_size = sizeof (u32),
+  .format_trace = format_acl_fa_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN (acl_fa_error_strings),
+  .error_strings = acl_fa_error_strings,
+  .n_next_nodes = ACL_FA_N_NEXT,
+  .next_nodes =
+  {
+    [ACL_FA_ERROR_DROP] = "error-drop",
+  }
+};
+
+VNET_FEATURE_INIT (acl_in_ip6_fa_feature, static) =
+{
+  .arc_name = "ip6-unicast",
+  .node_name = "acl-plugin-in-ip6-fa",
+  .runs_before = VNET_FEATURES ("ip6-flow-classify"),
+};
+
+VLIB_REGISTER_NODE (acl_in_fa_ip4_node) =
+{
+  .function = acl_in_ip4_fa_node_fn,
+  .name = "acl-plugin-in-ip4-fa",
+  .vector_size = sizeof (u32),
+  .format_trace = format_acl_fa_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN (acl_fa_error_strings),
+  .error_strings = acl_fa_error_strings,
+  .n_next_nodes = ACL_FA_N_NEXT,
+  .next_nodes =
+  {
+    [ACL_FA_ERROR_DROP] = "error-drop",
+  }
+};
+
+VNET_FEATURE_INIT (acl_in_ip4_fa_feature, static) =
+{
+  .arc_name = "ip4-unicast",
+  .node_name = "acl-plugin-in-ip4-fa",
+  .runs_before = VNET_FEATURES ("ip4-flow-classify"),
+};
+
+
+VLIB_REGISTER_NODE (acl_out_fa_ip6_node) =
+{
+  .function = acl_out_ip6_fa_node_fn,
+  .name = "acl-plugin-out-ip6-fa",
+  .vector_size = sizeof (u32),
+  .format_trace = format_acl_fa_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN (acl_fa_error_strings),
+  .error_strings = acl_fa_error_strings,
+  .n_next_nodes = ACL_FA_N_NEXT,
+  .next_nodes =
+  {
+    [ACL_FA_ERROR_DROP] = "error-drop",
+  }
+};
+
+VNET_FEATURE_INIT (acl_out_ip6_fa_feature, static) =
+{
+  .arc_name = "ip6-output",
+  .node_name = "acl-plugin-out-ip6-fa",
+  .runs_before = VNET_FEATURES ("interface-output"),
+};
+
+VLIB_REGISTER_NODE (acl_out_fa_ip4_node) =
+{
+  .function = acl_out_ip4_fa_node_fn,
+  .name = "acl-plugin-out-ip4-fa",
+  .vector_size = sizeof (u32),
+  .format_trace = format_acl_fa_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN (acl_fa_error_strings),
+  .error_strings = acl_fa_error_strings,
+  .n_next_nodes = ACL_FA_N_NEXT,
+  /* edit / add dispositions here */
+  .next_nodes =
+  {
+    [ACL_FA_ERROR_DROP] = "error-drop",
+  }
+};
+
+VNET_FEATURE_INIT (acl_out_ip4_fa_feature, static) =
+{
+  .arc_name = "ip4-output",
+  .node_name = "acl-plugin-out-ip4-fa",
+  .runs_before = VNET_FEATURES ("interface-output"),
+};
+
+
+/* *INDENT-ON* */
diff --git a/src/plugins/acl/fa_node.h b/src/plugins/acl/fa_node.h
new file mode 100644
index 00000000000..76a40a38486
--- /dev/null
+++ b/src/plugins/acl/fa_node.h
@@ -0,0 +1,99 @@
+#ifndef _FA_NODE_H_
+#define _FA_NODE_H_
+
+#include <stddef.h>
+#include "bihash_40_8.h"
+
+#define TCP_FLAG_FIN 0x01
+#define TCP_FLAG_SYN 0x02
+#define TCP_FLAG_RST 0x04
+#define TCP_FLAG_PUSH 0x08
+#define TCP_FLAG_ACK 0x10
+#define TCP_FLAG_URG 0x20
+#define TCP_FLAG_ECE 0x40
+#define TCP_FLAG_CWR 0x80
+#define TCP_FLAGS_RSTFINACKSYN (TCP_FLAG_RST + TCP_FLAG_FIN + TCP_FLAG_SYN + TCP_FLAG_ACK) /* mask of flags relevant to session state transitions */
+#define TCP_FLAGS_ACKSYN (TCP_FLAG_SYN + TCP_FLAG_ACK)
+
+#define ACL_FA_CONN_TABLE_DEFAULT_HASH_NUM_BUCKETS (64 * 1024) /* default bucket count for the session bihash */
+#define ACL_FA_CONN_TABLE_DEFAULT_HASH_MEMORY_SIZE (1<<30) /* default bihash heap: 1 GiB */
+#define ACL_FA_CONN_TABLE_DEFAULT_MAX_ENTRIES 1000000 /* default cap on tracked sessions */
+
+typedef union {
+  u64 as_u64;
+  struct {
+    u8 tcp_flags_valid; /* nonzero when tcp_flags below were extracted — set by fa_node.c; confirm there */
+    u8 tcp_flags;       /* raw TCP flag byte (TCP_FLAG_* bits above) */
+    u8 is_input;        /* direction marker: input vs output feature arc (assumed; verify in fa_node.c) */
+    u8 l4_valid;        /* nonzero when the L4 key could be parsed (assumed; verify in fa_node.c) */
+  };
+} fa_packet_info_t;
+
+typedef union {
+  u64 as_u64;
+  struct {
+    u16 port[2]; /* [0]=source, [1]=destination is the usual layout — TODO confirm against fa_node.c */
+    u16 proto;
+    u16 rsvd;
+  };
+} fa_session_l4_key_t;
+
+/* 5-tuple key overlaid on a bihash_40_8 key/value pair: 40 bytes of key, 8 of value */
+typedef union {
+  struct {
+    ip46_address_t addr[2];
+    fa_session_l4_key_t l4;
+    /* This field should align with u64 value in bihash_40_8 keyvalue struct */
+    fa_packet_info_t pkt;
+  };
+  clib_bihash_kv_40_8_t kv;
+} fa_5tuple_t;
+
+
+/* Per-connection tracking state; layout is commented byte-by-byte because the
+ * compile-time asserts below pin the total size to exactly two cachelines. */
+typedef struct {
+  fa_5tuple_t info; /* (5+1)*8 = 48 bytes */
+  u64 last_active_time; /* +8 bytes = 56 */
+  u32 sw_if_index; /* +4 bytes = 60 */
+  union {
+    u8 as_u8[2];
+    u16 as_u16;
+  } tcp_flags_seen; /* +2 bytes = 62; fixed: stray second ';' was an empty member declaration (invalid in strict C) */
+  u8 link_list_id; /* +1 byte = 63 */
+  u8 reserved1; /* +1 byte = 64 */
+  u32 link_prev_idx;
+  u32 link_next_idx;
+  u64 reserved2[7]; /* pad to 128 bytes total */
+} fa_session_t;
+
+
+/*
+ * A few compile-time constraints on the size and the layout of the union, to ensure
+ * it makes sense both for bihash and for us.
+ */
+
+#define CT_ASSERT_EQUAL(name, x,y) typedef int assert_ ## name ## _compile_time_assertion_failed[((x) == (y))-1]
+CT_ASSERT_EQUAL(fa_l3_key_size_is_40, offsetof(fa_5tuple_t, pkt), offsetof(clib_bihash_kv_40_8_t, value));
+CT_ASSERT_EQUAL(fa_l4_key_t_is_8, sizeof(fa_session_l4_key_t), sizeof(u64));
+CT_ASSERT_EQUAL(fa_packet_info_t_is_8, sizeof(fa_packet_info_t), sizeof(u64));
+CT_ASSERT_EQUAL(fa_l3_kv_size_is_48, sizeof(fa_5tuple_t), sizeof(clib_bihash_kv_40_8_t));
+
+/* A session occupies exactly two cachelines (the old name/comment claimed 64/one cacheline while asserting 128) */
+CT_ASSERT_EQUAL(fa_session_t_size_is_128, sizeof(fa_session_t), 128);
+#undef CT_ASSERT_EQUAL
+
+
+typedef enum {
+  ACL_FA_ERROR_DROP, /* the only explicit next node registered by the FA nodes above */
+  ACL_FA_N_NEXT,
+} acl_fa_next_t;
+
+
+/* Events sent to the session-cleaner process.  The 'typedef' was missing:
+ * without it, 'enum {...} acl_fa_cleaner_process_event_e;' defines a
+ * file-scope VARIABLE of anonymous enum type in every translation unit that
+ * includes this header, rather than the type the '_e' suffix intends. */
+typedef enum
+{
+  ACL_FA_CLEANER_RESCHEDULE = 1,
+  ACL_FA_CLEANER_DELETE_BY_SW_IF_INDEX,
+} acl_fa_cleaner_process_event_e;
+
+void acl_fa_enable_disable(u32 sw_if_index, int is_input, int enable_disable);
+
+
+#endif /* _FA_NODE_H_ */
diff --git a/test/test_acl_plugin_l2l3.py b/test/test_acl_plugin_l2l3.py
new file mode 100644
index 00000000000..346825fceec
--- /dev/null
+++ b/test/test_acl_plugin_l2l3.py
@@ -0,0 +1,722 @@
+#!/usr/bin/env python
+"""ACL IRB Test Case HLD:
+
+**config**
+ - L2 MAC learning enabled in l2bd
+ - 2 routed interfaces untagged, bvi (Bridge Virtual Interface)
+ - 2 bridged interfaces in l2bd with bvi
+
+**test**
+ - sending ip4 eth pkts between routed interfaces
+ - 2 routed interfaces
+ - 2 bridged interfaces
+
+ - 64B, 512B, 1518B, 9200B (ether_size)
+
+ - burst of pkts per interface
+ - 257pkts per burst
+ - routed pkts hitting different FIB entries
+ - bridged pkts hitting different MAC entries
+
+**verify**
+ - all packets received correctly
+
+"""
+
+import unittest
+from socket import inet_pton, AF_INET, AF_INET6
+from random import choice
+from pprint import pprint
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP, UDP, ICMP, TCP
+from scapy.layers.inet6 import IPv6, ICMPv6Unknown, ICMPv6EchoRequest
+from scapy.layers.inet6 import ICMPv6EchoReply, IPv6ExtHdrRouting
+
+from framework import VppTestCase, VppTestRunner
+import time
+
+
+class TestIpIrb(VppTestCase):
+    """IRB Test Case"""
+
+    @classmethod
+    def setUpClass(cls):
+        """
+        #. Create BD with MAC learning enabled and put interfaces to this BD.
+        #. Configure IPv4 addresses on loopback interface and routed interface.
+        #. Configure MAC address binding to IPv4 neighbors on loop0.
+        #. Configure MAC address on pg2.
+        #. Loopback BVI interface has remote hosts, one half of hosts are
+        behind pg0 second behind pg1.
+        """
+        super(TestIpIrb, cls).setUpClass()
+
+        cls.pg_if_packet_sizes = [64, 512, 1518, 9018]  # packet sizes
+        cls.bd_id = 10
+        cls.remote_hosts_count = 250
+
+        # create 3 pg interfaces, 1 loopback interface
+        cls.create_pg_interfaces(range(3))
+        cls.create_loopback_interfaces(range(1))
+
+        cls.interfaces = list(cls.pg_interfaces)
+        cls.interfaces.extend(cls.lo_interfaces)
+
+        for i in cls.interfaces:
+            i.admin_up()
+
+        # Create BD with MAC learning enabled and put interfaces to this BD
+        cls.vapi.sw_interface_set_l2_bridge(
+            cls.loop0.sw_if_index, bd_id=cls.bd_id, bvi=1)
+        cls.vapi.sw_interface_set_l2_bridge(
+            cls.pg0.sw_if_index, bd_id=cls.bd_id)
+        cls.vapi.sw_interface_set_l2_bridge(
+            cls.pg1.sw_if_index, bd_id=cls.bd_id)
+
+        # Configure IPv4 addresses on loopback interface and routed interface
+        cls.loop0.config_ip4()
+        cls.loop0.config_ip6()
+        cls.pg2.config_ip4()
+        cls.pg2.config_ip6()
+
+        # Configure MAC address binding to IPv4 neighbors on loop0
+        cls.loop0.generate_remote_hosts(cls.remote_hosts_count)
+        cls.loop0.configure_ipv4_neighbors()
+        cls.loop0.configure_ipv6_neighbors()
+        # configure MAC address on pg2
+        cls.pg2.resolve_arp()
+        cls.pg2.resolve_ndp()
+
+        cls.WITHOUT_EH = False  # readable flags for the add_extension_header argument
+        cls.WITH_EH = True
+
+        # Loopback BVI interface has remote hosts, one half of hosts are behind
+        # pg0 second behind pg1
+        half = cls.remote_hosts_count // 2
+        cls.pg0.remote_hosts = cls.loop0.remote_hosts[:half]
+        cls.pg1.remote_hosts = cls.loop0.remote_hosts[half:]
+
+    def tearDown(self):
+        """Run standard test teardown and log ``show l2patch``,
+        ``show l2fib verbose``,``show bridge-domain <bd_id> detail``,
+        ``show ip arp``.
+        """
+        super(TestIpIrb, self).tearDown()
+        if not self.vpp_dead:  # CLI is unusable after a crash; skip diagnostics then
+            self.logger.info(self.vapi.cli("show l2patch"))
+            self.logger.info(self.vapi.cli("show classify tables"))
+            self.logger.info(self.vapi.cli("show vlib graph"))
+            self.logger.info(self.vapi.cli("show l2fib verbose"))
+            self.logger.info(self.vapi.cli("show bridge-domain %s detail" %
+                                           self.bd_id))
+            self.logger.info(self.vapi.cli("show ip arp"))
+            self.logger.info(self.vapi.cli("show ip6 neighbors"))
+            self.logger.info(self.vapi.cli("show acl-plugin sessions"))
+
+    def api_acl_add_replace(self, acl_index, r, count, tag="",
+                            expected_retval=0):
+        """Add/replace an ACL
+
+        :param int acl_index: ACL index to replace, 4294967295 to create new.
+        :param acl_rule r: ACL rules array.
+        :param str tag: symbolic tag (description) for this ACL.
+        :param int count: number of rules.
+        """
+        return self.vapi.api(self.vapi.papi.acl_add_replace,
+                             {'acl_index': acl_index,
+                              'r': r,
+                              'count': count,
+                              'tag': tag
+                              }, expected_retval=expected_retval)
+
+    def api_acl_interface_set_acl_list(self, sw_if_index, count, n_input, acls,
+                                       expected_retval=0):  # apply `acls` to an interface; first n_input are inbound
+        return self.vapi.api(self.vapi.papi.acl_interface_set_acl_list,
+                             {'sw_if_index': sw_if_index,
+                              'count': count,
+                              'n_input': n_input,
+                              'acls': acls
+                              }, expected_retval=expected_retval)
+
+    def api_acl_dump(self, acl_index, expected_retval=0):  # dump the ACL(s) via the binary API
+        return self.vapi.api(self.vapi.papi.acl_dump,
+                             {'acl_index': acl_index},
+                             expected_retval=expected_retval)
+
+    def create_stream(self, src_ip_if, dst_ip_if, reverse, packet_sizes,
+                      is_ip6, expect_blocked, expect_established,
+                      add_extension_header):
+        # Build a stream of 8 packets plus three matching rule sets:
+        # exact rules, permit-only rules, and permit+reflect rules.
+        pkts = []
+        rules = []
+        permit_rules = []
+        permit_and_reflect_rules = []
+        total_packet_count = 8
+        for i in range(0, total_packet_count):
+            modulo = (i//2) % 2  # alternates every two packets: 0 = UDP/TCP (reflectable), 1 = ICMP
+            can_reflect_this_packet = (modulo == 0)
+            is_permit = i % 2  # odd packets get a permit rule, even ones a deny rule
+            remote_dst_index = i % len(dst_ip_if.remote_hosts)
+            remote_dst_host = dst_ip_if.remote_hosts[remote_dst_index]
+            if is_permit == 1:
+                info = self.create_packet_info(src_ip_if, dst_ip_if)
+                payload = self.info_to_payload(info)
+            else:
+                to_be_blocked = False
+                if (expect_blocked and not expect_established):
+                    to_be_blocked = True
+                if (not can_reflect_this_packet):
+                    to_be_blocked = True
+                if to_be_blocked:
+                    payload = "to be blocked"
+                else:
+                    info = self.create_packet_info(src_ip_if, dst_ip_if)
+                    payload = self.info_to_payload(info)
+            if reverse:  # swap the src/dst roles for return-direction traffic
+                dst_mac = 'de:ad:00:00:00:00'
+                src_mac = remote_dst_host._mac
+                dst_ip6 = src_ip_if.remote_ip6
+                src_ip6 = remote_dst_host.ip6
+                dst_ip4 = src_ip_if.remote_ip4
+                src_ip4 = remote_dst_host.ip4
+                dst_l4 = 1234 + i
+                src_l4 = 4321 + i
+            else:
+                dst_mac = src_ip_if.local_mac
+                src_mac = src_ip_if.remote_mac
+                src_ip6 = src_ip_if.remote_ip6
+                dst_ip6 = remote_dst_host.ip6
+                src_ip4 = src_ip_if.remote_ip4
+                dst_ip4 = remote_dst_host.ip4
+                src_l4 = 1234 + i
+                dst_l4 = 4321 + i
+
+            # default ULP should be something we do not use in tests
+            ulp_l4 = TCP(sport=src_l4, dport=dst_l4)
+            # potentially a chain of protocols leading to ULP
+            ulp = ulp_l4
+
+            if can_reflect_this_packet:
+                if is_ip6:
+                    ulp_l4 = UDP(sport=src_l4, dport=dst_l4)
+                    if add_extension_header:
+                        # prepend some extension headers
+                        ulp = (IPv6ExtHdrRouting() / IPv6ExtHdrRouting() /
+                               IPv6ExtHdrRouting() / ulp_l4)
+                        # uncomment below to test invalid ones
+                        # ulp = IPv6ExtHdrRouting(len = 200) / ulp_l4
+                    else:
+                        ulp = ulp_l4
+                    p = (Ether(dst=dst_mac, src=src_mac) /
+                         IPv6(src=src_ip6, dst=dst_ip6) /
+                         ulp /
+                         Raw(payload))
+                else:
+                    ulp_l4 = UDP(sport=src_l4, dport=dst_l4)
+                    # IPv4 does not allow extension headers
+                    ulp = ulp_l4
+                    p = (Ether(dst=dst_mac, src=src_mac) /
+                         IP(src=src_ip4, dst=dst_ip4) /
+                         ulp /
+                         Raw(payload))
+            elif modulo == 1:  # non-reflectable packets are ICMP/ICMPv6
+                if is_ip6:
+                    ulp_l4 = ICMPv6Unknown(type=128 + (i % 2), code=i % 2)
+                    ulp = ulp_l4
+                    p = (Ether(dst=dst_mac, src=src_mac) /
+                         IPv6(src=src_ip6, dst=dst_ip6) /
+                         ulp /
+                         Raw(payload))
+                else:
+                    ulp_l4 = ICMP(type=8 + (i % 2), code=i % 2)
+                    ulp = ulp_l4
+                    p = (Ether(dst=dst_mac, src=src_mac) /
+                         IP(src=src_ip4, dst=dst_ip4) /
+                         ulp /
+                         Raw(payload))
+
+            if i % 2 == 1:  # only permit packets carry a tracked packet info
+                info.data = p.copy()
+            size = packet_sizes[(i // 2) % len(packet_sizes)]
+            self.extend_packet(p, size)
+            pkts.append(p)
+
+            rule_family = AF_INET6 if p.haslayer(IPv6) else AF_INET
+            rule_prefix_len = 128 if p.haslayer(IPv6) else 32
+            rule_l3_layer = IPv6 if p.haslayer(IPv6) else IP
+
+            if p.haslayer(UDP):
+                rule_l4_sport = p[UDP].sport
+                rule_l4_dport = p[UDP].dport
+            else:
+                if p.haslayer(ICMP):
+                    rule_l4_sport = p[ICMP].type
+                    rule_l4_dport = p[ICMP].code
+                else:
+                    rule_l4_sport = p[ICMPv6Unknown].type
+                    rule_l4_dport = p[ICMPv6Unknown].code
+            if p.haslayer(IPv6):
+                rule_l4_proto = ulp_l4.overload_fields[IPv6]['nh']  # next-header the ULP would set on IPv6
+            else:
+                rule_l4_proto = p[IP].proto
+
+            new_rule = {
+                'is_permit': is_permit,
+                'is_ipv6': p.haslayer(IPv6),
+                'src_ip_addr': inet_pton(rule_family,
+                                         p[rule_l3_layer].src),
+                'src_ip_prefix_len': rule_prefix_len,
+                'dst_ip_addr': inet_pton(rule_family,
+                                         p[rule_l3_layer].dst),
+                'dst_ip_prefix_len': rule_prefix_len,
+                'srcport_or_icmptype_first': rule_l4_sport,
+                'srcport_or_icmptype_last': rule_l4_sport,
+                'dstport_or_icmpcode_first': rule_l4_dport,
+                'dstport_or_icmpcode_last': rule_l4_dport,
+                'proto': rule_l4_proto,
+            }
+            rules.append(new_rule)
+            new_rule_permit = new_rule.copy()
+            new_rule_permit['is_permit'] = 1
+            permit_rules.append(new_rule_permit)
+
+            new_rule_permit_and_reflect = new_rule.copy()
+            if can_reflect_this_packet:
+                new_rule_permit_and_reflect['is_permit'] = 2
+            else:
+                new_rule_permit_and_reflect['is_permit'] = is_permit
+            permit_and_reflect_rules.append(new_rule_permit_and_reflect)
+
+        return {'stream': pkts,
+                'rules': rules,
+                'permit_rules': permit_rules,
+                'permit_and_reflect_rules': permit_and_reflect_rules}
+
+    def verify_capture(self, dst_ip_if, src_ip_if, capture, reverse):
+        # Match each captured packet to the tracked packet infos recorded in
+        # create_stream and check L2/L3/L4 fields against the saved packet.
+        last_info = dict()
+        for i in self.interfaces:
+            last_info[i.sw_if_index] = None
+
+        dst_ip_sw_if_index = dst_ip_if.sw_if_index
+        return  # FIXME(review): unconditional return makes ALL verification below dead code — confirm this is an intentional temporary bypass
+
+        for packet in capture:
+            l3 = IP if packet.haslayer(IP) else IPv6
+            ip = packet[l3]
+            if packet.haslayer(UDP):
+                l4 = UDP
+            else:
+                if packet.haslayer(ICMP):
+                    l4 = ICMP
+                else:
+                    l4 = ICMPv6Unknown
+
+            # Scapy IPv6 stuff is too smart for its own good.
+            # So we do this and coerce the ICMP into unknown type
+            if packet.haslayer(UDP):
+                data = str(packet[UDP][Raw])
+            else:
+                if l3 == IP:
+                    data = str(ICMP(str(packet[l3].payload))[Raw])
+                else:
+                    data = str(ICMPv6Unknown(str(packet[l3].payload)).msgbody)
+            udp_or_icmp = packet[l3].payload
+            payload_info = self.payload_to_info(data)
+            packet_index = payload_info.index
+
+            self.assertEqual(payload_info.dst, dst_ip_sw_if_index)
+
+            next_info = self.get_next_packet_info_for_interface2(
+                payload_info.src, dst_ip_sw_if_index,
+                last_info[payload_info.src])
+            last_info[payload_info.src] = next_info
+            self.assertTrue(next_info is not None)
+            self.assertEqual(packet_index, next_info.index)
+            saved_packet = next_info.data
+            self.assertTrue(next_info is not None)
+
+            # MAC: src, dst
+            if not reverse:
+                self.assertEqual(packet.src, dst_ip_if.local_mac)
+                host = dst_ip_if.host_by_mac(packet.dst)
+
+            # IP: src, dst
+            # self.assertEqual(ip.src, src_ip_if.remote_ip4)
+            if saved_packet is not None:
+                self.assertEqual(ip.src, saved_packet[l3].src)
+                self.assertEqual(ip.dst, saved_packet[l3].dst)
+                if l4 == UDP:
+                    self.assertEqual(udp_or_icmp.sport, saved_packet[l4].sport)
+                    self.assertEqual(udp_or_icmp.dport, saved_packet[l4].dport)
+            else:
+                print("Saved packet is none")
+            # self.assertEqual(ip.dst, host.ip4)
+
+            # UDP:
+
+    def create_acls_for_a_stream(self, stream_dict,
+                                 test_l2_action, is_reflect):
+        # Install two ACLs: an "action" ACL (deny or permit+reflect) and a
+        # plain permit ACL, and map them to L2/L3 roles per test_l2_action.
+        r = stream_dict['rules']
+        r_permit = stream_dict['permit_rules']
+        r_permit_reflect = stream_dict['permit_and_reflect_rules']
+        r_action = r_permit_reflect if is_reflect else r
+        reply = self.api_acl_add_replace(acl_index=4294967295, r=r_action,
+                                         count=len(r_action), tag="action acl")
+        action_acl_index = reply.acl_index
+        reply = self.api_acl_add_replace(acl_index=4294967295, r=r_permit,
+                                         count=len(r_permit), tag="permit acl")
+        permit_acl_index = reply.acl_index
+        return {'L2': action_acl_index if test_l2_action else permit_acl_index,
+                'L3': permit_acl_index if test_l2_action else action_acl_index,
+                'permit': permit_acl_index, 'action': action_acl_index}
+
+    def apply_acl_ip46_x_to_y(self, bridged_to_routed, test_l2_deny,
+                              is_ip6, is_reflect, add_eh):
+        """ Apply the ACLs
+        """
+        self.reset_packet_infos()
+        stream_dict = self.create_stream(
+            self.pg2, self.loop0,
+            bridged_to_routed,
+            self.pg_if_packet_sizes, is_ip6,
+            not is_reflect, False, add_eh)
+        stream = stream_dict['stream']
+        acl_idx = self.create_acls_for_a_stream(stream_dict, test_l2_deny,
+                                                is_reflect)
+        n_input_l3 = 0 if bridged_to_routed else 1  # ACL direction flips with traffic direction
+        n_input_l2 = 1 if bridged_to_routed else 0
+        self.api_acl_interface_set_acl_list(sw_if_index=self.pg2.sw_if_index,
+                                            count=1,
+                                            n_input=n_input_l3,
+                                            acls=[acl_idx['L3']])
+        self.api_acl_interface_set_acl_list(sw_if_index=self.pg0.sw_if_index,
+                                            count=1,
+                                            n_input=n_input_l2,
+                                            acls=[acl_idx['L2']])
+        self.api_acl_interface_set_acl_list(sw_if_index=self.pg1.sw_if_index,
+                                            count=1,
+                                            n_input=n_input_l2,
+                                            acls=[acl_idx['L2']])
+
+    def apply_acl_ip46_both_directions_reflect(self,
+                                               primary_is_bridged_to_routed,
+                                               reflect_on_l2, is_ip6, add_eh):
+        # Install reflect ACLs for the primary direction and deny ACLs for the
+        # return direction, on both the L2 and L3 interfaces.
+        primary_is_routed_to_bridged = not primary_is_bridged_to_routed
+        self.reset_packet_infos()
+        stream_dict_fwd = self.create_stream(self.pg2, self.loop0,
+                                             primary_is_bridged_to_routed,
+                                             self.pg_if_packet_sizes, is_ip6,
+                                             False, False, add_eh)
+        acl_idx_fwd = self.create_acls_for_a_stream(stream_dict_fwd,
+                                                    reflect_on_l2, True)
+
+        stream_dict_rev = self.create_stream(self.pg2, self.loop0,
+                                             not primary_is_bridged_to_routed,
+                                             self.pg_if_packet_sizes, is_ip6,
+                                             True, True, add_eh)
+        # We want the primary action to be "deny" rather than reflect
+        acl_idx_rev = self.create_acls_for_a_stream(stream_dict_rev,
+                                                    reflect_on_l2, False)
+
+        if primary_is_bridged_to_routed:
+            inbound_l2_acl = acl_idx_fwd['L2']
+        else:
+            inbound_l2_acl = acl_idx_rev['L2']
+
+        if primary_is_routed_to_bridged:
+            outbound_l2_acl = acl_idx_fwd['L2']
+        else:
+            outbound_l2_acl = acl_idx_rev['L2']
+
+        if primary_is_routed_to_bridged:
+            inbound_l3_acl = acl_idx_fwd['L3']
+        else:
+            inbound_l3_acl = acl_idx_rev['L3']
+
+        if primary_is_bridged_to_routed:
+            outbound_l3_acl = acl_idx_fwd['L3']
+        else:
+            outbound_l3_acl = acl_idx_rev['L3']
+
+        self.api_acl_interface_set_acl_list(sw_if_index=self.pg2.sw_if_index,
+                                            count=2,
+                                            n_input=1,
+                                            acls=[inbound_l3_acl,
+                                                  outbound_l3_acl])
+        self.api_acl_interface_set_acl_list(sw_if_index=self.pg0.sw_if_index,
+                                            count=2,
+                                            n_input=1,
+                                            acls=[inbound_l2_acl,
+                                                  outbound_l2_acl])
+        self.api_acl_interface_set_acl_list(sw_if_index=self.pg1.sw_if_index,
+                                            count=2,
+                                            n_input=1,
+                                            acls=[inbound_l2_acl,
+                                                  outbound_l2_acl])
+
+    def apply_acl_ip46_routed_to_bridged(self, test_l2_deny, is_ip6,
+                                         is_reflect, add_eh):
+        # Convenience wrapper: routed -> bridged direction
+        self.apply_acl_ip46_x_to_y(False, test_l2_deny, is_ip6,
+                                   is_reflect, add_eh)
+
+    def apply_acl_ip46_bridged_to_routed(self, test_l2_deny, is_ip6,
+                                         is_reflect, add_eh):
+        # Convenience wrapper: bridged -> routed direction
+        self.apply_acl_ip46_x_to_y(True, test_l2_deny, is_ip6,
+                                   is_reflect, add_eh)
+
+    def run_traffic_ip46_x_to_y(self, bridged_to_routed,
+                                test_l2_deny, is_ip6,
+                                is_reflect, is_established, add_eh):
+        # Send the generated stream and verify the expected subset arrives.
+        self.reset_packet_infos()
+        stream_dict = self.create_stream(self.pg2, self.loop0,
+                                         bridged_to_routed,
+                                         self.pg_if_packet_sizes, is_ip6,
+                                         not is_reflect, is_established,
+                                         add_eh)
+        stream = stream_dict['stream']
+
+        tx_if = self.pg0 if bridged_to_routed else self.pg2
+        rx_if = self.pg2 if bridged_to_routed else self.pg0
+
+        tx_if.add_stream(stream)
+        self.pg_enable_capture(self.pg_interfaces)
+        self.pg_start()
+        packet_count = self.get_packet_count_for_if_idx(self.loop0.sw_if_index)  # only tracked (expected-to-pass) packets counted
+        rcvd1 = rx_if.get_capture(packet_count)
+        self.verify_capture(self.loop0, self.pg2, rcvd1, bridged_to_routed)
+
+    def run_traffic_ip46_routed_to_bridged(self, test_l2_deny, is_ip6,
+                                           is_reflect, is_established, add_eh):
+        self.run_traffic_ip46_x_to_y(False, test_l2_deny, is_ip6,
+                                     is_reflect, is_established, add_eh)
+
+    def run_traffic_ip46_bridged_to_routed(self, test_l2_deny, is_ip6,
+                                           is_reflect, is_established, add_eh):
+        self.run_traffic_ip46_x_to_y(True, test_l2_deny, is_ip6,
+                                     is_reflect, is_established, add_eh)
+
+    def run_test_ip46_routed_to_bridged(self, test_l2_deny,
+                                        is_ip6, is_reflect, add_eh):
+        # Apply ACLs then run one-way traffic (stateless case)
+        self.apply_acl_ip46_routed_to_bridged(test_l2_deny,
+                                              is_ip6, is_reflect, add_eh)
+        self.run_traffic_ip46_routed_to_bridged(test_l2_deny, is_ip6,
+                                                is_reflect, False, add_eh)
+
+    def run_test_ip46_bridged_to_routed(self, test_l2_deny,
+                                        is_ip6, is_reflect, add_eh):
+        self.apply_acl_ip46_bridged_to_routed(test_l2_deny,
+                                              is_ip6, is_reflect, add_eh)
+        self.run_traffic_ip46_bridged_to_routed(test_l2_deny, is_ip6,
+                                                is_reflect, False, add_eh)
+
+    def run_test_ip46_routed_to_bridged_and_back(self, test_l2_action,
+                                                 is_ip6, add_eh):
+        # Stateful case: reflect on primary direction, then return traffic
+        # must pass through the created sessions (is_established=True).
+        self.apply_acl_ip46_both_directions_reflect(False, test_l2_action,
+                                                    is_ip6, add_eh)
+        self.run_traffic_ip46_routed_to_bridged(test_l2_action, is_ip6,
+                                                True, False, add_eh)
+        self.run_traffic_ip46_bridged_to_routed(test_l2_action, is_ip6,
+                                                False, True, add_eh)
+
+    def run_test_ip46_bridged_to_routed_and_back(self, test_l2_action,
+                                                 is_ip6, add_eh):
+        self.apply_acl_ip46_both_directions_reflect(True, test_l2_action,
+                                                    is_ip6, add_eh)
+        self.run_traffic_ip46_bridged_to_routed(test_l2_action, is_ip6,
+                                                True, False, add_eh)
+        self.run_traffic_ip46_routed_to_bridged(test_l2_action, is_ip6,
+                                                False, True, add_eh)
+
+    def test_0000_ip6_irb_1(self):
+        """ ACL plugin prepare"""
+        if not self.vpp_dead:  # relies on alphabetical test ordering to run first
+            cmd = "set acl-plugin session timeout udp idle 2000"
+            self.logger.info(self.vapi.ppcli(cmd))
+            # uncomment to not skip past the routing header
+            # and watch the EH tests fail
+            # self.logger.info(self.vapi.ppcli(
+            #    "set acl-plugin skip-ipv6-extension-header 43 0"))
+            # uncomment to test the session limit (stateful tests will fail)
+            # self.logger.info(self.vapi.ppcli(
+            #    "set acl-plugin session table max-entries 1"))
+            # new datapath is the default, but just in case
+            # self.logger.info(self.vapi.ppcli(
+            #    "set acl-plugin l2-datapath new"))
+            # If you want to see some tests fail, uncomment the next line
+            # self.logger.info(self.vapi.ppcli(
+            #    "set acl-plugin l2-datapath old"))
+
+    def test_0001_ip6_irb_1(self):
+        """ ACL IPv6 routed -> bridged, L2 ACL deny"""
+        self.run_test_ip46_routed_to_bridged(True, True, False,
+                                             self.WITHOUT_EH)
+
+    def test_0002_ip6_irb_1(self):
+        """ ACL IPv6 routed -> bridged, L3 ACL deny"""
+        self.run_test_ip46_routed_to_bridged(False, True, False,
+                                             self.WITHOUT_EH)
+
+    def test_0003_ip4_irb_1(self):
+        """ ACL IPv4 routed -> bridged, L2 ACL deny"""
+        self.run_test_ip46_routed_to_bridged(True, False, False,
+                                             self.WITHOUT_EH)
+
+    def test_0004_ip4_irb_1(self):
+        """ ACL IPv4 routed -> bridged, L3 ACL deny"""
+        self.run_test_ip46_routed_to_bridged(False, False, False,
+                                             self.WITHOUT_EH)
+
+    def test_0005_ip6_irb_1(self):
+        """ ACL IPv6 bridged -> routed, L2 ACL deny """
+        self.run_test_ip46_bridged_to_routed(True, True, False,
+                                             self.WITHOUT_EH)
+
+    def test_0006_ip6_irb_1(self):
+        """ ACL IPv6 bridged -> routed, L3 ACL deny """
+        self.run_test_ip46_bridged_to_routed(False, True, False,
+                                             self.WITHOUT_EH)
+
+    def test_0007_ip6_irb_1(self):
+        """ ACL IPv4 bridged -> routed, L2 ACL deny """
+        self.run_test_ip46_bridged_to_routed(True, False, False,
+                                             self.WITHOUT_EH)
+
+    def test_0008_ip6_irb_1(self):
+        """ ACL IPv4 bridged -> routed, L3 ACL deny """
+        self.run_test_ip46_bridged_to_routed(False, False, False,
+                                             self.WITHOUT_EH)
+
+    # Stateful ACL tests
+    def test_0101_ip6_irb_1(self):
+        """ ACL IPv6 routed -> bridged, L2 ACL permit+reflect"""
+        self.run_test_ip46_routed_to_bridged_and_back(True, True,
+                                                      self.WITHOUT_EH)
+
+    def test_0102_ip6_irb_1(self):
+        """ ACL IPv6 bridged -> routed, L2 ACL permit+reflect"""
+        self.run_test_ip46_bridged_to_routed_and_back(True, True,
+                                                      self.WITHOUT_EH)
+
+    def test_0103_ip6_irb_1(self):
+        """ ACL IPv4 routed -> bridged, L2 ACL permit+reflect"""
+        self.run_test_ip46_routed_to_bridged_and_back(True, False,
+                                                      self.WITHOUT_EH)
+
+    def test_0104_ip6_irb_1(self):
+        """ ACL IPv4 bridged -> routed, L2 ACL permit+reflect"""
+        self.run_test_ip46_bridged_to_routed_and_back(True, False,
+                                                      self.WITHOUT_EH)
+
+    def test_0111_ip6_irb_1(self):
+        """ ACL IPv6 routed -> bridged, L3 ACL permit+reflect"""
+        self.run_test_ip46_routed_to_bridged_and_back(False, True,
+                                                      self.WITHOUT_EH)
+
+    def test_0112_ip6_irb_1(self):
+        """ ACL IPv6 bridged -> routed, L3 ACL permit+reflect"""
+        self.run_test_ip46_bridged_to_routed_and_back(False, True,
+                                                      self.WITHOUT_EH)
+
+    def test_0113_ip6_irb_1(self):
+        """ ACL IPv4 routed -> bridged, L3 ACL permit+reflect"""
+        self.run_test_ip46_routed_to_bridged_and_back(False, False,
+                                                      self.WITHOUT_EH)
+
+    def test_0114_ip6_irb_1(self):
+        """ ACL IPv4 bridged -> routed, L3 ACL permit+reflect"""
+        self.run_test_ip46_bridged_to_routed_and_back(False, False,
+                                                      self.WITHOUT_EH)
+
+    # A block of tests with extension headers
+
+    def test_1001_ip6_irb_1(self):
+        """ ACL IPv6+EH routed -> bridged, L2 ACL deny"""
+        self.run_test_ip46_routed_to_bridged(True, True, False,
+                                             self.WITH_EH)
+
+    def test_1002_ip6_irb_1(self):
+        """ ACL IPv6+EH routed -> bridged, L3 ACL deny"""
+        self.run_test_ip46_routed_to_bridged(False, True, False,
+                                             self.WITH_EH)
+
+    def test_1005_ip6_irb_1(self):
+        """ ACL IPv6+EH bridged -> routed, L2 ACL deny """
+        self.run_test_ip46_bridged_to_routed(True, True, False,
+                                             self.WITH_EH)
+
+    def test_1006_ip6_irb_1(self):
+        """ ACL IPv6+EH bridged -> routed, L3 ACL deny """
+        self.run_test_ip46_bridged_to_routed(False, True, False,
+                                             self.WITH_EH)
+
+    def test_1101_ip6_irb_1(self):
+        """ ACL IPv6+EH routed -> bridged, L2 ACL permit+reflect"""
+        self.run_test_ip46_routed_to_bridged_and_back(True, True,
+                                                      self.WITH_EH)
+
+    def test_1102_ip6_irb_1(self):
+        """ ACL IPv6+EH bridged -> routed, L2 ACL permit+reflect"""
+        self.run_test_ip46_bridged_to_routed_and_back(True, True,
+                                                      self.WITH_EH)
+
+    def test_1111_ip6_irb_1(self):
+        """ ACL IPv6+EH routed -> bridged, L3 ACL permit+reflect"""
+        self.run_test_ip46_routed_to_bridged_and_back(False, True,
+                                                      self.WITH_EH)
+
+    def test_1112_ip6_irb_1(self):
+        """ ACL IPv6+EH bridged -> routed, L3 ACL permit+reflect"""
+        self.run_test_ip46_bridged_to_routed_and_back(False, True,
+                                                      self.WITH_EH)
+
+    # Old datapath group
+    def test_8900_ip6_irb_1(self):
+        """ ACL plugin set old L2 datapath"""
+        if not self.vpp_dead:  # switches datapath for the remaining (alphabetically later) tests
+            cmd = "set acl-plugin l2-datapath old"
+            self.logger.info(self.vapi.ppcli(cmd))
+
+    def test_8901_ip6_irb_1(self):
+        """ ACL IPv6 routed -> bridged, L2 ACL deny"""
+        self.run_test_ip46_routed_to_bridged(True, True, False,
+                                             self.WITHOUT_EH)
+
+    def test_8902_ip6_irb_1(self):
+        """ ACL IPv6 routed -> bridged, L3 ACL deny"""
+        self.run_test_ip46_routed_to_bridged(False, True, False,
+                                             self.WITHOUT_EH)
+
+    def test_8903_ip4_irb_1(self):
+        """ ACL IPv4 routed -> bridged, L2 ACL deny"""
+        self.run_test_ip46_routed_to_bridged(True, False, False,
+                                             self.WITHOUT_EH)
+
+    def test_8904_ip4_irb_1(self):
+        """ ACL IPv4 routed -> bridged, L3 ACL deny"""
+        self.run_test_ip46_routed_to_bridged(False, False, False,
+                                             self.WITHOUT_EH)
+
+    def test_8905_ip6_irb_1(self):
+        """ ACL IPv6 bridged -> routed, L2 ACL deny """
+        self.run_test_ip46_bridged_to_routed(True, True, False,
+                                             self.WITHOUT_EH)
+
+    def test_8906_ip6_irb_1(self):
+        """ ACL IPv6 bridged -> routed, L3 ACL deny """
+        self.run_test_ip46_bridged_to_routed(False, True, False,
+                                             self.WITHOUT_EH)
+
+    def test_8907_ip6_irb_1(self):
+        """ ACL IPv4 bridged -> routed, L2 ACL deny """
+        self.run_test_ip46_bridged_to_routed(True, False, False,
+                                             self.WITHOUT_EH)
+
+    def test_8908_ip6_irb_1(self):
+        """ ACL IPv4 bridged -> routed, L3 ACL deny """
+        self.run_test_ip46_bridged_to_routed(False, False, False,
+                                             self.WITHOUT_EH)
+
+
+if __name__ == '__main__':
+    unittest.main(testRunner=VppTestRunner)