/* * Copyright (c) 2016-2018 Cisco and/or its affiliates. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include typedef struct { u32 next_index; u32 sw_if_index; u32 lc_index; u32 match_acl_in_index; u32 match_rule_index; u64 packet_info[6]; u32 trace_bitmap; u8 action; } acl_fa_trace_t; /* *INDENT-OFF* */ #define foreach_acl_fa_error \ _(ACL_DROP, "ACL deny packets") \ _(ACL_PERMIT, "ACL permit packets") \ _(ACL_NEW_SESSION, "new sessions added") \ _(ACL_EXIST_SESSION, "existing session packets") \ _(ACL_CHECK, "checked packets") \ _(ACL_RESTART_SESSION_TIMER, "restart session timer") \ _(ACL_TOO_MANY_SESSIONS, "too many sessions to add new") \ /* end of errors */ typedef enum { #define _(sym,str) ACL_FA_ERROR_##sym, foreach_acl_fa_error #undef _ ACL_FA_N_ERROR, } acl_fa_error_t; /* *INDENT-ON* */ typedef struct { u32 next_index; u32 sw_if_index; u16 ethertype; } nonip_in_out_trace_t; /* packet trace format function */ static u8 * format_nonip_in_out_trace (u8 * s, u32 is_output, va_list * args) { CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); nonip_in_out_trace_t *t = va_arg (*args, nonip_in_out_trace_t *); s = format (s, "%s: sw_if_index %d next_index %x ethertype %x", is_output ? 
"OUT-ETHER-WHITELIST" : "IN-ETHER-WHITELIST", t->sw_if_index, t->next_index, t->ethertype); return s; } static u8 * format_l2_nonip_in_trace (u8 * s, va_list * args) { return format_nonip_in_out_trace (s, 0, args); } static u8 * format_l2_nonip_out_trace (u8 * s, va_list * args) { return format_nonip_in_out_trace (s, 1, args); } #define foreach_nonip_in_error \ _(DROP, "dropped inbound non-whitelisted non-ip packets") \ _(PERMIT, "permitted inbound whitelisted non-ip packets") \ #define foreach_nonip_out_error \ _(DROP, "dropped outbound non-whitelisted non-ip packets") \ _(PERMIT, "permitted outbound whitelisted non-ip packets") \ /* *INDENT-OFF* */ typedef enum { #define _(sym,str) FA_IN_NONIP_ERROR_##sym, foreach_nonip_in_error #undef _ FA_IN_NONIP_N_ERROR, } l2_in_feat_arc_error_t; static char *fa_in_nonip_error_strings[] = { #define _(sym,string) string, foreach_nonip_in_error #undef _ }; typedef enum { #define _(sym,str) FA_OUT_NONIP_ERROR_##sym, foreach_nonip_out_error #undef _ FA_OUT_NONIP_N_ERROR, } l2_out_feat_arc_error_t; static char *fa_out_nonip_error_strings[] = { #define _(sym,string) string, foreach_nonip_out_error #undef _ }; /* *INDENT-ON* */ always_inline int is_permitted_ethertype (acl_main_t * am, int sw_if_index0, int is_output, u16 ethertype) { u16 **v = is_output ? 
am->output_etype_whitelist_by_sw_if_index : am->input_etype_whitelist_by_sw_if_index; u16 *whitelist = vec_elt (v, sw_if_index0); int i; if (vec_len (whitelist) == 0) return 1; for (i = 0; i < vec_len (whitelist); i++) if (whitelist[i] == ethertype) return 1; return 0; } #define get_u16(addr) ( *((u16 *)(addr)) ) always_inline uword nonip_in_out_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame, int is_output) { acl_main_t *am = &acl_main; u32 n_left, *from; u16 nexts[VLIB_FRAME_SIZE], *next; vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b; vlib_node_runtime_t *error_node; from = vlib_frame_vector_args (frame); error_node = vlib_node_get_runtime (vm, node->node_index); vlib_get_buffers (vm, from, bufs, frame->n_vectors); /* set the initial values for the current buffer the next pointers */ b = bufs; next = nexts; n_left = frame->n_vectors; while (n_left > 0) { u32 next_index = 0; u32 sw_if_index0 = vnet_buffer (b[0])->sw_if_index[is_output ? VLIB_TX : VLIB_RX]; u16 ethertype = 0; int error0 = 0; ethernet_header_t *h0 = vlib_buffer_get_current (b[0]); u8 *l3h0 = (u8 *) h0 + vnet_buffer (b[0])->l2.l2_len; ethertype = clib_net_to_host_u16 (get_u16 (l3h0 - 2)); if (is_permitted_ethertype (am, sw_if_index0, is_output, ethertype)) vnet_feature_next (&next_index, b[0]); next[0] = next_index; if (0 == next[0]) b[0]->error = error_node->errors[error0]; if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && (b[0]->flags & VLIB_BUFFER_IS_TRACED))) { nonip_in_out_trace_t *t = vlib_add_trace (vm, node, b[0], sizeof (*t)); t->sw_if_index = sw_if_index0; t->ethertype = ethertype; t->next_index = next[0]; } next[0] = next[0] < node->n_next_nodes ? 
next[0] : 0; next++; b++; n_left--; } vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors); return frame->n_vectors; } VLIB_NODE_FN (acl_in_nonip_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { return nonip_in_out_node_fn (vm, node, frame, 0); } VLIB_NODE_FN (acl_out_nonip_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { return nonip_in_out_node_fn (vm, node, frame, 1); } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (acl_in_nonip_node) = { .name = "acl-plugin-in-nonip-l2", .vector_size = sizeof (u32), .format_trace = format_l2_nonip_in_trace, .type = VLIB_NODE_TYPE_INTERNAL, .n_errors = ARRAY_LEN (fa_in_nonip_error_strings), .error_strings = fa_in_nonip_error_strings, .n_next_nodes = AC
/*
 * Copyright (c) 2017-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __included_tw_timer_16t_2w_512sl_h__
#define __included_tw_timer_16t_2w_512sl_h__

/*
 * Configuration for one instantiation of the vppinfra timer-wheel
 * template.  All template parameters are #undef'd first so that a
 * client app can create multiple wheel geometries by including several
 * of these headers in a single translation unit.
 */
#undef TW_TIMER_WHEELS
#undef TW_SLOTS_PER_RING
#undef TW_RING_SHIFT
#undef TW_RING_MASK
#undef TW_TIMERS_PER_OBJECT
#undef LOG2_TW_TIMERS_PER_OBJECT
#undef TW_SUFFIX
#undef TW_OVERFLOW_VECTOR
#undef TW_FAST_WHEEL_BITMAP
#undef TW_TIMER_ALLOW_DUPLICATE_STOP
#undef TW_START_STOP_TRACE_SIZE

/* Geometry: 2 wheels of 512 slots each, 16 timers per tracked object. */
#define TW_TIMER_WHEELS 2
#define TW_SLOTS_PER_RING 512
#define TW_RING_SHIFT 9		/* log2 (TW_SLOTS_PER_RING) */
#define TW_RING_MASK (TW_SLOTS_PER_RING -1)
#define TW_TIMERS_PER_OBJECT 16
#define LOG2_TW_TIMERS_PER_OBJECT 4	/* log2 (TW_TIMERS_PER_OBJECT) */
/* Suffix appended to every symbol the template generates. */
#define TW_SUFFIX _16t_2w_512sl
#define TW_FAST_WHEEL_BITMAP 0
/* Tolerate stopping an already-stopped timer handle (per macro name;
 * exact semantics are defined in tw_timer_template.h). */
#define TW_TIMER_ALLOW_DUPLICATE_STOP 1

/*
 * NOTE(review): TW_OVERFLOW_VECTOR and TW_START_STOP_TRACE_SIZE are
 * #undef'd above but not redefined here — presumably the template falls
 * back to its defaults; confirm against tw_timer_template.h.
 */

#include <vppinfra/tw_timer_template.h>

#endif /* __included_tw_timer_16t_2w_512sl_h__ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
on prediction * is wrong, correct it. */ if ((f_sess_id_next.as_u64 != ~0ULL) && 0 == memcmp (&fa_5tuple[1], &fa_5tuple[0], sizeof (fa_5tuple[1]))) f_sess_id_next.as_u64 = ~0ULL; } } } } if (acl_check_needed) { if (is_input) lc_index0 = am->input_lc_index_by_sw_if_index[sw_if_index[0]]; else lc_index0 = am->output_lc_index_by_sw_if_index[sw_if_index[0]]; action = 0; /* deny by default */ int is_match = acl_plugin_match_5tuple_inline (am, lc_index0, (fa_5tuple_opaque_t *) & fa_5tuple[0], is_ip6, &action, &match_acl_pos, &match_acl_in_index, &match_rule_index, &trace_bitmap); if (PREDICT_FALSE (is_match && am->interface_acl_counters_enabled)) { u32 buf_len = vlib_buffer_length_in_chain (vm, b[0]); vlib_increment_combined_counter (am->combined_acl_counters + saved_matched_acl_index, thread_index, saved_matched_ace_index, saved_packet_count, saved_byte_count); saved_matched_acl_index = match_acl_in_index; saved_matched_ace_index = match_rule_index; saved_packet_count = 1; saved_byte_count = buf_len; /* prefetch the counter that we are going to increment */ vlib_prefetch_combined_counter (am->combined_acl_counters + saved_matched_acl_index, thread_index, saved_matched_ace_index); } b[0]->error = error_node->errors[action]; if (1 == action) pkts_acl_permit++; if (2 == action) { if (!acl_fa_can_add_session (am, is_input, sw_if_index[0])) acl_fa_try_recycle_session (am, is_input, thread_index, sw_if_index[0], now); if (acl_fa_can_add_session (am, is_input, sw_if_index[0])) { u16 current_policy_epoch = get_current_policy_epoch (am, is_input, sw_if_index[0]); fa_full_session_id_t f_sess_id = acl_fa_add_session (am, is_input, is_ip6, sw_if_index[0], now, &fa_5tuple[0], current_policy_epoch); /* perform the accounting for the newly added session */ process_established_session (vm, am, node->node_index, is_input, now, f_sess_id, &sw_if_index[0], &fa_5tuple[0], b[0]->current_length, node_trace_on, &trace_bitmap); pkts_new_session++; /* * If the next 5tuple is the same and we 
just added the session, * the f_sess_id_next can not be ~0. Correct it. */ if ((f_sess_id_next.as_u64 == ~0ULL) && 0 == memcmp (&fa_5tuple[1], &fa_5tuple[0], sizeof (fa_5tuple[1]))) f_sess_id_next = f_sess_id; } else { action = 0; b[0]->error = error_node->errors [ACL_FA_ERROR_ACL_TOO_MANY_SESSIONS]; } } } { u32 next0; /* speculatively get the next0 */ vnet_feature_next (&next0, b[0]); /* if the action is not deny - then use that next */ next[0] = action ? next0 : 0; } if (node_trace_on) // PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE)) { maybe_trace_buffer (vm, node, b[0], sw_if_index[0], lc_index0, next[0], match_acl_in_index, match_rule_index, &fa_5tuple[0], action, trace_bitmap); } next++; b++; fa_5tuple++; sw_if_index++; hash++; n_left -= 1; } } vlib_buffer_enqueue_to_next (vm, node, from, pw->nexts, frame->n_vectors); /* * if we were had an acl match then we have a counter to increment. * else it is all zeroes, so this will be harmless. */ vlib_increment_combined_counter (am->combined_acl_counters + saved_matched_acl_index, thread_index, saved_matched_ace_index, saved_packet_count, saved_byte_count); vlib_node_increment_counter (vm, node->node_index, ACL_FA_ERROR_ACL_CHECK, frame->n_vectors); vlib_node_increment_counter (vm, node->node_index, ACL_FA_ERROR_ACL_EXIST_SESSION, pkts_exist_session); vlib_node_increment_counter (vm, node->node_index, ACL_FA_ERROR_ACL_NEW_SESSION, pkts_new_session); vlib_node_increment_counter (vm, node->node_index, ACL_FA_ERROR_ACL_PERMIT, pkts_acl_permit); return frame->n_vectors; } always_inline uword acl_fa_outer_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame, int is_ip6, int is_input, int is_l2_path, int do_stateful_datapath) { acl_main_t *am = &acl_main; acl_fa_node_common_prepare_fn (vm, node, frame, is_ip6, is_input, is_l2_path, do_stateful_datapath); if (am->reclassify_sessions) { if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE)) return acl_fa_inner_node_fn (vm, node, frame, is_ip6, 
is_input, is_l2_path, do_stateful_datapath, 1 /* trace */ , 1 /* reclassify */ ); else return acl_fa_inner_node_fn (vm, node, frame, is_ip6, is_input, is_l2_path, do_stateful_datapath, 0, 1 /* reclassify */ ); } else { if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE)) return acl_fa_inner_node_fn (vm, node, frame, is_ip6, is_input, is_l2_path, do_stateful_datapath, 1 /* trace */ , 0); else return acl_fa_inner_node_fn (vm, node, frame, is_ip6, is_input, is_l2_path, do_stateful_datapath, 0, 0); } } always_inline uword acl_fa_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame, int is_ip6, int is_input, int is_l2_path) { /* select the reclassify/no-reclassify version of the datapath */ acl_main_t *am = &acl_main; if (am->fa_sessions_hash_is_initialized) return acl_fa_outer_node_fn (vm, node, frame, is_ip6, is_input, is_l2_path, 1); else return acl_fa_outer_node_fn (vm, node, frame, is_ip6, is_input, is_l2_path, 0); } static u8 * format_fa_5tuple (u8 * s, va_list * args) { fa_5tuple_t *p5t = va_arg (*args, fa_5tuple_t *); void *paddr0; void *paddr1; void *format_address_func; void *ip_af; void *ip_frag_txt = p5t->pkt.is_nonfirst_fragment ? " non-initial fragment" : ""; if (p5t->pkt.is_ip6) { ip_af = "ip6"; format_address_func = format_ip6_address; paddr0 = &p5t->ip6_addr[0]; paddr1 = &p5t->ip6_addr[1]; } else { ip_af = "ip4"; format_address_func = format_ip4_address; paddr0 = &p5t->ip4_addr[0]; paddr1 = &p5t->ip4_addr[1]; } s = format (s, "lc_index %d l3 %s%s ", p5t->pkt.lc_index, ip_af, ip_frag_txt); s = format (s, "%U -> %U ", format_address_func, paddr0, format_address_func, paddr1); s = format (s, "%U ", format_fa_session_l4_key, &p5t->l4); s = format (s, "tcp flags (%s) %02x rsvd %x", p5t->pkt.tcp_flags_valid ? 
"valid" : "invalid", p5t->pkt.tcp_flags, p5t->pkt.flags_reserved); return s; } #ifndef CLIB_MARCH_VARIANT u8 * format_acl_plugin_5tuple (u8 * s, va_list * args) { return format_fa_5tuple (s, args); } #endif /* packet trace format function */ static u8 * format_acl_plugin_trace (u8 * s, va_list * args) { CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); acl_fa_trace_t *t = va_arg (*args, acl_fa_trace_t *); s = format (s, "acl-plugin: lc_index: %d, sw_if_index %d, next index %d, action: %d, match: acl %d rule %d trace_bits %08x\n" " pkt info %016llx %016llx %016llx %016llx %016llx %016llx", t->lc_index, t->sw_if_index, t->next_index, t->action, t->match_acl_in_index, t->match_rule_index, t->trace_bitmap, t->packet_info[0], t->packet_info[1], t->packet_info[2], t->packet_info[3], t->packet_info[4], t->packet_info[5]); /* Now also print out the packet_info in a form usable by humans */ s = format (s, "\n %U", format_fa_5tuple, t->packet_info); return s; } /* *INDENT-OFF* */ static char *acl_fa_error_strings[] = { #define _(sym,string) string, foreach_acl_fa_error #undef _ }; VLIB_NODE_FN (acl_in_l2_ip6_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { return acl_fa_node_fn (vm, node, frame, 1, 1, 1); } VLIB_NODE_FN (acl_in_l2_ip4_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { return acl_fa_node_fn (vm, node, frame, 0, 1, 1); } VLIB_NODE_FN (acl_out_l2_ip6_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { return acl_fa_node_fn (vm, node, frame, 1, 0, 1); } VLIB_NODE_FN (acl_out_l2_ip4_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { return acl_fa_node_fn (vm, node, frame, 0, 0, 1); } /**** L3 processing path nodes ****/ VLIB_NODE_FN (acl_in_fa_ip6_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { return acl_fa_node_fn (vm, node, frame, 1, 1, 0); } 
/* IPv4 input ACL node on the L3 (ip4-unicast) path. */
VLIB_NODE_FN (acl_in_fa_ip4_node) (vlib_main_t * vm,
				   vlib_node_runtime_t * node,
				   vlib_frame_t * frame)
{
  /* is_ip6 = 0, is_input = 1, is_l2_path = 0 */
  return acl_fa_node_fn (vm, node, frame, 0, 1, 0);
}

/* IPv6 output ACL node on the L3 (ip6-output) path. */
VLIB_NODE_FN (acl_out_fa_ip6_node) (vlib_main_t * vm,
				    vlib_node_runtime_t * node,
				    vlib_frame_t * frame)
{
  /* is_ip6 = 1, is_input = 0, is_l2_path = 0 */
  return acl_fa_node_fn (vm, node, frame, 1, 0, 0);
}

/* IPv4 output ACL node on the L3 (ip4-output) path. */
VLIB_NODE_FN (acl_out_fa_ip4_node) (vlib_main_t * vm,
				    vlib_node_runtime_t * node,
				    vlib_frame_t * frame)
{
  /* is_ip6 = 0, is_input = 0, is_l2_path = 0 */
  return acl_fa_node_fn (vm, node, frame, 0, 0, 0);
}

/*
 * Graph-node registrations and feature-arc placement.  Every node shares
 * the acl_fa_error_strings counter table and declares a single explicit
 * next node ("error-drop") at slot ACL_FA_ERROR_DROP; the normal
 * forwarding continuation is resolved at runtime via vnet_feature_next()
 * in the datapath, so no other next nodes are registered here.
 */

/* L2 bridged path, IPv6 input. */
VLIB_REGISTER_NODE (acl_in_l2_ip6_node) =
{
  .name = "acl-plugin-in-ip6-l2",
  .vector_size = sizeof (u32),
  .format_trace = format_acl_plugin_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (acl_fa_error_strings),
  .error_strings = acl_fa_error_strings,
  .n_next_nodes = ACL_FA_N_NEXT,
  .next_nodes = {
    [ACL_FA_ERROR_DROP] = "error-drop",
  }
};

VNET_FEATURE_INIT (acl_in_l2_ip6_fa_feature, static) =
{
  .arc_name = "l2-input-ip6",
  .node_name = "acl-plugin-in-ip6-l2",
  .runs_before = VNET_FEATURES ("l2-input-feat-arc-end"),
};

/* L2 bridged path, IPv4 input. */
VLIB_REGISTER_NODE (acl_in_l2_ip4_node) =
{
  .name = "acl-plugin-in-ip4-l2",
  .vector_size = sizeof (u32),
  .format_trace = format_acl_plugin_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (acl_fa_error_strings),
  .error_strings = acl_fa_error_strings,
  .n_next_nodes = ACL_FA_N_NEXT,
  .next_nodes = {
    [ACL_FA_ERROR_DROP] = "error-drop",
  }
};

VNET_FEATURE_INIT (acl_in_l2_ip4_fa_feature, static) =
{
  .arc_name = "l2-input-ip4",
  .node_name = "acl-plugin-in-ip4-l2",
  .runs_before = VNET_FEATURES ("l2-input-feat-arc-end"),
};

/* L2 bridged path, IPv6 output. */
VLIB_REGISTER_NODE (acl_out_l2_ip6_node) =
{
  .name = "acl-plugin-out-ip6-l2",
  .vector_size = sizeof (u32),
  .format_trace = format_acl_plugin_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (acl_fa_error_strings),
  .error_strings = acl_fa_error_strings,
  .n_next_nodes = ACL_FA_N_NEXT,
  .next_nodes = {
    [ACL_FA_ERROR_DROP] = "error-drop",
  }
};

VNET_FEATURE_INIT (acl_out_l2_ip6_fa_feature, static) =
{
  .arc_name = "l2-output-ip6",
  .node_name = "acl-plugin-out-ip6-l2",
  .runs_before = VNET_FEATURES ("l2-output-feat-arc-end"),
};

/* L2 bridged path, IPv4 output. */
VLIB_REGISTER_NODE (acl_out_l2_ip4_node) =
{
  .name = "acl-plugin-out-ip4-l2",
  .vector_size = sizeof (u32),
  .format_trace = format_acl_plugin_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (acl_fa_error_strings),
  .error_strings = acl_fa_error_strings,
  .n_next_nodes = ACL_FA_N_NEXT,
  .next_nodes = {
    [ACL_FA_ERROR_DROP] = "error-drop",
  }
};

VNET_FEATURE_INIT (acl_out_l2_ip4_fa_feature, static) =
{
  .arc_name = "l2-output-ip4",
  .node_name = "acl-plugin-out-ip4-l2",
  .runs_before = VNET_FEATURES ("l2-output-feat-arc-end"),
};

/* L3 routed path, IPv6 input (ip6-unicast arc). */
VLIB_REGISTER_NODE (acl_in_fa_ip6_node) =
{
  .name = "acl-plugin-in-ip6-fa",
  .vector_size = sizeof (u32),
  .format_trace = format_acl_plugin_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (acl_fa_error_strings),
  .error_strings = acl_fa_error_strings,
  .n_next_nodes = ACL_FA_N_NEXT,
  .next_nodes = {
    [ACL_FA_ERROR_DROP] = "error-drop",
  }
};

VNET_FEATURE_INIT (acl_in_ip6_fa_feature, static) =
{
  .arc_name = "ip6-unicast",
  .node_name = "acl-plugin-in-ip6-fa",
  .runs_before = VNET_FEATURES ("ip6-flow-classify"),
};

/* L3 routed path, IPv4 input (ip4-unicast arc). */
VLIB_REGISTER_NODE (acl_in_fa_ip4_node) =
{
  .name = "acl-plugin-in-ip4-fa",
  .vector_size = sizeof (u32),
  .format_trace = format_acl_plugin_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (acl_fa_error_strings),
  .error_strings = acl_fa_error_strings,
  .n_next_nodes = ACL_FA_N_NEXT,
  .next_nodes = {
    [ACL_FA_ERROR_DROP] = "error-drop",
  }
};

VNET_FEATURE_INIT (acl_in_ip4_fa_feature, static) =
{
  .arc_name = "ip4-unicast",
  .node_name = "acl-plugin-in-ip4-fa",
  .runs_before = VNET_FEATURES ("ip4-flow-classify"),
};

/* L3 routed path, IPv6 output (ip6-output arc). */
VLIB_REGISTER_NODE (acl_out_fa_ip6_node) =
{
  .name = "acl-plugin-out-ip6-fa",
  .vector_size = sizeof (u32),
  .format_trace = format_acl_plugin_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (acl_fa_error_strings),
  .error_strings = acl_fa_error_strings,
  .n_next_nodes = ACL_FA_N_NEXT,
  .next_nodes = {
    [ACL_FA_ERROR_DROP] = "error-drop",
  }
};

VNET_FEATURE_INIT (acl_out_ip6_fa_feature, static) =
{
  .arc_name = "ip6-output",
  .node_name = "acl-plugin-out-ip6-fa",
  .runs_before = VNET_FEATURES ("interface-output"),
};

/* L3 routed path, IPv4 output (ip4-output arc). */
VLIB_REGISTER_NODE (acl_out_fa_ip4_node) =
{
  .name = "acl-plugin-out-ip4-fa",
  .vector_size = sizeof (u32),
  .format_trace = format_acl_plugin_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (acl_fa_error_strings),
  .error_strings = acl_fa_error_strings,
  .n_next_nodes = ACL_FA_N_NEXT,
  /* edit / add dispositions here */
  .next_nodes = {
    [ACL_FA_ERROR_DROP] = "error-drop",
  }
};

VNET_FEATURE_INIT (acl_out_ip4_fa_feature, static) =
{
  .arc_name = "ip4-output",
  .node_name = "acl-plugin-out-ip4-fa",
  .runs_before = VNET_FEATURES ("interface-output"),
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */