author    Florin Coras <fcoras@cisco.com>    2021-05-06 15:32:14 -0700
committer Damjan Marion <dmarion@me.com>     2021-05-07 10:46:20 +0000
commit    94a6df005d9e94f3b836ad3c93d1464b4a840499 (patch)
tree      c55259ac6ce0e373abf7633353c29ad74e305b26 /src
parent    bd376a8ce8425f45b23074398949e8055de29280 (diff)
session: connects seg manager is always first
By convention, the connects segment manager is always allocated first. It is therefore the one whose first segment holds the app's message queue. This saves us the trouble of allocating it on first connect when the app started by listening, and we no longer need to track whether the first segment manager is assignable to a listener or whether it can be removed.

Type: improvement
Signed-off-by: Florin Coras <fcoras@cisco.com>
Change-Id: Iba9a8ffaab618eeb41ec2144dcfee62d006dc7a2
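A minimal sketch of the invariant (illustrative only, built from helpers that appear in the diff below): the worker's message queue lives in the first segment of its connects segment manager, which is now allocated at attach time.

    app_worker_t *app_wrk = app_worker_get (wrk_index);
    segment_manager_t *sm =
      segment_manager_get (app_wrk->connects_seg_manager);
    /* same queue the worker caches in app_wrk->event_queue at attach time */
    svm_msg_q_t *mq = segment_manager_event_queue (sm);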
Diffstat (limited to 'src')
-rw-r--r--  src/plugins/hs_apps/http_server.c        4
-rw-r--r--  src/plugins/hs_apps/proxy.c              3
-rw-r--r--  src/plugins/http_static/static_server.c  4
-rw-r--r--  src/plugins/quic/quic.c                  3
-rw-r--r--  src/plugins/unittest/session_test.c      5
-rw-r--r--  src/vnet/session/application.c           8
-rw-r--r--  src/vnet/session/application.h           18
-rw-r--r--  src/vnet/session/application_worker.c    74
-rw-r--r--  src/vnet/tls/tls.c                       2
9 files changed, 32 insertions, 89 deletions
diff --git a/src/plugins/hs_apps/http_server.c b/src/plugins/hs_apps/http_server.c
index 34892b6bd4d..a46e0a4ae13 100644
--- a/src/plugins/hs_apps/http_server.c
+++ b/src/plugins/hs_apps/http_server.c
@@ -671,8 +671,7 @@ http_server_session_connected_callback (u32 app_index, u32 api_context,
static int
http_server_add_segment_callback (u32 client_index, u64 segment_handle)
{
- clib_warning ("called...");
- return -1;
+ return 0;
}
static void
@@ -732,6 +731,7 @@ http_server_attach ()
a->session_cb_vft = &http_server_session_cb_vft;
a->options = options;
a->options[APP_OPTIONS_SEGMENT_SIZE] = segment_size;
+ a->options[APP_OPTIONS_ADD_SEGMENT_SIZE] = segment_size;
a->options[APP_OPTIONS_RX_FIFO_SIZE] =
hsm->fifo_size ? hsm->fifo_size : 8 << 10;
a->options[APP_OPTIONS_TX_FIFO_SIZE] =
diff --git a/src/plugins/hs_apps/proxy.c b/src/plugins/hs_apps/proxy.c
index 1a49a0f1f3a..18d6edd6be2 100644
--- a/src/plugins/hs_apps/proxy.c
+++ b/src/plugins/hs_apps/proxy.c
@@ -282,8 +282,7 @@ proxy_connected_callback (u32 app_index, u32 api_context,
static int
proxy_add_segment_callback (u32 client_index, u64 segment_handle)
{
- clib_warning ("called...");
- return -1;
+ return 0;
}
static int
diff --git a/src/plugins/http_static/static_server.c b/src/plugins/http_static/static_server.c
index 23860b083d8..c715dfa6fb8 100644
--- a/src/plugins/http_static/static_server.c
+++ b/src/plugins/http_static/static_server.c
@@ -1095,8 +1095,7 @@ http_static_server_session_connected_callback (u32 app_index, u32 api_context,
static int
http_static_server_add_segment_callback (u32 client_index, u64 segment_handle)
{
- clib_warning ("called...");
- return -1;
+ return 0;
}
static void
@@ -1157,6 +1156,7 @@ http_static_server_attach ()
a->session_cb_vft = &http_static_server_session_cb_vft;
a->options = options;
a->options[APP_OPTIONS_SEGMENT_SIZE] = segment_size;
+ a->options[APP_OPTIONS_ADD_SEGMENT_SIZE] = segment_size;
a->options[APP_OPTIONS_RX_FIFO_SIZE] =
hsm->fifo_size ? hsm->fifo_size : 8 << 10;
a->options[APP_OPTIONS_TX_FIFO_SIZE] =
diff --git a/src/plugins/quic/quic.c b/src/plugins/quic/quic.c
index 11c3ac2d718..5a57947325a 100644
--- a/src/plugins/quic/quic.c
+++ b/src/plugins/quic/quic.c
@@ -1451,9 +1451,6 @@ quic_start_listen (u32 quic_listen_session_index, transport_endpoint_t * tep)
ccfg = &sep->ext_cfg->crypto;
app_wrk = app_worker_get (sep->app_wrk_index);
- /* We need to call this because we call app_worker_init_connected in
- * quic_accept_stream, which assumes the connect segment manager exists */
- app_worker_alloc_connects_segment_manager (app_wrk);
app = application_get (app_wrk->app_index);
QUIC_DBG (2, "Called quic_start_listen for app %d", app_wrk->app_index);
diff --git a/src/plugins/unittest/session_test.c b/src/plugins/unittest/session_test.c
index cd99b0c5416..ad9c976f49e 100644
--- a/src/plugins/unittest/session_test.c
+++ b/src/plugins/unittest/session_test.c
@@ -337,6 +337,9 @@ session_test_endpoint_cfg (vlib_main_t * vm, unformat_input_t * input)
attach_args.name = format (0, "session_test_server");
attach_args.namespace_id = appns_id;
+ /* Allow server to allocate another segment for listens. Needed
+ * because by default we do not allow segment additions */
+ attach_args.options[APP_OPTIONS_ADD_SEGMENT_SIZE] = 32 << 20;
attach_args.options[APP_OPTIONS_NAMESPACE_SECRET] = placeholder_secret;
error = vnet_application_attach (&attach_args);
SESSION_TEST ((error == 0), "server app attached: %U", format_clib_error,
@@ -1839,7 +1842,7 @@ session_test_mq_speed (vlib_main_t * vm, unformat_input_t * input)
SESSION_TEST (prod_fd != -1, "mq producer eventfd valid %u", prod_fd);
}
- sm = app_worker_get_or_alloc_connect_segment_manager (app_wrk);
+ sm = app_worker_get_connect_segment_manager (app_wrk);
segment_manager_alloc_session_fifos (sm, 0, &rx_fifo, &tx_fifo);
s.rx_fifo = rx_fifo;
s.tx_fifo = tx_fifo;
diff --git a/src/vnet/session/application.c b/src/vnet/session/application.c
index 2f6bfa97892..8abec7797ec 100644
--- a/src/vnet/session/application.c
+++ b/src/vnet/session/application.c
@@ -1008,7 +1008,7 @@ application_alloc_worker_and_init (application_t * app, app_worker_t ** wrk)
/*
* Setup app worker
*/
- app_wrk->first_segment_manager = segment_manager_index (sm);
+ app_wrk->connects_seg_manager = segment_manager_index (sm);
app_wrk->listeners_table = hash_create (0, sizeof (u64));
app_wrk->event_queue = segment_manager_event_queue (sm);
app_wrk->app_is_builtin = application_is_builtin (app);
@@ -1041,7 +1041,7 @@ vnet_app_worker_add_del (vnet_app_worker_add_del_args_t * a)
app_wrk->api_client_index = a->api_client_index;
application_api_table_add (app->app_index, a->api_client_index);
- sm = segment_manager_get (app_wrk->first_segment_manager);
+ sm = segment_manager_get (app_wrk->connects_seg_manager);
fs = segment_manager_get_segment_w_lock (sm, 0);
a->segment = &fs->ssvm;
a->segment_handle = segment_manager_segment_handle (sm, fs);
@@ -1157,7 +1157,7 @@ vnet_application_attach (vnet_app_attach_args_t * a)
a->app_evt_q = app_wrk->event_queue;
app_wrk->api_client_index = a->api_client_index;
- sm = segment_manager_get (app_wrk->first_segment_manager);
+ sm = segment_manager_get (app_wrk->connects_seg_manager);
fs = segment_manager_get_segment_w_lock (sm, 0);
if (application_is_proxy (app))
@@ -1165,7 +1165,7 @@ vnet_application_attach (vnet_app_attach_args_t * a)
application_setup_proxy (app);
/* The segment manager pool is reallocated because a new listener
* is added. Re-grab segment manager to avoid dangling reference */
- sm = segment_manager_get (app_wrk->first_segment_manager);
+ sm = segment_manager_get (app_wrk->connects_seg_manager);
}
ASSERT (vec_len (fs->ssvm.name) <= 128);
diff --git a/src/vnet/session/application.h b/src/vnet/session/application.h
index 0bfd4d1d813..a8ddfec07f5 100644
--- a/src/vnet/session/application.h
+++ b/src/vnet/session/application.h
@@ -45,20 +45,17 @@ typedef struct app_worker_
/** Application listens for events on this svm queue */
svm_msg_q_t *event_queue;
- /** Segment manager used for outgoing connects issued by the app */
+ /**
+ * Segment manager used for outgoing connects issued by the app. By
+ * convention this is the first segment manager allocated by the worker
+ * so it's also the one that holds the first segment with the app's
+ * message queue in it.
+ */
u32 connects_seg_manager;
/** Lookup tables for listeners. Value is segment manager index */
uword *listeners_table;
- /**
- * First segment manager has in the first segment the application's
- * event fifo. Depending on what the app does, it may be either used for
- * a listener or for connects.
- */
- u32 first_segment_manager;
- u8 first_segment_manager_in_use;
-
/** API index for the worker. Needed for multi-process apps */
u32 api_client_index;
@@ -339,9 +336,6 @@ int app_worker_session_fifo_tuning (app_worker_t * app_wrk, session_t * s,
segment_manager_t *app_worker_get_listen_segment_manager (app_worker_t *,
session_t *);
segment_manager_t *app_worker_get_connect_segment_manager (app_worker_t *);
-segment_manager_t
- * app_worker_get_or_alloc_connect_segment_manager (app_worker_t *);
-int app_worker_alloc_connects_segment_manager (app_worker_t * app);
int app_worker_add_segment_notify (app_worker_t * app_wrk,
u64 segment_handle);
int app_worker_del_segment_notify (app_worker_t * app_wrk,
diff --git a/src/vnet/session/application_worker.c b/src/vnet/session/application_worker.c
index 8c3be22d4dc..7e03171b115 100644
--- a/src/vnet/session/application_worker.c
+++ b/src/vnet/session/application_worker.c
@@ -32,7 +32,6 @@ app_worker_alloc (application_t * app)
app_wrk->app_index = app->app_index;
app_wrk->wrk_map_index = ~0;
app_wrk->connects_seg_manager = APP_INVALID_SEGMENT_MANAGER_INDEX;
- app_wrk->first_segment_manager = APP_INVALID_SEGMENT_MANAGER_INDEX;
clib_spinlock_init (&app_wrk->detached_seg_managers_lock);
APP_DBG ("New app %v worker %u", app->name, app_wrk->wrk_index);
return app_wrk;
@@ -143,17 +142,6 @@ app_worker_free (app_worker_t * app_wrk)
vec_free (app_wrk->detached_seg_managers);
clib_spinlock_free (&app_wrk->detached_seg_managers_lock);
- /* If first segment manager is used by a listener that recently
- * stopped listening, mark it as detached */
- if (app_wrk->first_segment_manager != app_wrk->connects_seg_manager
- && (sm = segment_manager_get_if_valid (app_wrk->first_segment_manager))
- && !segment_manager_app_detached (sm))
- {
- sm->first_is_protected = 0;
- sm->app_wrk_index = SEGMENT_MANAGER_INVALID_APP_INDEX;
- segment_manager_init_free (sm);
- }
-
if (CLIB_DEBUG)
clib_memset (app_wrk, 0xfe, sizeof (*app_wrk));
pool_put (app_workers, app_wrk);
@@ -172,19 +160,9 @@ app_worker_get_app (u32 wrk_index)
static segment_manager_t *
app_worker_alloc_segment_manager (app_worker_t * app_wrk)
{
- segment_manager_t *sm = 0;
+ segment_manager_t *sm;
- /* If the first segment manager is not in use, don't allocate a new one */
- if (app_wrk->first_segment_manager != APP_INVALID_SEGMENT_MANAGER_INDEX
- && app_wrk->first_segment_manager_in_use == 0)
- {
- sm = segment_manager_get (app_wrk->first_segment_manager);
- app_wrk->first_segment_manager_in_use = 1;
- }
- else
- {
- sm = segment_manager_alloc ();
- }
+ sm = segment_manager_alloc ();
sm->app_wrk_index = app_wrk->wrk_index;
segment_manager_init (sm);
return sm;
@@ -309,20 +287,21 @@ app_worker_stop_listen_session (app_worker_t * app_wrk, session_t * ls)
/* Try to cleanup segment manager */
sm = segment_manager_get (*sm_indexp);
- if (sm && segment_manager_has_fifos (sm))
- {
- /* Delete sessions in CREATED state */
- vec_add1 (states, SESSION_STATE_CREATED);
- segment_manager_del_sessions_filter (sm, states);
- vec_free (states);
- }
- if (sm && app_wrk->first_segment_manager != *sm_indexp)
+ if (sm)
{
segment_manager_app_detach (sm);
if (!segment_manager_has_fifos (sm))
- segment_manager_free (sm);
+ {
+ /* Empty segment manager, clean it up */
+ segment_manager_free (sm);
+ }
else
{
+ /* Delete sessions in CREATED state */
+ vec_add1 (states, SESSION_STATE_CREATED);
+ segment_manager_del_sessions_filter (sm, states);
+ vec_free (states);
+
/* Track segment manager in case app detaches and all the
* outstanding sessions need to be closed */
app_worker_add_detached_sm (app_wrk, *sm_indexp);
@@ -534,7 +513,7 @@ app_worker_own_session (app_worker_t * app_wrk, session_t * s)
s->rx_fifo = 0;
s->tx_fifo = 0;
- sm = app_worker_get_or_alloc_connect_segment_manager (app_wrk);
+ sm = app_worker_get_connect_segment_manager (app_wrk);
if (app_worker_alloc_session_fifos (sm, s))
return -1;
@@ -555,10 +534,6 @@ app_worker_connect_session (app_worker_t * app_wrk, session_endpoint_t * sep,
{
int rv;
- /* Make sure we have a segment manager for connects */
- if (app_worker_alloc_connects_segment_manager (app_wrk))
- return SESSION_E_ALLOC;
-
if ((rv = session_open (app_wrk->wrk_index, sep, api_context)))
return rv;
@@ -574,21 +549,6 @@ app_worker_session_fifo_tuning (app_worker_t * app_wrk, session_t * s,
return app->cb_fns.fifo_tuning_callback (s, f, act, len);
}
-int
-app_worker_alloc_connects_segment_manager (app_worker_t * app_wrk)
-{
- segment_manager_t *sm;
-
- if (app_wrk->connects_seg_manager == APP_INVALID_SEGMENT_MANAGER_INDEX)
- {
- sm = app_worker_alloc_segment_manager (app_wrk);
- if (sm == 0)
- return -1;
- app_wrk->connects_seg_manager = segment_manager_index (sm);
- }
- return 0;
-}
-
segment_manager_t *
app_worker_get_connect_segment_manager (app_worker_t * app)
{
@@ -597,14 +557,6 @@ app_worker_get_connect_segment_manager (app_worker_t * app)
}
segment_manager_t *
-app_worker_get_or_alloc_connect_segment_manager (app_worker_t * app_wrk)
-{
- if (app_wrk->connects_seg_manager == (u32) ~ 0)
- app_worker_alloc_connects_segment_manager (app_wrk);
- return segment_manager_get (app_wrk->connects_seg_manager);
-}
-
-segment_manager_t *
app_worker_get_listen_segment_manager (app_worker_t * app,
session_t * listener)
{
diff --git a/src/vnet/tls/tls.c b/src/vnet/tls/tls.c
index 57dcc7fbb54..e19f8e6c040 100644
--- a/src/vnet/tls/tls.c
+++ b/src/vnet/tls/tls.c
@@ -711,7 +711,6 @@ tls_connect (transport_endpoint_cfg_t * tep)
}
tls_ctx_half_open_reader_unlock ();
- app_worker_alloc_connects_segment_manager (app_wrk);
ctx->tls_ctx_engine = engine_type;
clib_memcpy_fast (&cargs->sep, sep, sizeof (session_endpoint_t));
@@ -1138,7 +1137,6 @@ dtls_connect (transport_endpoint_cfg_t *tep)
vec_terminate_c_string (ctx->srv_hostname);
}
- app_worker_alloc_connects_segment_manager (app_wrk);
ctx->tls_ctx_engine = engine_type;
clib_memcpy_fast (&cargs->sep, sep, sizeof (session_endpoint_t));
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/ip/ip.h>
#include <vnet/classify/vnet_classify.h>
#include <vnet/classify/in_out_acl.h>

typedef struct
{
  u32 sw_if_index;
  u32 next_index;
  u32 table_index;
  u32 offset;
}
ip_in_out_acl_trace_t;

/* packet trace format function */
static u8 *
format_ip_in_out_acl_trace (u8 * s, u32 is_output, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  ip_in_out_acl_trace_t *t = va_arg (*args, ip_in_out_acl_trace_t *);
  const vnet_classify_main_t *vcm = &vnet_classify_main;
  const u32 indent = format_get_indent (s);
  vnet_classify_table_t *table;
  vnet_classify_entry_t *e;

  s =
    format (s, "%s: sw_if_index %d, next_index %d, table_index %d, offset %d",
	    is_output ? "OUTACL" : "INACL", t->sw_if_index, t->next_index,
	    t->table_index, t->offset);

  if (pool_is_free_index (vcm->tables, t->table_index))
    return format (s, "\n%Uno table", format_white_space, indent + 4);

  if (~0 == t->offset)
    return format (s, "\n%Uno match", format_white_space, indent + 4);

  table = vnet_classify_table_get (t->table_index);
  e = vnet_classify_get_entry (table, t->offset);
  return format (s, "\n%U%U", format_white_space, indent + 4,
		 format_classify_entry, table, e);
}

static u8 *
format_ip_inacl_trace (u8 * s, va_list * args)
{
  return format_ip_in_out_acl_trace (s, 0 /* is_output */ , args);
}

static u8 *
format_ip_outacl_trace (u8 * s, va_list * args)
{
  return format_ip_in_out_acl_trace (s, 1 /* is_output */ , args);
}

extern vlib_node_registration_t ip4_inacl_node;
extern vlib_node_registration_t ip4_outacl_node;
extern vlib_node_registration_t ip6_inacl_node;
extern vlib_node_registration_t ip6_outacl_node;

#define foreach_ip_inacl_error                  \
_(MISS, "input ACL misses")                     \
_(HIT, "input ACL hits")                        \
_(CHAIN_HIT, "input ACL hits after chain walk")

#define foreach_ip_outacl_error                  \
_(MISS, "output ACL misses")                     \
_(HIT, "output ACL hits")                        \
_(CHAIN_HIT, "output ACL hits after chain walk")

typedef enum
{
#define _(sym,str) IP_INACL_ERROR_##sym,
  foreach_ip_inacl_error
#undef _
    IP_INACL_N_ERROR,
}
ip_inacl_error_t;

static char *ip_inacl_error_strings[] = {
#define _(sym,string) string,
  foreach_ip_inacl_error
#undef _
};

typedef enum
{
#define _(sym,str) IP_OUTACL_ERROR_##sym,
  foreach_ip_outacl_error
#undef _
    IP_OUTACL_N_ERROR,
}
ip_outacl_error_t;

static char *ip_outacl_error_strings[] = {
#define _(sym,string) string,
  foreach_ip_outacl_error
#undef _
};

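/*
 * Classify a vector of buffers against the per-interface chain of
 * classifier tables. The loop is software-pipelined: slots 2/3 of the
 * h/t/hash arrays hold the lookahead pair of packets, whose hashes are
 * computed and whose buckets are prefetched one iteration ahead, while
 * slots 0/1 hold the pair currently being matched. do_trace is a
 * compile-time constant so the untraced variant carries no trace code.
 */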
static_always_inline void
ip_in_out_acl_inline_trace (
  vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame,
  vlib_buffer_t **b, u16 *next, u32 n_left, u32 *hits__, u32 *misses__,
  u32 *chain_hits__, const vlib_error_t error_none,
  const vlib_error_t error_deny, const vlib_error_t error_miss,
  vnet_classify_table_t *tables, const u32 *table_index_by_sw_if_index,
  u32 *fib_index_by_sw_if_index, vnet_config_main_t *cm,
  const vlib_rx_or_tx_t way, const int is_output, const int do_trace)
{
  f64 now = vlib_time_now (vm);
  u32 hits = 0;
  u32 misses = 0;
  u32 chain_hits = 0;
  u32 n_next_nodes = node->n_next_nodes;
  u8 *h[4];
  u32 sw_if_index[4];
  u32 table_index[4];
  vnet_classify_table_t *t[4] = { 0, 0 };
  u32 hash[4];

  /* calculate hashes for b[0] & b[1] */
  if (n_left >= 2)
    {
      /* ~0 is used as a wildcard to say 'always use sw_if_index 0'
       * aka local0. It is used when we do not care about the sw_if_index, as
       * when punting */
      sw_if_index[2] = ~0 == way ? 0 : vnet_buffer (b[0])->sw_if_index[way];
      sw_if_index[3] = ~0 == way ? 0 : vnet_buffer (b[1])->sw_if_index[way];

      table_index[2] = table_index_by_sw_if_index[sw_if_index[2]];
      table_index[3] = table_index_by_sw_if_index[sw_if_index[3]];

      t[2] = pool_elt_at_index (tables, table_index[2]);
      t[3] = pool_elt_at_index (tables, table_index[3]);

      if (t[2]->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
	h[2] =
	  (void *) vlib_buffer_get_current (b[0]) + t[2]->current_data_offset;
      else
	h[2] = b[0]->data;

      if (t[3]->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
	h[3] =
	  (void *) vlib_buffer_get_current (b[1]) + t[3]->current_data_offset;
      else
	h[3] = b[1]->data;

      if (is_output)
	{
	  /* Save the rewrite length, since we are using the l2_classify struct */
	  vnet_buffer (b[0])->l2.l2_len =
	    vnet_buffer (b[0])->ip.save_rewrite_length;
	  /* advance the match pointer so the matching happens on IP header */
	  h[2] += vnet_buffer (b[0])->l2.l2_len;

	  /* Save the rewrite length, since we are using the l2_classify struct */
	  vnet_buffer (b[1])->l2.l2_len =
	    vnet_buffer (b[1])->ip.save_rewrite_length;
	  /* advance the match pointer so the matching happens on IP header */
	  h[3] += vnet_buffer (b[1])->l2.l2_len;
	}

      hash[2] = vnet_classify_hash_packet_inline (t[2], (u8 *) h[2]);
      hash[3] = vnet_classify_hash_packet_inline (t[3], (u8 *) h[3]);

      vnet_buffer (b[0])->l2_classify.hash = hash[2];
      vnet_buffer (b[1])->l2_classify.hash = hash[3];

      vnet_buffer (b[0])->l2_classify.table_index = table_index[2];
      vnet_buffer (b[1])->l2_classify.table_index = table_index[3];

      vnet_buffer (b[0])->l2_classify.opaque_index = ~0;
      vnet_buffer (b[1])->l2_classify.opaque_index = ~0;

      vnet_classify_prefetch_bucket (t[2],
				     vnet_buffer (b[0])->l2_classify.hash);
      vnet_classify_prefetch_bucket (t[3],
				     vnet_buffer (b[1])->l2_classify.hash);
    }

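  /* dual-loop: shift the prehashed pair into slots 0/1 and match it
   * while hashing and prefetching the next pair into slots 2/3 */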
  while (n_left >= 2)
    {
      vnet_classify_entry_t *e[2] = { 0, 0 };
      u32 _next[2] = { ACL_NEXT_INDEX_DENY, ACL_NEXT_INDEX_DENY };

      h[0] = h[2];
      h[1] = h[3];
      t[0] = t[2];
      t[1] = t[3];

      sw_if_index[0] = sw_if_index[2];
      sw_if_index[1] = sw_if_index[3];

      table_index[0] = table_index[2];
      table_index[1] = table_index[3];

      hash[0] = hash[2];
      hash[1] = hash[3];

      /* prefetch next iteration */
      if (n_left >= 6)
	{
	  vlib_prefetch_buffer_header (b[4], LOAD);
	  vlib_prefetch_buffer_header (b[5], LOAD);

	  clib_prefetch_load (b[4]->data);
	  clib_prefetch_load (b[5]->data);
	}

      /* calculate hashes for b[2] & b[3] */
      if (n_left >= 4)
	{
	  sw_if_index[2] =
	    ~0 == way ? 0 : vnet_buffer (b[2])->sw_if_index[way];
	  sw_if_index[3] =
	    ~0 == way ? 0 : vnet_buffer (b[3])->sw_if_index[way];

	  table_index[2] = table_index_by_sw_if_index[sw_if_index[2]];
	  table_index[3] = table_index_by_sw_if_index[sw_if_index[3]];

	  t[2] = pool_elt_at_index (tables, table_index[2]);
	  t[3] = pool_elt_at_index (tables, table_index[3]);

	  if (t[2]->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
	    h[2] =
	      (void *) vlib_buffer_get_current (b[2]) +
	      t[2]->current_data_offset;
	  else
	    h[2] = b[2]->data;

	  if (t[3]->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
	    h[3] =
	      (void *) vlib_buffer_get_current (b[3]) +
	      t[3]->current_data_offset;
	  else
	    h[3] = b[3]->data;

	  if (is_output)
	    {
	      /* Save the rewrite length, since we are using the l2_classify struct */
	      vnet_buffer (b[2])->l2.l2_len =
		vnet_buffer (b[2])->ip.save_rewrite_length;
	      /* advance the match pointer so the matching happens on IP header */
	      h[2] += vnet_buffer (b[2])->l2.l2_len;

	      /* Save the rewrite length, since we are using the l2_classify struct */
	      vnet_buffer (b[3])->l2.l2_len =
		vnet_buffer (b[3])->ip.save_rewrite_length;
	      /* advance the match pointer so the matching happens on IP header */
	      h[3] += vnet_buffer (b[3])->l2.l2_len;
	    }

	  hash[2] = vnet_classify_hash_packet_inline (t[2], (u8 *) h[2]);
	  hash[3] = vnet_classify_hash_packet_inline (t[3], (u8 *) h[3]);

	  vnet_buffer (b[2])->l2_classify.hash = hash[2];
	  vnet_buffer (b[3])->l2_classify.hash = hash[3];

	  vnet_buffer (b[2])->l2_classify.table_index = table_index[2];
	  vnet_buffer (b[3])->l2_classify.table_index = table_index[3];

	  vnet_buffer (b[2])->l2_classify.opaque_index = ~0;
	  vnet_buffer (b[3])->l2_classify.opaque_index = ~0;

	  vnet_classify_prefetch_bucket (t[2],
					 vnet_buffer (b[2])->
					 l2_classify.hash);
	  vnet_classify_prefetch_bucket (t[3],
					 vnet_buffer (b[3])->
					 l2_classify.hash);
	}

      /* find entry for b[0] & b[1] */
      vnet_get_config_data (cm, &b[0]->current_config_index, &_next[0],
			    /* # bytes of config data */ 0);
      vnet_get_config_data (cm, &b[1]->current_config_index, &_next[1],
			    /* # bytes of config data */ 0);

      if (PREDICT_TRUE (table_index[0] != ~0))
	{
	  e[0] =
	    vnet_classify_find_entry_inline (t[0], (u8 *) h[0], hash[0], now);
	  if (e[0])
	    {
	      vnet_buffer (b[0])->l2_classify.opaque_index
		= e[0]->opaque_index;
	      vlib_buffer_advance (b[0], e[0]->advance);

	      _next[0] = (e[0]->next_index < n_next_nodes) ?
		e[0]->next_index : _next[0];

	      hits++;

	      b[0]->error =
		(_next[0] == ACL_NEXT_INDEX_DENY) ? error_deny : error_none;

	      if (!is_output)
		{
		  if (e[0]->action == CLASSIFY_ACTION_SET_IP4_FIB_INDEX ||
		      e[0]->action == CLASSIFY_ACTION_SET_IP6_FIB_INDEX)
		    vnet_buffer (b[0])->sw_if_index[VLIB_TX] = e[0]->metadata;
		  else if (e[0]->action == CLASSIFY_ACTION_SET_METADATA)
		    {
		      vnet_buffer (b[0])->ip.adj_index[VLIB_TX] =
			e[0]->metadata;
		      /* For source check in case we skip the lookup node */
		      ip_lookup_set_buffer_fib_index (fib_index_by_sw_if_index,
						      b[0]);
		    }
		}
	    }
	  else
	    {
	      while (1)
		{
		  table_index[0] = t[0]->next_table_index;
		  if (PREDICT_TRUE (table_index[0] != ~0))
		    t[0] = pool_elt_at_index (tables, table_index[0]);
		  else
		    {
		      _next[0] = (t[0]->miss_next_index < n_next_nodes) ?
			t[0]->miss_next_index : _next[0];

		      misses++;

		      b[0]->error = (_next[0] == ACL_NEXT_INDEX_DENY) ?
				      error_miss :
				      error_none;
		      break;
		    }

		  if (t[0]->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
		    h[0] =
		      (void *) vlib_buffer_get_current (b[0]) +
		      t[0]->current_data_offset;
		  else
		    h[0] = b[0]->data;

		  /* advance the match pointer so the matching happens on IP header */
		  if (is_output)
		    h[0] += vnet_buffer (b[0])->l2.l2_len;

		  hash[0] =
		    vnet_classify_hash_packet_inline (t[0], (u8 *) h[0]);
		  e[0] =
		    vnet_classify_find_entry_inline (t[0], (u8 *) h[0],
						     hash[0], now);
		  if (e[0])
		    {
		      vnet_buffer (b[0])->l2_classify.opaque_index
			= e[0]->opaque_index;
		      vlib_buffer_advance (b[0], e[0]->advance);
		      _next[0] = (e[0]->next_index < n_next_nodes) ?
			e[0]->next_index : _next[0];
		      hits++;
		      chain_hits++;

		      b[0]->error = (_next[0] == ACL_NEXT_INDEX_DENY) ?
				      error_deny :
				      error_none;

		      if (!is_output)
			{
			  if (e[0]->action ==
			      CLASSIFY_ACTION_SET_IP4_FIB_INDEX
			      || e[0]->action ==
			      CLASSIFY_ACTION_SET_IP6_FIB_INDEX)
			    vnet_buffer (b[0])->sw_if_index[VLIB_TX] =
			      e[0]->metadata;
			  else if (e[0]->action ==
				   CLASSIFY_ACTION_SET_METADATA)
			    {
			      vnet_buffer (b[0])->ip.adj_index[VLIB_TX] =
				e[0]->metadata;
			      /* For source check in case we skip the lookup
			       * node */
			      ip_lookup_set_buffer_fib_index (
				fib_index_by_sw_if_index, b[0]);
			    }
			}
		      break;
		    }
		}
	    }
	}

      if (PREDICT_TRUE (table_index[1] != ~0))
	{
	  e[1] =
	    vnet_classify_find_entry_inline (t[1], (u8 *) h[1], hash[1], now);
	  if (e[1])
	    {
	      vnet_buffer (b[1])->l2_classify.opaque_index
		= e[1]->opaque_index;
	      vlib_buffer_advance (b[1], e[1]->advance);

	      _next[1] = (e[1]->next_index < n_next_nodes) ?
		e[1]->next_index : _next[1];

	      hits++;

	      b[1]->error =
		(_next[1] == ACL_NEXT_INDEX_DENY) ? error_deny : error_none;

	      if (!is_output)
		{
		  if (e[1]->action == CLASSIFY_ACTION_SET_IP4_FIB_INDEX ||
		      e[1]->action == CLASSIFY_ACTION_SET_IP6_FIB_INDEX)
		    vnet_buffer (b[1])->sw_if_index[VLIB_TX] = e[1]->metadata;
		  else if (e[1]->action == CLASSIFY_ACTION_SET_METADATA)
		    {
		      vnet_buffer (b[1])->ip.adj_index[VLIB_TX] =
			e[1]->metadata;
		      /* For source check in case we skip the lookup node */
		      ip_lookup_set_buffer_fib_index (fib_index_by_sw_if_index,
						      b[1]);
		    }
		}
	    }
	  else
	    {
	      while (1)
		{
		  table_index[1] = t[1]->next_table_index;
		  if (PREDICT_TRUE (table_index[1] != ~0))
		    t[1] = pool_elt_at_index (tables, table_index[1]);
		  else
		    {
		      _next[1] = (t[1]->miss_next_index < n_next_nodes) ?
			t[1]->miss_next_index : _next[1];

		      misses++;

		      b[1]->error = (_next[1] == ACL_NEXT_INDEX_DENY) ?
				      error_miss :
				      error_none;
		      break;
		    }

		  if (t[1]->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
		    h[1] =
		      (void *) vlib_buffer_get_current (b[1]) +
		      t[1]->current_data_offset;
		  else
		    h[1] = b[1]->data;

		  /* advance the match pointer so the matching happens on IP header */
		  if (is_output)
		    h[1] += vnet_buffer (b[1])->l2.l2_len;

		  hash[1] =
		    vnet_classify_hash_packet_inline (t[1], (u8 *) h[1]);
		  e[1] =
		    vnet_classify_find_entry_inline (t[1], (u8 *) h[1],
						     hash[1], now);
		  if (e[1])
		    {
		      vnet_buffer (b[1])->l2_classify.opaque_index
			= e[1]->opaque_index;
		      vlib_buffer_advance (b[1], e[1]->advance);
		      _next[1] = (e[1]->next_index < n_next_nodes) ?
			e[1]->next_index : _next[1];
		      hits++;
		      chain_hits++;

		      b[1]->error = (_next[1] == ACL_NEXT_INDEX_DENY) ?
				      error_deny :
				      error_none;

		      if (!is_output)
			{
			  if (e[1]->action ==
			      CLASSIFY_ACTION_SET_IP4_FIB_INDEX
			      || e[1]->action ==
			      CLASSIFY_ACTION_SET_IP6_FIB_INDEX)
			    vnet_buffer (b[1])->sw_if_index[VLIB_TX] =
			      e[1]->metadata;
			  else if (e[1]->action ==
				   CLASSIFY_ACTION_SET_METADATA)
			    {
			      vnet_buffer (b[1])->ip.adj_index[VLIB_TX] =
				e[1]->metadata;
			      /* For source check in case we skip the lookup
			       * node */
			      ip_lookup_set_buffer_fib_index (
				fib_index_by_sw_if_index, b[1]);
			    }
			}
		      break;
		    }
		}
	    }
	}

      if (do_trace && b[0]->flags & VLIB_BUFFER_IS_TRACED)
	{
	  ip_in_out_acl_trace_t *_t =
	    vlib_add_trace (vm, node, b[0], sizeof (*_t));
	  _t->sw_if_index =
	    ~0 == way ? 0 : vnet_buffer (b[0])->sw_if_index[way];
	  _t->next_index = _next[0];
	  _t->table_index = table_index[0];
	  _t->offset = (e[0]
			&& t[0]) ? vnet_classify_get_offset (t[0], e[0]) : ~0;
	}

      if (do_trace && b[1]->flags & VLIB_BUFFER_IS_TRACED)
	{
	  ip_in_out_acl_trace_t *_t =
	    vlib_add_trace (vm, node, b[1], sizeof (*_t));
	  _t->sw_if_index =
	    ~0 == way ? 0 : vnet_buffer (b[1])->sw_if_index[way];
	  _t->next_index = _next[1];
	  _t->table_index = table_index[1];
	  _t->offset = (e[1]
			&& t[1]) ? vnet_classify_get_offset (t[1], e[1]) : ~0;
	}

      if ((_next[0] == ACL_NEXT_INDEX_DENY) && is_output)
	{
	  /* on output, for the drop node to work properly, go back to ip header */
	  vlib_buffer_advance (b[0], vnet_buffer (b[0])->l2.l2_len);
	}

      if ((_next[1] == ACL_NEXT_INDEX_DENY) && is_output)
	{
	  /* on output, for the drop node to work properly, go back to ip header */
	  vlib_buffer_advance (b[1], vnet_buffer (b[1])->l2.l2_len);
	}

      next[0] = _next[0];
      next[1] = _next[1];

      /* _next */
      next += 2;
      b += 2;
      n_left -= 2;
    }

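  /* single-loop tail: classify any leftover packets one at a time */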
  while (n_left > 0)
    {
      u8 *h0;
      u32 sw_if_index0;
      u32 table_index0;
      vnet_classify_table_t *t0 = 0;
      vnet_classify_entry_t *e0 = 0;
      u32 next0 = ACL_NEXT_INDEX_DENY;
      u32 hash0;

      sw_if_index0 = ~0 == way ? 0 : vnet_buffer (b[0])->sw_if_index[way];
      table_index0 = table_index_by_sw_if_index[sw_if_index0];

      t0 = pool_elt_at_index (tables, table_index0);

      if (t0->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
	h0 =
	  (void *) vlib_buffer_get_current (b[0]) + t0->current_data_offset;
      else
	h0 = b[0]->data;

      if (is_output)
	{
	  /* Save the rewrite length, since we are using the l2_classify struct */
	  vnet_buffer (b[0])->l2.l2_len =
	    vnet_buffer (b[0])->ip.save_rewrite_length;
	  /* advance the match pointer so the matching happens on IP header */
	  h0 += vnet_buffer (b[0])->l2.l2_len;
	}

      vnet_buffer (b[0])->l2_classify.hash =
	vnet_classify_hash_packet (t0, (u8 *) h0);

      vnet_buffer (b[0])->l2_classify.table_index = table_index0;
      vnet_buffer (b[0])->l2_classify.opaque_index = ~0;

      vnet_get_config_data (cm, &b[0]->current_config_index, &next0,
			    /* # bytes of config data */ 0);

      if (PREDICT_TRUE (table_index0 != ~0))
	{
	  hash0 = vnet_buffer (b[0])->l2_classify.hash;
	  t0 = pool_elt_at_index (tables, table_index0);

	  if (t0->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
	    h0 =
	      (void *) vlib_buffer_get_current (b[0]) +
	      t0->current_data_offset;
	  else
	    h0 = b[0]->data;

	  /* advance the match pointer so the matching happens on IP header */
	  if (is_output)
	    h0 += vnet_buffer (b[0])->l2.l2_len;

	  e0 = vnet_classify_find_entry_inline (t0, (u8 *) h0, hash0, now);
	  if (e0)
	    {
	      vnet_buffer (b[0])->l2_classify.opaque_index = e0->opaque_index;
	      vlib_buffer_advance (b[0], e0->advance);

	      next0 = (e0->next_index < n_next_nodes) ?
		e0->next_index : next0;

	      hits++;

	      b[0]->error =
		(next0 == ACL_NEXT_INDEX_DENY) ? error_deny : error_none;

	      if (!is_output)
		{
		  if (e0->action == CLASSIFY_ACTION_SET_IP4_FIB_INDEX ||
		      e0->action == CLASSIFY_ACTION_SET_IP6_FIB_INDEX)
		    vnet_buffer (b[0])->sw_if_index[VLIB_TX] = e0->metadata;
		  else if (e0->action == CLASSIFY_ACTION_SET_METADATA)
		    {
		      vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = e0->metadata;
		      /* For source check in case we skip the lookup node */
		      ip_lookup_set_buffer_fib_index (fib_index_by_sw_if_index,
						      b[0]);
		    }
		}
	    }
	  else
	    {
	      while (1)
		{
		  table_index0 = t0->next_table_index;
		  if (PREDICT_TRUE (table_index0 != ~0))
		    t0 = pool_elt_at_index (tables, table_index0);
		  else
		    {
		      next0 = (t0->miss_next_index < n_next_nodes) ?
			t0->miss_next_index : next0;

		      misses++;

		      b[0]->error = (next0 == ACL_NEXT_INDEX_DENY) ?
				      error_miss :
				      error_none;
		      break;
		    }

		  if (t0->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
		    h0 =
		      (void *) vlib_buffer_get_current (b[0]) +
		      t0->current_data_offset;
		  else
		    h0 = b[0]->data;

		  /* advance the match pointer so the matching happens on IP header */
		  if (is_output)
		    h0 += vnet_buffer (b[0])->l2.l2_len;

		  hash0 = vnet_classify_hash_packet_inline (t0, (u8 *) h0);
		  e0 = vnet_classify_find_entry_inline
		    (t0, (u8 *) h0, hash0, now);
		  if (e0)
		    {
		      vnet_buffer (b[0])->l2_classify.opaque_index
			= e0->opaque_index;
		      vlib_buffer_advance (b[0], e0->advance);
		      next0 = (e0->next_index < n_next_nodes) ?
			e0->next_index : next0;
		      hits++;

		      b[0]->error = (next0 == ACL_NEXT_INDEX_DENY) ?
				      error_deny :
				      error_none;

		      if (!is_output)
			{
			  if (e0->action ==
			      CLASSIFY_ACTION_SET_IP4_FIB_INDEX
			      || e0->action ==
			      CLASSIFY_ACTION_SET_IP6_FIB_INDEX)
			    vnet_buffer (b[0])->sw_if_index[VLIB_TX] =
			      e0->metadata;
			  else if (e0->action == CLASSIFY_ACTION_SET_METADATA)
			    {
			      vnet_buffer (b[0])->ip.adj_index[VLIB_TX] =
				e0->metadata;
			      /* For source check in case we skip the lookup
			       * node */
			      ip_lookup_set_buffer_fib_index (
				fib_index_by_sw_if_index, b[0]);
			    }
			}
		      break;
		    }
		}
	    }
	}

      if (do_trace && b[0]->flags & VLIB_BUFFER_IS_TRACED)
	{
	  ip_in_out_acl_trace_t *t =
	    vlib_add_trace (vm, node, b[0], sizeof (*t));
	  t->sw_if_index =
	    ~0 == way ? 0 : vnet_buffer (b[0])->sw_if_index[way];
	  t->next_index = next0;
	  t->table_index = table_index0;
	  t->offset = (e0 && t0) ? vnet_classify_get_offset (t0, e0) : ~0;
	}

      if ((next0 == ACL_NEXT_INDEX_DENY) && is_output)
	{
	  /* on output, for the drop node to work properly, go back to ip header */
	  vlib_buffer_advance (b[0], vnet_buffer (b[0])->l2.l2_len);
	}

      next[0] = next0;

      /* next */
      next++;
      b++;
      n_left--;
    }

  *hits__ = hits;
  *misses__ = misses;
  *chain_hits__ = chain_hits;
}

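/*
 * Resolve the classify table chain, config main and error codes for
 * the given table id and direction, run the classification loop and
 * bump the hit/miss/chain-hit counters before enqueueing the buffers.
 */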
static_always_inline uword
ip_in_out_acl_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
		      vlib_frame_t *frame, const in_out_acl_table_id_t tid,
		      u32 *fib_index_by_sw_if_index,
		      const vlib_node_registration_t *parent_error_node,
		      const u32 error_none_index, const u32 error_deny_index,
		      const u32 error_miss_index, const vlib_rx_or_tx_t way,
		      const int is_output)
{
  const in_out_acl_main_t *am = &in_out_acl_main;
  vnet_classify_table_t *tables = am->vnet_classify_main->tables;
  u32 *from = vlib_frame_vector_args (frame);
  const u32 *table_index_by_sw_if_index =
    am->classify_table_index_by_sw_if_index[is_output][tid];
  vnet_config_main_t *cm = am->vnet_config_main[is_output][tid];
  const vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, parent_error_node->index);
  const vlib_error_t error_none = error_node->errors[error_none_index];
  const vlib_error_t error_deny = error_node->errors[error_deny_index];
  const vlib_error_t error_miss = error_node->errors[error_miss_index];
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
  u16 nexts[VLIB_FRAME_SIZE];
  u32 hits, misses, chain_hits;

  vlib_get_buffers (vm, from, bufs, frame->n_vectors);

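  /* instantiate the inline twice so do_trace is a compile-time
   * constant and tracing costs nothing when tracing is off */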
#define ip_in_out_acl_inline_trace__(do_trace)                                \
  ip_in_out_acl_inline_trace (                                                \
    vm, node, frame, bufs, nexts, frame->n_vectors, &hits, &misses,           \
    &chain_hits, error_none, error_deny, error_miss, tables,                  \
    table_index_by_sw_if_index, fib_index_by_sw_if_index, cm, way, is_output, \
    do_trace)

  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
    ip_in_out_acl_inline_trace__ (1 /* do_trace */);
  else
    ip_in_out_acl_inline_trace__ (0 /* do_trace */);

  vlib_node_increment_counter (
    vm, node->node_index,
    is_output ? IP_OUTACL_ERROR_MISS : IP_INACL_ERROR_MISS, misses);
  vlib_node_increment_counter (
    vm, node->node_index, is_output ? IP_OUTACL_ERROR_HIT : IP_INACL_ERROR_HIT,
    hits);
  vlib_node_increment_counter (vm, node->node_index,
			       is_output ? IP_OUTACL_ERROR_CHAIN_HIT :
					   IP_INACL_ERROR_CHAIN_HIT,
			       chain_hits);

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
  return frame->n_vectors;
}

VLIB_NODE_FN (ip4_inacl_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  return ip_in_out_acl_inline (
    vm, node, frame, IN_OUT_ACL_TABLE_IP4, ip4_main.fib_index_by_sw_if_index,
    &ip4_input_node, IP4_ERROR_NONE, IP4_ERROR_INACL_SESSION_DENY,
    IP4_ERROR_INACL_TABLE_MISS, VLIB_RX, 0 /* is_output */);
}

VLIB_NODE_FN (ip4_punt_acl_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  return ip_in_out_acl_inline (
    vm, node, frame, IN_OUT_ACL_TABLE_IP4_PUNT,
    ip4_main.fib_index_by_sw_if_index, &ip4_input_node, IP4_ERROR_NONE,
    IP4_ERROR_INACL_SESSION_DENY, IP4_ERROR_INACL_TABLE_MISS, ~0 /* way */,
    0 /* is_output */);
}

VLIB_NODE_FN (ip4_outacl_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  return ip_in_out_acl_inline (
    vm, node, frame, IN_OUT_ACL_TABLE_IP4, NULL, &ip4_input_node,
    IP4_ERROR_NONE, IP4_ERROR_INACL_SESSION_DENY, IP4_ERROR_INACL_TABLE_MISS,
    VLIB_TX, 1 /* is_output */);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_inacl_node) = {
  .name = "ip4-inacl",
  .vector_size = sizeof (u32),
  .format_trace = format_ip_inacl_trace,
  .n_errors = ARRAY_LEN(ip_inacl_error_strings),
  .error_strings = ip_inacl_error_strings,

  .n_next_nodes = ACL_NEXT_INDEX_N_NEXT,
  .next_nodes = {
    [ACL_NEXT_INDEX_DENY] = "ip4-drop",
  },
};

VLIB_REGISTER_NODE (ip4_punt_acl_node) = {
  .name = "ip4-punt-acl",
  .vector_size = sizeof (u32),
  .format_trace = format_ip_inacl_trace,
  .n_errors = ARRAY_LEN(ip_inacl_error_strings),
  .error_strings = ip_inacl_error_strings,

  .n_next_nodes = ACL_NEXT_INDEX_N_NEXT,
  .next_nodes = {
    [ACL_NEXT_INDEX_DENY] = "ip4-drop",
  },
};

VLIB_REGISTER_NODE (ip4_outacl_node) = {
  .name = "ip4-outacl",
  .vector_size = sizeof (u32),
  .format_trace = format_ip_outacl_trace,
  .n_errors = ARRAY_LEN(ip_outacl_error_strings),
  .error_strings = ip_outacl_error_strings,

  .n_next_nodes = ACL_NEXT_INDEX_N_NEXT,
  .next_nodes = {
    [ACL_NEXT_INDEX_DENY] = "ip4-drop",
  },
};
/* *INDENT-ON* */

VNET_FEATURE_INIT (ip4_punt_acl_feature) = {
  .arc_name = "ip4-punt",
  .node_name = "ip4-punt-acl",
  .runs_after = VNET_FEATURES ("ip4-punt-policer"),
};

VLIB_NODE_FN (ip6_inacl_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
			       vlib_frame_t * frame)
{
  return ip_in_out_acl_inline (
    vm, node, frame, IN_OUT_ACL_TABLE_IP6, ip6_main.fib_index_by_sw_if_index,
    &ip6_input_node, IP6_ERROR_NONE, IP6_ERROR_INACL_SESSION_DENY,
    IP6_ERROR_INACL_TABLE_MISS, VLIB_RX, 0 /* is_output */);
}

VLIB_NODE_FN (ip6_punt_acl_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  return ip_in_out_acl_inline (
    vm, node, frame, IN_OUT_ACL_TABLE_IP6_PUNT,
    ip6_main.fib_index_by_sw_if_index, &ip6_input_node, IP6_ERROR_NONE,
    IP6_ERROR_INACL_SESSION_DENY, IP6_ERROR_INACL_TABLE_MISS, ~0 /* way */,
    0 /* is_output */);
}

VLIB_NODE_FN (ip6_outacl_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
				vlib_frame_t * frame)
{
  return ip_in_out_acl_inline (
    vm, node, frame, IN_OUT_ACL_TABLE_IP6, NULL, &ip6_input_node,
    IP6_ERROR_NONE, IP6_ERROR_INACL_SESSION_DENY, IP6_ERROR_INACL_TABLE_MISS,
    VLIB_TX, 1 /* is_output */);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip6_inacl_node) = {
  .name = "ip6-inacl",
  .vector_size = sizeof (u32),
  .format_trace = format_ip_inacl_trace,
  .n_errors = ARRAY_LEN(ip_inacl_error_strings),
  .error_strings = ip_inacl_error_strings,

  .n_next_nodes = ACL_NEXT_INDEX_N_NEXT,
  .next_nodes = {
    [ACL_NEXT_INDEX_DENY] = "ip6-drop",
  },
};

VLIB_REGISTER_NODE (ip6_punt_acl_node) = {
  .name = "ip6-punt-acl",
  .vector_size = sizeof (u32),
  .format_trace = format_ip_inacl_trace,
  .n_errors = ARRAY_LEN(ip_inacl_error_strings),
  .error_strings = ip_inacl_error_strings,

  .n_next_nodes = ACL_NEXT_INDEX_N_NEXT,
  .next_nodes = {
    [ACL_NEXT_INDEX_DENY] = "ip6-drop",
  },
};

VLIB_REGISTER_NODE (ip6_outacl_node) = {
  .name = "ip6-outacl",
  .vector_size = sizeof (u32),
  .format_trace = format_ip_outacl_trace,
  .n_errors = ARRAY_LEN(ip_outacl_error_strings),
  .error_strings = ip_outacl_error_strings,

  .n_next_nodes = ACL_NEXT_INDEX_N_NEXT,
  .next_nodes = {
    [ACL_NEXT_INDEX_DENY] = "ip6-drop",
  },
};
/* *INDENT-ON* */

VNET_FEATURE_INIT (ip6_punt_acl_feature) = {
  .arc_name = "ip6-punt",
  .node_name = "ip6-punt-acl",
  .runs_after = VNET_FEATURES ("ip6-punt-policer"),
};

#ifndef CLIB_MARCH_VARIANT
static clib_error_t *
ip_in_out_acl_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (ip_in_out_acl_init);
#endif /* CLIB_MARCH_VARIANT */


/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */