/* * map.c : MAP support * * Copyright (c) 2015 Cisco and/or its affiliates. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #include #include #include "map.h" map_main_t map_main; /* * This code supports the following MAP modes: * * Algorithmic Shared IPv4 address (ea_bits_len > 0): * ea_bits_len + ip4_prefix > 32 * psid_length > 0, ip6_prefix < 64, ip4_prefix <= 32 * Algorithmic Full IPv4 address (ea_bits_len > 0): * ea_bits_len + ip4_prefix = 32 * psid_length = 0, ip6_prefix < 64, ip4_prefix <= 32 * Algorithmic IPv4 prefix (ea_bits_len > 0): * ea_bits_len + ip4_prefix < 32 * psid_length = 0, ip6_prefix < 64, ip4_prefix <= 32 * * Independent Shared IPv4 address (ea_bits_len = 0): * ip4_prefix = 32 * psid_length > 0 * Rule IPv6 address = 128, Rule PSID Set * Independent Full IPv4 address (ea_bits_len = 0): * ip4_prefix = 32 * psid_length = 0, ip6_prefix = 128 * Independent IPv4 prefix (ea_bits_len = 0): * ip4_prefix < 32 * psid_length = 0, ip6_prefix = 128 * */ /* * This code supports MAP-T: * * With DMR prefix length equal to 96. 
* */ int map_create_domain (ip4_address_t * ip4_prefix, u8 ip4_prefix_len, ip6_address_t * ip6_prefix, u8 ip6_prefix_len, ip6_address_t * ip6_src, u8 ip6_src_len, u8 ea_bits_len, u8 psid_offset, u8 psid_length, u32 * map_domain_index, u16 mtu, u8 flags) { u8 suffix_len, suffix_shift; map_main_t *mm = &map_main; dpo_id_t dpo_v4 = DPO_INVALID; dpo_id_t dpo_v6 = DPO_INVALID; map_domain_t *d; /* Sanity check on the src prefix length */ if (flags & MAP_DOMAIN_TRANSLATION) { if (ip6_src_len != 96) { clib_warning ("MAP-T only supports ip6_src_len = 96 for now."); return -1; } if ((flags & MAP_DOMAIN_RFC6052) && ip6_prefix_len != 96) { clib_warning ("RFC6052 translation only supports ip6_prefix_len = " "96 for now"); return -1; } } else { if (ip6_src_len != 128) { clib_warning ("MAP-E requires a BR address, not a prefix (ip6_src_len should " "be 128)."); return -1; } } /* How many, and which bits to grab from the IPv4 DA */ if (ip4_prefix_len + ea_bits_len < 32) { if (!(flags & MAP_DOMAIN_TRANSLATION)) flags |= MAP_DOMAIN_PREFIX; suffix_shift = 32 - ip4_prefix_len - ea_bits_len; suffix_len = ea_bits_len; } else { suffix_shift = 0; suffix_len = 32 - ip4_prefix_len; } /* EA bits must be within the first 64 bits */ if (ea_bits_len > 0 && ((ip6_prefix_len + ea_bits_len) > 64 || ip6_prefix_len + suffix_len + psid_length > 64)) { clib_warning ("Embedded Address bits must be within the first 64 bits of " "the IPv6 prefix"); return -1; } if (mm->is_ce && !(flags & MAP_DOMAIN_TRANSLATION)) { clib_warning ("MAP-E CE is not supported yet"); return -1; } /* Get domain index */ pool_get_aligned (mm->domains, d, CLIB_CACHE_LINE_BYTES); memset (d, 0, sizeof (*d)); *map_domain_index = d - mm->domains; /* Init domain struct */ d->ip4_prefix.as_u32 = ip4_prefix->as_u32; d->ip4_prefix_len = ip4_prefix_len; d->ip6_prefix = *ip6_prefix; d->ip6_prefix_len = ip6_prefix_len; d->ip6_src = *ip6_src; d->ip6_src_len = ip6_src_len; d->ea_bits_len = ea_bits_len; d->psid_offset = psid_offset; 
d->psid_length = psid_length; d->mtu = mtu; d->flags = flags; d->suffix_shift = suffix_shift; d->suffix_mask = (1 << suffix_len) - 1; d->psid_shift = 16 - psid_length - psid_offset; d->psid_mask = (1 << d->psid_length) - 1; d->ea_shift = 64 - ip6_prefix_len - suffix_len - d->psid_length; /* MAP data-plane object */ if (d->flags & MAP_DOMAIN_TRANSLATION) map_t_dpo_create (DPO_PROTO_IP4, *map_domain_index, &dpo_v4); else map_dpo_create (DPO_PROTO_IP4, *map_domain_index, &dpo_v4); /* Create ip4 route */ u8 ip4_pfx_len; ip4_address_t ip4_pfx; if (mm->is_ce) { ip4_pfx_len = 0; ip4_pfx.as_u32 = 0; } else { ip4_pfx_len = d->ip4_prefix_len; ip4_pfx = d->ip4_prefix; } fib_prefix_t pfx = { .fp_proto = FIB_PROTOCOL_IP4, .fp_len = ip4_pfx_len, .fp_addr = { .ip4 = ip4_pfx, } , }; fib_table_entry_special_dpo_add (0, &pfx, FIB_SOURCE_MAP, FIB_ENTRY_FLAG_EXCLUSIVE, &dpo_v4); dpo_reset (&dpo_v4); /* * construct a DPO to use the v6 domain */ if (d->flags & MAP_DOMAIN_TRANSLATION) map_t_dpo_create (DPO_PROTO_IP6, *map_domain_index, &dpo_v6); else map_dpo_create (DPO_PROTO_IP6, *map_domain_index, &dpo_v6); /* * Multiple MAP domains may share same source IPv6 TEP. Which is just dandy. * We are not tracking the sharing. So a v4 lookup to find the correct * domain post decap/trnaslate is always done * * Create ip6 route. This is a reference counted add. If the prefix * already exists and is MAP sourced, it is now MAP source n+1 times * and will need to be removed n+1 times. 
*/ u8 ip6_pfx_len; ip6_address_t ip6_pfx; if (mm->is_ce) { ip6_pfx_len = d->ip6_prefix_len; ip6_pfx = d->ip6_prefix; } else { ip6_pfx_len = d->ip6_src_len; ip6_pfx = d->ip6_src; } fib_prefix_t pfx6 = { .fp_proto = FIB_PROTOCOL_IP6, .fp_len = ip6_pfx_len, .fp_addr.ip6 = ip6_pfx, }; fib_table_entry_special_dpo_add (0, &pfx6, FIB_SOURCE_MAP, FIB_ENTRY_FLAG_EXCLUSIVE, &dpo_v6); dpo_reset (&dpo_v6); /* Validate packet/byte counters */ map_domain_counter_lock (mm); int i; for (i = 0; i < vec_len (mm->simple_domain_counters); i++) { vlib_validate_simple_counter (&mm->simple_domain_counters[i], *map_domain_index); vlib_zero_simple_counter (&mm->simple_domain_counters[i], *map_domain_index); } for (i = 0; i < vec_len (mm->domain_counters); i++) { vlib_validate_combined_counter (&mm->domain_counters[i], *map_domain_index); vlib_zero_combined_counter (&mm->domain_counters[i], *map_domain_index); } map_domain_counter_unlock (mm); return 0; } /* * map_delete_domain */ int map_delete_domain (u32 map_domain_index) { map_main_t *mm = &map_main; map_domain_t *d; if (pool_is_free_index (mm->domains, map_domain_index)) { clib_warning ("MAP domain delete: domain does not exist: %d", map_domain_index); return -1; } d = pool_elt_at_index (mm->domains, map_domain_index); fib_prefix_t pfx = { .fp_proto = FIB_PROTOCOL_IP4, .fp_len = d->ip4_prefix_len, .fp_addr = { .ip4 = d->ip4_prefix, } , }; fib_table_entry_special_remove (0, &pfx, FIB_SOURCE_MAP); fib_prefix_t pfx6 = { .fp_proto = FIB_PROTOCOL_IP6, .fp_len = d->ip6_src_len, .fp_addr = { .ip6 = d->ip6_src, } , }; fib_table_entry_special_remove (0, &pfx6, FIB_SOURCE_MAP); /* Deleting rules */ if (d->rules) clib_mem_free (d->rules); pool_put (mm->domains, d); return 0; } int map_add_del_psid (u32 map_domain_index, u16 psid, ip6_address_t * tep, u8 is_add) { map_domain_t *d; map_main_t *mm = &map_main; if (pool_is_free_index (mm->domains, map_domain_index)) { clib_warning ("MAP rule: domain does not exist: %d", map_domain_index); return -1; 
} d = pool_elt_at_index (mm->domains, map_domain_index); /* Rules are only used in 1:1 independent case */ if (d->ea_bits_len > 0) return (-1); if (!d->rules) { u32 l = (0x1 << d->psid_length) * sizeof (ip6_address_t); d->rules = clib_mem_alloc_aligned (l, CLIB_CACHE_LINE_BYTES); if (!d->rules) return -1; memset (d->rules, 0, l); } if (psid >= (0x1 << d->psid_length)) { clib_warning ("MAP rule: PSID outside bounds: %d [%d]", psid, 0x1 << d->psid_length); return -1; } if (is_add) { d->rules[psid] = *tep; } else { memset (&d->rules[psid], 0, sizeof (ip6_address_t)); } return 0; } #ifdef MAP_SKIP_IP6_LOOKUP /** * Pre-resolvd per-protocol global next-hops */ map_main_pre_resolved_t pre_resolved[FIB_PROTOCOL_MAX]; static void map_pre_resolve_init (map_main_pre_resolved_t * pr) { pr->fei = FIB_NODE_INDEX_INVALID; fib_node_init (&pr->node, FIB_NODE_TYPE_MAP_E); } static u8 * format_map_pre_resolve (u8 * s, va_list * ap) { map_main_pre_resolved_t *pr = va_arg (*ap, map_main_pre_resolved_t *); if (FIB_NODE_INDEX_INVALID != pr->fei) { fib_prefix_t pfx; fib_entry_get_prefix (pr->fei, &pfx); return (format (s, "%U (%u)", format_ip46_address, &pfx.fp_addr, IP46_TYPE_ANY, pr->dpo.dpoi_index)); } else { return (format (s, "un-set")); } } /** * Function definition to inform the FIB node that its last lock has gone. */ static void map_last_lock_gone (fib_node_t * node) { /* * The MAP is a root of the graph. As such * it never has children and thus is never locked. 
*/ ASSERT (0); } static map_main_pre_resolved_t * map_from_fib_node (fib_node_t * node) { ASSERT (FIB_NODE_TYPE_MAP_E == node->fn_type); return ((map_main_pre_resolved_t *) (((char *) node) - STRUCT_OFFSET_OF (map_main_pre_resolved_t, node))); } static void map_stack (map_main_pre_resolved_t * pr) { const dpo_id_t *dpo; dpo = fib_entry_contribute_ip_forwarding (pr->fei); dpo_copy (&pr->dpo, dpo); } /** * Function definition to backwalk a FIB node */ static fib_node_back_walk_rc_t map_back_walk (fib_node_t * node, fib_node_back_walk_ctx_t * ctx) { map_stack (map_from_fib_node (node)); return (FIB_NODE_BACK_WALK_CONTINUE); } /** * Function definition to get a FIB node from its index */ static fib_node_t * map_fib_node_get (fib_node_index_t index) { return (&pre_resolved[index].node); } /* * Virtual function table registered by MPLS GRE tunnels * for participation in the FIB object graph. */ const static fib_node_vft_t map_vft = { .fnv_get = map_fib_node_get, .fnv_last_lock = map_last_lock_gone, .fnv_back_walk = map_back_walk, }; static void map_fib_resolve (map_main_pre_resolved_t * pr, fib_protocol_t proto, u8 len, const ip46_address_t * addr) { fib_prefix_t pfx = { .fp_proto = proto, .fp_len = len, .fp_addr = *addr, }; pr->fei = fib_table_entry_special_add (0, // default fib &pfx, FIB_SOURCE_RR, FIB_ENTRY_FLAG_NONE); pr->sibling = fib_entry_child_add (pr->fei, FIB_NODE_TYPE_MAP_E, proto); map_stack (pr); } static void map_fib_unresolve (map_main_pre_resolved_t * pr, fib_protocol_t proto, u8 len, const ip46_address_t * addr) { fib_prefix_t pfx = { .fp_proto = proto, .fp_len = len, .fp_addr = *addr, }; fib_entry_child_remove (pr->fei, pr->sibling); fib_table_entry_special_remove (0, // default fib &pfx, FIB_SOURCE_RR); dpo_reset (&pr->dpo); pr->fei = FIB_NODE_INDEX_INVALID; pr->sibling = FIB_NODE_INDEX_INVALID; } static void map_pre_resolve (ip4_address_t * ip4, ip6_address_t * ip6, int is_del) { if (ip6 && (ip6->as_u64[0] != 0 || ip6->as_u64[1] != 0)) { 
ip46_address_t addr = { .ip6 = *ip6, }; if (is_del) map_fib_unresolve (&pre_resolved[FIB_PROTOCOL_IP6], FIB_PROTOCOL_IP6, 128, &addr); else map_fib_resolve (&pre_resolved[FIB_PROTOCOL_IP6], FIB_PROTOCOL_IP6, 128, &addr); } if (ip4 && (ip4->as_u32 != 0)) { ip46_address_t addr = { .ip4 = *ip4, }; if (is_del) map_fib_unresolve (&pre_resolved[FIB_PROTOCOL_IP4], FIB_PROTOCOL_IP4, 32, &addr); else map_fib_resolve (&pre_resolved[FIB_PROTOCOL_IP4], FIB_PROTOCOL_IP4, 32, &addr); } } #endif static clib_error_t * map_security_check_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) { unformat_input_t _line_input, *line_input = &_line_input; map_main_t *mm = &map_main; clib_error_t *error = NULL; /* Get a line of input. */ if (!unformat_user (input, unformat_line_input, line_input)) return 0; while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) { if (unformat (line_input, "off")) mm->sec_check = false; else if (unformat (line_input, "on")) mm->sec_check = true; else { error = clib_error_return (0, "unknown input `%U'", format_unformat_error, line_input); goto done; } } done: unformat_free (line_input); return error; } static clib_error_t * map_security_check_frag_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) { unformat_input_t _line_input, *line_input = &_line_input; map_main_t *mm = &map_main; clib_error_t *error = NULL; /* Get a line of input. 
*/ if (!unformat_user (input, unformat_line_input, line_input)) return 0; while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) { if (unformat (line_input, "off")) mm->sec_check_frag = false; else if (unformat (line_input, "on")) mm->sec_check_frag = true; else { error = clib_error_return (0, "unknown input `%U'", format_unformat_error, line_input); goto done; } } done: unformat_free (line_input); return error; } static clib_error_t * map_add_domain_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) { unformat_input_t _line_input, *line_input = &_line_input; ip4_address_t ip4_prefix; ip6_address_t ip6_prefix; ip6_address_t ip6_src; u32 ip6_prefix_len = 0, ip4_prefix_len = 0, map_domain_index, ip6_src_len; u32 num_m_args = 0; /* Optional arguments */ u32 ea_bits_len = 0, psid_offset = 0, psid_length = 0; u32 mtu = 0; u8 flags = 0; ip6_src_len = 128; clib_error_t *error = NULL; /* Get a line of input. */ if (!unformat_user (input, unformat_line_input, line_input)) return 0; while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) { if (unformat (line_input, "ip4-pfx %U/%d", unformat_ip4_address, &ip4_prefix, &ip4_prefix_len)) num_m_args++; else if (unformat (line_input, "ip6-pfx %U/%d", unformat_ip6_address, &ip6_prefix, &ip6_prefix_len)) num_m_args++; else if (unformat (line_inp
/*
 * Copyright (c) 2016,2020 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <plugins/adl/adl.h>

/* Per-packet trace record captured by the adl-input node when tracing
 * is enabled; rendered by format_adl_input_trace. */
typedef struct {
  u32 next_index;   /* next-node index chosen for this packet */
  u32 sw_if_index;  /* RX software interface the packet arrived on */
} adl_input_trace_t;

/* packet trace format function */
/* Render an adl_input_trace_t as human-readable text for the packet
 * tracer.  va_arg order (vm, node, trace record) is fixed by the vlib
 * trace formatting convention. */
static u8 *
format_adl_input_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  adl_input_trace_t *tr = va_arg (*args, adl_input_trace_t *);

  return format (s, "ADL_INPUT: sw_if_index %d, next index %d",
		 tr->sw_if_index, tr->next_index);
}

/* Error/counter list for this node; each _(symbol, description) entry
 * expands into an enum value and a display string below. */
#define foreach_adl_input_error \
_(PROCESSED, "Allow/Deny packets processed")

/* Numeric counter indices generated from the list above. */
typedef enum {
#define _(sym,str) ADL_INPUT_ERROR_##sym,
  foreach_adl_input_error
#undef _
  ADL_INPUT_N_ERROR,
} adl_input_error_t;

/* Human-readable strings, index-aligned with adl_input_error_t. */
static char * adl_input_error_strings[] = {
#define _(sym,string) string,
  foreach_adl_input_error
#undef _
};

/**
 * @brief adl-input node dispatch function.
 *
 * For each frame: classifies the packet by ethertype (IP4 / IP6 /
 * everything else as VNET_ADL_DEFAULT), advances past the ethernet
 * header for IP packets, stores the head of the per-protocol,
 * per-interface config chain into the buffer's adl opaque data, and
 * lets vnet_get_config_data() select the next node.  Standard vlib
 * dual-loop / single-loop pattern with speculative enqueue.
 */
VLIB_NODE_FN (adl_input_node) (vlib_main_t * vm,
		  vlib_node_runtime_t * node,
		  vlib_frame_t * frame)
{
  u32 n_left_from, * from, * to_next;
  adl_feature_type_t next_index;
  adl_main_t *am = &adl_main;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  /* Speculate that this frame goes where the previous one went. */
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
			   to_next, n_left_to_next);

      /* Dual loop: two packets per iteration; requires >= 4 remaining
       * so the prefetch of from[2]/from[3] below is always valid. */
      while (n_left_from >= 4 && n_left_to_next >= 2)
	{
          u32 bi0, bi1;
	  vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          u32 sw_if_index0, sw_if_index1;
          ethernet_header_t * en0, * en1;
          adl_config_main_t * ccm0, * ccm1;
          u32 advance0, advance1;
          int proto0, proto1;

	  /* Prefetch next iteration. */
	  {
	    vlib_buffer_t * p2, * p3;

	    p2 = vlib_get_buffer (vm, from[2]);
	    p3 = vlib_get_buffer (vm, from[3]);

	    vlib_prefetch_buffer_header (p2, LOAD);
	    vlib_prefetch_buffer_header (p3, LOAD);

	    clib_prefetch_store (p2->data);
	    clib_prefetch_store (p3->data);
	  }

          /* speculatively enqueue b0 and b1 to the current next frame */
	  to_next[0] = bi0 = from[0];
	  to_next[1] = bi1 = from[1];
	  from += 2;
	  to_next += 2;
	  n_left_from -= 2;
	  n_left_to_next -= 2;

	  b0 = vlib_get_buffer (vm, bi0);
	  b1 = vlib_get_buffer (vm, bi1);

          en0 = vlib_buffer_get_current (b0);
          en1 = vlib_buffer_get_current (b1);

          sw_if_index0 = adl_buffer(b0)->sw_if_index[VLIB_RX];
          sw_if_index1 = adl_buffer(b1)->sw_if_index[VLIB_RX];

          /* Classify by ethertype; non-IP traffic stays
           * VNET_ADL_DEFAULT and keeps the ethernet header in place. */
          proto0 = VNET_ADL_DEFAULT;
          proto1 = VNET_ADL_DEFAULT;
          advance0 = 0;
          advance1 = 0;

          if (en0->type == clib_host_to_net_u16(ETHERNET_TYPE_IP4))
            {
              proto0 = VNET_ADL_IP4;
              advance0 = sizeof(ethernet_header_t);
            }
          else if (en0->type == clib_host_to_net_u16(ETHERNET_TYPE_IP6))
            {
              proto0 = VNET_ADL_IP6;
              advance0 = sizeof(ethernet_header_t);
            }

          if (en1->type == clib_host_to_net_u16(ETHERNET_TYPE_IP4))
            {
              proto1 = VNET_ADL_IP4;
              advance1 = sizeof(ethernet_header_t);
            }
          else if (en1->type == clib_host_to_net_u16(ETHERNET_TYPE_IP6))
            {
              proto1 = VNET_ADL_IP6;
              advance1 = sizeof(ethernet_header_t);
            }

	  /* Head of the per-protocol, per-interface config chain. */
	  ccm0 = am->adl_config_mains + proto0;
	  ccm1 = am->adl_config_mains + proto1;
          adl_buffer(b0)->adl.current_config_index =
            ccm0->config_index_by_sw_if_index [sw_if_index0];

          adl_buffer(b1)->adl.current_config_index =
            ccm1->config_index_by_sw_if_index [sw_if_index1];

          /* Strip the ethernet header for IP packets (advance may be 0). */
          vlib_buffer_advance (b0, advance0);
          vlib_buffer_advance (b1, advance1);

          /* next0/next1 are supplied by the config chain, not computed
           * here. */
          vnet_get_config_data (&ccm0->config_main,
                                &adl_buffer(b0)->adl.current_config_index,
                                &next0, 0 /* bytes of config data */);

          vnet_get_config_data (&ccm1->config_main,
                                &adl_buffer(b1)->adl.current_config_index,
                                &next1, 0 /* bytes of config data */);

          if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
                            && (b0->flags & VLIB_BUFFER_IS_TRACED)))
            {
              adl_input_trace_t *t =
                 vlib_add_trace (vm, node, b0, sizeof (*t));
              t->sw_if_index = sw_if_index0;
              t->next_index = next0;
            }

          if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
                            && (b1->flags & VLIB_BUFFER_IS_TRACED)))
            {
              adl_input_trace_t *t =
                 vlib_add_trace (vm, node, b1, sizeof (*t));
              t->sw_if_index = sw_if_index1;
              t->next_index = next1;
            }
          /* verify speculative enqueues, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      /* Single loop: drain the remaining packets one at a time. */
      while (n_left_from > 0 && n_left_to_next > 0)
	{
          u32 bi0;
	  vlib_buffer_t * b0;
          u32 next0;
          u32 sw_if_index0;
          ethernet_header_t *en0;
          adl_config_main_t *ccm0;
          u32 advance0;
          int proto0;

          /* speculatively enqueue b0 to the current next frame */
	  bi0 = from[0];
	  to_next[0] = bi0;
	  from += 1;
	  to_next += 1;
	  n_left_from -= 1;
	  n_left_to_next -= 1;

	  b0 = vlib_get_buffer (vm, bi0);

          /*
           * Direct from the driver, we should be at offset 0
           * aka at &b0->data[0]
           */
          ASSERT (b0->current_data == 0);

          en0 = vlib_buffer_get_current (b0);

          sw_if_index0 = adl_buffer(b0)->sw_if_index[VLIB_RX];

          /* Same classification as the dual loop above. */
          proto0 = VNET_ADL_DEFAULT;
          advance0 = 0;

          if (en0->type == clib_host_to_net_u16(ETHERNET_TYPE_IP4))
            {
              proto0 = VNET_ADL_IP4;
              advance0 = sizeof(ethernet_header_t);
            }
          else if (en0->type == clib_host_to_net_u16(ETHERNET_TYPE_IP6))
            {
              proto0 = VNET_ADL_IP6;
              advance0 = sizeof(ethernet_header_t);
            }

	  ccm0 = am->adl_config_mains + proto0;
          adl_buffer(b0)->adl.current_config_index =
            ccm0->config_index_by_sw_if_index [sw_if_index0];

          vlib_buffer_advance (b0, advance0);

          vnet_get_config_data (&ccm0->config_main,
                                &adl_buffer(b0)->adl.current_config_index,
                                &next0, 0 /* bytes of config data */);

          if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
                            && (b0->flags & VLIB_BUFFER_IS_TRACED)))
            {
              adl_input_trace_t *t =
                 vlib_add_trace (vm, node, b0, sizeof (*t));
              t->sw_if_index = sw_if_index0;
              t->next_index = next0;
            }

          /* verify speculative enqueue, maybe switch current next frame */
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, next0);
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  /* Every packet in the frame counts as processed, whichever next node
   * it was sent to. */
  vlib_node_increment_counter (vm, adl_input_node.index,
                               ADL_INPUT_ERROR_PROCESSED, frame->n_vectors);
  return frame->n_vectors;
}

/* adl-input node registration.  next_nodes maps the adl_feature_type_t
 * dispositions to the per-protocol allowlist checkers, the normal
 * ip4/ip6/ethernet input nodes, or error-drop. */
VLIB_REGISTER_NODE (adl_input_node) = {
  .name = "adl-input",
  .vector_size = sizeof (u32),
  .format_trace = format_adl_input_trace,
  .unformat_buffer = unformat_ethernet_header,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(adl_input_error_strings),
  .error_strings = adl_input_error_strings,

  .n_next_nodes = ADL_RX_N_FEATURES,

  /* edit / add dispositions here */
  .next_nodes = {
    [IP4_RX_ADL_ALLOWLIST] = "ip4-adl-allowlist",
    [IP6_RX_ADL_ALLOWLIST] = "ip6-adl-allowlist",
    [DEFAULT_RX_ADL_ALLOWLIST] = "default-adl-allowlist",
    [IP4_RX_ADL_INPUT] = "ip4-input",
    [IP6_RX_ADL_INPUT] = "ip6-input",
    [DEFAULT_RX_ADL_INPUT] = "ethernet-input",
    [RX_ADL_DROP] = "error-drop",
  },
};

/* Stub graph nodes.  Each _(name, fn-prefix) entry below registers a
 * placeholder node so that the names referenced from
 * adl_input_node.next_nodes (e.g. "default-adl-allowlist") resolve even
 * when no real implementation is present.  The stub dispatch function
 * is not expected to run: it logs a BUG warning and returns 0 (no
 * packets processed). */
#define foreach_adl_stub                        \
_(default-adl-allowlist, default_adl_allowlist)

#define _(n,f)                                  \
                                                \
static uword                                    \
f##_node_fn (vlib_main_t * vm,                  \
             vlib_node_runtime_t * node,        \
             vlib_frame_t * frame)              \
{                                               \
  clib_warning ("BUG: stub function called");   \
  return 0;                                     \
}                                               \
                                                \
VLIB_REGISTER_NODE (f##_input_node) = {         \
  .function = f##_node_fn,                      \
  .name = #n,                                   \
  .vector_size = sizeof (u32),                  \
  .type = VLIB_NODE_TYPE_INTERNAL,              \
                                                \
  .n_errors = 0,                                \
  .error_strings = 0,                           \
                                                \
  .n_next_nodes = 0,                            \
};

/* Instantiate one stub node per list entry. */
foreach_adl_stub;