/*
 * NOTE(review): this region is an extraction-mangled copy of
 * vnet/ip/ip_api.c: all newlines were collapsed into a handful of very
 * long lines and the argument of every #include directive was lost
 * (presumably the <...> text was eaten as markup), so the text below is
 * NOT compilable as-is.  The last definition (ip_route_add_del_t_handler)
 * is truncated mid-statement.  Code bytes are left untouched; only
 * comment-only lines are added between the original lines.  TODO: restore
 * this region from the upstream VPP source before building.
 */
/* *------------------------------------------------------------------ * ip_api.c - vnet ip api * * Copyright (c) 2016 Cisco and/or its affiliates. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *------------------------------------------------------------------ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define vl_typedefs /* define message structures */ #include #undef vl_typedefs #define vl_endianfun /* define message structures */ #include #undef vl_endianfun /* instantiate all the print functions we know about */ #define vl_print(handle, ...) 
vlib_cli_output (handle, __VA_ARGS__) #define vl_printfun #include #undef vl_printfun #include #include #define foreach_ip_api_msg \ _ (SW_INTERFACE_IP6_ENABLE_DISABLE, sw_interface_ip6_enable_disable) \ _ (IP_TABLE_DUMP, ip_table_dump) \ _ (IP_ROUTE_DUMP, ip_route_dump) \ _ (IP_MTABLE_DUMP, ip_mtable_dump) \ _ (IP_MROUTE_DUMP, ip_mroute_dump) \ _ (IP_MROUTE_ADD_DEL, ip_mroute_add_del) \ _ (MFIB_SIGNAL_DUMP, mfib_signal_dump) \ _ (IP_ADDRESS_DUMP, ip_address_dump) \ _ (IP_UNNUMBERED_DUMP, ip_unnumbered_dump) \ _ (IP_DUMP, ip_dump) \ _ (IP_TABLE_REPLACE_BEGIN, ip_table_replace_begin) \ _ (IP_TABLE_REPLACE_END, ip_table_replace_end) \ _ (IP_TABLE_FLUSH, ip_table_flush) \ _ (IP_ROUTE_ADD_DEL, ip_route_add_del) \ _ (IP_ROUTE_LOOKUP, ip_route_lookup) \ _ (IP_TABLE_ADD_DEL, ip_table_add_del) \ _ (IP_PUNT_POLICE, ip_punt_police) \ _ (IP_PUNT_REDIRECT, ip_punt_redirect) \ _ (SET_IP_FLOW_HASH, set_ip_flow_hash) \ _ (IP_CONTAINER_PROXY_ADD_DEL, ip_container_proxy_add_del) \ _ (IP_CONTAINER_PROXY_DUMP, ip_container_proxy_dump) \ _ (IOAM_ENABLE, ioam_enable) \ _ (IOAM_DISABLE, ioam_disable) \ _ (IP_SOURCE_AND_PORT_RANGE_CHECK_ADD_DEL, \ ip_source_and_port_range_check_add_del) \ _ (IP_SOURCE_AND_PORT_RANGE_CHECK_INTERFACE_ADD_DEL, \ ip_source_and_port_range_check_interface_add_del) \ _ (SW_INTERFACE_IP6_SET_LINK_LOCAL_ADDRESS, \ sw_interface_ip6_set_link_local_address) \ _ (SW_INTERFACE_IP6_GET_LINK_LOCAL_ADDRESS, \ sw_interface_ip6_get_link_local_address) \ _ (IP_REASSEMBLY_SET, ip_reassembly_set) \ _ (IP_REASSEMBLY_GET, ip_reassembly_get) \ _ (IP_REASSEMBLY_ENABLE_DISABLE, ip_reassembly_enable_disable) \ _ (IP_PUNT_REDIRECT_DUMP, ip_punt_redirect_dump) static void vl_api_sw_interface_ip6_enable_disable_t_handler (vl_api_sw_interface_ip6_enable_disable_t * mp) { vl_api_sw_interface_ip6_enable_disable_reply_t *rmp; int rv = 0; VALIDATE_SW_IF_INDEX (mp); rv = ((mp->enable == 1) ? 
/* end of ip6 enable/disable handler; send_ip_table_details; ip_table_dump handler; fib dump walk ctx + callback; start of send_ip_route_details */
ip6_link_enable (ntohl (mp->sw_if_index), NULL) : ip6_link_disable (ntohl (mp->sw_if_index))); BAD_SW_IF_INDEX_LABEL; REPLY_MACRO (VL_API_SW_INTERFACE_IP6_ENABLE_DISABLE_REPLY); } static void send_ip_table_details (vpe_api_main_t * am, vl_api_registration_t * reg, u32 context, const fib_table_t * table) { vl_api_ip_table_details_t *mp; mp = vl_msg_api_alloc (sizeof (*mp)); if (!mp) return; clib_memset (mp, 0, sizeof (*mp)); mp->_vl_msg_id = ntohs (VL_API_IP_TABLE_DETAILS); mp->context = context; mp->table.is_ip6 = (table->ft_proto == FIB_PROTOCOL_IP6); mp->table.table_id = htonl (table->ft_table_id); memcpy (mp->table.name, table->ft_desc, clib_min (vec_len (table->ft_desc), sizeof (mp->table.name))); vl_api_send_msg (reg, (u8 *) mp); } static void vl_api_ip_table_dump_t_handler (vl_api_ip_table_dump_t * mp) { vpe_api_main_t *am = &vpe_api_main; vl_api_registration_t *reg; fib_table_t *fib_table; reg = vl_api_client_index_to_registration (mp->client_index); if (!reg) return; /* *INDENT-OFF* */ pool_foreach (fib_table, ip4_main.fibs) { send_ip_table_details(am, reg, mp->context, fib_table); } pool_foreach (fib_table, ip6_main.fibs) { /* don't send link locals */ if (fib_table->ft_flags & FIB_TABLE_FLAG_IP6_LL) continue; send_ip_table_details(am, reg, mp->context, fib_table); } /* *INDENT-ON* */ } typedef struct vl_api_ip_fib_dump_walk_ctx_t_ { fib_node_index_t *feis; } vl_api_ip_fib_dump_walk_ctx_t; static fib_table_walk_rc_t vl_api_ip_fib_dump_walk (fib_node_index_t fei, void *arg) { vl_api_ip_fib_dump_walk_ctx_t *ctx = arg; vec_add1 (ctx->feis, fei); return (FIB_TABLE_WALK_CONTINUE); } static void send_ip_route_details (vpe_api_main_t * am, vl_api_registration_t * reg, u32 context, fib_node_index_t fib_entry_index) { fib_route_path_t *rpaths, *rpath; vl_api_ip_route_details_t *mp; const fib_prefix_t *pfx; vl_api_fib_path_t *fp; int path_count; rpaths = NULL; pfx = fib_entry_get_prefix (fib_entry_index); rpaths = fib_entry_encode (fib_entry_index); path_count = 
/* rest of send_ip_route_details; ip_route_dump handler begins */
vec_len (rpaths); mp = vl_msg_api_alloc (sizeof (*mp) + path_count * sizeof (*fp)); if (!mp) return; clib_memset (mp, 0, sizeof (*mp)); mp->_vl_msg_id = ntohs (VL_API_IP_ROUTE_DETAILS); mp->context = context; ip_prefix_encode (pfx, &mp->route.prefix); mp->route.table_id = htonl (fib_table_get_table_id (fib_entry_get_fib_index (fib_entry_index), pfx->fp_proto)); mp->route.n_paths = path_count; mp->route.stats_index = htonl (fib_table_entry_get_stats_index (fib_entry_get_fib_index (fib_entry_index), pfx)); fp = mp->route.paths; vec_foreach (rpath, rpaths) { fib_api_path_encode (rpath, fp); fp++; } vl_api_send_msg (reg, (u8 *) mp); vec_free (rpaths); } typedef struct apt_ip6_fib_show_ctx_t_ { fib_node_index_t *entries; } api_ip6_fib_show_ctx_t; static void vl_api_ip_route_dump_t_handler (vl_api_ip_route_dump_t * mp) { vpe_api_main_t *am = &vpe_api_main; fib_node_index_t *fib_entry_index; vl_api_registration_t *reg; fib_protocol_t fproto; u32 fib_index; reg = vl_api_client_index_to_registration (mp->client_index); if (!reg) return; vl_api_ip_fib_dump_walk_ctx_t ctx = { .feis = NULL, }; fproto = (mp->table.is_ip6 ? 
/* rest of ip_route_dump handler; mtable details/dump; mfib dump ctx + walk; start of send_ip_mroute_details */
FIB_PROTOCOL_IP6 : FIB_PROTOCOL_IP4); fib_index = fib_table_find (fproto, ntohl (mp->table.table_id)); if (INDEX_INVALID == fib_index) return; fib_table_walk (fib_index, fproto, vl_api_ip_fib_dump_walk, &ctx); vec_foreach (fib_entry_index, ctx.feis) { send_ip_route_details (am, reg, mp->context, *fib_entry_index); } vec_free (ctx.feis); } static void send_ip_mtable_details (vl_api_registration_t * reg, u32 context, const mfib_table_t * mfib_table) { vl_api_ip_mtable_details_t *mp; mp = vl_msg_api_alloc (sizeof (*mp)); if (!mp) return; memset (mp, 0, sizeof (*mp)); mp->_vl_msg_id = ntohs (VL_API_IP_MTABLE_DETAILS); mp->context = context; mp->table.table_id = htonl (mfib_table->mft_table_id); mp->table.is_ip6 = (FIB_PROTOCOL_IP6 == mfib_table->mft_proto); vl_api_send_msg (reg, (u8 *) mp); } static void vl_api_ip_mtable_dump_t_handler (vl_api_ip_mtable_dump_t * mp) { vl_api_registration_t *reg; mfib_table_t *mfib_table; reg = vl_api_client_index_to_registration (mp->client_index); if (!reg) return; /* *INDENT-OFF* */ pool_foreach (mfib_table, ip4_main.mfibs) { send_ip_mtable_details (reg, mp->context, mfib_table); } pool_foreach (mfib_table, ip6_main.mfibs) { send_ip_mtable_details (reg, mp->context, mfib_table); } /* *INDENT-ON* */ } typedef struct vl_api_ip_mfib_dump_ctx_t_ { fib_node_index_t *entries; } vl_api_ip_mfib_dump_ctx_t; static walk_rc_t mfib_route_dump_walk (fib_node_index_t fei, void *arg) { vl_api_ip_mfib_dump_ctx_t *ctx = arg; vec_add1 (ctx->entries, fei); return (WALK_CONTINUE); } static void send_ip_mroute_details (vpe_api_main_t * am, vl_api_registration_t * reg, u32 context, fib_node_index_t mfib_entry_index) { fib_route_path_t *rpaths, *rpath; vl_api_ip_mroute_details_t *mp; const mfib_prefix_t *pfx; vl_api_mfib_path_t *fp; u8 path_count; rpaths = NULL; pfx = mfib_entry_get_prefix (mfib_entry_index); rpaths = mfib_entry_encode (mfib_entry_index); path_count = vec_len (rpaths); mp = vl_msg_api_alloc (sizeof (*mp) + path_count * sizeof (*fp)); if 
/* rest of send_ip_mroute_details; ip_mroute_dump handler; punt police handler; start of punt redirect handler */
(!mp) return; clib_memset (mp, 0, sizeof (*mp)); mp->_vl_msg_id = ntohs (VL_API_IP_MROUTE_DETAILS); mp->context = context; ip_mprefix_encode (pfx, &mp->route.prefix); mp->route.table_id = htonl (mfib_table_get_table_id (mfib_entry_get_fib_index (mfib_entry_index), pfx->fp_proto)); mp->route.n_paths = path_count; fp = mp->route.paths; vec_foreach (rpath, rpaths) { mfib_api_path_encode (rpath, fp); fp++; } vl_api_send_msg (reg, (u8 *) mp); vec_free (rpaths); } static void vl_api_ip_mroute_dump_t_handler (vl_api_ip_mroute_dump_t * mp) { vpe_api_main_t *am = &vpe_api_main; vl_api_registration_t *reg; fib_node_index_t *mfeip; fib_protocol_t fproto; u32 fib_index; vl_api_ip_mfib_dump_ctx_t ctx = { .entries = NULL, }; reg = vl_api_client_index_to_registration (mp->client_index); if (!reg) return; fproto = fib_ip_proto (mp->table.is_ip6); fib_index = mfib_table_find (fproto, ntohl (mp->table.table_id)); if (INDEX_INVALID == fib_index) return; mfib_table_walk (fib_index, fproto, mfib_route_dump_walk, &ctx); vec_sort_with_function (ctx.entries, mfib_entry_cmp_for_sort); vec_foreach (mfeip, ctx.entries) { send_ip_mroute_details (am, reg, mp->context, *mfeip); } vec_free (ctx.entries); } static void vl_api_ip_punt_police_t_handler (vl_api_ip_punt_police_t * mp, vlib_main_t * vm) { vl_api_ip_punt_police_reply_t *rmp; int rv = 0; if (mp->is_ip6) ip6_punt_policer_add_del (mp->is_add, ntohl (mp->policer_index)); else ip4_punt_policer_add_del (mp->is_add, ntohl (mp->policer_index)); REPLY_MACRO (VL_API_IP_PUNT_POLICE_REPLY); } static void vl_api_ip_punt_redirect_t_handler (vl_api_ip_punt_redirect_t * mp, vlib_main_t * vm) { vl_api_ip_punt_redirect_reply_t *rmp; int rv = 0; ip46_type_t ipv; ip46_address_t nh; if (!vnet_sw_if_index_is_api_valid (ntohl (mp->punt.tx_sw_if_index))) goto bad_sw_if_index; ipv = ip_address_decode (&mp->punt.nh, &nh); if (mp->is_add) { if (ipv == IP46_TYPE_IP6) { ip6_punt_redirect_add (ntohl (mp->punt.rx_sw_if_index), ntohl (mp->punt.tx_sw_if_index), &nh); 
/* rest of punt redirect handler; call_elf_section_ip_table_callbacks; start of ip_table_delete */
} else if (ipv == IP46_TYPE_IP4) { ip4_punt_redirect_add (ntohl (mp->punt.rx_sw_if_index), ntohl (mp->punt.tx_sw_if_index), &nh); } } else { if (ipv == IP46_TYPE_IP6) { ip6_punt_redirect_del (ntohl (mp->punt.rx_sw_if_index)); } else if (ipv == IP46_TYPE_IP4) { ip4_punt_redirect_del (ntohl (mp->punt.rx_sw_if_index)); } } BAD_SW_IF_INDEX_LABEL; REPLY_MACRO (VL_API_IP_PUNT_REDIRECT_REPLY); } static clib_error_t * call_elf_section_ip_table_callbacks (vnet_main_t * vnm, u32 table_id, u32 flags, _vnet_ip_table_function_list_elt_t ** elts) { _vnet_ip_table_function_list_elt_t *elt; vnet_ip_table_function_priority_t prio; clib_error_t *error = 0; for (prio = VNET_IP_TABLE_FUNC_PRIORITY_LOW; prio <= VNET_IP_TABLE_FUNC_PRIORITY_HIGH; prio++) { elt = elts[prio]; while (elt) { error = elt->fp (vnm, table_id, flags); if (error) return error; elt = elt->next_ip_table_function; } } return error; } void ip_table_delete (fib_protocol_t fproto, u32 table_id, u8 is_api) { u32 fib_index, mfib_index; vnet_main_t *vnm = vnet_get_main (); /* * ignore action on the default table - this is always present * and cannot be added nor deleted from the API */ if (0 != table_id) { /* * The API holds only one lock on the table. * i.e. it can be added many times via the API but needs to be * deleted only once. * The FIB index for unicast and multicast is not necessarily the * same, since internal VPP systems (like LISP and SR) create * their own unicast tables. */ fib_index = fib_table_find (fproto, table_id); mfib_index = mfib_table_find (fproto, table_id); if ((~0 != fib_index) || (~0 != mfib_index)) call_elf_section_ip_table_callbacks (vnm, table_id, 0 /* is_add */ , vnm->ip_table_add_del_functions); if (~0 != fib_index) { fib_table_unlock (fib_index, fproto, (is_api ? FIB_SOURCE_API : FIB_SOURCE_CLI)); } if (~0 != mfib_index) { mfib_table_unlock (mfib_index, fproto, (is_api ? 
/* end of ip_table_delete; ip_table_add_del handler; ip_route_add_del_t_handler is TRUNCATED mid-statement below */
MFIB_SOURCE_API : MFIB_SOURCE_CLI)); } } } void vl_api_ip_table_add_del_t_handler (vl_api_ip_table_add_del_t * mp) { vl_api_ip_table_add_del_reply_t *rmp; fib_protocol_t fproto = (mp->table.is_ip6 ? FIB_PROTOCOL_IP6 : FIB_PROTOCOL_IP4); u32 table_id = ntohl (mp->table.table_id); int rv = 0; if (mp->is_add) { ip_table_create (fproto, table_id, 1, mp->table.name); } else { ip_table_delete (fproto, table_id, 1); } REPLY_MACRO (VL_API_IP_TABLE_ADD_DEL_REPLY); } static int ip_route_add_del_t_handler (vl_api_ip_route_add_del_t * mp, u32 * stats_index) { fib_route_path_t *rpaths = NULL, *rpath; fib_entry_flag_t entry_flags; vl_api_fib_path_t *apath; fib_prefix_t pfx; u32 fib_index; int rv, ii; entry_flags = FIB_ENTRY_FLAG_NONE; ip_prefix_decode (&mp->route.prefix, &pfx); rv = fib_api_table_id_decode (pfx.fp_proto, ntohl (mp->route.table_id), &fib_index); if (0 != rv) goto out; if (0 != mp->route.n_paths) vec_validate (rpaths, mp->route.n_paths - 1); for (ii = 0; ii < mp->route.n_paths; ii++) { apath = &mp->route.paths[ii]; rpath = &rpaths[ii]; rv = fib_api_path_decode (apath, rpath); if ((rpath->frp_flags & FIB_ROUTE_PATH_LOCAL) && (~0 == rpath->frp_sw_if_index)) entry_flags |= (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_LOCAL); if (
/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vppinfra/valloc.h>

/** @file
    @brief Simple first-fit virtual space allocator
*/

/** Add a chunk of memory to a virtual allocation arena
    @param vam - clib_valloc_main_t * pointer to the allocation arena
    @param template - clib_valloc_chunk_t * pointer to a template chunk which
    describes the virtual address range to add

    @note only the baseva and size members of the template chunk are
    significant.  It's perfectly OK for the new chunk to be discontinuous
    with previous chunks; the chunk fusion algorithm won't merge them.
 */

void
clib_valloc_add_chunk (clib_valloc_main_t * vam,
		       clib_valloc_chunk_t * template)
{
  clib_valloc_chunk_t *ch, *new_ch;
  u32 index;

  ASSERT (vam->flags & CLIB_VALLOC_INITIALIZED);

  clib_spinlock_lock_if_init (&vam->lock);

  /* Add at the beginning, or at the end... */
  index = vam->first_index;

  /*
   * Make sure we're not trying to add an overlapping chunk.
   * It's worth checking, because someone will eventually do that.
   *
   * Two half-open ranges [a, a+sa) and [b, b+sb) are disjoint iff
   * a+sa <= b || a >= b+sb.  The previous check used strict '<' and
   * only tested the end of the template range, so it (a) fired on a
   * valid chunk exactly adjacent below an existing one, and (b) missed
   * a template that entirely contained an existing chunk.
   */
  if (CLIB_DEBUG > 0 && index != ~0)
    {
      while (index != ~0)
	{
	  ch = pool_elt_at_index (vam->chunks, index);
	  ASSERT (template->baseva + template->size <= ch->baseva ||
		  template->baseva >= ch->baseva + ch->size);
	  index = ch->next;
	}
      index = vam->first_index;
    }

  if (index != ~0)
    ch = pool_elt_at_index (vam->chunks, index);

  if (index == ~0 || template->baseva < ch->baseva)
    {
      /* New chunk becomes the head of the (baseva-sorted) chunk list */
      pool_get (vam->chunks, new_ch);
      clib_memset (new_ch, 0, sizeof (*new_ch));

      if (index != ~0)
	{
	  /* Re-fetch: pool_get above may have reallocated the pool,
	     invalidating the previously-cached ch pointer */
	  ch = pool_elt_at_index (vam->chunks, index);

	  new_ch->next = index;
	  new_ch->prev = ~0;
	  ch->prev = new_ch - vam->chunks;
	}
      else
	{
	  /* Empty arena: the new chunk is the only list element */
	  new_ch->next = new_ch->prev = ~0;
	}

      new_ch->baseva = template->baseva;
      new_ch->size = template->size;

      vam->first_index = new_ch - vam->chunks;

      hash_set (vam->chunk_index_by_baseva, new_ch->baseva, vam->first_index);
    }
  else
    {
      /*
       * NOTE(review): a chunk whose baseva falls strictly between two
       * existing chunks is appended at the tail, leaving the list
       * unsorted by baseva — presumably callers only ever extend the
       * arena below the first or above the last chunk; confirm before
       * relying on sorted order elsewhere.
       */
      /* Walk to the end of the chunk chain */
      while (index != ~0)
	{
	  ch = pool_elt_at_index (vam->chunks, index);
	  index = ch->next;
	}
      /* we want the last chunk index */
      index = ch - vam->chunks;

      pool_get (vam->chunks, new_ch);
      clib_memset (new_ch, 0, sizeof (*new_ch));

      /* Re-fetch: pool_get may have reallocated the pool */
      ch = pool_elt_at_index (vam->chunks, index);

      new_ch->next = ~0;
      new_ch->prev = index;
      ch->next = new_ch - vam->chunks;

      new_ch->baseva = template->baseva;
      new_ch->size = template->size;

      hash_set (vam->chunk_index_by_baseva, new_ch->baseva,
		new_ch - vam->chunks);
    }

  clib_spinlock_unlock_if_init (&vam->lock);
}

/** Initialize a virtual memory allocation arena
    @param vam - clib_valloc_main_t * pointer to the arena to initialize
    @param template - clib_valloc_chunk_t * pointer to a template chunk
    describing the initial virtual address range
    @param need_lock - int, non-zero to protect the arena with a spinlock
*/
void
clib_valloc_init (clib_valloc_main_t * vam, clib_valloc_chunk_t * template,
		  int need_lock)
{
  /* The arena must start with a non-empty, non-zero-based range */
  ASSERT (template && template->baseva && template->size);

  clib_memset (vam, 0, sizeof (*vam));

  if (need_lock)
    clib_spinlock_init (&vam->lock);

  vam->first_index = ~0;
  vam->chunk_index_by_baseva = hash_create (0, sizeof (uword));
  vam->flags |= CLIB_VALLOC_INITIALIZED;

  /* Seed the arena with its first (free) chunk */
  clib_valloc_add_chunk (vam, template);
}

/** Allocate virtual space
    @param vam - clib_valloc_main_t * pointer to the allocation arena
    @param size - u64 number of bytes to allocate
    @param os_out_of_memory_on_failure - 1=> panic on allocation failure
    @return uword allocated space, 0=> failure
*/
uword
clib_valloc_alloc (clib_valloc_main_t * vam, uword size,
		   int os_out_of_memory_on_failure)
{
  clib_valloc_chunk_t *ch, *new_ch, *next_ch;
  u32 index;

  clib_spinlock_lock_if_init (&vam->lock);

  index = vam->first_index;

  while (index != ~0)
    {
      ch = pool_elt_at_index (vam->chunks, index);
      /* If the chunk is free... */
      if ((ch->flags & CLIB_VALLOC_BUSY) == 0)
	{
	  /* Too small? */
	  if (ch->size < size)
	    goto next_chunk;
	  /* Exact match? */
	  if (ch->size == size)
	    {
	      ch->flags |= CLIB_VALLOC_BUSY;
	      clib_spinlock_unlock_if_init (&vam->lock);
	      return ch->baseva;
	    }
	  /*
	   * The current free chunk is larger than necessary, spl