/* *------------------------------------------------------------------ * ip_api.c - vnet ip api * * Copyright (c) 2016 Cisco and/or its affiliates. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *------------------------------------------------------------------ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define vl_typedefs /* define message structures */ #include #undef vl_typedefs #define vl_endianfun /* define message structures */ #include #undef vl_endianfun /* instantiate all the print functions we know about */ #define vl_print(handle, ...) 
vlib_cli_output (handle, __VA_ARGS__) #define vl_printfun #include #undef vl_printfun #include #define foreach_ip_api_msg \ _(IP_TABLE_DUMP, ip_table_dump) \ _(IP_ROUTE_DUMP, ip_route_dump) \ _(IP_MTABLE_DUMP, ip_mtable_dump) \ _(IP_MROUTE_DUMP, ip_mroute_dump) \ _(IP_NEIGHBOR_DUMP, ip_neighbor_dump) \ _(IP_MROUTE_ADD_DEL, ip_mroute_add_del) \ _(MFIB_SIGNAL_DUMP, mfib_signal_dump) \ _(IP_ADDRESS_DUMP, ip_address_dump) \ _(IP_UNNUMBERED_DUMP, ip_unnumbered_dump) \ _(IP_DUMP, ip_dump) \ _(IP_NEIGHBOR_ADD_DEL, ip_neighbor_add_del) \ _(SET_ARP_NEIGHBOR_LIMIT, set_arp_neighbor_limit) \ _(IP_PROBE_NEIGHBOR, ip_probe_neighbor) \ _(IP_SCAN_NEIGHBOR_ENABLE_DISABLE, ip_scan_neighbor_enable_disable) \ _(WANT_IP4_ARP_EVENTS, want_ip4_arp_events) \ _(WANT_IP6_ND_EVENTS, want_ip6_nd_events) \ _(WANT_IP6_RA_EVENTS, want_ip6_ra_events) \ _(PROXY_ARP_ADD_DEL, proxy_arp_add_del) \ _(PROXY_ARP_DUMP, proxy_arp_dump) \ _(PROXY_ARP_INTFC_ENABLE_DISABLE, proxy_arp_intfc_enable_disable) \ _(PROXY_ARP_INTFC_DUMP, proxy_arp_intfc_dump) \ _(RESET_FIB, reset_fib) \ _(IP_ROUTE_ADD_DEL, ip_route_add_del) \ _(IP_TABLE_ADD_DEL, ip_table_add_del) \ _(IP_PUNT_POLICE, ip_punt_police) \ _(IP_PUNT_REDIRECT, ip_punt_redirect) \ _(SET_IP_FLOW_HASH,set_ip_flow_hash) \ _(SW_INTERFACE_IP6ND_RA_CONFIG, sw_interface_ip6nd_ra_config) \ _(SW_INTERFACE_IP6ND_RA_PREFIX, sw_interface_ip6nd_ra_prefix) \ _(IP6ND_PROXY_ADD_DEL, ip6nd_proxy_add_del) \ _(IP6ND_PROXY_DUMP, ip6nd_proxy_dump) \ _(IP6ND_SEND_ROUTER_SOLICITATION, ip6nd_send_router_solicitation) \ _(SW_INTERFACE_IP6_ENABLE_DISABLE, sw_interface_ip6_enable_disable ) \ _(IP_CONTAINER_PROXY_ADD_DEL, ip_container_proxy_add_del) \ _(IP_CONTAINER_PROXY_DUMP, ip_container_proxy_dump) \ _(IOAM_ENABLE, ioam_enable) \ _(IOAM_DISABLE, ioam_disable) \ _(IP_SOURCE_AND_PORT_RANGE_CHECK_ADD_DEL, \ ip_source_and_port_range_check_add_del) \ _(IP_SOURCE_AND_PORT_RANGE_CHECK_INTERFACE_ADD_DEL, \ ip_source_and_port_range_check_interface_add_del) \ 
_(IP_SOURCE_CHECK_INTERFACE_ADD_DEL, \ ip_source_check_interface_add_del) \ _(IP_REASSEMBLY_SET, ip_reassembly_set) \ _(IP_REASSEMBLY_GET, ip_reassembly_get) \ _(IP_REASSEMBLY_ENABLE_DISABLE, ip_reassembly_enable_disable) \ _(IP_PUNT_REDIRECT_DUMP, ip_punt_redirect_dump) static vl_api_ip_neighbor_flags_t ip_neighbor_flags_encode (ip_neighbor_flags_t f) { vl_api_ip_neighbor_flags_t v = IP_API_NEIGHBOR_FLAG_NONE; if (f & IP_NEIGHBOR_FLAG_STATIC) v |= IP_API_NEIGHBOR_FLAG_STATIC; if (f & IP_NEIGHBOR_FLAG_NO_FIB_ENTRY) v |= IP_API_NEIGHBOR_FLAG_NO_FIB_ENTRY; return (clib_host_to_net_u32 (v)); } static void send_ip_neighbor_details (u32 sw_if_index, const ip46_address_t * ip_address, const mac_address_t * mac, ip_neighbor_flags_t flags, vl_api_registration_t * reg, u32 context) { vl_api_ip_neighbor_details_t *mp; mp = vl_msg_api_alloc (sizeof (*mp)); clib_memset (mp, 0, sizeof (*mp)); mp->_vl_msg_id = ntohs (VL_API_IP_NEIGHBOR_DETAILS); mp->context = context; mp->neighbor.sw_if_index = htonl (sw_if_index); mp->neighbor.flags = ip_neighbor_flags_encode (flags); ip_address_encode (ip_address, IP46_TYPE_ANY, &mp->neighbor.ip_address); mac_address_encode (mac, mp->neighbor.mac_address); vl_api_send_msg (reg, (u8 *) mp); } static void vl_api_ip_neighbor_dump_t_handler (vl_api_ip_neighbor_dump_t * mp) { vl_api_registration_t *reg; reg = vl_api_client_index_to_registration (mp->client_index); if (!reg) return; u32 sw_if_index = ntohl (mp->sw_if_index); if (mp->is_ipv6) { ip6_neighbor_t *n, *ns; ns = ip6_neighbors_entries (sw_if_index); /* *INDENT-OFF* */ vec_foreach (n, ns) { ip46_address_t nh = { .ip6 = { .as_u64[0] = n->key.ip6_address.as_u64[0], .as_u64[1] = n->key.ip6_address.as_u64[1], }, }; send_ip_neighbor_details (n->key.sw_if_index, &nh, &n->mac, n->flags, reg, mp->context); } /* *INDENT-ON* */ vec_free (ns); } else { ethernet_arp_ip4_entry_t *n, *ns; ns = ip4_neighbor_entries (sw_if_index); /* *INDENT-OFF* */ vec_foreach (n, ns) { ip46_address_t nh = { .ip4 = { 
.as_u32 = n->ip4_address.as_u32, }, }; send_ip_neighbor_details (n->sw_if_index, &nh, &n->mac, n->flags, reg, mp->context); } /* *INDENT-ON* */ vec_free (ns); } } static void send_ip_table_details (vpe_api_main_t * am, vl_api_registration_t * reg, u32 context, const fib_table_t * table) { vl_api_ip_table_details_t *mp; mp = vl_msg_api_alloc (sizeof (*mp)); if (!mp) return; clib_memset (mp, 0, sizeof (*mp)); mp->_vl_msg_id = ntohs (VL_API_IP_TABLE_DETAILS); mp->context = context; mp->table.is_ip6 = (table->ft_proto == FIB_PROTOCOL_IP6); mp->table.table_id = htonl (table->ft_table_id); memcpy (mp->table.name, table->ft_desc, clib_min (vec_len (table->ft_desc), sizeof (mp->table.name))); vl_api_send_msg (reg, (u8 *) mp); } static void vl_api_ip_table_dump_t_handler (vl_api_ip_table_dump_t * mp) { vpe_api_main_t *am = &vpe_api_main; vl_api_registration_t *reg; fib_table_t *fib_table; reg = vl_api_client_index_to_registration (mp->client_index); if (!reg) return; /* *INDENT-OFF* */ pool_foreach (fib_table, ip4_main.fibs, ({ send_ip_table_details(am, reg, mp->context, fib_table); })); pool_foreach (fib_table, ip6_main.fibs, ({ /* don't send link locals */ if (fib_table->ft_flags & FIB_TABLE_FLAG_IP6_LL) continue; send_ip_table_details(am, reg, mp->context, fib_table); })); /* *INDENT-ON* */ } typedef struct vl_api_ip_fib_dump_walk_ctx_t_ { fib_node_index_t *feis; } vl_api_ip_fib_dump_walk_ctx_t; static fib_table_walk_rc_t vl_api_ip_fib_dump_walk (fib_node_index_t fei, void *arg) { vl_api_ip_fib_dump_walk_ctx_t *ctx = arg; vec_add1 (ctx->feis, fei); return (FIB_TABLE_WALK_CONTINUE); } static void send_ip_route_details (vpe_api_main_t * am, vl_api_registration_t * reg, u32 context, fib_node_index_t fib_entry_index) { fib_route_path_t *rpaths, *rpath; vl_api_ip_route_details_t *mp; const fib_prefix_t *pfx; vl_api_fib_path_t *fp; int path_count; rpaths = NULL; pfx = fib_entry_get_prefix (fib_entry_index); rpaths = fib_entry_encode (fib_entry_index); path_count = vec_len 
(rpaths); mp = vl_msg_api_alloc (sizeof (*mp) + path_count * sizeof (*fp)); if (!mp) return; clib_memset (mp, 0, sizeof (*mp)); mp->_vl_msg_id = ntohs (VL_API_IP_ROUTE_DETAILS); mp->context = context; ip_prefix_encode (pfx, &mp->route.prefix); mp->route.table_id = htonl (fib_table_get_table_id (fib_entry_get_fib_index (fib_entry_index), pfx->fp_proto)); mp->route.n_paths = path_count; mp->route.stats_index = htonl (fib_table_entry_get_stats_index (fib_entry_get_fib_index (fib_entry_index), pfx)); fp = mp->route.paths; vec_foreach (rpath, rpaths) { fib_api_path_encode (rpath, fp); fp++; } vl_api_send_msg (reg, (u8 *) mp); } typedef struct apt_ip6_fib_show_ctx_t_ { fib_node_index_t *entries; } api_ip6_fib_show_ctx_t; static void vl_api_ip_route_dump_t_handler (vl_api_ip_route_dump_t * mp) { vpe_api_main_t *am = &vpe_api_main; fib_node_index_t *fib_entry_index; vl_api_registration_t *reg; fib_protocol_t fproto; u32 fib_index; reg = vl_api_client_index_to_registration (mp->client_index); if (!reg) return; vl_api_ip_fib_dump_walk_ctx_t ctx = { .feis = NULL, }; fproto = (mp->table.is_ip6 ? 
FIB_PROTOCOL_IP6 : FIB_PROTOCOL_IP4); fib_index = fib_table_find (fproto, ntohl (mp->table.table_id)); if (INDEX_INVALID == fib_index) return; fib_table_walk (fib_index, fproto, vl_api_ip_fib_dump_walk, &ctx); vec_foreach (fib_entry_index, ctx.feis) { send_ip_route_details (am, reg, mp->context, *fib_entry_index); } vec_free (ctx.feis); } static void send_ip_mtable_details (vl_api_registration_t * reg, u32 context, const mfib_table_t * mfib_table) { vl_api_ip_mtable_details_t *mp; mp = vl_msg_api_alloc (sizeof (*mp)); if (!mp) return; memset (mp, 0, sizeof (*mp)); mp->_vl_msg_id = ntohs (VL_API_IP_MTABLE_DETAILS); mp->context = context; mp->table.table_id = htonl (mfib_table->mft_table_id); mp->table.is_ip6 = (FIB_PROTOCOL_IP6 == mfib_table->mft_proto); vl_api_send_msg (reg, (u8 *) mp); } static void vl_api_ip_mtable_dump_t_handler (vl_api_ip_mtable_dump_t * mp) { vl_api_registration_t *reg; mfib_table_t *mfib_table; reg = vl_api_client_index_to_registration (mp->client_index); if (!reg) return; /* *INDENT-OFF* */ pool_foreach (mfib_table, ip4_main.mfibs, ({ send_ip_mtable_details (reg, mp->context, mfib_table); })); pool_foreach (mfib_table, ip6_main.mfibs, ({ send_ip_mtable_details (reg, mp->context, mfib_table); })); /* *INDENT-ON* */ } typedef struct vl_api_ip_mfib_dump_ctx_t_ { fib_node_index_t *entries; } vl_api_ip_mfib_dump_ctx_t; static int mfib_route_dump_walk (fib_node_index_t fei, void *arg) { vl_api_ip_mfib_dump_ctx_t *ctx = arg; vec_add1 (ctx->entries, fei); return (0); } static void send_ip_mroute_details (vpe_api_main_t * am, vl_api_registration_t * reg, u32 context, fib_node_index_t mfib_entry_index) { fib_route_path_t *rpaths, *rpath; vl_api_ip_mroute_details_t *mp; const mfib_prefix_t *pfx; vl_api_mfib_path_t *fp; int path_count; rpaths = NULL; pfx = mfib_entry_get_prefix (mfib_entry_index); rpaths = mfib_entry_encode (mfib_entry_index); path_count = vec_len (rpaths); mp = vl_msg_api_alloc (sizeof (*mp) + path_count * sizeof (*fp)); if (!mp) 
return; clib_memset (mp, 0, sizeof (*mp)); mp->_vl_msg_id = ntohs (VL_API_IP_MROUTE_DETAILS); mp->context = context; ip_mprefix_encode (pfx, &mp->route.prefix); mp->route.table_id = htonl (mfib_table_get_table_id (mfib_entry_get_fib_index (mfib_entry_index), pfx->fp_proto)); mp->route.n_paths = htonl (path_count); fp = mp->route.paths; vec_foreach (rpath, rpaths) { mfib_api_path_encode (rpath, fp); fp++; } vl_api_send_msg (reg, (u8 *) mp); vec_free (rpaths); } static void vl_api_ip_mroute_dump_t_handler (vl_api_ip_mroute_dump_t * mp) { vpe_api_main_t *am = &vpe_api_main; vl_api_registration_t *reg; fib_node_index_t *mfeip; fib_protocol_t fproto; u32 fib_index; vl_api_ip_mfib_dump_ctx_t ctx = { .entries = NULL, }; reg = vl_api_client_index_to_registration (mp->client_index); if (!reg) return; fproto = fib_ip_proto (mp->table.is_ip6); fib_index = mfib_table_find (fproto, ntohl (mp->table.table_id)); if (INDEX_INVALID == fib_index) return; mfib_table_walk (fib_index, fproto, mfib_route_dump_walk, &ctx); vec_sort_with_function (ctx.entries, mfib_entry_cmp_for_sort); vec_foreach (mfeip, ctx.entries) { send_ip_mroute_details (am, reg, mp->context, *mfeip); } vec_free (ctx.entries); } static void vl_api_ip_punt_police_t_handler (vl_api_ip_punt_police_t * mp, vlib_main_t * vm) { vl_api_ip_punt_police_reply_t *rmp; int rv = 0; if (mp->is_ip6) ip6_punt_policer_add_del (mp->is_add, ntohl (mp->policer_index)); else ip4_punt_policer_add_del (mp->is_add, ntohl (mp->policer_index)); REPLY_MACRO (VL_API_IP_PUNT_POLICE_REPLY); } static void vl_api_ip_punt_redirect_t_handler (vl_api_ip_punt_redirect_t * mp, vlib_main_t * vm) { vl_api_ip_punt_redirect_reply_t *rmp; int rv = 0; ip46_type_t ipv; ip46_address_t nh; if (!vnet_sw_if_index_is_api_valid (ntohl (mp->punt.tx_sw_if_index))) goto bad_sw_if_index; ipv = ip_address_decode (&mp->punt.nh, &nh); if (mp->is_add) { if (ipv == IP46_TYPE_IP6) { ip6_punt_redirect_add (ntohl (mp->punt.rx_sw_if_index), ntohl (mp->punt.tx_sw_if_index), 
&nh); } else if (ipv == IP46_TYPE_IP4) { ip4_punt_redirect_add (ntohl (mp->punt.rx_sw_if_index), ntohl (mp->punt.tx_sw_if_index), &nh); } } else { if (ipv == IP46_TYPE_IP6) { ip6_punt_redirect_del (ntohl (mp->punt.rx_sw_if_index)); } else if (ipv == IP46_TYPE_IP4) { ip4_punt_redirect_del (ntohl (mp->punt.rx_sw_if_index)); } } BAD_SW_IF_INDEX_LABEL; REPLY_MACRO (VL_API_IP_PUNT_REDIRECT_REPLY); } static ip_neighbor_flags_t ip_neighbor_flags_decode (vl_api_ip_neighbor_flags_t v) { ip_neighbor_flags_t f = IP_NEIGHBOR_FLAG_NONE; v = clib_net_to_host_u32 (v); if (v & IP_API_NEIGHBOR_FLAG_STATIC) f |= IP_NEIGHBOR_FLAG_STATIC; if (v & IP_API_NEIGHBOR_FLAG_NO_FIB_ENTRY) f |= IP_NEIGHBOR_FLAG_NO_FIB_ENTRY; return (f); } static void vl_api_ip_neighbor_add_del_t_handler (vl_api_ip_neighbor_add_del_t * mp, vlib_main_t * vm) { vl_api_ip_neighbor_add_del_reply_t *rmp; ip_neighbor_flags_t flags; u32 stats_index = ~0; ip46_address_t ip; mac_address_t mac; ip46_type_t type; int rv; VALIDATE_SW_IF_INDEX ((&mp->neighbor)); flags = ip_neighbor_flags_decode (mp->neighbor.flags); type = ip_address_decode (&mp->neighbor.ip_address, &ip); mac_address_decode (mp->neighbor.mac_address, &mac); /* * there's no validation here of the ND/ARP entry being added. * The expectation is that the FIB will ensure that nothing bad * will come of adding bogus entries. */ if (mp->is_add) rv = ip_neighbor_add (&ip, type, &mac, ntohl (mp->neighbor.sw_if_index), flags, &stats_index); else rv = ip_neighbor_del (&ip, type, ntohl (mp->neighbor.sw_if_index)); BAD_SW_IF_INDEX_LABEL; /* *INDENT-OFF* */ REPLY_MACRO2 (VL_API_IP_NEIGHBOR_ADD_DEL_REPLY, ({ rmp->stats_index = htonl (stats_index); })); /* *INDENT-ON* */ } void ip_table_delete (fib_protocol_t fproto, u32 table_id, u8 is_api) { u32 fib_index, mfib_index; /* * ignore action on the default table - this is always present * and cannot be added nor deleted from the API */ if (0 != table_id) { /* * The API holds only one lock on the table. * i.e. 
it can be added many times via the API but needs to be * deleted only once. * The FIB index for unicast and multicast is not necessarily the * same, since internal VPP systesm (like LISP and SR) create * their own unicast tables. */ fib_index = fib_table_find (fproto, table_id); mfib_index = mfib_table_find (fproto, table_id); if (~0 != fib_index) { fib_table_unlock (fib_index, fproto, (is_api ? FIB_SOURCE_API : FIB_SOURCE_CLI)); } if (~0 != mfib_index) { mfib_table_unlock (mfib_index, fproto, (is_api ? MFIB_SOURCE_API : MFIB_SOURCE_CLI)); } } } void vl_api_ip_table_add_del_t_handler (vl_api_ip_table_add_del_t * mp) { vl_api_ip_table_add_del_reply_t *rmp; fib_protocol_t fproto = (mp->table.is_ip6 ? FIB_PROTOCOL_IP6 : FIB_PROTOCOL_IP4); u32 table_id = ntohl (mp->table.table_id); int rv = 0; if (mp->is_add) { ip_table_create (fproto, table_id, 1, mp->table.name); } else { ip_table_delete (fproto, table_id, 1); } REPLY_MACRO (VL_API_IP_TABLE_ADD_DEL_REPLY); } static int ip_route_add_del_t_handler (vl_api_ip_route_add_del_t * mp, u32 * stats_index) { fib_route_path_t *rpaths = NULL, *rpath; fib_entry_flag_t entry_flags; vl_api_fib_path_t *apath; fib_prefix_t pfx; u32 fib_index; int rv, ii; entry_flags = FIB_ENTRY_FLAG_NONE; ip_prefix_decode (&mp->route.prefix, &pfx); rv = fib_api_table_id_decode (pfx.fp_proto, ntohl (mp->route.table_id), &fib_index); if (0 != rv) goto out; if (0 != mp->route.n_paths) vec_validate (rpaths, mp->route.n_paths - 1); for (ii = 0; ii < mp->route.n_paths; ii++) { apath = &mp->route.paths[ii]; rpath = &rpaths[ii]; rv = fib_api_path_decode (apath, rpath); if ((rpath->frp_flags & FIB_ROUTE_PATH_LOCAL) && (~0 == rpath->frp_sw_if_index)) entry_flags |= (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_LOCAL); if (0 != rv) goto out; } rv = fib_api_route_add_del (mp->is_add, mp->is_multipath, fib_index, &pfx, entry_flags, rpaths); if (mp->is_add && 0 == rv) *stats_index = fib_table_entry_get_stats_index (fib_index, &pfx); out: vec_free (rpaths); return 
(rv); } void vl_api_ip_route_add_del_t_handler (vl_api_ip_route_add_del_t * mp) { vl_api_ip_route_add_del_reply_t *rmp; u32 stats_index = ~0; int rv; rv = ip_route_add_del_t_handler (mp, &stats_index); /* *INDENT-OFF* */ REPLY_MACRO2 (VL_API_IP_ROUTE_ADD_DEL_REPLY, ({ rmp->stats_index = htonl (stats_index); })) /* *INDENT-ON* */ } void ip_table_create (fib_protocol_t fproto, u32 table_id, u8 is_api, const u8 * name) { u32 fib_index, mfib_index; /* * ignore action on the default table - this is always present * and cannot be added nor deleted from the API */ if (0 != table_id) { /* * The API holds only one lock on the table. * i.e. it can be added many times via the API but needs to be * deleted only once. * The FIB index for unicast and multicast is not necessarily the * same, since internal VPP systesm (like LISP and SR) create * their own unicast tables. */ fib_index = fib_table_find (fproto, table_id); mfib_index = mfib_table_find (fproto, table_id); if (~0 == fib_index) { fib_table_find_or_create_and_lock_w_name (fproto, table_id, (is_api ? FIB_SOURCE_API : FIB_SOURCE_CLI), name); } if (~0 == mfib_index) { mfib_table_find_or_create_and_lock_w_name (fproto, table_id, (is_api ? 
MFIB_SOURCE_API : MFIB_SOURCE_CLI), name); } } } static u32 mroute_add_del_handler (u8 is_add, u8 is_multipath, u32 fib_index, const mfib_prefix_t * prefix, u32 entry_flags, u32 rpf_id, fib_route_path_t * rpaths) { u32 mfib_entry_index = ~0; if (0 == vec_len (rpaths)) { mfib_entry_index = mfib_table_entry_update (fib_index, prefix, MFIB_SOURCE_API, rpf_id, entry_flags); } else { if (is_add) { mfib_entry_index = mfib_table_entry_paths_update (fib_index, prefix, MFIB_SOURCE_API, rpaths); } else { mfib_table_entry_paths_remove (fib_index, prefix, MFIB_SOURCE_API, rpaths); } } return (mfib_entry_index); } static int api_mroute_add_del_t_handler (vl_api_ip_mroute_add_del_t * mp, u32 * stats_index) { fib_route_path_t *rpath, *rpaths = NULL; fib_node_index_t mfib_entry_index; mfib_prefix_t pfx; u32 fib_index; int rv; u16 ii; ip_mprefix_decode (&mp->route.prefix, &pfx); rv = mfib_api_table_id_decode (pfx.fp_proto, ntohl (mp->route.table_id), &fib_index); if (0 }
/*
 * Copyright (c) 2013 Cisco and/or its affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/

#ifndef included_clib_pfhash_h
#define included_clib_pfhash_h


#include <vppinfra/clib.h>
#include <vppinfra/hash.h>
#include <vppinfra/pool.h>

#if defined(CLIB_HAVE_VEC128) && ! defined (__ALTIVEC__)

/* One fixed-size bucket for 16-byte keys: 3 (key, value) slots.
 * 48 key bytes + 12 value bytes + 4 pad bytes = 64 bytes total. */
typedef struct
{
  /* 3 x 16 = 48 key bytes */
  union
  {
    u32x4 k_u32x4[3];		/* SIMD view used by pfhash_search_kv_16 */
    u64 k_u64[6];		/* scalar view of the same key bytes */
  } kb;
  /* 3 x 4 = 12 value bytes */
  u32 values[3];
  u32 pad;			/* pads the struct to 64 bytes */
} pfhash_kv_16_t;

/* One fixed-size bucket for 8-byte keys with 4-byte values:
 * 5 slots; 40 key bytes + 20 value bytes + 4 pad bytes = 64 bytes. */
typedef struct
{
  /* 5 x 8 = 40 key bytes */
  union
  {
    u64 k_u64[5];
  } kb;

  /* 5 x 4 = 20 value bytes */
  u32 values[5];
  u32 pad;			/* pads the struct to 64 bytes */
} pfhash_kv_8_t;

/* One fixed-size bucket for 8-byte keys with 8-byte values:
 * 4 slots; 32 key bytes + 32 value bytes = 64 bytes, no padding needed. */
typedef struct
{
  /* 4 x 8 = 32 key bytes */
  union
  {
    u64 k_u64[4];
  } kb;

  /* 4 x 8 = 32 value bytes */
  u64 values[4];
} pfhash_kv_8v8_t;

/* One fixed-size bucket for 4-byte keys: 8 slots;
 * 32 key bytes + 32 value bytes = 64 bytes, no padding needed. */
typedef struct
{
  /* 8 x 4 = 32 key bytes */
  union
  {
    u32x4 k_u32x4[2];		/* SIMD view used by pfhash_search_kv_4 */
    u32 kb[8];			/* scalar view of the same key bytes */
  } kb;

  /* 8 x 4 = 32 value bytes */
  u32 values[8];
} pfhash_kv_4_t;

/* A bucket of any flavor; every variant above is 64 bytes, so one
 * pfhash_kv_t spans a single typical cache line. */
typedef union
{
  pfhash_kv_16_t kv16;
  pfhash_kv_8_t kv8;
  pfhash_kv_8v8_t kv8v8;
  pfhash_kv_4_t kv4;
} pfhash_kv_t;

/* Prefetchable bucketized hash table.  Each bucket holds a small fixed
 * number of (key, value) pairs inline (pfhash_kv_t); pairs that do not
 * fit spill into a conventional hash table. */
typedef struct
{
  /* Bucket vector: each entry indexes the kvp pool, or is
   * PFHASH_BUCKET_OVERFLOW when the bucket spilled into overflow_hash
   * (see the search helpers below). */
  u32 *buckets;
#define PFHASH_BUCKET_OVERFLOW (u32)~0

  /* Pool of key/value pairs */
  pfhash_kv_t *kvp;

  /* overflow plain-o-hash */
  uword *overflow_hash;

  /* Pretty-print name */
  u8 *name;

  /* Key and value sizes in bytes, as passed to pfhash_init */
  u32 key_size;
  u32 value_size;

  /* Statistics — presumably maintained by pfhash.c; not visible here */
  u32 overflow_count;		/* buckets that spilled to overflow_hash */
  u32 nitems;			/* total items in the table */
  u32 nitems_in_overflow;	/* items living in overflow_hash */
} pfhash_t;

/* Set up P with NBUCKETS buckets for KEY_SIZE-byte keys and
 * VALUE_SIZE-byte values; NAME is kept for pretty-printing. */
void pfhash_init (pfhash_t * p, char *name, u32 key_size, u32 value_size,
		  u32 nbuckets);
/* Release all storage owned by P. */
void pfhash_free (pfhash_t * p);
/* Look up KEY in BUCKET; presumably returns (u64)~0 on miss, mirroring
 * the inline search helpers below — confirm against pfhash.c. */
u64 pfhash_get (pfhash_t * p, u32 bucket, void *key);
/* Insert or overwrite (KEY, VALUE) in BUCKET. */
void pfhash_set (pfhash_t * p, u32 bucket, void *key, void *value);
/* Remove KEY from BUCKET if present. */
void pfhash_unset (pfhash_t * p, u32 bucket, void *key);

/* Table pretty-printer, for use with the %U format directive. */
format_function_t format_pfhash;

/* Issue a load prefetch for BUCKET's header word, ahead of a call to
 * pfhash_read_bucket_prefetch_kv. */
static inline void
pfhash_prefetch_bucket (pfhash_t * p, u32 bucket)
{
  CLIB_PREFETCH (&p->buckets[bucket], CLIB_CACHE_LINE_BYTES, LOAD);
}

/*
 * Read BUCKET's contents and, when the bucket has not overflowed,
 * prefetch its key/value slab so a following pfhash_search_kv_* call
 * finds it warm in cache.  Returns the raw bucket word (a kvp pool
 * index, or PFHASH_BUCKET_OVERFLOW).
 *
 * Fix: PFHASH_BUCKET_OVERFLOW is (u32)~0, so the previous test
 * "(bucket_contents & PFHASH_BUCKET_OVERFLOW) == 0" reduced to
 * "bucket_contents == 0" and skipped the prefetch for every nonzero
 * kvp index.  Compare against the sentinel directly; the prefetch is
 * only a hint, so this cannot change lookup results.
 */
static inline u32
pfhash_read_bucket_prefetch_kv (pfhash_t * p, u32 bucket)
{
  u32 bucket_contents = p->buckets[bucket];
  if (PREDICT_TRUE (bucket_contents != PFHASH_BUCKET_OVERFLOW))
    CLIB_PREFETCH (&p->kvp[bucket_contents], CLIB_CACHE_LINE_BYTES, LOAD);
  return bucket_contents;
}

/*
 * pfhash_search_kv_16
 * See if the supplied 16-byte key matches one of three 16-byte (key,value) pairs.
 * Return the indicated value, or ~0 if no match
 *
 * Note: including the overflow test, the fast path is 35 instrs
 * on x86_64. Elves will steal your keyboard in the middle of the night if
 * you "improve" it without checking the generated code!
 */
static inline u32
pfhash_search_kv_16 (pfhash_t * p, u32 bucket_contents, u32x4 * key)
{
  u32x4 diff0, diff1, diff2;
  u32 is_equal0, is_equal1, is_equal2;
  u32 no_match;
  pfhash_kv_16_t *kv;
  u32 rv;

  /* Overflowed bucket: fall back to the plain-o-hash. */
  if (PREDICT_FALSE (bucket_contents == PFHASH_BUCKET_OVERFLOW))
    {
      uword *hp;
      hp = hash_get_mem (p->overflow_hash, key);
      if (hp)
	return hp[0];
      return (u32) ~ 0;
    }

  kv = &p->kvp[bucket_contents].kv16;

  /* Subtract the probe key from each stored key; a slot matches iff its
   * difference vector is all-zero bytes. */
  diff0 = u32x4_sub (kv->kb.k_u32x4[0], key[0]);
  diff1 = u32x4_sub (kv->kb.k_u32x4[1], key[0]);
  diff2 = u32x4_sub (kv->kb.k_u32x4[2], key[0]);

  /* u32x4_zero_byte_mask presumably yields 0xFFFF when all 16 bytes are
   * zero (full match); the (i16) cast sign-extends that to an all-ones
   * u32 so it can serve as a branch-free select mask below. */
  no_match = is_equal0 = (i16) u32x4_zero_byte_mask (diff0);
  is_equal1 = (i16) u32x4_zero_byte_mask (diff1);
  no_match |= is_equal1;
  is_equal2 = (i16) u32x4_zero_byte_mask (diff2);
  no_match |= is_equal2;
  /* If any of the three items matched, no_match will be zero after this line */
  no_match = ~no_match;

  /* Branch-free select: at most one is_equalN is expected to be
   * all-ones (keys are presumed unique within a bucket — not checked
   * here); if none matched, no_match is ~0 and rv becomes ~0. */
  rv = (is_equal0 & kv->values[0])
    | (is_equal1 & kv->values[1]) | (is_equal2 & kv->values[2]) | no_match;

  return rv;
}

/*
 * pfhash_search_kv_8
 * Match an 8-byte key against the five (key, value) slots of a kv8
 * bucket.  Returns the matching value, or (u32)~0 on miss.  When the
 * bucket has overflowed, the lookup is serviced by the plain-o-hash.
 */
static inline u32
pfhash_search_kv_8 (pfhash_t * p, u32 bucket_contents, u64 * key)
{
  pfhash_kv_8_t *slab;
  u32 result = (u32) ~ 0;
  int slot;

  if (PREDICT_FALSE (bucket_contents == PFHASH_BUCKET_OVERFLOW))
    {
      uword *hp = hash_get_mem (p->overflow_hash, key);
      return hp ? (u32) hp[0] : (u32) ~ 0;
    }

  slab = &p->kvp[bucket_contents].kv8;

  /* Later slots take precedence, matching the original ternary chain. */
  for (slot = 0; slot < 5; slot++)
    if (slab->kb.k_u64[slot] == key[0])
      result = slab->values[slot];

  return result;
}

/*
 * pfhash_search_kv_8v8
 * Match an 8-byte key against the four (key, value) slots of a kv8v8
 * bucket (8-byte values).  Returns the matching value, or (u64)~0 on
 * miss.  Overflowed buckets are serviced by the plain-o-hash.
 */
static inline u64
pfhash_search_kv_8v8 (pfhash_t * p, u32 bucket_contents, u64 * key)
{
  pfhash_kv_8v8_t *slab;
  u64 result = (u64) ~ 0;
  int slot;

  if (PREDICT_FALSE (bucket_contents == PFHASH_BUCKET_OVERFLOW))
    {
      uword *hp = hash_get_mem (p->overflow_hash, key);
      return hp ? (u64) hp[0] : (u64) ~ 0;
    }

  slab = &p->kvp[bucket_contents].kv8v8;

  /* Later slots take precedence, matching the original ternary chain. */
  for (slot = 0; slot < 4; slot++)
    if (slab->kb.k_u64[slot] == key[0])
      result = slab->values[slot];

  return result;
}

/*
 * pfhash_search_kv_4
 * Match a 4-byte key against the eight (key, value) slots of a kv4
 * bucket using two SIMD compares.  Returns the matching value, or
 * (u32)~0 on miss.  Overflowed buckets fall back to the plain-o-hash.
 */
static inline u32
pfhash_search_kv_4 (pfhash_t * p, u32 bucket_contents, u32 * key)
{
  u32x4 vector_key;
  u32x4 is_equal[2];
  u32 zbm[2], winner_index;
  pfhash_kv_4_t *kv;

  if (PREDICT_FALSE (bucket_contents == PFHASH_BUCKET_OVERFLOW))
    {
      uword *hp;
      hp = hash_get_mem (p->overflow_hash, key);
      if (hp)
	return hp[0];
      return (u32) ~ 0;
    }

  kv = &p->kvp[bucket_contents].kv4;

  /* Broadcast the probe key to all four lanes and compare against the
   * eight stored keys, four per vector. */
  vector_key = u32x4_splat (key[0]);

  is_equal[0] = (kv->kb.k_u32x4[0] == vector_key);
  is_equal[1] = (kv->kb.k_u32x4[1] == vector_key);
  /* A matching lane compares to all-ones (no zero bytes); inverting the
   * zero-byte mask leaves 4 set bits per matching lane — presumably one
   * mask bit per byte, pcmpeqb/pmovmskb-style; confirm in vector.h. */
  zbm[0] = ~u32x4_zero_byte_mask (is_equal[0]) & 0xFFFF;
  zbm[1] = ~u32x4_zero_byte_mask (is_equal[1]) & 0xFFFF;

  if (PREDICT_FALSE ((zbm[0] == 0) && (zbm[1] == 0)))
    return (u32) ~ 0;

  /* min_log2(zbm) >> 2 maps the lowest set mask bit back to a lane
   * index (4 mask bits per lane).  The second vector wins whenever it
   * has a match.  NOTE(review): min_log2(0) is undefined when only
   * zbm[1] is set, but that result is discarded by the ternary. */
  winner_index = min_log2 (zbm[0]) >> 2;
  winner_index = zbm[1] ? (4 + (min_log2 (zbm[1]) >> 2)) : winner_index;

  return kv->values[winner_index];
}

#endif /* CLIB_HAVE_VEC128 */

#endif /* included_clib_pfhash_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
/* NOTE(review): the lines below are whitespace-mangled VPP ip_api.c handler
 * code collapsed onto single physical lines.  The code is left token-for-token
 * intact except for two correctness fixes, marked FIX below:
 *   1. vl_api_ip_probe_neighbor_t_handler replied with the message id of a
 *      different API (VL_API_PROXY_ARP_INTFC_ENABLE_DISABLE_REPLY) even though
 *      its rmp is a vl_api_ip_probe_neighbor_reply_t; it now replies with
 *      VL_API_IP_PROBE_NEIGHBOR_REPLY.
 *   2. vl_api_ip_reassembly_get_t_handler host-to-net converted timeout_ms,
 *      max_reassemblies and expire_walk_interval_ms but not
 *      max_reassembly_length, although the matching ..._set handler
 *      net-to-host converts that field; the missing conversion is added.
 * A stray ";;" after return (WALK_STOP) was also reduced to ";".
 */
/* Tail of want_ip4_arp_events_reaper (head is outside this chunk), then the
 * want_ip6_nd_events handler: registers/unregisters wildcard or per-address
 * IPv6 ND event notifications for the calling API client. */
rp = pool_elt_at_index (am->wc_ip4_arp_events_registrations, p[0]); pool_put (am->wc_ip4_arp_events_registrations, rp); hash_unset (am->wc_ip4_arp_events_registration_hash, client_index); if (pool_elts (am->wc_ip4_arp_events_registrations) == 0) wc_arp_set_publisher_node (~0, REPORT_MAX); } return (NULL); } VL_MSG_API_REAPER_FUNCTION (want_ip4_arp_events_reaper); static void vl_api_want_ip6_nd_events_t_handler (vl_api_want_ip6_nd_events_t * mp) { vpe_api_main_t *am = &vpe_api_main; vnet_main_t *vnm = vnet_get_main (); vl_api_want_ip6_nd_events_reply_t *rmp; ip6_address_t ip6; int rv = 0; ip6_address_decode (mp->ip, &ip6); if (ip6_address_is_zero (&ip6)) { uword *p = hash_get (am->wc_ip6_nd_events_registration_hash, mp->client_index); vpe_client_registration_t *rp; if (p) { if (mp->enable_disable) { clib_warning ("pid %d: already enabled...", mp->pid); rv = VNET_API_ERROR_INVALID_REGISTRATION; goto reply; } else { rp = pool_elt_at_index (am->wc_ip6_nd_events_registrations, p[0]); pool_put (am->wc_ip6_nd_events_registrations, rp); hash_unset (am->wc_ip6_nd_events_registration_hash, mp->client_index); if (pool_elts (am->wc_ip6_nd_events_registrations) == 0) wc_nd_set_publisher_node (~0, REPORT_MAX); goto reply; } } if (mp->enable_disable == 0) { clib_warning ("pid %d: already disabled...", mp->pid); rv = VNET_API_ERROR_INVALID_REGISTRATION; goto reply; } pool_get (am->wc_ip6_nd_events_registrations, rp); rp->client_index = mp->client_index; rp->client_pid = mp->pid; hash_set (am->wc_ip6_nd_events_registration_hash, rp->client_index, rp - am->wc_ip6_nd_events_registrations); wc_nd_set_publisher_node (wc_arp_process_node.index, WC_ND_REPORT); goto reply; } if (mp->enable_disable) { vl_api_ip6_nd_event_t *event; pool_get (am->nd_events, event); rv = vnet_add_del_ip6_nd_change_event (vnm, nd_change_data_callback, mp->pid, &ip6, ip_resolver_process_node.index, IP6_ND_EVENT, event - am->nd_events, 1 /* is_add */ ); if (rv) { pool_put (am->nd_events, event); goto reply; }
/* Remainder of want_ip6_nd_events handler, its client-death reaper (drops
 * pending resolutions and the registration), and the start of the
 * want_ip6_ra_events handler. */
clib_memset (event, 0, sizeof (*event)); event->_vl_msg_id = ntohs (VL_API_IP6_ND_EVENT); event->client_index = mp->client_index; ip6_address_encode (&ip6, event->ip); event->pid = mp->pid; } else { rv = vnet_add_del_ip6_nd_change_event (vnm, nd_change_delete_callback, mp->pid, &ip6 /* addr, in net byte order */ , ip_resolver_process_node.index, IP6_ND_EVENT, ~0 /* pool index */ , 0 /* is_add */ ); } reply: REPLY_MACRO (VL_API_WANT_IP6_ND_EVENTS_REPLY); } static clib_error_t * want_ip6_nd_events_reaper (u32 client_index) { vpe_client_registration_t *rp; vl_api_ip6_nd_event_t *event; u32 *to_delete, *event_id; vpe_api_main_t *am; vnet_main_t *vnm; uword *p; am = &vpe_api_main; vnm = vnet_get_main (); to_delete = NULL; /* clear out all of its pending resolutions */ /* *INDENT-OFF* */ pool_foreach(event, am->nd_events, ({ if (event->client_index == client_index) { vec_add1(to_delete, event - am->nd_events); } })); /* *INDENT-ON* */ vec_foreach (event_id, to_delete) { event = pool_elt_at_index (am->nd_events, *event_id); vnet_add_del_ip6_nd_change_event (vnm, nd_change_delete_callback, event->pid, event->ip, ip_resolver_process_node.index, IP6_ND_EVENT, ~0 /* pool index, notused */ , 0 /* is_add */ ); } vec_free (to_delete); /* remove from the registration hash */ p = hash_get (am->wc_ip6_nd_events_registration_hash, client_index); if (p) { rp = pool_elt_at_index (am->wc_ip6_nd_events_registrations, p[0]); pool_put (am->wc_ip6_nd_events_registrations, rp); hash_unset (am->wc_ip6_nd_events_registration_hash, client_index); if (pool_elts (am->wc_ip6_nd_events_registrations) == 0) wc_nd_set_publisher_node (~0, REPORT_MAX); } return (NULL); } VL_MSG_API_REAPER_FUNCTION (want_ip6_nd_events_reaper); static void vl_api_want_ip6_ra_events_t_handler (vl_api_want_ip6_ra_events_t * mp) { vpe_api_main_t *am = &vpe_api_main; vl_api_want_ip6_ra_events_reply_t *rmp; int rv = 0; uword *p = hash_get (am->ip6_ra_events_registration_hash, mp->client_index); vpe_client_registration_t *rp;
/* Remainder of want_ip6_ra_events handler, its reaper, the proxy-ARP
 * range add/del handler, and the start of the proxy-ARP dump walker. */
if (p) { if (mp->enable_disable) { clib_warning ("pid %d: already enabled...", ntohl (mp->pid)); rv = VNET_API_ERROR_INVALID_REGISTRATION; goto reply; } else { rp = pool_elt_at_index (am->ip6_ra_events_registrations, p[0]); pool_put (am->ip6_ra_events_registrations, rp); hash_unset (am->ip6_ra_events_registration_hash, mp->client_index); goto reply; } } if (mp->enable_disable == 0) { clib_warning ("pid %d: already disabled...", ntohl (mp->pid)); rv = VNET_API_ERROR_INVALID_REGISTRATION; goto reply; } pool_get (am->ip6_ra_events_registrations, rp); rp->client_index = mp->client_index; rp->client_pid = ntohl (mp->pid); hash_set (am->ip6_ra_events_registration_hash, rp->client_index, rp - am->ip6_ra_events_registrations); reply: REPLY_MACRO (VL_API_WANT_IP6_RA_EVENTS_REPLY); } static clib_error_t * want_ip6_ra_events_reaper (u32 client_index) { vpe_api_main_t *am = &vpe_api_main; vpe_client_registration_t *rp; uword *p; p = hash_get (am->ip6_ra_events_registration_hash, client_index); if (p) { rp = pool_elt_at_index (am->ip6_ra_events_registrations, p[0]); pool_put (am->ip6_ra_events_registrations, rp); hash_unset (am->ip6_ra_events_registration_hash, client_index); } return (NULL); } VL_MSG_API_REAPER_FUNCTION (want_ip6_ra_events_reaper); static void vl_api_proxy_arp_add_del_t_handler (vl_api_proxy_arp_add_del_t * mp) { vl_api_proxy_arp_add_del_reply_t *rmp; ip4_address_t lo, hi; u32 fib_index; int rv; fib_index = fib_table_find (FIB_PROTOCOL_IP4, ntohl (mp->proxy.table_id)); if (~0 == fib_index) { rv = VNET_API_ERROR_NO_SUCH_FIB; goto out; } ip4_address_decode (mp->proxy.low, &lo); ip4_address_decode (mp->proxy.hi, &hi); rv = vnet_proxy_arp_add_del (&lo, &hi, fib_index, mp->is_add == 0); out: REPLY_MACRO (VL_API_PROXY_ARP_ADD_DEL_REPLY); } typedef struct proxy_arp_walk_ctx_t_ { vl_api_registration_t *reg; u32 context; } proxy_arp_walk_ctx_t; static walk_rc_t send_proxy_arp_details (const ip4_address_t * lo_addr, const ip4_address_t * hi_addr, u32 fib_index, void
/* Proxy-ARP range/interface dump walkers and handlers, the per-interface
 * proxy-ARP enable/disable handler, and the start of ip_probe_neighbor. */
*data) { vl_api_proxy_arp_details_t *mp; proxy_arp_walk_ctx_t *ctx; ctx = data; mp = vl_msg_api_alloc (sizeof (*mp)); clib_memset (mp, 0, sizeof (*mp)); mp->_vl_msg_id = ntohs (VL_API_PROXY_ARP_DETAILS); mp->context = ctx->context; mp->proxy.table_id = htonl (fib_index); ip4_address_encode (lo_addr, mp->proxy.low); ip4_address_encode (hi_addr, mp->proxy.hi); vl_api_send_msg (ctx->reg, (u8 *) mp); return (WALK_CONTINUE); } static void vl_api_proxy_arp_dump_t_handler (vl_api_proxy_arp_dump_t * mp) { vl_api_registration_t *reg; reg = vl_api_client_index_to_registration (mp->client_index); if (!reg) return; proxy_arp_walk_ctx_t wctx = { .reg = reg, .context = mp->context, }; proxy_arp_walk (send_proxy_arp_details, &wctx); } static walk_rc_t send_proxy_arp_intfc_details (u32 sw_if_index, void *data) { vl_api_proxy_arp_intfc_details_t *mp; proxy_arp_walk_ctx_t *ctx; ctx = data; mp = vl_msg_api_alloc (sizeof (*mp)); clib_memset (mp, 0, sizeof (*mp)); mp->_vl_msg_id = ntohs (VL_API_PROXY_ARP_INTFC_DETAILS); mp->context = ctx->context; mp->sw_if_index = htonl (sw_if_index); vl_api_send_msg (ctx->reg, (u8 *) mp); return (WALK_CONTINUE); } static void vl_api_proxy_arp_intfc_dump_t_handler (vl_api_proxy_arp_intfc_dump_t * mp) { vl_api_registration_t *reg; reg = vl_api_client_index_to_registration (mp->client_index); if (!reg) return; proxy_arp_walk_ctx_t wctx = { .reg = reg, .context = mp->context, }; proxy_arp_intfc_walk (send_proxy_arp_intfc_details, &wctx); } static void vl_api_proxy_arp_intfc_enable_disable_t_handler (vl_api_proxy_arp_intfc_enable_disable_t * mp) { int rv = 0; vnet_main_t *vnm = vnet_get_main (); vl_api_proxy_arp_intfc_enable_disable_reply_t *rmp; VALIDATE_SW_IF_INDEX (mp); rv = vnet_proxy_arp_enable_disable (vnm, ntohl (mp->sw_if_index), mp->enable_disable); BAD_SW_IF_INDEX_LABEL; REPLY_MACRO (VL_API_PROXY_ARP_INTFC_ENABLE_DISABLE_REPLY); } static void vl_api_ip_probe_neighbor_t_handler (vl_api_ip_probe_neighbor_t * mp) { int rv = 0; vlib_main_t *vm =
/* FIX: the ip_probe_neighbor handler below previously replied with
 * VL_API_PROXY_ARP_INTFC_ENABLE_DISABLE_REPLY (a copy/paste slip); rmp is a
 * vl_api_ip_probe_neighbor_reply_t, so reply with
 * VL_API_IP_PROBE_NEIGHBOR_REPLY.  Also: neighbor-scan enable/disable
 * handler and the start of the IPv4 reset-FIB worker. */
vlib_get_main (); vl_api_ip_probe_neighbor_reply_t *rmp; clib_error_t *error; ip46_address_t dst; ip46_type_t itype; VALIDATE_SW_IF_INDEX (mp); u32 sw_if_index = ntohl (mp->sw_if_index); itype = ip_address_decode (&mp->dst, &dst); if (IP46_TYPE_IP6 == itype) error = ip6_probe_neighbor (vm, &dst.ip6, sw_if_index, 0); else error = ip4_probe_neighbor (vm, &dst.ip4, sw_if_index, 0); if (error) { clib_error_report (error); rv = clib_error_get_code (error); } BAD_SW_IF_INDEX_LABEL; REPLY_MACRO (VL_API_IP_PROBE_NEIGHBOR_REPLY); } static void vl_api_ip_scan_neighbor_enable_disable_t_handler (vl_api_ip_scan_neighbor_enable_disable_t * mp) { int rv = 0; vl_api_ip_scan_neighbor_enable_disable_reply_t *rmp; ip_neighbor_scan_arg_t arg; arg.mode = mp->mode; arg.scan_interval = mp->scan_interval; arg.max_proc_time = mp->max_proc_time; arg.max_update = mp->max_update; arg.scan_int_delay = mp->scan_int_delay; arg.stale_threshold = mp->stale_threshold; ip_neighbor_scan_enable_disable (&arg); REPLY_MACRO (VL_API_IP_SCAN_NEIGHBOR_ENABLE_DISABLE_REPLY); } static int ip4_reset_fib_t_handler (vl_api_reset_fib_t * mp) { vnet_main_t *vnm = vnet_get_main (); vnet_interface_main_t *im = &vnm->interface_main; ip4_main_t *im4 = &ip4_main; static u32 *sw_if_indices_to_shut; fib_table_t *fib_table; ip4_fib_t *fib; u32 sw_if_index; int i; int rv = VNET_API_ERROR_NO_SUCH_FIB; u32 target_fib_id = ntohl (mp->vrf_id); /* *INDENT-OFF* */ pool_foreach (fib_table, im4->fibs, ({ vnet_sw_interface_t * si; fib = pool_elt_at_index (im4->v4_fibs, fib_table->ft_index); if (fib->table_id != target_fib_id) continue; /* remove any mpls encap/decap labels */ mpls_fib_reset_labels (fib->table_id); /* remove any proxy arps in this fib */ vnet_proxy_arp_fib_reset (fib->table_id); /* Set the flow hash for this fib to the default */ vnet_set_ip4_flow_hash (fib->table_id, IP_FLOW_HASH_DEFAULT); vec_reset_length (sw_if_indices_to_shut); /* Shut down interfaces in this FIB / clean out intfc routes */
/* Rest of the IPv4 reset-FIB worker (admin-down member interfaces, flush the
 * table) and the equivalent IPv6 worker. */
pool_foreach (si, im->sw_interfaces, ({ u32 sw_if_index = si->sw_if_index; if (sw_if_index < vec_len (im4->fib_index_by_sw_if_index) && (im4->fib_index_by_sw_if_index[si->sw_if_index] == fib->index)) vec_add1 (sw_if_indices_to_shut, si->sw_if_index); })); for (i = 0; i < vec_len (sw_if_indices_to_shut); i++) { sw_if_index = sw_if_indices_to_shut[i]; u32 flags = vnet_sw_interface_get_flags (vnm, sw_if_index); flags &= ~(VNET_SW_INTERFACE_FLAG_ADMIN_UP); vnet_sw_interface_set_flags (vnm, sw_if_index, flags); } fib_table_flush(fib->index, FIB_PROTOCOL_IP4, FIB_SOURCE_API); rv = 0; break; })); /* pool_foreach (fib) */ /* *INDENT-ON* */ return rv; } static int ip6_reset_fib_t_handler (vl_api_reset_fib_t * mp) { vnet_main_t *vnm = vnet_get_main (); vnet_interface_main_t *im = &vnm->interface_main; ip6_main_t *im6 = &ip6_main; static u32 *sw_if_indices_to_shut; fib_table_t *fib_table; ip6_fib_t *fib; u32 sw_if_index; int i; int rv = VNET_API_ERROR_NO_SUCH_FIB; u32 target_fib_id = ntohl (mp->vrf_id); /* *INDENT-OFF* */ pool_foreach (fib_table, im6->fibs, ({ vnet_sw_interface_t * si; fib = pool_elt_at_index (im6->v6_fibs, fib_table->ft_index); if (fib->table_id != target_fib_id) continue; vec_reset_length (sw_if_indices_to_shut); /* Set the flow hash for this fib to the default */ vnet_set_ip6_flow_hash (fib->table_id, IP_FLOW_HASH_DEFAULT); /* Shut down interfaces in this FIB / clean out intfc routes */ pool_foreach (si, im->sw_interfaces, ({ if (im6->fib_index_by_sw_if_index[si->sw_if_index] == fib->index) vec_add1 (sw_if_indices_to_shut, si->sw_if_index); })); for (i = 0; i < vec_len (sw_if_indices_to_shut); i++) { sw_if_index = sw_if_indices_to_shut[i]; u32 flags = vnet_sw_interface_get_flags (vnm, sw_if_index); flags &= ~(VNET_SW_INTERFACE_FLAG_ADMIN_UP); vnet_sw_interface_set_flags (vnm, sw_if_index, flags); } fib_table_flush(fib->index, FIB_PROTOCOL_IP6, FIB_SOURCE_API); rv = 0; break; })); /* pool_foreach (fib) */ /* *INDENT-ON* */ return rv; } static void
/* reset_fib dispatcher (v4/v6), ARP neighbor limit handler, IP reassembly
 * set handler, and the start of the reassembly get handler. */
vl_api_reset_fib_t_handler (vl_api_reset_fib_t * mp) { int rv; vl_api_reset_fib_reply_t *rmp; if (mp->is_ipv6) rv = ip6_reset_fib_t_handler (mp); else rv = ip4_reset_fib_t_handler (mp); REPLY_MACRO (VL_API_RESET_FIB_REPLY); } static void vl_api_set_arp_neighbor_limit_t_handler (vl_api_set_arp_neighbor_limit_t * mp) { int rv; vl_api_set_arp_neighbor_limit_reply_t *rmp; vnet_main_t *vnm = vnet_get_main (); clib_error_t *error; vnm->api_errno = 0; if (mp->is_ipv6) error = ip6_set_neighbor_limit (ntohl (mp->arp_neighbor_limit)); else error = ip4_set_arp_limit (ntohl (mp->arp_neighbor_limit)); if (error) { clib_error_report (error); rv = VNET_API_ERROR_UNSPECIFIED; } else { rv = vnm->api_errno; } REPLY_MACRO (VL_API_SET_ARP_NEIGHBOR_LIMIT_REPLY); } void vl_api_ip_reassembly_set_t_handler (vl_api_ip_reassembly_set_t * mp) { vl_api_ip_reassembly_set_reply_t *rmp; int rv = 0; if (mp->is_ip6) { rv = ip6_reass_set (clib_net_to_host_u32 (mp->timeout_ms), clib_net_to_host_u32 (mp->max_reassemblies), clib_net_to_host_u32 (mp->max_reassembly_length), clib_net_to_host_u32 (mp->expire_walk_interval_ms)); } else { rv = ip4_reass_set (clib_net_to_host_u32 (mp->timeout_ms), clib_net_to_host_u32 (mp->max_reassemblies), clib_net_to_host_u32 (mp->max_reassembly_length), clib_net_to_host_u32 (mp->expire_walk_interval_ms)); } REPLY_MACRO (VL_API_IP_REASSEMBLY_SET_REPLY); } void vl_api_ip_reassembly_get_t_handler (vl_api_ip_reassembly_get_t * mp) { vl_api_registration_t *rp; rp = vl_api_client_index_to_registration (mp->client_index); if (rp == 0) return; vl_api_ip_reassembly_get_reply_t *rmp = vl_msg_api_alloc (sizeof (*rmp)); clib_memset (rmp, 0, sizeof (*rmp)); rmp->_vl_msg_id = ntohs (VL_API_IP_REASSEMBLY_GET_REPLY); rmp->context = mp->context; rmp->retval = 0; if (mp->is_ip6) { rmp->is_ip6 = 1; ip6_reass_get (&rmp->timeout_ms, &rmp->max_reassemblies, &rmp->expire_walk_interval_ms); } else { rmp->is_ip6 = 0; ip4_reass_get (&rmp->timeout_ms, &rmp->max_reassemblies,
/* FIX: max_reassembly_length was the only reply field not converted to
 * network byte order (the set handler net-to-host converts it, so the get
 * reply must host-to-net convert it); the conversion is added below.  Also:
 * reassembly enable/disable handler and the punt-redirect dump walker (the
 * stray ";;" after return (WALK_STOP) is reduced to ";"). */
&rmp->max_reassembly_length, &rmp->expire_walk_interval_ms); } rmp->timeout_ms = clib_host_to_net_u32 (rmp->timeout_ms); rmp->max_reassemblies = clib_host_to_net_u32 (rmp->max_reassemblies); rmp->max_reassembly_length = clib_host_to_net_u32 (rmp->max_reassembly_length); rmp->expire_walk_interval_ms = clib_host_to_net_u32 (rmp->expire_walk_interval_ms); vl_api_send_msg (rp, (u8 *) rmp); } void vl_api_ip_reassembly_enable_disable_t_handler (vl_api_ip_reassembly_enable_disable_t * mp) { vl_api_ip_reassembly_enable_disable_reply_t *rmp; int rv = 0; rv = ip4_reass_enable_disable (clib_net_to_host_u32 (mp->sw_if_index), mp->enable_ip4); if (0 == rv) { rv = ip6_reass_enable_disable (clib_net_to_host_u32 (mp->sw_if_index), mp->enable_ip6); } REPLY_MACRO (VL_API_IP_REASSEMBLY_ENABLE_DISABLE_REPLY); } typedef struct ip_punt_redirect_walk_ctx_t_ { vl_api_registration_t *reg; u32 context; } ip_punt_redirect_walk_ctx_t; static walk_rc_t send_ip_punt_redirect_details (u32 rx_sw_if_index, const ip_punt_redirect_rx_t * ipr, void *arg) { ip_punt_redirect_walk_ctx_t *ctx = arg; vl_api_ip_punt_redirect_details_t *mp; fib_path_encode_ctx_t path_ctx = { .rpaths = NULL, }; mp = vl_msg_api_alloc (sizeof (*mp)); if (!mp) return (WALK_STOP); clib_memset (mp, 0, sizeof (*mp)); mp->_vl_msg_id = ntohs (VL_API_IP_PUNT_REDIRECT_DETAILS); mp->context = ctx->context; fib_path_list_walk_w_ext (ipr->pl, NULL, fib_path_encode, &path_ctx); mp->punt.rx_sw_if_index = htonl (rx_sw_if_index); mp->punt.tx_sw_if_index = htonl (path_ctx.rpaths[0].frp_sw_if_index); ip_address_encode (&path_ctx.rpaths[0].frp_addr, fib_proto_to_ip46 (ipr->fproto), &mp->punt.nh); vl_api_send_msg (ctx->reg, (u8 *) mp); vec_free (path_ctx.rpaths); return (WALK_CONTINUE); } static void vl_api_ip_punt_redirect_dump_t_handler (vl_api_ip_punt_redirect_dump_t * mp) { vl_api_registration_t *reg; fib_protocol_t fproto; reg = vl_api_client_index_to_registration (mp->client_index); if (!reg) return; fproto = mp->is_ipv6 ?
/* Rest of the punt-redirect dump handler, message-id/CRC table setup, and
 * the API hookup boilerplate (note: #include targets were stripped by the
 * same mangling that collapsed the lines; left as found). */
FIB_PROTOCOL_IP6 : FIB_PROTOCOL_IP4; ip_punt_redirect_walk_ctx_t ctx = { .reg = reg, .context = mp->context, }; if (~0 != mp->sw_if_index) { u32 rx_sw_if_index; index_t pri; rx_sw_if_index = ntohl (mp->sw_if_index); pri = ip_punt_redirect_find (fproto, rx_sw_if_index); if (INDEX_INVALID == pri) return; send_ip_punt_redirect_details (rx_sw_if_index, ip_punt_redirect_get (pri), &ctx); } else ip_punt_redirect_walk (fproto, send_ip_punt_redirect_details, &ctx); } #define vl_msg_name_crc_list #include #undef vl_msg_name_crc_list static void setup_message_id_table (api_main_t * am) { #define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id); foreach_vl_msg_name_crc_ip; #undef _ } static clib_error_t * ip_api_hookup (vlib_main_t * vm) { api_main_t *am = &api_main; #define _(N,n) \ vl_msg_api_set_handlers(VL_API_##N, #n, \ vl_api_##n##_t_handler, \ vl_noop_handler, \ vl_api_##n##_t_endian, \ vl_api_##n##_t_print, \ sizeof(vl_api_##n##_t), 1); foreach_ip_api_msg; #undef _ /* * Mark the route add/del API as MP safe */ am->is_mp_safe[VL_API_IP_ROUTE_ADD_DEL] = 1; am->is_mp_safe[VL_API_IP_ROUTE_ADD_DEL_REPLY] = 1; /* * Set up the (msg_name, crc, message-id) table */ setup_message_id_table (am); ra_set_publisher_node (wc_arp_process_node.index, RA_REPORT); return 0; } VLIB_API_INIT_FUNCTION (ip_api_hookup); /* * fd.io coding-style-patch-verification: ON * * Local Variables: * eval: (c-set-style "gnu") * End: */