/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @file
 * @brief NAT plugin API implementation
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#define vl_api_nat44_add_del_lb_static_mapping_t_endian vl_noop_handler
#define vl_api_nat44_nat44_lb_static_mapping_details_t_endian vl_noop_handler

/* define message structures */
#define vl_typedefs
#include
#undef vl_typedefs

/* define generated endian-swappers */
#define vl_endianfun
#include
#undef vl_endianfun

#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)

#define REPLY_MSG_ID_BASE sm->msg_id_base
#include

/* Get the API version number */
#define vl_api_version(n,v) static u32 api_version=(v);
#include
#undef vl_api_version

/* Macro to finish up custom dump fns */
#define FINISH                          \
    vec_add1 (s, 0);                    \
    vl_print (handle, (char *)s);       \
    vec_free (s);                       \
    return handle;

/******************************/
/*** Common NAT plugin APIs ***/
/******************************/

static void
vl_api_nat_control_ping_t_handler (vl_api_nat_control_ping_t * mp)
{
  vl_api_nat_control_ping_reply_t *rmp;
  snat_main_t *sm = &snat_main;
  int rv = 0;

  /* *INDENT-OFF* */
  REPLY_MACRO2 (VL_API_NAT_CONTROL_PING_REPLY,
  ({
    rmp->vpe_pid = ntohl (getpid ());
  }));
  /* *INDENT-ON* */
}

static void *
vl_api_nat_control_ping_t_print (vl_api_nat_control_ping_t * mp, void *handle)
{
  u8 *s;

  s = format (0, "SCRIPT: nat_control_ping ");

  FINISH;
}

static void
vl_api_nat_show_config_t_handler (vl_api_nat_show_config_t * mp)
{
  vl_api_nat_show_config_reply_t *rmp;
  snat_main_t *sm = &snat_main;
  //dslite_main_t *dm = &dslite_main;
  nat64_main_t *n64m = &nat64_main;
  int rv = 0;

  /* *INDENT-OFF* */
  REPLY_MACRO2 (VL_API_NAT_SHOW_CONFIG_REPLY,
  ({
    rmp->translation_buckets = htonl (sm->translation_buckets);
    rmp->translation_memory_size = clib_host_to_net_u64 (sm->translation_memory_size);
    rmp->user_buckets = htonl (sm->user_buckets);
    rmp->user_memory_size = clib_host_to_net_u64 (sm->user_memory_size);
    rmp->max_translations_per_user = htonl (sm->max_translations_per_user);
    rmp->outside_vrf_id = htonl (sm->outside_vrf_id);
    rmp->inside_vrf_id = htonl (sm->inside_vrf_id);
    rmp->static_mapping_only = sm->static_mapping_only;
    rmp->static_mapping_connection_tracking = sm->static_mapping_connection_tracking;
    rmp->deterministic = sm->deterministic;
    rmp->endpoint_dependent = sm->endpoint_dependent;
    rmp->out2in_dpo = sm->out2in_dpo;
    //rmp->dslite_ce = dm->is_ce;
    rmp->nat64_bib_buckets = clib_net_to_host_u32(n64m->bib_buckets);
    rmp->nat64_bib_memory_size = clib_net_to_host_u64(n64m->bib_memory_size);
    rmp->nat64_st_buckets = clib_net_to_host_u32(n64m->st_buckets);
    rmp->nat64_st_memory_size = clib_net_to_host_u64(n64m->st_memory_size);
  }));
  /* *INDENT-ON* */
}

static void *
vl_api_nat_show_config_t_print (vl_api_nat_show_config_t * mp, void *handle)
{
  u8 *s;

  s = format (0, "SCRIPT: nat_show_config ");

  FINISH;
}

static void
vl_api_nat_set_workers_t_handler (vl_api_nat_set_workers_t * mp)
{
  snat_main_t *sm = &snat_main;
  vl_api_nat_set_workers_reply_t *rmp;
  int rv = 0;
  uword *bitmap = 0;
  u64 mask;

  if (sm->deterministic)
    {
      rv = VNET_API_ERROR_UNSUPPORTED;
      goto send_reply;
    }

  mask = clib_net_to_host_u64 (mp->worker_mask);

  if (sm->num_workers < 2)
    {
      rv = VNET_API_ERROR_FEATURE_DISABLED;
      goto send_reply;
    }

  bitmap = clib_bitmap_set_multiple (bitmap, 0, mask, BITS (mask));
  rv = snat_set_workers (bitmap);
  clib_bitmap_free (bitmap);

send_reply:
  REPLY_MACRO (VL_API_NAT_SET_WORKERS_REPLY);
}

static void *
vl_api_nat_set_workers_t_print (vl_api_nat_set_workers_t * mp, void *handle)
{
  u8 *s;
  uword *bitmap = 0;
  u8 first = 1;
  int i;
  u64 mask = clib_net_to_host_u64 (mp->worker_mask);

  s = format (0, "SCRIPT: nat_set_workers ");
  bitmap = clib_bitmap_set_multiple (bitmap, 0, mask, BITS (mask));
  /* *INDENT-OFF* */
  clib_bitmap_foreach (i, bitmap,
    ({
      if (first)
        s = format (s, "%d", i);
      else
        s = format (s, ",%d", i);
      first = 0;
    }));
  /* *INDENT-ON* */
  clib_bitmap_free (bitmap);

  FINISH;
}

static void
send_nat_worker_details (u32 worker_index, vl_api_registration_t * reg,
                         u32 context)
{
  vl_api_nat_worker_details_t *rmp;
  snat_main_t *sm = &snat_main;
  vlib_worker_thread_t *w =
    vlib_worker_threads + worker_index + sm->first_worker_index;

  rmp = vl_msg_api_alloc (sizeof (*rmp));
  clib_memset (rmp, 0, sizeof (*rmp));
  rmp->_vl_msg_id = ntohs (VL_API_NAT_WORKER_DETAILS + sm->msg_id_base);
  rmp->context = context;
  rmp->worker_index = htonl (worker_index);
  rmp->lcore_id = htonl (w->cpu_id);
  strncpy ((char *) rmp->name, (char *) w->name, ARRAY_LEN (rmp->name) - 1);

  vl_api_send_msg (reg, (u8 *) rmp);
}

static void
vl_api_nat_worker_dump_t_handler (vl_api_nat_worker_dump_t * mp)
{
  vl_api_registration_t *reg;
  snat_main_t *sm = &snat_main;
  u32 *worker_index;

  if (sm->deterministic)
    return;

  reg = vl_api_client_index_to_registration (mp->client_index);
  if (!reg)
    return;

  /* *INDENT-OFF* */
  vec_foreach (worker_index, sm->workers)
    send_nat_worker_details(*worker_index, reg, mp->context);
  /* *INDENT-ON* */
}

static void *
vl_api_nat_worker_dump_t_print (vl_api_nat_worker_dump_t * mp, void *handle)
{
  u8 *s;

  s = format (0, "SCRIPT: nat_worker_dump ");

  FINISH;
}

static void
vl_api_nat_set_log_level_t_handler (vl_api_nat_set_log_level_t * mp)
{
  snat_main_t *sm = &snat_main;
  vl_api_nat_set_log_level_reply_t *rmp;
  int rv = 0;

  /* validate the requested level, not the currently configured one */
  if (mp->log_level > NAT_LOG_DEBUG)
    rv = VNET_API_ERROR_UNSUPPORTED;
  else
    sm->log_level = mp->log_level;

  REPLY_MACRO (VL_API_NAT_SET_LOG_LEVEL_REPLY);
}

static void *
vl_api_nat_set_log_level_t_print (vl_api_nat_set_log_level_t * mp,
                                  void *handle)
{
  u8 *s;

  s = format (0, "SCRIPT: nat_set_log_level ");
  s = format (s, "log_level %d", mp->log_level);

  FINISH;
}

static void
vl_api_nat_ipfix_enable_disable_t_handler (vl_api_nat_ipfix_enable_disable_t *
                                           mp)
{
  snat_main_t *sm = &snat_main;
  vl_api_nat_ipfix_enable_disable_reply_t *rmp;
  int rv = 0;

  rv = snat_ipfix_logging_enable_disable (mp->enable,
                                          clib_host_to_net_u32 (mp->domain_id),
                                          clib_host_to_net_u16 (mp->src_port));

  REPLY_MACRO (VL_API_NAT_IPFIX_ENABLE_DISABLE_REPLY);
}

static void *
vl_api_nat_ipfix_enable_disable_t_print (vl_api_nat_ipfix_enable_disable_t *
                                         mp, void *handle)
{
  u8 *s;

  s = format (0, "SCRIPT: nat_ipfix_enable_disable ");
  if (mp->domain_id)
    s = format (s, "domain %d ", clib_net_to_host_u32 (mp->domain_id));
  if (mp->src_port)
    s = format (s, "src_port %d ", clib_net_to_host_u16 (mp->src_port));
  if (!mp->enable)
    s = format (s, "disable ");

  FINISH;
}

static void
vl_api_nat_set_timeouts_t_handler (vl_api_nat_set_timeouts_t * mp)
{
  snat_main_t *sm =
    &snat_main;
  vl_api_nat_set_timeouts_reply_t *rmp;
  int rv = 0;

  sm->udp_timeout = ntohl (mp->udp);
  sm->tcp_established_timeout = ntohl (mp->tcp_established);
  sm->tcp_transitory_timeout = ntohl (mp->tcp_transitory);
  sm->icmp_timeout = ntohl (mp->icmp);

  rv = nat64_set_icmp_timeout (ntohl (mp->icmp));
  if (rv)
    goto send_reply;
  rv = nat64_set_udp_timeout (ntohl (mp->udp));
  if (rv)
    goto send_reply;
  rv = nat64_set_tcp_timeouts (ntohl (mp->tcp_transitory),
                               ntohl (mp->tcp_established));

send_reply:
  REPLY_MACRO (VL_API_NAT_SET_TIMEOUTS_REPLY);
}

static void *
vl_api_nat_set_timeouts_t_print (vl_api_nat_set_timeouts_t * mp, void *handle)
{
  u8 *s;

  s = format (0, "SCRIPT: nat_set_timeouts ");
  s = format (s, "udp %d tcp_established %d tcp_transitory %d icmp %d\n",
              ntohl (mp->udp), ntohl (mp->tcp_established),
              ntohl (mp->tcp_transitory), ntohl (mp->icmp));

  FINISH;
}

static void
vl_api_nat_get_timeouts_t_handler (vl_api_nat_get_timeouts_t * mp)
{
  snat_main_t *sm = &snat_main;
  vl_api_nat_get_timeouts_reply_t *rmp;
  int rv = 0;

  /* *INDENT-OFF* */
  REPLY_MACRO2 (VL_API_NAT_GET_TIMEOUTS_REPLY,
  ({
    rmp->udp = htonl (sm->udp_timeout);
    rmp->tcp_established = htonl (sm->tcp_established_timeout);
    rmp->tcp_transitory = htonl (sm->tcp_transitory_timeout);
    rmp->icmp = htonl (sm->icmp_timeout);
  }))
  /* *INDENT-ON* */
}

static void *
vl_api_nat_get_timeouts_t_print (vl_api_nat_get_timeouts_t * mp, void *handle)
{
  u8 *s;

  s = format (0, "SCRIPT: nat_get_timeouts");

  FINISH;
}

static void
  vl_api_nat_set_addr_and_port_alloc_alg_t_handler
  (vl_api_nat_set_addr_and_port_alloc_alg_t * mp)
{
  snat_main_t *sm = &snat_main;
  vl_api_nat_set_addr_and_port_alloc_alg_reply_t *rmp;
  int rv = 0;
  u16 port_start, port_end;

  if (sm->deterministic)
    {
      rv = VNET_API_ERROR_UNSUPPORTED;
      goto send_reply;
    }

  switch (mp->alg)
    {
    case NAT_ADDR_AND_PORT_ALLOC_ALG_DEFAULT:
      nat_set_alloc_addr_and_port_default ();
      break;
    case NAT_ADDR_AND_PORT_ALLOC_ALG_MAPE:
      nat_set_alloc_addr_and_port_mape (ntohs (mp->psid), mp->psid_offset,
                                        mp->psid_length);
      break;
    case NAT_ADDR_AND_PORT_ALLOC_ALG_RANGE:
      port_start = ntohs (mp->start_port);
      port_end = ntohs (mp->end_port);
      if (port_end <= port_start)
        {
          rv = VNET_API_ERROR_INVALID_VALUE;
          goto send_reply;
        }
      nat_set_alloc_addr_and_port_range (port_start, port_end);
      break;
    default:
      rv = VNET_API_ERROR_INVALID_VALUE;
      break;
    }

send_reply:
  REPLY_MACRO (VL_API_NAT_SET_ADDR_AND_PORT_ALLOC_ALG_REPLY);
}

static void *vl_api_nat_set_addr_and_port_alloc_alg_t_print
  (vl_api_nat_set_addr_and_port_alloc_alg_t * mp, void *handle)
{
  u8 *s;

  s = format (0, "SCRIPT: nat_set_addr_and_port_alloc_alg ");
  s = format (s, "alg %d psid_offset %d psid_length %d psid %d start_port %d "
              "end_port %d\n",
              ntohl (mp->alg), ntohl (mp->psid_offset),
              ntohl (mp->psid_length), ntohs (mp->psid),
              ntohs (mp->start_port), ntohs (mp->end_port));

  FINISH;
}

static void
  vl_api_nat_get_addr_and_port_alloc_alg_t_handler
  (vl_api_nat_get_addr_and_port_alloc_alg_t * mp)
{
  snat_main_t *sm = &snat_main;
  vl_api_nat_get_addr_and_port_alloc_alg_reply_t *rmp;
  int rv = 0;

  /* *INDENT-OFF* */
  REPLY_MACRO2 (VL_API_NAT_GET_ADDR_AND_PORT_ALLOC_ALG_REPLY,
  ({
    rmp->alg = sm->addr_and_port_alloc_alg;
    rmp->psid_offset = sm->psid_offset;
    rmp->psid_length = sm->psid_length;
    rmp->psid = htons (sm->psid);
    rmp->start_port = htons (sm->start_port);
    rmp->end_port = htons (sm->end_port);
  }))
  /* *INDENT-ON* */
}

static void *vl_api_nat_get_addr_and_port_alloc_alg_t_print
  (vl_api_nat_get_addr_and_port_alloc_alg_t * mp, void *handle)
{
  u8 *s;

  s = format (0, "SCRIPT: nat_get_addr_and_port_alloc_alg");
  FINISH;
}

static void
vl_api_nat_set_mss_clamping_t_handler (vl_api_nat_set_mss_clamping_t * mp)
{
  snat_main_t *sm = &snat_main;
  vl_api_nat_set_mss_clamping_reply_t *rmp;
  int rv = 0;

  if (mp->enable)
    {
      sm->mss_clamping = ntohs (mp->mss_value);
      sm->mss_value_net = mp->mss_value;
    }
  else
    sm->mss_clamping = 0;

  REPLY_MACRO (VL_API_NAT_SET_MSS_CLAMPING_REPLY);
}

static void *
vl_api_nat_set_mss_clamping_t_print (vl_api_nat_set_mss_clamping_t * mp,
                                     void *handle)
{
  u8 *s;

  s = format (0, "SCRIPT: nat_set_mss_clamping enable %d mss_value %d\n",
              mp->enable, ntohs (mp->mss_value));

  FINISH;
}

static void
vl_api_nat_get_mss_clamping_t_handler (vl_api_nat_get_mss_clamping_t * mp)
{
  snat_main_t *sm = &snat_main;
  vl_api_nat_get_mss_clamping_reply_t *rmp;
  int rv = 0;

  /* *INDENT-OFF* */
  REPLY_MACRO2 (VL_API_NAT_GET_MSS_CLAMPING_REPLY,
  ({
    rmp->enable = sm->mss_clamping ? 1 : 0;
    rmp->mss_value = htons (sm->mss_clamping);
  }))
  /* *INDENT-ON* */
}

static void *
vl_api_nat_get_mss_clamping_t_print (vl_api_nat_get_mss_clamping_t * mp,
                                     void *handle)
{
  u8 *s;

  s = format (0, "SCRIPT: nat_get_mss_clamping");

  FINISH;
}

static void
vl_api_nat_ha_set_listener_t_handler (vl_api_nat_ha_set_listener_t * mp)
{
  snat_main_t *sm = &snat_main;
  vl_api_nat_ha_set_listener_reply_t *rmp;
  ip4_address_t addr;
  int rv;

  memcpy (&addr, &mp->ip_address, sizeof (addr));
  rv = nat_ha_set_listener (&addr, clib_net_to_host_u16 (mp->port),
                            clib_net_to_host_u32 (mp->path_mtu));

  REPLY_MACRO (VL_API_NAT_HA_SET_LISTENER_REPLY);
}

static void *
vl_api_nat_ha_set_listener_t_print (vl_api_nat_ha_set_listener_t * mp,
                                    void *handle)
{
  u8 *s;

  s = format (0, "SCRIPT: nat_ha_set_listener ");
  s = format (s, "ip_address %U ", format_ip4_address, mp->ip_address);
  s = format (s, "port %d ", clib_net_to_host_u16 (mp->port));
  s = format (s, "path_mtu %d", clib_net_to_host_u32 (mp->path_mtu));

  FINISH;
}

static void
vl_api_nat_ha_get_listener_t_handler (vl_api_nat_ha_get_listener_t * mp)
{
  snat_main_t *sm = &snat_main;
  vl_api_nat_ha_get_listener_reply_t *rmp;
  int rv = 0;
  ip4_address_t addr;
  u16 port;
  u32 path_mtu;

  nat_ha_get_listener (&addr, &port, &path_mtu);

  /* *INDENT-OFF* */
  REPLY_MACRO2 (VL_API_NAT_HA_GET_LISTENER_REPLY,
  ({
    clib_memcpy (rmp->ip_address, &addr, sizeof (ip4_address_t));
    rmp->port = clib_host_to_net_u16 (port);
    rmp->path_mtu = clib_host_to_net_u32 (path_mtu);
  }))
  /* *INDENT-ON* */
}

static void *
vl_api_nat_ha_get_listener_t_print (vl_api_nat_ha_get_listener_t * mp,
                                    void *handle)
{
  u8 *s;

  s = format (0, "SCRIPT: nat_ha_get_listener");

  FINISH;
}

static void
vl_api_nat_ha_set_failover_t_handler (vl_api_nat_ha_set_failover_t * mp)
{
  snat_main_t *sm = &snat_main;
  vl_api_nat_ha_set_failover_reply_t *rmp;
  ip4_address_t addr;
  int rv;

  memcpy (&addr, &mp->ip_address, sizeof (addr));
  rv = nat_ha_set_failover (&addr, clib_net_to_host_u16 (mp->port),
                            clib_net_to_host_u32 (mp->session_refresh_interval));

  REPLY_MACRO (VL_API_NAT_HA_SET_FAILOVER_REPLY);
}

static void *
vl_api_nat_ha_set_failover_t_print (vl_api_nat_ha_set_failover_t * mp,
                                    void *handle)
{
  u8 *s;

  s = format (0, "SCRIPT: nat_ha_set_failover ");
  s = format (s, "ip_address %U ", format_ip4_address, mp->ip_address);
  s = format (s, "port %d ", clib_net_to_host_u16 (mp->port));

  FINISH;
}

static void
vl_api_nat_ha_get_failover_t_handler (vl_api_nat_ha_get_failover_t * mp)
{
  snat_main_t *sm = &snat_main;
  vl_api_nat_ha_get_failover_reply_t *rmp;
  int rv = 0;
  ip4_address_t addr;
  u16 port;
  u32 session_refresh_interval;

  nat_ha_get_failover (&addr, &port, &session_refresh_interval);

  /* *INDENT-OFF* */
  REPLY_MACRO2 (VL_API_NAT_HA_GET_FAILOVER_REPLY,
  ({
    clib_memcpy (rmp->ip_address, &addr, sizeof (ip4_address_t));
    rmp->port = clib_host_to_net_u16 (port);
    rmp->session_refresh_interval = clib_host_to_net_u32 (session_refresh_interval);
  }))
  /* *INDENT-ON* */
}

static void *
vl_api_nat_ha_get_failover_t_print (vl_api_nat_ha_get_failover_t * mp,
                                    void *handle)
{
  u8 *s;

  s = format (0, "SCRIPT: nat_ha_get_failover");

  FINISH;
}

static void
vl_api_nat_ha_flush_t_handler (vl_api_nat_ha_flush_t * mp)
{
  snat_main_t *sm = &snat_main;
  vl_api_nat_ha_flush_reply_t *rmp;
  int rv = 0;

  nat_ha_flush (0);

  REPLY_MACRO (VL_API_NAT_HA_FLUSH_REPLY);
}

static void *
vl_api_nat_ha_flush_t_print (vl_api_nat_ha_flush_t * mp, void *handle)
{
  u8 *s;

  s = format (0, "SCRIPT: nat_ha_flush ");

  FINISH;
}

static void
nat_ha_resync_completed_event_cb (u32 client_index, u32 pid, u32 missed_count)
{
  snat_main_t *sm = &snat_main;
  vl_api_registration_t *reg;
  vl_api_nat_ha_resync_completed_event_t *mp;

  reg = vl_api_client_index_to_registration (client_index);
  if (!reg)
    return;

  mp = vl_msg_api_alloc (sizeof (*mp));
  clib_memset (mp, 0, sizeof (*mp));
  mp->client_index = client_index;
  mp->pid = pid;
  mp->missed_count = clib_host_to_net_u32 (missed_count);
  mp->_vl_msg_id =
    ntohs (VL_API_NAT_HA_RESYNC_COMPLETED_EVENT + sm->msg_id_base);

  vl_api_send_msg (reg, (u8 *) mp);
}

static void
vl_api_nat_ha_resync_t_handler (vl_api_nat_ha_resync_t * mp)
{
  snat_main_t *sm = &snat_main;
  vl_api_nat_ha_resync_reply_t *rmp;
  int rv;

  rv = nat_ha_resync (mp->client_index, mp->pid,
                      mp->want_resync_event ?
                      nat_ha_resync_completed_event_cb : NULL);

  REPLY_MACRO (VL_API_NAT_HA_RESYNC_REPLY);
}

static void *
vl_api_nat_ha_resync_t_print (vl_api_nat_ha_resync_t * mp, void *handle)
{
  u8 *s;

  s = format (0, "SCRIPT: nat_ha_resync ");
  s = format (s, "want_resync_event %d pid %d", mp->want_resync_event,
              clib_host_to_net_u32 (mp->pid));

  FINISH;
}

/*************/
/*** NAT44 ***/
/*************/

static void
vl_api_nat44_del_user_t_handler (vl_api_nat44_del_user_t * mp)
{
  snat_main_t *sm = &snat_main;
  vl_api_nat44_del_user_reply_t *rmp;
  ip4_address_t addr;
  int rv;

  memcpy (&addr.as_u8, mp->ip_address, 4);
  rv = nat44_user_del (&addr, ntohl (mp->fib_index));

  REPLY_MACRO (VL_API_NAT44_DEL_USER_REPLY);
}

static void *vl_api_nat44_del_user_t_print
  (vl_api_nat44_del_user_t * mp, void *handle)
{
  u8 *s;

  s = format (0, "SCRIPT: nat44_del_user ");
  s = format (s, "ip_address %U fib_index %d ",
              format_ip4_address, mp->ip_address, ntohl (mp->fib_index));

  FINISH;
}

static void
  vl_api_nat44_add_del_address_range_t_handler
  (vl_api_nat44_add_del_address_range_t * mp)
{
  snat_main_t *sm = &snat_main;
  vl_api_nat44_add_del_address_range_reply_t *rmp;
  ip4_address_t this_addr;
  u8 is_add, twice_nat;
  u32 start_host_order, end_host_order;
  u32 vrf_id;
  int i, count;
  int rv = 0;
  u32 *tmp;

  if (sm->deterministic)
    {
      rv = VNET_API_ERROR_UNSUPPORTED;
      goto send_reply;
    }

  if (sm->static_mapping_only)
    {
      rv = VNET_API_ERROR_FEATURE_DISABLED;
      goto send_reply;
    }

  is_add = mp->is_add;
  twice_nat = mp->flags & NAT_API_IS_TWICE_NAT;

  tmp = (u32 *) mp->first_ip_address;
  start_host_order = clib_host_to_net_u32 (tmp[0]);
  tmp = (u32 *) mp->last_ip_address;
  end_host_order = clib_host_to_net_u32 (tmp[0]);

  count = (end_host_order - start_host_order) + 1;

  vrf_id = clib_host_to_net_u32 (mp->vrf_id);

  if (count > 1024)
    nat_log_info ("%U - %U, %d addresses...",
                  format_ip4_address, mp->first_ip_address,
                  format_ip4_address, mp->last_ip_address, count);

  memcpy (&this_addr.as_u8, mp->first_ip_address, 4);

  for (i = 0; i < count; i++)
    {
      if (is_add)
        rv = snat_add_address (sm, &this_addr, vrf_id, twice_nat);
      else
        rv = snat_del_address (sm, this_addr, 0, twice_nat);

      if (rv)
        goto send_reply;

      if (sm->out2in_dpo)
        nat44_add_del_address_dpo (this_addr, is_add);

      increment_v4_address (&this_addr);
    }

send_reply:
  REPLY_MACRO (VL_API_NAT44_ADD_DEL_ADDRESS_RANGE_REPLY);
}

static void *vl_api_nat44_add_del_address_range_t_print
  (vl_api_nat44_add_del_address_range_t * mp, void *handle)
{
  u8 *s;

  s = format (0, "SCRIPT: nat44_add_address_range ");
  s = format (s, "%U ", format_ip4_address, mp->first_ip_address);
  if (memcmp (mp->first_ip_address, mp->last_ip_address, 4))
    {
      s = format (s, " - %U ", format_ip4_address, mp->last_ip_address);
    }
  s = format (s, "twice_nat %d ", mp->flags & NAT_API_IS_TWICE_NAT);

  FINISH;
}

static void
send_nat44_address_details (snat_address_t * a,
                            vl_api_registration_t * reg, u32 context,
                            u8 twice_nat)
{
  vl_api_nat44_address_details_t *rmp;
  snat_main_t *sm = &snat_main;

  rmp = vl_msg_api_alloc (sizeof (*rmp));
  clib_memset (rmp, 0, sizeof (*rmp));
  rmp->_vl_msg_id = ntohs (VL_API_NAT44_ADDRESS_DETAILS + sm->msg_id_base);
  clib_memcpy (rmp->ip_address, &(a->addr), 4);
  if (a->fib_index != ~0)
    {
      fib_table_t *fib = fib_table_get (a->fib_index, FIB_PROTOCOL_IP4);
      rmp->vrf_id = ntohl (fib->ft_table_id);
    }
  else
    rmp->vrf_id = ~0;
  if (twice_nat)
    rmp->flags |= NAT_API_IS_TWICE_NAT;
  rmp->context = context;

  vl_api_send_msg (reg, (u8 *) rmp);
}

static void
vl_api_nat44_address_dump_t_handler (vl_api_nat44_address_dump_t * mp)
{
  vl_api_registration_t *reg;
  snat_main_t *sm = &snat_main;
  snat_address_t *a;

  if (sm->deterministic)
    return;

  reg = vl_api_client_index_to_registration (mp->client_index);
  if (!reg)
    return;

  /* *INDENT-OFF* */
  vec_foreach (a, sm->addresses)
    send_nat44_address_details (a, reg, mp->context, 0);
  vec_foreach (a, sm->twice_nat_addresses)
    send_nat44_address_details (a, reg, mp->context, 1);
  /* *INDENT-ON* */
}

static void *
vl_api_nat44_address_dump_t_print (vl_api_nat44_address_dump_t * mp,
                                   void *handle)
{
  u8 *s;

  s = format (0, "SCRIPT: nat44_address_dump ");

  FINISH;
}

static void
  vl_api_nat44_interface_add_del_feature_t_handler
  (vl_api_nat44_interface_add_del_feature_t * mp)
{
  snat_main_t *sm = &snat_main;
  vl_api_nat44_interface_add_del_feature_reply_t *rmp;
  u32 sw_if_index = ntohl (mp->sw_if_index);
  u8 is_del;
  int rv = 0;

  is_del = !mp->is_add;

  VALIDATE_SW_IF_INDEX (mp);

  rv = snat_interface_add_del (sw_if_index, mp->flags & NAT_API_IS_INSIDE,
                               is_del);

  BAD_SW_IF_INDEX_LABEL;

  REPLY_MACRO (VL_API_NAT44_INTERFACE_ADD_DEL_FEATURE_REPLY);
}

static void *vl_api_nat44_interface_add_del_feature_t_print
  (vl_api_nat44_interface_add_del_feature_t * mp, void *handle)
{
  u8 *s;

  s = format (0, "SCRIPT: nat44_interface_add_del_feature ");
  s = format (s, "sw_if_index %d %s %s",
              clib_host_to_net_u32 (mp->sw_if_index),
              mp->flags & NAT_API_IS_INSIDE ? "in" : "out",
              mp->is_add ?
"" : "del"); FINISH; } static void send_nat44_interface_details (snat_interface_t * i, vl_api_registration_t * reg, u32 context) { vl_api_nat44_interface_details_t *rmp; snat_main_t *sm = &snat_main; rmp = vl_msg_api_alloc (sizeof (*rmp)); clib_memset (rmp, 0, sizeof (*rmp)); rmp->_vl_msg_id = ntohs (VL_API_NAT44_INTERFACE_DETAILS + sm->msg_id_base); rmp->sw_if_index = ntohl (i->sw_if_index); if (nat_interface_is_inside (i)) rmp->flags |= NAT_API_IS_INSIDE; if (nat_interface_is_outside (i)) rmp->flags |= NAT_API_IS_OUTSIDE; rmp->context = context; vl_api_send_msg (reg, (u8 *) rmp); } static void vl_api_nat44_interface_dump_t_handler (vl_api_nat44_interface_dump_t * mp) { vl_api_registration_t *reg; snat_main_t *sm = &snat_main; snat_interface_t *i; reg = vl_api_client_index_to_registration (mp->client_index); if (!reg) return; /* *INDENT-OFF* */ pool_foreach (i, sm->interfaces, ({ send_nat44_interface_details(i, reg, mp->context); })); /* *INDENT-ON* */ } static void * vl_api_nat44_interface_dump_t_print (vl_api_nat44_interface_dump_t * mp, void *handle) { u8 *s; s = format (0, "SCRIPT: nat44_interface_dump "); FINISH; } static void vl_api_nat44_interface_add_del_output_feature_t_handler (vl_api_nat44_interface_add_del_output_feature_t * mp) { snat_main_t *sm = &snat_main; vl_api_nat44_interface_add_del_output_feature_reply_t *rmp; u32 sw_if_index = ntohl (mp->sw_if_index); int rv = 0; if (sm->deterministic) { rv = VNET_API_ERROR_UNSUPPORTED; goto send_reply; } VALIDATE_SW_IF_INDEX (mp); rv = snat_interface_add_del_output_feature (sw_if_index, mp->flags & NAT_API_IS_INSIDE, !mp->is_add); BAD_SW_IF_INDEX_LABEL; send_reply: REPLY_MACRO (VL_API_NAT44_INTERFACE_ADD_DEL_OUTPUT_FEATURE_REPLY); } static void *vl_api_nat44_interface_add_del_output_feature_t_print (vl_api_nat44_interface_add_del_output_feature_t * mp, void *handle) { u8 *s; s = format (0, "SCRIPT: nat44_interface_add_del_output_feature "); s = format (s, "sw_if_index %d %s %s", clib_host_to_net_u32 (mp->sw_if_index), mp->flags & NAT_API_IS_INSIDE ? "in" : "out", mp->is_add ? "" : "del"); FINISH; } static void send_nat44_interface_output_feature_details (snat_interface_t * i, vl_api_registration_t * reg, u32 context) { vl_api_nat44_interface_output_feature_details_t *rmp; snat_main_t *sm = &snat_main; rmp = vl_msg_api_alloc (sizeof (*rmp)); clib_memset (rmp, 0, sizeof (*rmp)); rmp->_vl_msg_id = ntohs (VL_API_NAT44_INTERFACE_OUTPUT_FEATURE_DETAILS + sm->msg_id_base); rmp->sw_if_index = ntohl (i->sw_if_index); rmp->context = context; if (nat_interface_is_inside (i)) rmp->flags |= NAT_API_IS_INSIDE; vl_api_send_msg (reg, (u8 *) rmp); } static void vl_api_nat44_interface_output_feature_dump_t_handler (vl_api_nat44_interface_output_feature_dump_t * mp) { vl_api_registration_t *reg; snat_main_t *sm = &snat_main; snat_interface_t *i; if (sm->deterministic) return; reg = vl_api_client_index_to_registration (mp->client_index); if (!reg) return; /* *INDENT-OFF* */ pool_for }
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>

vnet_device_main_t vnet_device_main;

static uword
device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
		 vlib_frame_t * frame)
{
  return 0;
}

/* *INDENT-OFF* */
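/* Generic "device-input" node: the function itself does no work and the
   node is registered disabled; it defines the standard set of next nodes
   used by device RX paths. */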
VLIB_REGISTER_NODE (device_input_node) = {
  .function = device_input_fn,
  .name = "device-input",
  .runtime_data_bytes = sizeof (vnet_device_input_runtime_t),
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_DISABLED,
  .n_next_nodes = VNET_DEVICE_INPUT_N_NEXT_NODES,
  .next_nodes = VNET_DEVICE_INPUT_NEXT_NODES,
};

/* This table defines how far to advance the buffer's current data pointer
   when we shortcut directly to the l3 nodes */

const u32 __attribute__((aligned (CLIB_CACHE_LINE_BYTES)))
device_input_next_node_advance[((VNET_DEVICE_INPUT_N_NEXT_NODES /
				CLIB_CACHE_LINE_BYTES) +1) * CLIB_CACHE_LINE_BYTES] =
{
      [VNET_DEVICE_INPUT_NEXT_IP4_INPUT] = sizeof (ethernet_header_t),
      [VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT] = sizeof (ethernet_header_t),
      [VNET_DEVICE_INPUT_NEXT_IP6_INPUT] = sizeof (ethernet_header_t),
      [VNET_DEVICE_INPUT_NEXT_MPLS_INPUT] = sizeof (ethernet_header_t),
};

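/* Companion table of buffer flags to set when shortcutting to the l3
   nodes: the l3 header offset is marked as valid. */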
const u32 __attribute__((aligned (CLIB_CACHE_LINE_BYTES)))
device_input_next_node_flags[((VNET_DEVICE_INPUT_N_NEXT_NODES /
				CLIB_CACHE_LINE_BYTES) +1) * CLIB_CACHE_LINE_BYTES] =
{
      [VNET_DEVICE_INPUT_NEXT_IP4_INPUT] = VNET_BUFFER_F_L3_HDR_OFFSET_VALID,
      [VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT] = VNET_BUFFER_F_L3_HDR_OFFSET_VALID,
      [VNET_DEVICE_INPUT_NEXT_IP6_INPUT] = VNET_BUFFER_F_L3_HDR_OFFSET_VALID,
      [VNET_DEVICE_INPUT_NEXT_MPLS_INPUT] = VNET_BUFFER_F_L3_HDR_OFFSET_VALID,
};

VNET_FEATURE_ARC_INIT (device_input, static) =
{
  .arc_name  = "device-input",
  .start_nodes = VNET_FEATURES ("device-input"),
  .last_in_arc = "ethernet-input",
  .arc_index_ptr = &feature_main.device_input_feature_arc_index,
};

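/* Features registered on the device-input arc; each runs before
   ethernet-input, which is the last feature in the arc. */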
VNET_FEATURE_INIT (l2_patch, static) = {
  .arc_name = "device-input",
  .node_name = "l2-patch",
  .runs_before = VNET_FEATURES ("ethernet-input"),
};

VNET_FEATURE_INIT (worker_handoff, static) = {
  .arc_name = "device-input",
  .node_name = "worker-handoff",
  .runs_before = VNET_FEATURES ("ethernet-input"),
};

VNET_FEATURE_INIT (span_input, static) = {
  .arc_name = "device-input",
  .node_name = "span-input",
  .runs_before = VNET_FEATURES ("ethernet-input"),
};

VNET_FEATURE_INIT (p2p_ethernet_node, static) = {
  .arc_name = "device-input",
  .node_name = "p2p-ethernet-input",
  .runs_before = VNET_FEATURES ("ethernet-input"),
};

VNET_FEATURE_INIT (ethernet_input, static) = {
  .arc_name = "device-input",
  .node_name = "ethernet-input",
  .runs_before = 0, /* not before any other features */
};
/* *INDENT-ON* */

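/* Order device/queue pairs by (dev_instance, queue_id) so that a device's
   queues stay contiguous in the input node runtime. */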
static int
vnet_device_queue_sort (void *a1, void *a2)
{
  vnet_device_and_queue_t *dq1 = a1;
  vnet_device_and_queue_t *dq2 = a2;

  if (dq1->dev_instance > dq2->dev_instance)
    return 1;
  else if (dq1->dev_instance < dq2->dev_instance)
    return -1;
  else if (dq1->queue_id > dq2->queue_id)
    return 1;
  else if (dq1->queue_id < dq2->queue_id)
    return -1;
  else
    return 0;
}

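/* Re-sort the runtime's device/queue vector and rebuild each hw
   interface's queue-id to runtime-index mapping. */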
static void
vnet_device_queue_update (vnet_main_t * vnm, vnet_device_input_runtime_t * rt)
{
  vnet_device_and_queue_t *dq;
  vnet_hw_interface_t *hw;

  vec_sort_with_function (rt->devices_and_queues, vnet_device_queue_sort);

  vec_foreach (dq, rt->devices_and_queues)
  {
    hw = vnet_get_hw_interface (vnm, dq->hw_if_index);
    vec_validate (hw->dq_runtime_index_by_queue, dq->queue_id);
    hw->dq_runtime_index_by_queue[dq->queue_id] = dq - rt->devices_and_queues;
  }
}

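/* Assign an RX queue of a hw interface to a thread and start it in polling
   mode. A requested thread index outside the worker range is replaced by
   the next worker in round-robin order; with no workers configured,
   everything runs on the main thread. */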
void
vnet_hw_interface_assign_rx_thread (vnet_main_t * vnm, u32 hw_if_index,
				    u16 queue_id, uword thread_index)
{
  vnet_device_main_t *vdm = &vnet_device_main;
  vlib_main_t *vm, *vm0;
  vnet_device_input_runtime_t *rt;
  vnet_device_and_queue_t *dq;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);

  ASSERT (hw->input_node_index > 0);

  if (vdm->first_worker_thread_index == 0)
    thread_index = 0;

  if (thread_index != 0 &&
      (thread_index < vdm->first_worker_thread_index ||
       thread_index > vdm->last_worker_thread_index))
    {
      thread_index = vdm->next_worker_thread_index++;
      if (vdm->next_worker_thread_index > vdm->last_worker_thread_index)
	vdm->next_worker_thread_index = vdm->first_worker_thread_index;
    }

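  /* Update the chosen thread's input node runtime under the worker barrier
     and record the thread and polling mode for this queue. */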
  vm = vlib_mains[thread_index];
  vm0 = vlib_get_main ();

  vlib_worker_thread_barrier_sync (vm0);

  rt = vlib_node_get_runtime_data (vm, hw->input_node_index);

  vec_add2 (rt->devices_and_queues, dq, 1);
  dq->hw_if_index = hw_if_index;
  dq->dev_instance = hw->dev_instance;
  dq->queue_id = queue_id;
  dq->mode = VNET_HW_INTERFACE_RX_MODE_POLLING;
  rt->enabled_node_state = VLIB_NODE_STATE_POLLING;

  vnet_device_queue_update (vnm, rt);
  vec_validate (hw->input_node_thread_index_by_queue, queue_id);
  vec_validate (hw->rx_mode_by_queue, queue_id);
  hw->input_node_thread_index_by_queue[queue_id] = thread_index;
  hw->rx_mode_by_queue[queue_id] = VNET_HW_INTERFACE_RX_MODE_POLLING;

  vlib_worker_thread_barrier_release (vm0);

  vlib_node_set_state (vm, hw->input_node_index, rt->enabled_node_state);
}

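/* Remove an RX queue from its input node runtime. If no queues remain on
   the thread, the node is disabled; otherwise, if the removed queue was
   the last one polling, the node falls back to interrupt mode. */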
int
vnet_hw_interface_unassign_rx_thread (vnet_main_t * vnm, u32 hw_if_index,
				      u16 queue_id)
{
  vlib_main_t *vm, *vm0;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  vnet_device_input_runtime_t *rt;
  vnet_device_and_queue_t *dq;
  uword old_thread_index;
  vnet_hw_interface_rx_mode mode;

  if (hw->input_node_thread_index_by_queue == 0)
    return VNET_API_ERROR_INVALID_INTERFACE;

  if (vec_len (hw->input_node_thread_index_by_queue) < queue_id + 1)
    return VNET_API_ERROR_INVALID_INTERFACE;

  old_thread_index = hw->input_node_thread_index_by_queue[queue_id];

  vm = vlib_mains[old_thread_index];

  rt = vlib_node_get_runtime_data (vm, hw->input_node_index);

  vec_foreach (dq, rt->devices_and_queues)
    if (dq->hw_if_index == hw_if_index && dq->queue_id == queue_id)
    {
      mode = dq->mode;
      goto delete;
    }

  return VNET_API_ERROR_INVALID_INTERFACE;

delete:

  vm0 = vlib_get_main ();
  vlib_worker_thread_barrier_sync (vm0);
  vec_del1 (rt->devices_and_queues, dq - rt->devices_and_queues);
  vnet_device_queue_update (vnm, rt);
  hw->rx_mode_by_queue[queue_id] = VNET_HW_INTERFACE_RX_MODE_UNKNOWN;
  vlib_worker_thread_barrier_release (vm0);

  if (vec_len (rt->devices_and_queues) == 0)
    vlib_node_set_state (vm, hw->input_node_index, VLIB_NODE_STATE_DISABLED);
  else if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
    {
      /*
       * If the deleted queue was polling, we may need to switch the node
       * to interrupt mode when no polling queue remains on this thread.
       * Mixed queues (polling and interrupt) assigned to the same thread
       * keep the node in polling mode until the last polling queue is
       * removed.
       */
      vec_foreach (dq, rt->devices_and_queues)
      {
	if (dq->mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
	  return 0;
      }
      rt->enabled_node_state = VLIB_NODE_STATE_INTERRUPT;
      vlib_node_set_state (vm, hw->input_node_index, rt->enabled_node_state);
    }

  return 0;
}


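/* Change the RX mode of a single queue and recompute the input node state
   on the owning thread: the node polls if any of its queues polls,
   otherwise it runs in interrupt mode. Non-polling modes require the
   driver to advertise interrupt-mode support. */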
int
vnet_hw_interface_set_rx_mode (vnet_main_t * vnm, u32 hw_if_index,
			       u16 queue_id, vnet_hw_interface_rx_mode mode)
{
  vlib_main_t *vm;
  uword thread_index;
  vnet_device_and_queue_t *dq;
  vlib_node_state_t enabled_node_state;
  ASSERT (mode < VNET_HW_INTERFACE_NUM_RX_MODES);
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  vnet_device_input_runtime_t *rt;
  int is_polling = 0;

  if (mode == VNET_HW_INTERFACE_RX_MODE_DEFAULT)
    mode = hw->default_rx_mode;

  if (hw->input_node_thread_index_by_queue == 0 || hw->rx_mode_by_queue == 0)
    return VNET_API_ERROR_INVALID_INTERFACE;

  if (hw->rx_mode_by_queue[queue_id] == mode)
    return 0;

  if (mode != VNET_HW_INTERFACE_RX_MODE_POLLING &&
      (hw->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_INT_MODE) == 0)
    return VNET_API_ERROR_UNSUPPORTED;

  if ((vec_len (hw->input_node_thread_index_by_queue) < queue_id + 1) ||
      (vec_len (hw->rx_mode_by_queue) < queue_id + 1))
    return VNET_API_ERROR_INVALID_QUEUE;

  hw->rx_mode_by_queue[queue_id] = mode;
  thread_index = hw->input_node_thread_index_by_queue[queue_id];
  vm = vlib_mains[thread_index];

  rt = vlib_node_get_runtime_data (vm, hw->input_node_index);

  vec_foreach (dq, rt->devices_and_queues)
  {
    if (dq->hw_if_index == hw_if_index && dq->queue_id == queue_id)
      dq->mode = mode;
    if (dq->mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
      is_polling = 1;
  }

  if (is_polling)
    enabled_node_state = VLIB_NODE_STATE_POLLING;
  else
    enabled_node_state = VLIB_NODE_STATE_INTERRUPT;

  if (rt->enabled_node_state != enabled_node_state)
    {
      rt->enabled_node_state = enabled_node_state;
      if (vlib_node_get_state (vm, hw->input_node_index) !=
	  VLIB_NODE_STATE_DISABLED)
	vlib_node_set_state (vm, hw->input_node_index, enabled_node_state);
    }

  return 0;
}

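/* Return the currently configured RX mode of a single interface queue. */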
int
vnet_hw_interface_get_rx_mode (vnet_main_t * vnm, u32 hw_if_index,
			       u16 queue_id, vnet_hw_interface_rx_mode * mode)
{
  vlib_main_t *vm;
  uword thread_index;
  vnet_device_and_queue_t *dq;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  vnet_device_input_runtime_t *rt;

  if (hw->input_node_thread_index_by_queue == 0)
    return VNET_API_ERROR_INVALID_INTERFACE;

  if ((vec_len (hw->input_node_thread_index_by_queue) < queue_id + 1) ||
      (vec_len (hw->rx_mode_by_queue) < queue_id + 1))
    return VNET_API_ERROR_INVALID_QUEUE;

  thread_index = hw->input_node_thread_index_by_queue[queue_id];
  vm = vlib_mains[thread_index];

  rt = vlib_node_get_runtime_data (vm, hw->input_node_index);

  vec_foreach (dq, rt->devices_and_queues)
    if (dq->hw_if_index == hw_if_index && dq->queue_id == queue_id)
    {
      *mode = dq->mode;
      return 0;
    }

  return VNET_API_ERROR_INVALID_INTERFACE;
}



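/* Record the range of worker threads used for round-robin RX queue
   placement. */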
static clib_error_t *
vnet_device_init (vlib_main_t * vm)
{
  vnet_device_main_t *vdm = &vnet_device_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  vlib_thread_registration_t *tr;
  uword *p;

  vec_validate_aligned (vdm->workers, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);

  p = hash_get_mem (tm->thread_registrations_by_name, "workers");
  tr = p ? (vlib_thread_registration_t *) p[0] : 0;
  if (tr && tr->count > 0)
    {
      vdm->first_worker_thread_index = tr->first_index;
      vdm->next_worker_thread_index = tr->first_index;
      vdm->last_worker_thread_index = tr->first_index + tr->count - 1;
    }
  return 0;
}

VLIB_INIT_FUNCTION (vnet_device_init);

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
G_REPLY); }

static void *vl_api_nat66_add_del_static_mapping_t_print
  (vl_api_nat66_add_del_static_mapping_t * mp, void *handle)
{
  u8 *s;

  s = format (0, "SCRIPT: nat66_add_del_static_mapping ");
  s = format (s, "local_ip_address %U external_ip_address %U vrf_id %d %s",
              format_ip6_address, mp->local_ip_address,
              format_ip6_address, mp->external_ip_address,
              clib_net_to_host_u32 (mp->vrf_id), mp->is_add ? "" : "del");

  FINISH;
}

typedef struct nat66_api_walk_ctx_t_
{
  vl_api_registration_t *rp;
  u32 context;
} nat66_api_walk_ctx_t;

static int
nat66_api_interface_walk (snat_interface_t * i, void *arg)
{
  vl_api_nat66_interface_details_t *rmp;
  snat_main_t *sm = &snat_main;
  nat66_api_walk_ctx_t *ctx = arg;

  rmp = vl_msg_api_alloc (sizeof (*rmp));
  clib_memset (rmp, 0, sizeof (*rmp));
  rmp->_vl_msg_id = ntohs (VL_API_NAT66_INTERFACE_DETAILS + sm->msg_id_base);
  rmp->sw_if_index = ntohl (i->sw_if_index);
  if (nat_interface_is_inside (i))
    rmp->flags |= NAT_API_IS_INSIDE;
  rmp->context = ctx->context;

  vl_api_send_msg (ctx->rp, (u8 *) rmp);

  return 0;
}

static void
vl_api_nat66_interface_dump_t_handler (vl_api_nat66_interface_dump_t * mp)
{
  vl_api_registration_t *rp;

  rp = vl_api_client_index_to_registration (mp->client_index);
  if (rp == 0)
    return;

  nat66_api_walk_ctx_t ctx = {
    .rp = rp,
    .context = mp->context,
  };

  nat66_interfaces_walk (nat66_api_interface_walk, &ctx);
}

static void *
vl_api_nat66_interface_dump_t_print (vl_api_nat66_interface_dump_t * mp,
                                     void *handle)
{
  u8 *s;

  s = format (0, "SCRIPT: nat66_interface_dump ");

  FINISH;
}

static int
nat66_api_static_mapping_walk (nat66_static_mapping_t * m, void *arg)
{
  vl_api_nat66_static_mapping_details_t *rmp;
  nat66_main_t *nm = &nat66_main;
  snat_main_t *sm = &snat_main;
  nat66_api_walk_ctx_t *ctx = arg;
  fib_table_t *fib;
  vlib_counter_t vc;

  fib = fib_table_get (m->fib_index, FIB_PROTOCOL_IP6);
  if (!fib)
    return -1;

  vlib_get_combined_counter (&nm->session_counters, m - nm->sm, &vc);

  rmp = vl_msg_api_alloc (sizeof (*rmp));
  clib_memset (rmp, 0, sizeof (*rmp));
  rmp->_vl_msg_id =
    ntohs (VL_API_NAT66_STATIC_MAPPING_DETAILS + sm->msg_id_base);
  clib_memcpy (rmp->local_ip_address, &m->l_addr, 16);
  clib_memcpy (rmp->external_ip_address, &m->e_addr, 16);
  rmp->vrf_id = ntohl (fib->ft_table_id);
  rmp->total_bytes = clib_host_to_net_u64 (vc.bytes);
  rmp->total_pkts = clib_host_to_net_u64 (vc.packets);
  rmp->context = ctx->context;

  vl_api_send_msg (ctx->rp, (u8 *) rmp);

  return 0;
}

static void
vl_api_nat66_static_mapping_dump_t_handler (vl_api_nat66_static_mapping_dump_t
                                            * mp)
{
  vl_api_registration_t *rp;

  rp = vl_api_client_index_to_registration (mp->client_index);
  if (rp == 0)
    return;

  nat66_api_walk_ctx_t ctx = {
    .rp = rp,
    .context = mp->context,
  };

  nat66_static_mappings_walk (nat66_api_static_mapping_walk, &ctx);
}

static void *
vl_api_nat66_static_mapping_dump_t_print (vl_api_nat66_static_mapping_dump_t
                                          * mp, void *handle)
{
  u8 *s;

  s = format (0, "SCRIPT: nat66_static_mapping_dump ");

  FINISH;
}

/* List of message types that this plugin understands */
#define foreach_snat_plugin_api_msg                                     \
_(NAT_CONTROL_PING, nat_control_ping)                                   \
_(NAT_SHOW_CONFIG, nat_show_config)                                     \
_(NAT_SET_WORKERS, nat_set_workers)                                     \
_(NAT_WORKER_DUMP, nat_worker_dump)                                     \
_(NAT44_DEL_USER, nat44_del_user)                                       \
_(NAT_SET_LOG_LEVEL, nat_set_log_level)                                 \
_(NAT_IPFIX_ENABLE_DISABLE, nat_ipfix_enable_disable)                   \
_(NAT_SET_TIMEOUTS, nat_set_timeouts)                                   \
_(NAT_GET_TIMEOUTS, nat_get_timeouts)                                   \
_(NAT_SET_ADDR_AND_PORT_ALLOC_ALG, nat_set_addr_and_port_alloc_alg)     \
_(NAT_GET_ADDR_AND_PORT_ALLOC_ALG,                                      \
  nat_get_addr_and_port_alloc_alg)                                      \
_(NAT_SET_MSS_CLAMPING, nat_set_mss_clamping)                           \
_(NAT_GET_MSS_CLAMPING, nat_get_mss_clamping)                           \
_(NAT_HA_SET_LISTENER, nat_ha_set_listener)                             \
_(NAT_HA_SET_FAILOVER, nat_ha_set_failover)                             \
_(NAT_HA_GET_LISTENER, nat_ha_get_listener)                             \
_(NAT_HA_GET_FAILOVER, nat_ha_get_failover)                             \
_(NAT_HA_FLUSH, nat_ha_flush)                                           \
_(NAT_HA_RESYNC, nat_ha_resync)                                         \
_(NAT44_ADD_DEL_ADDRESS_RANGE, nat44_add_del_address_range)             \
_(NAT44_INTERFACE_ADD_DEL_FEATURE, nat44_interface_add_del_feature)     \
_(NAT44_ADD_DEL_STATIC_MAPPING, nat44_add_del_static_mapping)           \
_(NAT44_ADD_DEL_IDENTITY_MAPPING, nat44_add_del_identity_mapping)       \
_(NAT44_STATIC_MAPPING_DUMP, nat44_static_mapping_dump)                 \
_(NAT44_IDENTITY_MAPPING_DUMP, nat44_identity_mapping_dump)             \
_(NAT44_ADDRESS_DUMP, nat44_address_dump)                               \
_(NAT44_INTERFACE_DUMP, nat44_interface_dump)                           \
_(NAT44_ADD_DEL_INTERFACE_ADDR, nat44_add_del_interface_addr)           \
_(NAT44_INTERFACE_ADDR_DUMP, nat44_interface_addr_dump)                 \
_(NAT44_USER_DUMP, nat44_user_dump)                                     \
_(NAT44_USER_SESSION_DUMP, nat44_user_session_dump)                     \
_(NAT44_INTERFACE_ADD_DEL_OUTPUT_FEATURE,                               \
  nat44_interface_add_del_output_feature)                               \
_(NAT44_INTERFACE_OUTPUT_FEATURE_DUMP,                                  \
  nat44_interface_output_feature_dump)                                  \
_(NAT44_ADD_DEL_LB_STATIC_MAPPING, nat44_add_del_lb_static_mapping)     \
_(NAT44_LB_STATIC_MAPPING_ADD_DEL_LOCAL,                                \
  nat44_lb_static_mapping_add_del_local)                                \
_(NAT44_LB_STATIC_MAPPING_DUMP, nat44_lb_static_mapping_dump)           \
_(NAT44_DEL_SESSION, nat44_del_session)                                 \
_(NAT44_FORWARDING_ENABLE_DISABLE, nat44_forwarding_enable_disable)     \
_(NAT44_FORWARDING_IS_ENABLED, nat44_forwarding_is_enabled)             \
_(NAT_DET_ADD_DEL_MAP, nat_det_add_del_map)                             \
_(NAT_DET_FORWARD, nat_det_forward)                                     \
_(NAT_DET_REVERSE, nat_det_reverse)                                     \
_(NAT_DET_MAP_DUMP, nat_det_map_dump)                                   \
_(NAT_DET_CLOSE_SESSION_OUT, nat_det_close_session_out)                 \
_(NAT_DET_CLOSE_SESSION_IN, nat_det_close_session_in)                   \
_(NAT_DET_SESSION_DUMP, nat_det_session_dump)                           \
_(NAT64_ADD_DEL_POOL_ADDR_RANGE, nat64_add_del_pool_addr_range)         \
_(NAT64_POOL_ADDR_DUMP, nat64_pool_addr_dump)                           \
_(NAT64_ADD_DEL_INTERFACE, nat64_add_del_interface)                     \
_(NAT64_INTERFACE_DUMP, nat64_interface_dump)                           \
_(NAT64_ADD_DEL_STATIC_BIB, nat64_add_del_static_bib)                   \
_(NAT64_BIB_DUMP, nat64_bib_dump)                                       \
_(NAT64_ST_DUMP, nat64_st_dump)                                         \
_(NAT64_ADD_DEL_PREFIX, nat64_add_del_prefix)                           \
_(NAT64_PREFIX_DUMP, nat64_prefix_dump)                                 \
_(NAT64_ADD_DEL_INTERFACE_ADDR, nat64_add_del_interface_addr)           \
_(NAT66_ADD_DEL_INTERFACE, nat66_add_del_interface)                     \
_(NAT66_INTERFACE_DUMP, nat66_interface_dump)                           \
_(NAT66_ADD_DEL_STATIC_MAPPING, nat66_add_del_static_mapping)           \
_(NAT66_STATIC_MAPPING_DUMP, nat66_static_mapping_dump)

/* Set up the API message handling tables */
static clib_error_t *
snat_plugin_api_hookup (vlib_main_t * vm)
{
  snat_main_t *sm __attribute__ ((unused)) = &snat_main;
#define _(N,n)                                                  \
    vl_msg_api_set_handlers((VL_API_##N + sm->msg_id_base),     \
                           #n,                                  \
                           vl_api_##n##_t_handler,              \
                           vl_noop_handler,                     \
                           vl_api_##n##_t_endian,               \
                           vl_api_##n##_t_print,                \
                           sizeof(vl_api_##n##_t), 1);
  foreach_snat_plugin_api_msg;
#undef _

  return 0;
}

#define vl_msg_name_crc_list
#include
#undef vl_msg_name_crc_list

static void
setup_message_id_table (snat_main_t * sm, api_main_t * am)
{
#define _(id,n,crc) \
  vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id + sm->msg_id_base);
  foreach_vl_msg_name_crc_nat;
#undef _
}

static void
plugin_custom_dump_configure (snat_main_t * sm)
{
#define _(n,f) sm->api_main->msg_print_handlers  \
  [VL_API_##n + sm->msg_id_base]                 \
  = (void *) vl_api_##f##_t_print;
  foreach_snat_plugin_api_msg;
#undef _
}

clib_error_t *
snat_api_init (vlib_main_t * vm, snat_main_t * sm)
{
  u8 *name;
  clib_error_t *error = 0;

  name = format (0, "nat_%08x%c", api_version, 0);

  /* Ask for a correctly-sized block of API message decode slots */
  sm->msg_id_base =
    vl_msg_api_get_msg_ids ((char *) name, VL_MSG_FIRST_AVAILABLE);

  error = snat_plugin_api_hookup (vm);

  /* Add our API messages to the global name_crc hash table */
  setup_message_id_table (sm, sm->api_main);

  plugin_custom_dump_configure (sm);

  vec_free (name);

  return error;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */