From 7cd468a3d7dee7d6c92f69a0bb7061ae208ec727 Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Mon, 19 Dec 2016 23:05:39 +0100 Subject: Reorganize source tree to use single autotools instance Change-Id: I7b51f88292e057c6443b12224486f2d0c9f8ae23 Signed-off-by: Damjan Marion --- src/vpp/api/api.c | 4922 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 4922 insertions(+) create mode 100644 src/vpp/api/api.c (limited to 'src/vpp/api/api.c') diff --git a/src/vpp/api/api.c b/src/vpp/api/api.c new file mode 100644 index 00000000..6289249c --- /dev/null +++ b/src/vpp/api/api.c @@ -0,0 +1,4922 @@ +/* + *------------------------------------------------------------------ + * api.c - message handler registration + * + * Copyright (c) 2010-2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + *------------------------------------------------------------------ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if IPV6SR > 0 +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef BIHASH_TYPE +#undef __included_bihash_template_h__ +#include + +#if DPDK > 0 +#include +#endif + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#define vl_typedefs /* define message structures */ +#include +#undef vl_typedefs +#define vl_endianfun /* define message structures */ +#include +#undef vl_endianfun +/* instantiate all the print functions we know about */ +#define vl_print(handle, ...) 
vlib_cli_output (handle, __VA_ARGS__) +#define vl_printfun +#include +#undef vl_printfun +#include +#define foreach_vpe_api_msg \ +_(WANT_OAM_EVENTS, want_oam_events) \ +_(OAM_ADD_DEL, oam_add_del) \ +_(MPLS_ROUTE_ADD_DEL, mpls_route_add_del) \ +_(MPLS_IP_BIND_UNBIND, mpls_ip_bind_unbind) \ +_(IS_ADDRESS_REACHABLE, is_address_reachable) \ +_(SW_INTERFACE_SET_MPLS_ENABLE, sw_interface_set_mpls_enable) \ +_(SW_INTERFACE_SET_VPATH, sw_interface_set_vpath) \ +_(SW_INTERFACE_SET_VXLAN_BYPASS, sw_interface_set_vxlan_bypass) \ +_(SW_INTERFACE_SET_L2_XCONNECT, sw_interface_set_l2_xconnect) \ +_(SW_INTERFACE_SET_L2_BRIDGE, sw_interface_set_l2_bridge) \ +_(SW_INTERFACE_SET_DPDK_HQOS_PIPE, sw_interface_set_dpdk_hqos_pipe) \ +_(SW_INTERFACE_SET_DPDK_HQOS_SUBPORT, sw_interface_set_dpdk_hqos_subport) \ +_(SW_INTERFACE_SET_DPDK_HQOS_TCTBL, sw_interface_set_dpdk_hqos_tctbl) \ +_(BRIDGE_DOMAIN_ADD_DEL, bridge_domain_add_del) \ +_(BRIDGE_DOMAIN_DUMP, bridge_domain_dump) \ +_(BRIDGE_DOMAIN_DETAILS, bridge_domain_details) \ +_(BRIDGE_DOMAIN_SW_IF_DETAILS, bridge_domain_sw_if_details) \ +_(L2FIB_ADD_DEL, l2fib_add_del) \ +_(L2_FLAGS, l2_flags) \ +_(BRIDGE_FLAGS, bridge_flags) \ +_(CREATE_VLAN_SUBIF, create_vlan_subif) \ +_(CREATE_SUBIF, create_subif) \ +_(MPLS_TUNNEL_ADD_DEL, mpls_tunnel_add_del) \ +_(PROXY_ARP_ADD_DEL, proxy_arp_add_del) \ +_(PROXY_ARP_INTFC_ENABLE_DISABLE, proxy_arp_intfc_enable_disable) \ +_(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats) \ +_(RESET_FIB, reset_fib) \ +_(DHCP_PROXY_CONFIG,dhcp_proxy_config) \ +_(DHCP_PROXY_CONFIG_2,dhcp_proxy_config_2) \ +_(DHCP_PROXY_SET_VSS,dhcp_proxy_set_vss) \ +_(DHCP_CLIENT_CONFIG, dhcp_client_config) \ +_(CREATE_LOOPBACK, create_loopback) \ +_(CONTROL_PING, control_ping) \ +_(CLI_REQUEST, cli_request) \ +_(CLI_INBAND, cli_inband) \ +_(SET_ARP_NEIGHBOR_LIMIT, set_arp_neighbor_limit) \ +_(L2_PATCH_ADD_DEL, l2_patch_add_del) \ +_(CLASSIFY_ADD_DEL_TABLE, classify_add_del_table) \ +_(CLASSIFY_ADD_DEL_SESSION, classify_add_del_session) \ +_(CLASSIFY_SET_INTERFACE_IP_TABLE, classify_set_interface_ip_table) \ +_(CLASSIFY_SET_INTERFACE_L2_TABLES, classify_set_interface_l2_tables) \ +_(GET_NODE_INDEX, get_node_index) \ +_(ADD_NODE_NEXT, add_node_next) \ +_(VXLAN_ADD_DEL_TUNNEL, vxlan_add_del_tunnel) \ +_(VXLAN_TUNNEL_DUMP, vxlan_tunnel_dump) \ +_(L2_FIB_CLEAR_TABLE, l2_fib_clear_table) \ +_(L2_INTERFACE_EFP_FILTER, l2_interface_efp_filter) \ +_(L2_INTERFACE_VLAN_TAG_REWRITE, l2_interface_vlan_tag_rewrite) \ +_(SHOW_VERSION, show_version) \ +_(L2_FIB_TABLE_DUMP, l2_fib_table_dump) \ +_(L2_FIB_TABLE_ENTRY, l2_fib_table_entry) \ +_(VXLAN_GPE_ADD_DEL_TUNNEL, vxlan_gpe_add_del_tunnel) \ +_(VXLAN_GPE_TUNNEL_DUMP, vxlan_gpe_tunnel_dump) \ +_(INTERFACE_NAME_RENUMBER, interface_name_renumber) \ +_(WANT_IP4_ARP_EVENTS, want_ip4_arp_events) \ +_(WANT_IP6_ND_EVENTS, want_ip6_nd_events) \ +_(INPUT_ACL_SET_INTERFACE, input_acl_set_interface) \ +_(DELETE_LOOPBACK, delete_loopback) \ +_(BD_IP_MAC_ADD_DEL, bd_ip_mac_add_del) \ +_(COP_INTERFACE_ENABLE_DISABLE, cop_interface_enable_disable) \ +_(COP_WHITELIST_ENABLE_DISABLE, cop_whitelist_enable_disable) \ +_(GET_NODE_GRAPH, get_node_graph) \ +_(IOAM_ENABLE, ioam_enable) \ +_(IOAM_DISABLE, ioam_disable) \ +_(SR_MULTICAST_MAP_ADD_DEL, sr_multicast_map_add_del) \ +_(POLICER_ADD_DEL, policer_add_del) \ +_(POLICER_DUMP, policer_dump) \ +_(POLICER_CLASSIFY_SET_INTERFACE, policer_classify_set_interface) \ +_(POLICER_CLASSIFY_DUMP, policer_classify_dump) \ +_(MPLS_TUNNEL_DUMP, mpls_tunnel_dump) \ +_(MPLS_TUNNEL_DETAILS, 
mpls_tunnel_details) \ +_(MPLS_FIB_DUMP, mpls_fib_dump) \ +_(MPLS_FIB_DETAILS, mpls_fib_details) \ +_(CLASSIFY_TABLE_IDS,classify_table_ids) \ +_(CLASSIFY_TABLE_BY_INTERFACE, classify_table_by_interface) \ +_(CLASSIFY_TABLE_INFO,classify_table_info) \ +_(CLASSIFY_SESSION_DUMP,classify_session_dump) \ +_(CLASSIFY_SESSION_DETAILS,classify_session_details) \ +_(SET_IPFIX_EXPORTER, set_ipfix_exporter) \ +_(IPFIX_EXPORTER_DUMP, ipfix_exporter_dump) \ +_(SET_IPFIX_CLASSIFY_STREAM, set_ipfix_classify_stream) \ +_(IPFIX_CLASSIFY_STREAM_DUMP, ipfix_classify_stream_dump) \ +_(IPFIX_CLASSIFY_TABLE_ADD_DEL, ipfix_classify_table_add_del) \ +_(IPFIX_CLASSIFY_TABLE_DUMP, ipfix_classify_table_dump) \ +_(GET_NEXT_INDEX, get_next_index) \ +_(PG_CREATE_INTERFACE, pg_create_interface) \ +_(PG_CAPTURE, pg_capture) \ +_(PG_ENABLE_DISABLE, pg_enable_disable) \ +_(IP_SOURCE_AND_PORT_RANGE_CHECK_ADD_DEL, \ + ip_source_and_port_range_check_add_del) \ +_(IP_SOURCE_AND_PORT_RANGE_CHECK_INTERFACE_ADD_DEL, \ + ip_source_and_port_range_check_interface_add_del) \ +_(DELETE_SUBIF, delete_subif) \ +_(L2_INTERFACE_PBB_TAG_REWRITE, l2_interface_pbb_tag_rewrite) \ +_(PUNT, punt) \ +_(FLOW_CLASSIFY_SET_INTERFACE, flow_classify_set_interface) \ +_(FLOW_CLASSIFY_DUMP, flow_classify_dump) \ +_(FEATURE_ENABLE_DISABLE, feature_enable_disable) + +#define QUOTE_(x) #x +#define QUOTE(x) QUOTE_(x) +typedef enum +{ + RESOLVE_IP4_ADD_DEL_ROUTE = 1, + RESOLVE_IP6_ADD_DEL_ROUTE, +} resolve_t; + +static vlib_node_registration_t vpe_resolver_process_node; +vpe_api_main_t vpe_api_main; + +static int arp_change_delete_callback (u32 pool_index, u8 * notused); +static int nd_change_delete_callback (u32 pool_index, u8 * notused); + +/* Clean up all registrations belonging to the indicated client */ +int +vl_api_memclnt_delete_callback (u32 client_index) +{ + vpe_api_main_t *vam = &vpe_api_main; + vpe_client_registration_t *rp; + uword *p; + int stats_memclnt_delete_callback (u32 client_index); + + stats_memclnt_delete_callback (client_index); + +#define _(a) \ + p = hash_get (vam->a##_registration_hash, client_index); \ + if (p) { \ + rp = pool_elt_at_index (vam->a##_registrations, p[0]); \ + pool_put (vam->a##_registrations, rp); \ + hash_unset (vam->a##_registration_hash, client_index); \ + } + foreach_registration_hash; +#undef _ + return 0; +} + +pub_sub_handler (oam_events, OAM_EVENTS); + +#define RESOLUTION_EVENT 1 +#define RESOLUTION_PENDING_EVENT 2 +#define IP4_ARP_EVENT 3 +#define IP6_ND_EVENT 4 + +int ip4_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp); + +int ip6_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp); + +void +handle_ip4_arp_event (u32 pool_index) +{ + vpe_api_main_t *vam = &vpe_api_main; + vnet_main_t *vnm = vam->vnet_main; + vlib_main_t *vm = vam->vlib_main; + vl_api_ip4_arp_event_t *event; + vl_api_ip4_arp_event_t *mp; + unix_shared_memory_queue_t *q; + + /* Client can cancel, die, etc. 
*/ + if (pool_is_free_index (vam->arp_events, pool_index)) + return; + + event = pool_elt_at_index (vam->arp_events, pool_index); + + q = vl_api_client_index_to_input_queue (event->client_index); + if (!q) + { + (void) vnet_add_del_ip4_arp_change_event + (vnm, arp_change_delete_callback, + event->pid, &event->address, + vpe_resolver_process_node.index, IP4_ARP_EVENT, + ~0 /* pool index, notused */ , 0 /* is_add */ ); + return; + } + + if (q->cursize < q->maxsize) + { + mp = vl_msg_api_alloc (sizeof (*mp)); + clib_memcpy (mp, event, sizeof (*mp)); + vl_msg_api_send_shmem (q, (u8 *) & mp); + } + else + { + static f64 last_time; + /* + * Throttle syslog msgs. + * It's pretty tempting to just revoke the registration... + */ + if (vlib_time_now (vm) > last_time + 10.0) + { + clib_warning ("arp event for %U to pid %d: queue stuffed!", + format_ip4_address, &event->address, event->pid); + last_time = vlib_time_now (vm); + } + } +} + +void +handle_ip6_nd_event (u32 pool_index) +{ + vpe_api_main_t *vam = &vpe_api_main; + vnet_main_t *vnm = vam->vnet_main; + vlib_main_t *vm = vam->vlib_main; + vl_api_ip6_nd_event_t *event; + vl_api_ip6_nd_event_t *mp; + unix_shared_memory_queue_t *q; + + /* Client can cancel, die, etc. */ + if (pool_is_free_index (vam->nd_events, pool_index)) + return; + + event = pool_elt_at_index (vam->nd_events, pool_index); + + q = vl_api_client_index_to_input_queue (event->client_index); + if (!q) + { + (void) vnet_add_del_ip6_nd_change_event + (vnm, nd_change_delete_callback, + event->pid, &event->address, + vpe_resolver_process_node.index, IP6_ND_EVENT, + ~0 /* pool index, notused */ , 0 /* is_add */ ); + return; + } + + if (q->cursize < q->maxsize) + { + mp = vl_msg_api_alloc (sizeof (*mp)); + clib_memcpy (mp, event, sizeof (*mp)); + vl_msg_api_send_shmem (q, (u8 *) & mp); + } + else + { + static f64 last_time; + /* + * Throttle syslog msgs. + * It's pretty tempting to just revoke the registration... 
+ */ + if (vlib_time_now (vm) > last_time + 10.0) + { + clib_warning ("ip6 nd event for %U to pid %d: queue stuffed!", + format_ip6_address, &event->address, event->pid); + last_time = vlib_time_now (vm); + } + } +} + +static uword +resolver_process (vlib_main_t * vm, + vlib_node_runtime_t * rt, vlib_frame_t * f) +{ + uword event_type; + uword *event_data = 0; + f64 timeout = 100.0; + int i; + + while (1) + { + vlib_process_wait_for_event_or_clock (vm, timeout); + + event_type = vlib_process_get_events (vm, &event_data); + + switch (event_type) + { + case RESOLUTION_PENDING_EVENT: + timeout = 1.0; + break; + + case RESOLUTION_EVENT: + clib_warning ("resolver: BOGUS TYPE"); + break; + + case IP4_ARP_EVENT: + for (i = 0; i < vec_len (event_data); i++) + handle_ip4_arp_event (event_data[i]); + break; + + case IP6_ND_EVENT: + for (i = 0; i < vec_len (event_data); i++) + handle_ip6_nd_event (event_data[i]); + break; + + case ~0: /* timeout */ + break; + } + + vec_reset_length (event_data); + } + return 0; /* or not */ +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (vpe_resolver_process_node,static) = { + .function = resolver_process, + .type = VLIB_NODE_TYPE_PROCESS, + .name = "vpe-route-resolver-process", +}; +/* *INDENT-ON* */ + +static int +mpls_route_add_del_t_handler (vnet_main_t * vnm, + vl_api_mpls_route_add_del_t * mp) +{ + u32 fib_index, next_hop_fib_index; + mpls_label_t *label_stack = NULL; + int rv, ii, n_labels;; + + fib_prefix_t pfx = { + .fp_len = 21, + .fp_proto = FIB_PROTOCOL_MPLS, + .fp_eos = mp->mr_eos, + .fp_label = ntohl (mp->mr_label), + }; + if (pfx.fp_eos) + { + if (mp->mr_next_hop_proto_is_ip4) + { + pfx.fp_payload_proto = DPO_PROTO_IP4; + } + else + { + pfx.fp_payload_proto = DPO_PROTO_IP6; + } + } + else + { + pfx.fp_payload_proto = DPO_PROTO_MPLS; + } + + rv = add_del_route_check (FIB_PROTOCOL_MPLS, + mp->mr_table_id, + mp->mr_next_hop_sw_if_index, + dpo_proto_to_fib (pfx.fp_payload_proto), + mp->mr_next_hop_table_id, + mp->mr_create_table_if_needed, + &fib_index, &next_hop_fib_index); + + if (0 != rv) + return (rv); + + ip46_address_t nh; + memset (&nh, 0, sizeof (nh)); + + if (mp->mr_next_hop_proto_is_ip4) + memcpy (&nh.ip4, mp->mr_next_hop, sizeof (nh.ip4)); + else + memcpy (&nh.ip6, mp->mr_next_hop, sizeof (nh.ip6)); + + n_labels = mp->mr_next_hop_n_out_labels; + if (n_labels == 0) + ; + else if (1 == n_labels) + vec_add1 (label_stack, ntohl (mp->mr_next_hop_out_label_stack[0])); + else + { + vec_validate (label_stack, n_labels - 1); + for (ii = 0; ii < n_labels; ii++) + label_stack[ii] = ntohl (mp->mr_next_hop_out_label_stack[ii]); + } + + return (add_del_route_t_handler (mp->mr_is_multipath, mp->mr_is_add, 0, // mp->is_drop, + 0, // mp->is_unreach, + 0, // mp->is_prohibit, + 0, // mp->is_local, + mp->mr_is_classify, + mp->mr_classify_table_index, + mp->mr_is_resolve_host, + mp->mr_is_resolve_attached, + fib_index, &pfx, + mp->mr_next_hop_proto_is_ip4, + &nh, ntohl (mp->mr_next_hop_sw_if_index), + next_hop_fib_index, + mp->mr_next_hop_weight, + ntohl (mp->mr_next_hop_via_label), + label_stack)); +} + +void +vl_api_mpls_route_add_del_t_handler (vl_api_mpls_route_add_del_t * mp) +{ + vl_api_mpls_route_add_del_reply_t *rmp; + vnet_main_t *vnm; + int rv; + + vnm = vnet_get_main (); + vnm->api_errno = 0; + + rv = mpls_route_add_del_t_handler (vnm, mp); + + rv = (rv == 0) ? 
vnm->api_errno : rv; + + REPLY_MACRO (VL_API_MPLS_ROUTE_ADD_DEL_REPLY); +} + +static int +mpls_ip_bind_unbind_handler (vnet_main_t * vnm, + vl_api_mpls_ip_bind_unbind_t * mp) +{ + u32 mpls_fib_index, ip_fib_index; + + mpls_fib_index = + fib_table_find (FIB_PROTOCOL_MPLS, ntohl (mp->mb_mpls_table_id)); + + if (~0 == mpls_fib_index) + { + if (mp->mb_create_table_if_needed) + { + mpls_fib_index = + fib_table_find_or_create_and_lock (FIB_PROTOCOL_MPLS, + ntohl (mp->mb_mpls_table_id)); + } + else + return VNET_API_ERROR_NO_SUCH_FIB; + } + + ip_fib_index = fib_table_find ((mp->mb_is_ip4 ? + FIB_PROTOCOL_IP4 : + FIB_PROTOCOL_IP6), + ntohl (mp->mb_ip_table_id)); + if (~0 == ip_fib_index) + return VNET_API_ERROR_NO_SUCH_FIB; + + fib_prefix_t pfx = { + .fp_len = mp->mb_address_length, + }; + + if (mp->mb_is_ip4) + { + pfx.fp_proto = FIB_PROTOCOL_IP4; + clib_memcpy (&pfx.fp_addr.ip4, mp->mb_address, + sizeof (pfx.fp_addr.ip4)); + } + else + { + pfx.fp_proto = FIB_PROTOCOL_IP6; + clib_memcpy (&pfx.fp_addr.ip6, mp->mb_address, + sizeof (pfx.fp_addr.ip6)); + } + + if (mp->mb_is_bind) + fib_table_entry_local_label_add (ip_fib_index, &pfx, + ntohl (mp->mb_label)); + else + fib_table_entry_local_label_remove (ip_fib_index, &pfx, + ntohl (mp->mb_label)); + + return (0); +} + +void +vl_api_mpls_ip_bind_unbind_t_handler (vl_api_mpls_ip_bind_unbind_t * mp) +{ + vl_api_mpls_route_add_del_reply_t *rmp; + vnet_main_t *vnm; + int rv; + + vnm = vnet_get_main (); + vnm->api_errno = 0; + + rv = mpls_ip_bind_unbind_handler (vnm, mp); + + rv = (rv == 0) ? vnm->api_errno : rv; + + REPLY_MACRO (VL_API_MPLS_ROUTE_ADD_DEL_REPLY); +} + +static void +vl_api_sw_interface_set_vpath_t_handler (vl_api_sw_interface_set_vpath_t * mp) +{ + vl_api_sw_interface_set_vpath_reply_t *rmp; + int rv = 0; + u32 sw_if_index = ntohl (mp->sw_if_index); + + VALIDATE_SW_IF_INDEX (mp); + + l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_VPATH, mp->enable); + vnet_feature_enable_disable ("ip4-unicast", "vpath-input-ip4", + sw_if_index, mp->enable, 0, 0); + vnet_feature_enable_disable ("ip4-multicast", "vpath-input-ip4", + sw_if_index, mp->enable, 0, 0); + vnet_feature_enable_disable ("ip6-unicast", "vpath-input-ip6", + sw_if_index, mp->enable, 0, 0); + vnet_feature_enable_disable ("ip6-multicast", "vpath-input-ip6", + sw_if_index, mp->enable, 0, 0); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_SW_INTERFACE_SET_VPATH_REPLY); +} + +static void + vl_api_sw_interface_set_vxlan_bypass_t_handler + (vl_api_sw_interface_set_vxlan_bypass_t * mp) +{ + vl_api_sw_interface_set_vxlan_bypass_reply_t *rmp; + int rv = 0; + u32 sw_if_index = ntohl (mp->sw_if_index); + + VALIDATE_SW_IF_INDEX (mp); + + if (mp->is_ipv6) + { + /* not yet implemented */ + } + else + vnet_feature_enable_disable ("ip4-unicast", "ip4-vxlan-bypass", + sw_if_index, mp->enable, 0, 0); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_SW_INTERFACE_SET_VXLAN_BYPASS_REPLY); +} + +static void + vl_api_sw_interface_set_l2_xconnect_t_handler + (vl_api_sw_interface_set_l2_xconnect_t * mp) +{ + vl_api_sw_interface_set_l2_xconnect_reply_t *rmp; + int rv = 0; + u32 rx_sw_if_index = ntohl (mp->rx_sw_if_index); + u32 tx_sw_if_index = ntohl (mp->tx_sw_if_index); + vlib_main_t *vm = vlib_get_main (); + vnet_main_t *vnm = vnet_get_main (); + + VALIDATE_RX_SW_IF_INDEX (mp); + + if (mp->enable) + { + VALIDATE_TX_SW_IF_INDEX (mp); + rv = set_int_l2_mode (vm, vnm, MODE_L2_XC, + rx_sw_if_index, 0, 0, 0, tx_sw_if_index); + } + else + { + rv = set_int_l2_mode (vm, vnm, MODE_L3, rx_sw_if_index, 0, 0, 
0, 0); + } + + BAD_RX_SW_IF_INDEX_LABEL; + BAD_TX_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_SW_INTERFACE_SET_L2_XCONNECT_REPLY); +} + +static void + vl_api_sw_interface_set_l2_bridge_t_handler + (vl_api_sw_interface_set_l2_bridge_t * mp) +{ + bd_main_t *bdm = &bd_main; + vl_api_sw_interface_set_l2_bridge_reply_t *rmp; + int rv = 0; + u32 rx_sw_if_index = ntohl (mp->rx_sw_if_index); + u32 bd_id = ntohl (mp->bd_id); + u32 bd_index; + u32 bvi = mp->bvi; + u8 shg = mp->shg; + vlib_main_t *vm = vlib_get_main (); + vnet_main_t *vnm = vnet_get_main (); + + VALIDATE_RX_SW_IF_INDEX (mp); + + bd_index = bd_find_or_add_bd_index (bdm, bd_id); + + if (mp->enable) + { + //VALIDATE_TX_SW_IF_INDEX(mp); + rv = set_int_l2_mode (vm, vnm, MODE_L2_BRIDGE, + rx_sw_if_index, bd_index, bvi, shg, 0); + } + else + { + rv = set_int_l2_mode (vm, vnm, MODE_L3, rx_sw_if_index, 0, 0, 0, 0); + } + + BAD_RX_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_SW_INTERFACE_SET_L2_BRIDGE_REPLY); +} + +static void + vl_api_sw_interface_set_dpdk_hqos_pipe_t_handler + (vl_api_sw_interface_set_dpdk_hqos_pipe_t * mp) +{ + vl_api_sw_interface_set_dpdk_hqos_pipe_reply_t *rmp; + int rv = 0; + +#if DPDK > 0 + dpdk_main_t *dm = &dpdk_main; + dpdk_device_t *xd; + + u32 sw_if_index = ntohl (mp->sw_if_index); + u32 subport = ntohl (mp->subport); + u32 pipe = ntohl (mp->pipe); + u32 profile = ntohl (mp->profile); + vnet_hw_interface_t *hw; + + VALIDATE_SW_IF_INDEX (mp); + + /* hw_if & dpdk device */ + hw = vnet_get_sup_hw_interface (dm->vnet_main, sw_if_index); + + xd = vec_elt_at_index (dm->devices, hw->dev_instance); + + rv = rte_sched_pipe_config (xd->hqos_ht->hqos, subport, pipe, profile); + + BAD_SW_IF_INDEX_LABEL; +#else + clib_warning ("setting HQoS pipe parameters without DPDK not implemented"); + rv = VNET_API_ERROR_UNIMPLEMENTED; +#endif /* DPDK */ + + REPLY_MACRO (VL_API_SW_INTERFACE_SET_DPDK_HQOS_PIPE_REPLY); +} + +static void + vl_api_sw_interface_set_dpdk_hqos_subport_t_handler + (vl_api_sw_interface_set_dpdk_hqos_subport_t * mp) +{ + vl_api_sw_interface_set_dpdk_hqos_subport_reply_t *rmp; + int rv = 0; + +#if DPDK > 0 + dpdk_main_t *dm = &dpdk_main; + dpdk_device_t *xd; + struct rte_sched_subport_params p; + + u32 sw_if_index = ntohl (mp->sw_if_index); + u32 subport = ntohl (mp->subport); + p.tb_rate = ntohl (mp->tb_rate); + p.tb_size = ntohl (mp->tb_size); + p.tc_rate[0] = ntohl (mp->tc_rate[0]); + p.tc_rate[1] = ntohl (mp->tc_rate[1]); + p.tc_rate[2] = ntohl (mp->tc_rate[2]); + p.tc_rate[3] = ntohl (mp->tc_rate[3]); + p.tc_period = ntohl (mp->tc_period); + + vnet_hw_interface_t *hw; + + VALIDATE_SW_IF_INDEX (mp); + + /* hw_if & dpdk device */ + hw = vnet_get_sup_hw_interface (dm->vnet_main, sw_if_index); + + xd = vec_elt_at_index (dm->devices, hw->dev_instance); + + rv = rte_sched_subport_config (xd->hqos_ht->hqos, subport, &p); + + BAD_SW_IF_INDEX_LABEL; +#else + clib_warning + ("setting HQoS subport parameters without DPDK not implemented"); + rv = VNET_API_ERROR_UNIMPLEMENTED; +#endif /* DPDK */ + + REPLY_MACRO (VL_API_SW_INTERFACE_SET_DPDK_HQOS_SUBPORT_REPLY); +} + +static void + vl_api_sw_interface_set_dpdk_hqos_tctbl_t_handler + (vl_api_sw_interface_set_dpdk_hqos_tctbl_t * mp) +{ + vl_api_sw_interface_set_dpdk_hqos_tctbl_reply_t *rmp; + int rv = 0; + +#if DPDK > 0 + dpdk_main_t *dm = &dpdk_main; + vlib_thread_main_t *tm = vlib_get_thread_main (); + dpdk_device_t *xd; + + u32 sw_if_index = ntohl (mp->sw_if_index); + u32 entry = ntohl (mp->entry); + u32 tc = ntohl (mp->tc); + u32 queue = ntohl (mp->queue); + u32 val, i; + + 
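/*
 * Editorial annotation, not part of the original commit: this handler
 * programs the DPDK HQoS traffic-class translation table for a DPDK
 * interface.  Each table entry maps a packet field value to a
 * (traffic class, queue) pair, folded into one index.  A minimal sketch
 * of that encoding, assuming classic rte_sched where
 * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS is 4:
 *
 *   u32 val = tc * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue;
 *   // e.g. tc = 2, queue = 1  ->  val = 2 * 4 + 1 = 9
 *
 * The code below validates tc and queue against the rte_sched limits,
 * then writes val into the per-worker copy of hqos_tc_table, which is
 * why it first looks up the "workers" thread registration.
 */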
vnet_hw_interface_t *hw; + + VALIDATE_SW_IF_INDEX (mp); + + /* hw_if & dpdk device */ + hw = vnet_get_sup_hw_interface (dm->vnet_main, sw_if_index); + + xd = vec_elt_at_index (dm->devices, hw->dev_instance); + + if (tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE) + { + clib_warning ("invalid traffic class !!"); + rv = VNET_API_ERROR_INVALID_VALUE; + goto done; + } + if (queue >= RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS) + { + clib_warning ("invalid queue !!"); + rv = VNET_API_ERROR_INVALID_VALUE; + goto done; + } + + /* Detect the set of worker threads */ + uword *p = hash_get_mem (tm->thread_registrations_by_name, "workers"); + + if (p == 0) + { + clib_warning ("worker thread registration AWOL !!"); + rv = VNET_API_ERROR_INVALID_VALUE_2; + goto done; + } + + vlib_thread_registration_t *tr = (vlib_thread_registration_t *) p[0]; + int worker_thread_first = tr->first_index; + int worker_thread_count = tr->count; + + val = tc * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue; + for (i = 0; i < worker_thread_count; i++) + xd->hqos_wt[worker_thread_first + i].hqos_tc_table[entry] = val; + + BAD_SW_IF_INDEX_LABEL; +done: +#else + clib_warning ("setting HQoS DSCP table entry without DPDK not implemented"); + rv = VNET_API_ERROR_UNIMPLEMENTED; +#endif /* DPDK */ + + REPLY_MACRO (VL_API_SW_INTERFACE_SET_DPDK_HQOS_TCTBL_REPLY); +} + +static void +vl_api_bridge_domain_add_del_t_handler (vl_api_bridge_domain_add_del_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + bd_main_t *bdm = &bd_main; + vl_api_bridge_domain_add_del_reply_t *rmp; + int rv = 0; + u32 enable_flags = 0, disable_flags = 0; + u32 bd_id = ntohl (mp->bd_id); + u32 bd_index; + + if (mp->is_add) + { + bd_index = bd_find_or_add_bd_index (bdm, bd_id); + + if (mp->flood) + enable_flags |= L2_FLOOD; + else + disable_flags |= L2_FLOOD; + + if (mp->uu_flood) + enable_flags |= L2_UU_FLOOD; + else + disable_flags |= L2_UU_FLOOD; + + if (mp->forward) + enable_flags |= L2_FWD; + else + disable_flags |= L2_FWD; + + if (mp->arp_term) + enable_flags |= L2_ARP_TERM; + else + disable_flags |= L2_ARP_TERM; + + if (mp->learn) + enable_flags |= L2_LEARN; + else + disable_flags |= L2_LEARN; + + if (enable_flags) + bd_set_flags (vm, bd_index, enable_flags, 1 /* enable */ ); + + if (disable_flags) + bd_set_flags (vm, bd_index, disable_flags, 0 /* disable */ ); + + bd_set_mac_age (vm, bd_index, mp->mac_age); + } + else + rv = bd_delete_bd_index (bdm, bd_id); + + REPLY_MACRO (VL_API_BRIDGE_DOMAIN_ADD_DEL_REPLY); +} + +static void +vl_api_bridge_domain_details_t_handler (vl_api_bridge_domain_details_t * mp) +{ + clib_warning ("BUG"); +} + +static void + vl_api_bridge_domain_sw_if_details_t_handler + (vl_api_bridge_domain_sw_if_details_t * mp) +{ + clib_warning ("BUG"); +} + +static void +send_bridge_domain_details (unix_shared_memory_queue_t * q, + l2_bridge_domain_t * bd_config, + u32 n_sw_ifs, u32 context) +{ + vl_api_bridge_domain_details_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_BRIDGE_DOMAIN_DETAILS); + mp->bd_id = ntohl (bd_config->bd_id); + mp->flood = bd_feature_flood (bd_config); + mp->uu_flood = bd_feature_uu_flood (bd_config); + mp->forward = bd_feature_forward (bd_config); + mp->learn = bd_feature_learn (bd_config); + mp->arp_term = bd_feature_arp_term (bd_config); + mp->bvi_sw_if_index = ntohl (bd_config->bvi_sw_if_index); + mp->mac_age = bd_config->mac_age; + mp->n_sw_ifs = ntohl (n_sw_ifs); + mp->context = context; + + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +static void 
+send_bd_sw_if_details (l2input_main_t * l2im, + unix_shared_memory_queue_t * q, + l2_flood_member_t * member, u32 bd_id, u32 context) +{ + vl_api_bridge_domain_sw_if_details_t *mp; + l2_input_config_t *input_cfg; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_BRIDGE_DOMAIN_SW_IF_DETAILS); + mp->bd_id = ntohl (bd_id); + mp->sw_if_index = ntohl (member->sw_if_index); + input_cfg = vec_elt_at_index (l2im->configs, member->sw_if_index); + mp->shg = input_cfg->shg; + mp->context = context; + + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +static void +vl_api_bridge_domain_dump_t_handler (vl_api_bridge_domain_dump_t * mp) +{ + bd_main_t *bdm = &bd_main; + l2input_main_t *l2im = &l2input_main; + unix_shared_memory_queue_t *q; + l2_bridge_domain_t *bd_config; + u32 bd_id, bd_index; + u32 end; + + q = vl_api_client_index_to_input_queue (mp->client_index); + + if (q == 0) + return; + + bd_id = ntohl (mp->bd_id); + + bd_index = (bd_id == ~0) ? 0 : bd_find_or_add_bd_index (bdm, bd_id); + end = (bd_id == ~0) ? vec_len (l2im->bd_configs) : bd_index + 1; + for (; bd_index < end; bd_index++) + { + bd_config = l2input_bd_config_from_index (l2im, bd_index); + /* skip dummy bd_id 0 */ + if (bd_config && (bd_config->bd_id > 0)) + { + u32 n_sw_ifs; + l2_flood_member_t *m; + + n_sw_ifs = vec_len (bd_config->members); + send_bridge_domain_details (q, bd_config, n_sw_ifs, mp->context); + + vec_foreach (m, bd_config->members) + { + send_bd_sw_if_details (l2im, q, m, bd_config->bd_id, mp->context); + } + } + } +} + +static void +vl_api_l2fib_add_del_t_handler (vl_api_l2fib_add_del_t * mp) +{ + bd_main_t *bdm = &bd_main; + l2input_main_t *l2im = &l2input_main; + vl_api_l2fib_add_del_reply_t *rmp; + int rv = 0; + u64 mac = 0; + u32 sw_if_index = ntohl (mp->sw_if_index); + u32 bd_id = ntohl (mp->bd_id); + u32 bd_index; + u32 static_mac; + u32 filter_mac; + u32 bvi_mac; + uword *p; + + mac = mp->mac; + + p = hash_get (bdm->bd_index_by_bd_id, bd_id); + if (!p) + { + rv = VNET_API_ERROR_NO_SUCH_ENTRY; + goto bad_sw_if_index; + } + bd_index = p[0]; + + if (mp->is_add) + { + filter_mac = mp->filter_mac ? 1 : 0; + if (filter_mac == 0) + { + VALIDATE_SW_IF_INDEX (mp); + if (vec_len (l2im->configs) <= sw_if_index) + { + rv = VNET_API_ERROR_INVALID_SW_IF_INDEX; + goto bad_sw_if_index; + } + else + { + l2_input_config_t *config; + config = vec_elt_at_index (l2im->configs, sw_if_index); + if (config->bridge == 0) + { + rv = VNET_API_ERROR_INVALID_SW_IF_INDEX; + goto bad_sw_if_index; + } + } + } + static_mac = mp->static_mac ? 1 : 0; + bvi_mac = mp->bvi_mac ? 
1 : 0; + l2fib_add_entry (mac, bd_index, sw_if_index, static_mac, filter_mac, + bvi_mac); + } + else + { + l2fib_del_entry (mac, bd_index); + } + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_L2FIB_ADD_DEL_REPLY); +} + +static void +vl_api_l2_flags_t_handler (vl_api_l2_flags_t * mp) +{ + vl_api_l2_flags_reply_t *rmp; + int rv = 0; + u32 sw_if_index = ntohl (mp->sw_if_index); + u32 flags = ntohl (mp->feature_bitmap); + u32 rbm = 0; + + VALIDATE_SW_IF_INDEX (mp); + +#define _(a,b) \ + if (flags & L2INPUT_FEAT_ ## a) \ + rbm = l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_ ## a, mp->is_set); + foreach_l2input_feat; +#undef _ + + BAD_SW_IF_INDEX_LABEL; + + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_L2_FLAGS_REPLY, + ({ + rmp->resulting_feature_bitmap = ntohl(rbm); + })); + /* *INDENT-ON* */ +} + +static void +vl_api_bridge_flags_t_handler (vl_api_bridge_flags_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + bd_main_t *bdm = &bd_main; + vl_api_bridge_flags_reply_t *rmp; + int rv = 0; + u32 bd_id = ntohl (mp->bd_id); + u32 bd_index; + u32 flags = ntohl (mp->feature_bitmap); + uword *p; + + p = hash_get (bdm->bd_index_by_bd_id, bd_id); + if (p == 0) + { + rv = VNET_API_ERROR_NO_SUCH_ENTRY; + goto out; + } + + bd_index = p[0]; + + bd_set_flags (vm, bd_index, flags, mp->is_set); + +out: + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_BRIDGE_FLAGS_REPLY, + ({ + rmp->resulting_feature_bitmap = ntohl(flags); + })); + /* *INDENT-ON* */ +} + +static void +vl_api_bd_ip_mac_add_del_t_handler (vl_api_bd_ip_mac_add_del_t * mp) +{ + bd_main_t *bdm = &bd_main; + vl_api_bd_ip_mac_add_del_reply_t *rmp; + int rv = 0; + u32 bd_id = ntohl (mp->bd_id); + u32 bd_index; + uword *p; + + p = hash_get (bdm->bd_index_by_bd_id, bd_id); + if (p == 0) + { + rv = VNET_API_ERROR_NO_SUCH_ENTRY; + goto out; + } + + bd_index = p[0]; + if (bd_add_del_ip_mac (bd_index, mp->ip_address, + mp->mac_address, mp->is_ipv6, mp->is_add)) + rv = VNET_API_ERROR_UNSPECIFIED; + +out: + REPLY_MACRO (VL_API_BD_IP_MAC_ADD_DEL_REPLY); +} + +static void +vl_api_create_vlan_subif_t_handler (vl_api_create_vlan_subif_t * mp) +{ + vl_api_create_vlan_subif_reply_t *rmp; + vnet_main_t *vnm = vnet_get_main (); + u32 hw_if_index, sw_if_index = (u32) ~ 0; + vnet_hw_interface_t *hi; + int rv = 0; + u32 id; + vnet_sw_interface_t template; + uword *p; + vnet_interface_main_t *im = &vnm->interface_main; + u64 sup_and_sub_key; + u64 *kp; + unix_shared_memory_queue_t *q; + clib_error_t *error; + + VALIDATE_SW_IF_INDEX (mp); + + hw_if_index = ntohl (mp->sw_if_index); + hi = vnet_get_hw_interface (vnm, hw_if_index); + + id = ntohl (mp->vlan_id); + if (id == 0 || id > 4095) + { + rv = VNET_API_ERROR_INVALID_VLAN; + goto out; + } + + sup_and_sub_key = ((u64) (hi->sw_if_index) << 32) | (u64) id; + + p = hash_get_mem (im->sw_if_index_by_sup_and_sub, &sup_and_sub_key); + if (p) + { + rv = VNET_API_ERROR_VLAN_ALREADY_EXISTS; + goto out; + } + + kp = clib_mem_alloc (sizeof (*kp)); + *kp = sup_and_sub_key; + + memset (&template, 0, sizeof (template)); + template.type = VNET_SW_INTERFACE_TYPE_SUB; + template.sup_sw_if_index = hi->sw_if_index; + template.sub.id = id; + template.sub.eth.raw_flags = 0; + template.sub.eth.flags.one_tag = 1; + template.sub.eth.outer_vlan_id = id; + template.sub.eth.flags.exact_match = 1; + + error = vnet_create_sw_interface (vnm, &template, &sw_if_index); + if (error) + { + clib_error_report (error); + rv = VNET_API_ERROR_INVALID_REGISTRATION; + goto out; + } + hash_set (hi->sub_interface_sw_if_index_by_id, id, sw_if_index); + hash_set_mem 
(im->sw_if_index_by_sup_and_sub, kp, sw_if_index); + + BAD_SW_IF_INDEX_LABEL; + +out: + q = vl_api_client_index_to_input_queue (mp->client_index); + if (!q) + return; + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_CREATE_VLAN_SUBIF_REPLY); + rmp->context = mp->context; + rmp->retval = ntohl (rv); + rmp->sw_if_index = ntohl (sw_if_index); + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void +vl_api_create_subif_t_handler (vl_api_create_subif_t * mp) +{ + vl_api_create_subif_reply_t *rmp; + vnet_main_t *vnm = vnet_get_main (); + u32 sw_if_index = ~0; + int rv = 0; + u32 sub_id; + vnet_sw_interface_t *si; + vnet_hw_interface_t *hi; + vnet_sw_interface_t template; + uword *p; + vnet_interface_main_t *im = &vnm->interface_main; + u64 sup_and_sub_key; + u64 *kp; + clib_error_t *error; + + VALIDATE_SW_IF_INDEX (mp); + + si = vnet_get_sup_sw_interface (vnm, ntohl (mp->sw_if_index)); + hi = vnet_get_sup_hw_interface (vnm, ntohl (mp->sw_if_index)); + + if (hi->bond_info == VNET_HW_INTERFACE_BOND_INFO_SLAVE) + { + rv = VNET_API_ERROR_BOND_SLAVE_NOT_ALLOWED; + goto out; + } + + sw_if_index = si->sw_if_index; + sub_id = ntohl (mp->sub_id); + + sup_and_sub_key = ((u64) (sw_if_index) << 32) | (u64) sub_id; + + p = hash_get_mem (im->sw_if_index_by_sup_and_sub, &sup_and_sub_key); + if (p) + { + if (CLIB_DEBUG > 0) + clib_warning ("sup sw_if_index %d, sub id %d already exists\n", + sw_if_index, sub_id); + rv = VNET_API_ERROR_SUBIF_ALREADY_EXISTS; + goto out; + } + + kp = clib_mem_alloc (sizeof (*kp)); + *kp = sup_and_sub_key; + + memset (&template, 0, sizeof (template)); + template.type = VNET_SW_INTERFACE_TYPE_SUB; + template.sup_sw_if_index = sw_if_index; + template.sub.id = sub_id; + template.sub.eth.flags.no_tags = mp->no_tags; + template.sub.eth.flags.one_tag = mp->one_tag; + template.sub.eth.flags.two_tags = mp->two_tags; + template.sub.eth.flags.dot1ad = mp->dot1ad; + template.sub.eth.flags.exact_match = mp->exact_match; + template.sub.eth.flags.default_sub = mp->default_sub; + template.sub.eth.flags.outer_vlan_id_any = mp->outer_vlan_id_any; + template.sub.eth.flags.inner_vlan_id_any = mp->inner_vlan_id_any; + template.sub.eth.outer_vlan_id = ntohs (mp->outer_vlan_id); + template.sub.eth.inner_vlan_id = ntohs (mp->inner_vlan_id); + + error = vnet_create_sw_interface (vnm, &template, &sw_if_index); + if (error) + { + clib_error_report (error); + rv = VNET_API_ERROR_SUBIF_CREATE_FAILED; + goto out; + } + + hash_set (hi->sub_interface_sw_if_index_by_id, sub_id, sw_if_index); + hash_set_mem (im->sw_if_index_by_sup_and_sub, kp, sw_if_index); + + BAD_SW_IF_INDEX_LABEL; + +out: + + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_CREATE_SUBIF_REPLY, + ({ + rmp->sw_if_index = ntohl(sw_if_index); + })); + /* *INDENT-ON* */ +} + +static void +vl_api_mpls_tunnel_add_del_t_handler (vl_api_mpls_tunnel_add_del_t * mp) +{ + vl_api_mpls_tunnel_add_del_reply_t *rmp; + int rv = 0; + stats_main_t *sm = &stats_main; + u32 tunnel_sw_if_index; + int ii; + + dslock (sm, 1 /* release hint */ , 5 /* tag */ ); + + if (mp->mt_is_add) + { + fib_route_path_t rpath, *rpaths = NULL; + mpls_label_t *label_stack = NULL; + + memset (&rpath, 0, sizeof (rpath)); + + if (mp->mt_next_hop_proto_is_ip4) + { + rpath.frp_proto = FIB_PROTOCOL_IP4; + clib_memcpy (&rpath.frp_addr.ip4, + mp->mt_next_hop, sizeof (rpath.frp_addr.ip4)); + } + else + { + rpath.frp_proto = FIB_PROTOCOL_IP6; + clib_memcpy (&rpath.frp_addr.ip6, + mp->mt_next_hop, sizeof (rpath.frp_addr.ip6)); + } + rpath.frp_sw_if_index = ntohl 
(mp->mt_next_hop_sw_if_index); + + for (ii = 0; ii < mp->mt_next_hop_n_out_labels; ii++) + vec_add1 (label_stack, ntohl (mp->mt_next_hop_out_label_stack[ii])); + + vec_add1 (rpaths, rpath); + + vnet_mpls_tunnel_add (rpaths, label_stack, + mp->mt_l2_only, &tunnel_sw_if_index); + vec_free (rpaths); + vec_free (label_stack); + } + else + { + tunnel_sw_if_index = ntohl (mp->mt_sw_if_index); + vnet_mpls_tunnel_del (tunnel_sw_if_index); + } + + dsunlock (sm); + + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_MPLS_TUNNEL_ADD_DEL_REPLY, + ({ + rmp->sw_if_index = ntohl(tunnel_sw_if_index); + })); + /* *INDENT-ON* */ +} + +static void +vl_api_proxy_arp_add_del_t_handler (vl_api_proxy_arp_add_del_t * mp) +{ + vl_api_proxy_arp_add_del_reply_t *rmp; + u32 fib_index; + int rv; + ip4_main_t *im = &ip4_main; + stats_main_t *sm = &stats_main; + int vnet_proxy_arp_add_del (ip4_address_t * lo_addr, + ip4_address_t * hi_addr, + u32 fib_index, int is_del); + uword *p; + + dslock (sm, 1 /* release hint */ , 6 /* tag */ ); + + p = hash_get (im->fib_index_by_table_id, ntohl (mp->vrf_id)); + + if (!p) + { + rv = VNET_API_ERROR_NO_SUCH_FIB; + goto out; + } + + fib_index = p[0]; + + rv = vnet_proxy_arp_add_del ((ip4_address_t *) mp->low_address, + (ip4_address_t *) mp->hi_address, + fib_index, mp->is_add == 0); + +out: + dsunlock (sm); + REPLY_MACRO (VL_API_PROXY_ARP_ADD_DEL_REPLY); +} + +static void + vl_api_proxy_arp_intfc_enable_disable_t_handler + (vl_api_proxy_arp_intfc_enable_disable_t * mp) +{ + int rv = 0; + vnet_main_t *vnm = vnet_get_main (); + vl_api_proxy_arp_intfc_enable_disable_reply_t *rmp; + vnet_sw_interface_t *si; + u32 sw_if_index; + + VALIDATE_SW_IF_INDEX (mp); + + sw_if_index = ntohl (mp->sw_if_index); + + if (pool_is_free_index (vnm->interface_main.sw_interfaces, sw_if_index)) + { + rv = VNET_API_ERROR_INVALID_SW_IF_INDEX; + goto out; + } + + si = vnet_get_sw_interface (vnm, sw_if_index); + + ASSERT (si); + + if (mp->enable_disable) + si->flags |= VNET_SW_INTERFACE_FLAG_PROXY_ARP; + else + si->flags &= ~VNET_SW_INTERFACE_FLAG_PROXY_ARP; + + BAD_SW_IF_INDEX_LABEL; + +out: + REPLY_MACRO (VL_API_PROXY_ARP_INTFC_ENABLE_DISABLE_REPLY); +} + +static void +vl_api_is_address_reachable_t_handler (vl_api_is_address_reachable_t * mp) +{ +#if 0 + vpe_main_t *rm = &vpe_main; + ip4_main_t *im4 = &ip4_main; + ip6_main_t *im6 = &ip6_main; + ip_lookup_main_t *lm; + union + { + ip4_address_t ip4; + ip6_address_t ip6; + } addr; + u32 adj_index, sw_if_index; + vl_api_is_address_reachable_t *rmp; + ip_adjacency_t *adj; + unix_shared_memory_queue_t *q; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (!q) + { + increment_missing_api_client_counter (rm->vlib_main); + return; + } + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + clib_memcpy (rmp, mp, sizeof (*rmp)); + + sw_if_index = mp->next_hop_sw_if_index; + clib_memcpy (&addr, mp->address, sizeof (addr)); + if (mp->is_ipv6) + { + lm = &im6->lookup_main; + adj_index = ip6_fib_lookup (im6, sw_if_index, &addr.ip6); + } + else + { + lm = &im4->lookup_main; + // FIXME NOT an ADJ + adj_index = ip4_fib_lookup (im4, sw_if_index, &addr.ip4); + } + if (adj_index == ~0) + { + rmp->is_error = 1; + goto send; + } + adj = ip_get_adjacency (lm, adj_index); + + if (adj->lookup_next_index == IP_LOOKUP_NEXT_REWRITE + && adj->rewrite_header.sw_if_index == sw_if_index) + { + rmp->is_known = 1; + } + else + { + if (adj->lookup_next_index == IP_LOOKUP_NEXT_ARP + && adj->rewrite_header.sw_if_index == sw_if_index) + { + if (mp->is_ipv6) + ip6_probe_neighbor (rm->vlib_main, 
&addr.ip6, sw_if_index); + else + ip4_probe_neighbor (rm->vlib_main, &addr.ip4, sw_if_index); + } + else if (adj->lookup_next_index == IP_LOOKUP_NEXT_DROP) + { + rmp->is_known = 1; + goto send; + } + rmp->is_known = 0; + } + +send: + vl_msg_api_send_shmem (q, (u8 *) & rmp); +#endif +} + +static void + vl_api_sw_interface_set_mpls_enable_t_handler + (vl_api_sw_interface_set_mpls_enable_t * mp) +{ + vl_api_sw_interface_set_mpls_enable_reply_t *rmp; + int rv = 0; + + VALIDATE_SW_IF_INDEX (mp); + + mpls_sw_interface_enable_disable (&mpls_main, + ntohl (mp->sw_if_index), mp->enable); + + BAD_SW_IF_INDEX_LABEL; + REPLY_MACRO (VL_API_SW_INTERFACE_SET_MPLS_ENABLE_REPLY); +} + +void +send_oam_event (oam_target_t * t) +{ + vpe_api_main_t *vam = &vpe_api_main; + unix_shared_memory_queue_t *q; + vpe_client_registration_t *reg; + vl_api_oam_event_t *mp; + + /* *INDENT-OFF* */ + pool_foreach(reg, vam->oam_events_registrations, + ({ + q = vl_api_client_index_to_input_queue (reg->client_index); + if (q) + { + mp = vl_msg_api_alloc (sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_OAM_EVENT); + clib_memcpy (mp->dst_address, &t->dst_address, + sizeof (mp->dst_address)); + mp->state = t->state; + vl_msg_api_send_shmem (q, (u8 *)&mp); + } + })); + /* *INDENT-ON* */ +} + +static void +vl_api_oam_add_del_t_handler (vl_api_oam_add_del_t * mp) +{ + vl_api_oam_add_del_reply_t *rmp; + int rv; + + rv = vpe_oam_add_del_target ((ip4_address_t *) mp->src_address, + (ip4_address_t *) mp->dst_address, + ntohl (mp->vrf_id), (int) (mp->is_add)); + + REPLY_MACRO (VL_API_OAM_ADD_DEL_REPLY); +} + +static void +vl_api_vnet_get_summary_stats_t_handler (vl_api_vnet_get_summary_stats_t * mp) +{ + stats_main_t *sm = &stats_main; + vnet_interface_main_t *im = sm->interface_main; + vl_api_vnet_summary_stats_reply_t *rmp; + vlib_combined_counter_main_t *cm; + vlib_counter_t v; + int i, which; + u64 total_pkts[VLIB_N_RX_TX]; + u64 total_bytes[VLIB_N_RX_TX]; + + unix_shared_memory_queue_t *q = + vl_api_client_index_to_input_queue (mp->client_index); + + if (!q) + return; + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_VNET_SUMMARY_STATS_REPLY); + rmp->context = mp->context; + rmp->retval = 0; + + memset (total_pkts, 0, sizeof (total_pkts)); + memset (total_bytes, 0, sizeof (total_bytes)); + + vnet_interface_counter_lock (im); + + vec_foreach (cm, im->combined_sw_if_counters) + { + which = cm - im->combined_sw_if_counters; + + for (i = 0; i < vec_len (cm->maxi); i++) + { + vlib_get_combined_counter (cm, i, &v); + total_pkts[which] += v.packets; + total_bytes[which] += v.bytes; + } + } + vnet_interface_counter_unlock (im); + + rmp->total_pkts[VLIB_RX] = clib_host_to_net_u64 (total_pkts[VLIB_RX]); + rmp->total_bytes[VLIB_RX] = clib_host_to_net_u64 (total_bytes[VLIB_RX]); + rmp->total_pkts[VLIB_TX] = clib_host_to_net_u64 (total_pkts[VLIB_TX]); + rmp->total_bytes[VLIB_TX] = clib_host_to_net_u64 (total_bytes[VLIB_TX]); + rmp->vector_rate = + clib_host_to_net_u64 (vlib_last_vector_length_per_node (sm->vlib_main)); + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +/* *INDENT-OFF* */ +typedef CLIB_PACKED (struct { + ip4_address_t address; + u32 address_length: 6; + u32 index:26; +}) ip4_route_t; +/* *INDENT-ON* */ + +static int +ip4_reset_fib_t_handler (vl_api_reset_fib_t * mp) +{ + vnet_main_t *vnm = vnet_get_main (); + vnet_interface_main_t *im = &vnm->interface_main; + ip4_main_t *im4 = &ip4_main; + static u32 *sw_if_indices_to_shut; + stats_main_t *sm = &stats_main; + fib_table_t *fib_table; + ip4_fib_t *fib; + 
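/*
 * Editorial annotation, not part of the original commit: ip4_reset_fib
 * walks the ip4 FIB pool for the table whose table_id matches the
 * requested vrf_id.  On a match it removes MPLS label bindings and proxy
 * ARP entries for that table, restores the default IPv4 flow hash,
 * admin-downs every sw_if_index bound to the FIB, and flushes routes
 * sourced by the API and by interface addresses.  If no table matches,
 * rv stays VNET_API_ERROR_NO_SUCH_FIB.
 */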
u32 sw_if_index; + int i; + int rv = VNET_API_ERROR_NO_SUCH_FIB; + u32 target_fib_id = ntohl (mp->vrf_id); + + dslock (sm, 1 /* release hint */ , 8 /* tag */ ); + + /* *INDENT-OFF* */ + pool_foreach (fib_table, im4->fibs, + ({ + fib = &fib_table->v4; + vnet_sw_interface_t * si; + + if (fib->table_id != target_fib_id) + continue; + + /* remove any mpls encap/decap labels */ + mpls_fib_reset_labels (fib->table_id); + + /* remove any proxy arps in this fib */ + vnet_proxy_arp_fib_reset (fib->table_id); + + /* Set the flow hash for this fib to the default */ + vnet_set_ip4_flow_hash (fib->table_id, IP_FLOW_HASH_DEFAULT); + + vec_reset_length (sw_if_indices_to_shut); + + /* Shut down interfaces in this FIB / clean out intfc routes */ + pool_foreach (si, im->sw_interfaces, + ({ + u32 sw_if_index = si->sw_if_index; + + if (sw_if_index < vec_len (im4->fib_index_by_sw_if_index) + && (im4->fib_index_by_sw_if_index[si->sw_if_index] == + fib->index)) + vec_add1 (sw_if_indices_to_shut, si->sw_if_index); + })); + + for (i = 0; i < vec_len (sw_if_indices_to_shut); i++) { + sw_if_index = sw_if_indices_to_shut[i]; + // vec_foreach (sw_if_index, sw_if_indices_to_shut) { + + u32 flags = vnet_sw_interface_get_flags (vnm, sw_if_index); + flags &= ~(VNET_SW_INTERFACE_FLAG_ADMIN_UP); + vnet_sw_interface_set_flags (vnm, sw_if_index, flags); + } + + fib_table_flush(fib->index, FIB_PROTOCOL_IP4, FIB_SOURCE_API); + fib_table_flush(fib->index, FIB_PROTOCOL_IP4, FIB_SOURCE_INTERFACE); + + rv = 0; + break; + })); /* pool_foreach (fib) */ + /* *INDENT-ON* */ + + dsunlock (sm); + return rv; +} + +static int +ip6_reset_fib_t_handler (vl_api_reset_fib_t * mp) +{ + vnet_main_t *vnm = vnet_get_main (); + vnet_interface_main_t *im = &vnm->interface_main; + ip6_main_t *im6 = &ip6_main; + stats_main_t *sm = &stats_main; + static u32 *sw_if_indices_to_shut; + fib_table_t *fib_table; + ip6_fib_t *fib; + u32 sw_if_index; + int i; + int rv = VNET_API_ERROR_NO_SUCH_FIB; + u32 target_fib_id = ntohl (mp->vrf_id); + + dslock (sm, 1 /* release hint */ , 9 /* tag */ ); + + /* *INDENT-OFF* */ + pool_foreach (fib_table, im6->fibs, + ({ + vnet_sw_interface_t * si; + fib = &(fib_table->v6); + + if (fib->table_id != target_fib_id) + continue; + + vec_reset_length (sw_if_indices_to_shut); + + /* Shut down interfaces in this FIB / clean out intfc routes */ + pool_foreach (si, im->sw_interfaces, + ({ + if (im6->fib_index_by_sw_if_index[si->sw_if_index] == + fib->index) + vec_add1 (sw_if_indices_to_shut, si->sw_if_index); + })); + + for (i = 0; i < vec_len (sw_if_indices_to_shut); i++) { + sw_if_index = sw_if_indices_to_shut[i]; + // vec_foreach (sw_if_index, sw_if_indices_to_shut) { + + u32 flags = vnet_sw_interface_get_flags (vnm, sw_if_index); + flags &= ~(VNET_SW_INTERFACE_FLAG_ADMIN_UP); + vnet_sw_interface_set_flags (vnm, sw_if_index, flags); + } + + fib_table_flush(fib->index, FIB_PROTOCOL_IP6, FIB_SOURCE_API); + fib_table_flush(fib->index, FIB_PROTOCOL_IP6, FIB_SOURCE_INTERFACE); + + rv = 0; + break; + })); /* pool_foreach (fib) */ + /* *INDENT-ON* */ + + dsunlock (sm); + return rv; +} + +static void +vl_api_reset_fib_t_handler (vl_api_reset_fib_t * mp) +{ + int rv; + vl_api_reset_fib_reply_t *rmp; + + if (mp->is_ipv6) + rv = ip6_reset_fib_t_handler (mp); + else + rv = ip4_reset_fib_t_handler (mp); + + REPLY_MACRO (VL_API_RESET_FIB_REPLY); +} + + +static void +dhcpv4_proxy_config (vl_api_dhcp_proxy_config_t * mp) +{ + vl_api_dhcp_proxy_config_reply_t *rmp; + int rv; + + rv = dhcp_proxy_set_server ((ip4_address_t *) (&mp->dhcp_server), + 
(ip4_address_t *) (&mp->dhcp_src_address), + (u32) ntohl (mp->vrf_id), + (int) mp->insert_circuit_id, + (int) (mp->is_add == 0)); + + REPLY_MACRO (VL_API_DHCP_PROXY_CONFIG_REPLY); +} + + +static void +dhcpv6_proxy_config (vl_api_dhcp_proxy_config_t * mp) +{ + vl_api_dhcp_proxy_config_reply_t *rmp; + int rv = -1; + + rv = dhcpv6_proxy_set_server ((ip6_address_t *) (&mp->dhcp_server), + (ip6_address_t *) (&mp->dhcp_src_address), + (u32) ntohl (mp->vrf_id), + (int) mp->insert_circuit_id, + (int) (mp->is_add == 0)); + + REPLY_MACRO (VL_API_DHCP_PROXY_CONFIG_REPLY); +} + +static void +dhcpv4_proxy_config_2 (vl_api_dhcp_proxy_config_2_t * mp) +{ + vl_api_dhcp_proxy_config_reply_t *rmp; + int rv; + + rv = dhcp_proxy_set_server_2 ((ip4_address_t *) (&mp->dhcp_server), + (ip4_address_t *) (&mp->dhcp_src_address), + (u32) ntohl (mp->rx_vrf_id), + (u32) ntohl (mp->server_vrf_id), + (int) mp->insert_circuit_id, + (int) (mp->is_add == 0)); + + REPLY_MACRO (VL_API_DHCP_PROXY_CONFIG_2_REPLY); +} + + +static void +dhcpv6_proxy_config_2 (vl_api_dhcp_proxy_config_2_t * mp) +{ + vl_api_dhcp_proxy_config_reply_t *rmp; + int rv = -1; + + rv = dhcpv6_proxy_set_server_2 ((ip6_address_t *) (&mp->dhcp_server), + (ip6_address_t *) (&mp->dhcp_src_address), + (u32) ntohl (mp->rx_vrf_id), + (u32) ntohl (mp->server_vrf_id), + (int) mp->insert_circuit_id, + (int) (mp->is_add == 0)); + + REPLY_MACRO (VL_API_DHCP_PROXY_CONFIG_2_REPLY); +} + + +static void +vl_api_dhcp_proxy_set_vss_t_handler (vl_api_dhcp_proxy_set_vss_t * mp) +{ + vl_api_dhcp_proxy_set_vss_reply_t *rmp; + int rv; + if (!mp->is_ipv6) + rv = dhcp_proxy_set_option82_vss (ntohl (mp->tbl_id), + ntohl (mp->oui), + ntohl (mp->fib_id), + (int) mp->is_add == 0); + else + rv = dhcpv6_proxy_set_vss (ntohl (mp->tbl_id), + ntohl (mp->oui), + ntohl (mp->fib_id), (int) mp->is_add == 0); + + REPLY_MACRO (VL_API_DHCP_PROXY_SET_VSS_REPLY); +} + + +static void vl_api_dhcp_proxy_config_t_handler + (vl_api_dhcp_proxy_config_t * mp) +{ + if (mp->is_ipv6 == 0) + dhcpv4_proxy_config (mp); + else + dhcpv6_proxy_config (mp); +} + +static void vl_api_dhcp_proxy_config_2_t_handler + (vl_api_dhcp_proxy_config_2_t * mp) +{ + if (mp->is_ipv6 == 0) + dhcpv4_proxy_config_2 (mp); + else + dhcpv6_proxy_config_2 (mp); +} + +void +dhcp_compl_event_callback (u32 client_index, u32 pid, u8 * hostname, + u8 is_ipv6, u8 * host_address, u8 * router_address, + u8 * host_mac) +{ + unix_shared_memory_queue_t *q; + vl_api_dhcp_compl_event_t *mp; + + q = vl_api_client_index_to_input_queue (client_index); + if (!q) + return; + + mp = vl_msg_api_alloc (sizeof (*mp)); + mp->client_index = client_index; + mp->pid = pid; + mp->is_ipv6 = is_ipv6; + clib_memcpy (&mp->hostname, hostname, vec_len (hostname)); + mp->hostname[vec_len (hostname) + 1] = '\n'; + clib_memcpy (&mp->host_address[0], host_address, 16); + clib_memcpy (&mp->router_address[0], router_address, 16); + + if (NULL != host_mac) + clib_memcpy (&mp->host_mac[0], host_mac, 6); + + mp->_vl_msg_id = ntohs (VL_API_DHCP_COMPL_EVENT); + + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +static void vl_api_dhcp_client_config_t_handler + (vl_api_dhcp_client_config_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + vl_api_dhcp_client_config_reply_t *rmp; + int rv = 0; + + VALIDATE_SW_IF_INDEX (mp); + + rv = dhcp_client_config (vm, ntohl (mp->sw_if_index), + mp->hostname, mp->is_add, mp->client_index, + mp->want_dhcp_event ? 
dhcp_compl_event_callback : + NULL, mp->pid); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_DHCP_CLIENT_CONFIG_REPLY); +} + +static void +vl_api_create_loopback_t_handler (vl_api_create_loopback_t * mp) +{ + vl_api_create_loopback_reply_t *rmp; + u32 sw_if_index; + int rv; + + rv = vnet_create_loopback_interface (&sw_if_index, mp->mac_address); + + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_CREATE_LOOPBACK_REPLY, + ({ + rmp->sw_if_index = ntohl (sw_if_index); + })); + /* *INDENT-ON* */ +} + +static void +vl_api_delete_loopback_t_handler (vl_api_delete_loopback_t * mp) +{ + vl_api_delete_loopback_reply_t *rmp; + u32 sw_if_index; + int rv; + + sw_if_index = ntohl (mp->sw_if_index); + rv = vnet_delete_loopback_interface (sw_if_index); + + REPLY_MACRO (VL_API_DELETE_LOOPBACK_REPLY); +} + +static void +vl_api_control_ping_t_handler (vl_api_control_ping_t * mp) +{ + vl_api_control_ping_reply_t *rmp; + int rv = 0; + + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_CONTROL_PING_REPLY, + ({ + rmp->vpe_pid = ntohl (getpid()); + })); + /* *INDENT-ON* */ +} + +static void +shmem_cli_output (uword arg, u8 * buffer, uword buffer_bytes) +{ + u8 **shmem_vecp = (u8 **) arg; + u8 *shmem_vec; + void *oldheap; + api_main_t *am = &api_main; + u32 offset; + + shmem_vec = *shmem_vecp; + + offset = vec_len (shmem_vec); + + pthread_mutex_lock (&am->vlib_rp->mutex); + oldheap = svm_push_data_heap (am->vlib_rp); + + vec_validate (shmem_vec, offset + buffer_bytes - 1); + + clib_memcpy (shmem_vec + offset, buffer, buffer_bytes); + + svm_pop_heap (oldheap); + pthread_mutex_unlock (&am->vlib_rp->mutex); + + *shmem_vecp = shmem_vec; +} + + +static void +vl_api_cli_request_t_handler (vl_api_cli_request_t * mp) +{ + vl_api_cli_reply_t *rp; + unix_shared_memory_queue_t *q; + vlib_main_t *vm = vlib_get_main (); + api_main_t *am = &api_main; + unformat_input_t input; + u8 *shmem_vec = 0; + void *oldheap; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (!q) + return; + + rp = vl_msg_api_alloc (sizeof (*rp)); + rp->_vl_msg_id = ntohs (VL_API_CLI_REPLY); + rp->context = mp->context; + + unformat_init_vector (&input, (u8 *) (uword) mp->cmd_in_shmem); + + vlib_cli_input (vm, &input, shmem_cli_output, (uword) & shmem_vec); + + pthread_mutex_lock (&am->vlib_rp->mutex); + oldheap = svm_push_data_heap (am->vlib_rp); + + vec_add1 (shmem_vec, 0); + + svm_pop_heap (oldheap); + pthread_mutex_unlock (&am->vlib_rp->mutex); + + rp->reply_in_shmem = (uword) shmem_vec; + + vl_msg_api_send_shmem (q, (u8 *) & rp); +} + +static void +inband_cli_output (uword arg, u8 * buffer, uword buffer_bytes) +{ + u8 **mem_vecp = (u8 **) arg; + u8 *mem_vec = *mem_vecp; + u32 offset = vec_len (mem_vec); + + vec_validate (mem_vec, offset + buffer_bytes - 1); + clib_memcpy (mem_vec + offset, buffer, buffer_bytes); + *mem_vecp = mem_vec; +} + +static void +vl_api_cli_inband_t_handler (vl_api_cli_inband_t * mp) +{ + vl_api_cli_inband_reply_t *rmp; + int rv = 0; + unix_shared_memory_queue_t *q; + vlib_main_t *vm = vlib_get_main (); + unformat_input_t input; + u8 *out_vec = 0; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (!q) + return; + + unformat_init_string (&input, (char *) mp->cmd, ntohl (mp->length)); + vlib_cli_input (vm, &input, inband_cli_output, (uword) & out_vec); + + u32 len = vec_len (out_vec); + /* *INDENT-OFF* */ + REPLY_MACRO3(VL_API_CLI_INBAND_REPLY, len, + ({ + rmp->length = htonl (len); + clib_memcpy (rmp->reply, out_vec, len); + })); + /* *INDENT-ON* */ + vec_free (out_vec); +} + +static void 
+vl_api_set_arp_neighbor_limit_t_handler (vl_api_set_arp_neighbor_limit_t * mp) +{ + int rv; + vl_api_set_arp_neighbor_limit_reply_t *rmp; + vnet_main_t *vnm = vnet_get_main (); + clib_error_t *error; + + vnm->api_errno = 0; + + if (mp->is_ipv6) + error = ip6_set_neighbor_limit (ntohl (mp->arp_neighbor_limit)); + else + error = ip4_set_arp_limit (ntohl (mp->arp_neighbor_limit)); + + if (error) + { + clib_error_report (error); + rv = VNET_API_ERROR_UNSPECIFIED; + } + else + { + rv = vnm->api_errno; + } + + REPLY_MACRO (VL_API_SET_ARP_NEIGHBOR_LIMIT_REPLY); +} + +static void vl_api_sr_tunnel_add_del_t_handler + (vl_api_sr_tunnel_add_del_t * mp) +{ +#if IP6SR == 0 + clib_warning ("unimplemented"); +#else + ip6_sr_add_del_tunnel_args_t _a, *a = &_a; + int rv = 0; + vl_api_sr_tunnel_add_del_reply_t *rmp; + ip6_address_t *segments = 0, *seg; + ip6_address_t *tags = 0, *tag; + ip6_address_t *this_address; + int i; + + if (mp->n_segments == 0) + { + rv = -11; + goto out; + } + + memset (a, 0, sizeof (*a)); + a->src_address = (ip6_address_t *) & mp->src_address; + a->dst_address = (ip6_address_t *) & mp->dst_address; + a->dst_mask_width = mp->dst_mask_width; + a->flags_net_byte_order = mp->flags_net_byte_order; + a->is_del = (mp->is_add == 0); + a->rx_table_id = ntohl (mp->outer_vrf_id); + a->tx_table_id = ntohl (mp->inner_vrf_id); + + a->name = format (0, "%s", mp->name); + if (!(vec_len (a->name))) + a->name = 0; + + a->policy_name = format (0, "%s", mp->policy_name); + if (!(vec_len (a->policy_name))) + a->policy_name = 0; + + /* Yank segments and tags out of the API message */ + this_address = (ip6_address_t *) mp->segs_and_tags; + for (i = 0; i < mp->n_segments; i++) + { + vec_add2 (segments, seg, 1); + clib_memcpy (seg->as_u8, this_address->as_u8, sizeof (*this_address)); + this_address++; + } + for (i = 0; i < mp->n_tags; i++) + { + vec_add2 (tags, tag, 1); + clib_memcpy (tag->as_u8, this_address->as_u8, sizeof (*this_address)); + this_address++; + } + + a->segments = segments; + a->tags = tags; + + rv = ip6_sr_add_del_tunnel (a); + +out: + + REPLY_MACRO (VL_API_SR_TUNNEL_ADD_DEL_REPLY); +#endif +} + +static void vl_api_sr_policy_add_del_t_handler + (vl_api_sr_policy_add_del_t * mp) +{ +#if IP6SR == 0 + clib_warning ("unimplemented"); +#else + ip6_sr_add_del_policy_args_t _a, *a = &_a; + int rv = 0; + vl_api_sr_policy_add_del_reply_t *rmp; + int i; + + memset (a, 0, sizeof (*a)); + a->is_del = (mp->is_add == 0); + + a->name = format (0, "%s", mp->name); + if (!(vec_len (a->name))) + { + rv = VNET_API_ERROR_NO_SUCH_NODE2; + goto out; + } + + if (!(mp->tunnel_names[0])) + { + rv = VNET_API_ERROR_NO_SUCH_NODE2; + goto out; + } + + // start deserializing tunnel_names + int num_tunnels = mp->tunnel_names[0]; //number of tunnels + u8 *deser_tun_names = mp->tunnel_names; + deser_tun_names += 1; //moving along + + u8 *tun_name = 0; + int tun_name_len = 0; + + for (i = 0; i < num_tunnels; i++) + { + tun_name_len = *deser_tun_names; + deser_tun_names += 1; + vec_resize (tun_name, tun_name_len); + memcpy (tun_name, deser_tun_names, tun_name_len); + vec_add1 (a->tunnel_names, tun_name); + deser_tun_names += tun_name_len; + tun_name = 0; + } + + rv = ip6_sr_add_del_policy (a); + +out: + + REPLY_MACRO (VL_API_SR_POLICY_ADD_DEL_REPLY); +#endif +} + +static void vl_api_sr_multicast_map_add_del_t_handler + (vl_api_sr_multicast_map_add_del_t * mp) +{ +#if IP6SR == 0 + clib_warning ("unimplemented"); +#else + ip6_sr_add_del_multicastmap_args_t _a, *a = &_a; + int rv = 0; + 
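/*
 * Editorial annotation, not part of the original commit: this handler
 * repackages the API message into ip6_sr_add_del_multicastmap_args_t,
 * copying the multicast address and policy name and mapping is_add to
 * is_del.  The actual map add/delete is only attempted when VPP is built
 * with DPDK, because the SR replication path depends on it; otherwise the
 * handler replies with VNET_API_ERROR_UNIMPLEMENTED.
 */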
vl_api_sr_multicast_map_add_del_reply_t *rmp; + + memset (a, 0, sizeof (*a)); + a->is_del = (mp->is_add == 0); + + a->multicast_address = (ip6_address_t *) & mp->multicast_address; + a->policy_name = format (0, "%s", mp->policy_name); + + if (a->multicast_address == 0) + { + rv = -1; + goto out; + } + + if (!(a->policy_name)) + { + rv = -2; + goto out; + } + +#if DPDK > 0 /* Cannot call replicate without DPDK */ + rv = ip6_sr_add_del_multicastmap (a); +#else + clib_warning ("multicast replication without DPDK not implemented"); + rv = VNET_API_ERROR_UNIMPLEMENTED; +#endif /* DPDK */ + +out: + + REPLY_MACRO (VL_API_SR_MULTICAST_MAP_ADD_DEL_REPLY); +#endif +} + +#define foreach_classify_add_del_table_field \ +_(table_index) \ +_(nbuckets) \ +_(memory_size) \ +_(skip_n_vectors) \ +_(match_n_vectors) \ +_(next_table_index) \ +_(miss_next_index) \ +_(current_data_flag) \ +_(current_data_offset) + +static void vl_api_classify_add_del_table_t_handler + (vl_api_classify_add_del_table_t * mp) +{ + vl_api_classify_add_del_table_reply_t *rmp; + vnet_classify_main_t *cm = &vnet_classify_main; + vnet_classify_table_t *t; + int rv; + +#define _(a) u32 a; + foreach_classify_add_del_table_field; +#undef _ + +#define _(a) a = ntohl(mp->a); + foreach_classify_add_del_table_field; +#undef _ + + /* The underlying API fails silently, on purpose, so check here */ + if (mp->is_add == 0) /* delete */ + { + if (pool_is_free_index (cm->tables, table_index)) + { + rv = VNET_API_ERROR_NO_SUCH_TABLE; + goto out; + } + } + else /* add or update */ + { + if (table_index != ~0 && pool_is_free_index (cm->tables, table_index)) + table_index = ~0; + } + + rv = vnet_classify_add_del_table + (cm, mp->mask, nbuckets, memory_size, + skip_n_vectors, match_n_vectors, + next_table_index, miss_next_index, &table_index, + current_data_flag, current_data_offset, mp->is_add, mp->del_chain); + +out: + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_CLASSIFY_ADD_DEL_TABLE_REPLY, + ({ + if (rv == 0 && mp->is_add) + { + t = pool_elt_at_index (cm->tables, table_index); + rmp->skip_n_vectors = ntohl(t->skip_n_vectors); + rmp->match_n_vectors = ntohl(t->match_n_vectors); + rmp->new_table_index = ntohl(table_index); + } + else + { + rmp->skip_n_vectors = ~0; + rmp->match_n_vectors = ~0; + rmp->new_table_index = ~0; + } + })); + /* *INDENT-ON* */ +} + +static void vl_api_classify_add_del_session_t_handler + (vl_api_classify_add_del_session_t * mp) +{ + vnet_classify_main_t *cm = &vnet_classify_main; + vl_api_classify_add_del_session_reply_t *rmp; + int rv; + u32 table_index, hit_next_index, opaque_index, metadata; + i32 advance; + u8 action; + + table_index = ntohl (mp->table_index); + hit_next_index = ntohl (mp->hit_next_index); + opaque_index = ntohl (mp->opaque_index); + advance = ntohl (mp->advance); + action = mp->action; + metadata = ntohl (mp->metadata); + + rv = vnet_classify_add_del_session + (cm, table_index, mp->match, hit_next_index, opaque_index, + advance, action, metadata, mp->is_add); + + REPLY_MACRO (VL_API_CLASSIFY_ADD_DEL_SESSION_REPLY); +} + +static void vl_api_classify_set_interface_ip_table_t_handler + (vl_api_classify_set_interface_ip_table_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + vl_api_classify_set_interface_ip_table_reply_t *rmp; + int rv; + u32 table_index, sw_if_index; + + table_index = ntohl (mp->table_index); + sw_if_index = ntohl (mp->sw_if_index); + + VALIDATE_SW_IF_INDEX (mp); + + if (mp->is_ipv6) + rv = vnet_set_ip6_classify_intfc (vm, sw_if_index, table_index); + else + rv = vnet_set_ip4_classify_intfc 
(vm, sw_if_index, table_index); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_CLASSIFY_SET_INTERFACE_IP_TABLE_REPLY); +} + +static void vl_api_classify_set_interface_l2_tables_t_handler + (vl_api_classify_set_interface_l2_tables_t * mp) +{ + vl_api_classify_set_interface_l2_tables_reply_t *rmp; + int rv; + u32 sw_if_index, ip4_table_index, ip6_table_index, other_table_index; + int enable; + + ip4_table_index = ntohl (mp->ip4_table_index); + ip6_table_index = ntohl (mp->ip6_table_index); + other_table_index = ntohl (mp->other_table_index); + sw_if_index = ntohl (mp->sw_if_index); + + VALIDATE_SW_IF_INDEX (mp); + + if (mp->is_input) + rv = vnet_l2_input_classify_set_tables (sw_if_index, ip4_table_index, + ip6_table_index, + other_table_index); + else + rv = vnet_l2_output_classify_set_tables (sw_if_index, ip4_table_index, + ip6_table_index, + other_table_index); + + if (rv == 0) + { + if (ip4_table_index != ~0 || ip6_table_index != ~0 + || other_table_index != ~0) + enable = 1; + else + enable = 0; + + if (mp->is_input) + vnet_l2_input_classify_enable_disable (sw_if_index, enable); + else + vnet_l2_output_classify_enable_disable (sw_if_index, enable); + } + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_CLASSIFY_SET_INTERFACE_L2_TABLES_REPLY); +} + +static void +vl_api_l2_fib_clear_table_t_handler (vl_api_l2_fib_clear_table_t * mp) +{ + int rv = 0; + vl_api_l2_fib_clear_table_reply_t *rmp; + + /* DAW-FIXME: This API should only clear non-static l2fib entries, but + * that is not currently implemented. When that TODO is fixed + * this call should be changed to pass 1 instead of 0. + */ + l2fib_clear_table (0); + + REPLY_MACRO (VL_API_L2_FIB_CLEAR_TABLE_REPLY); +} + +extern void l2_efp_filter_configure (vnet_main_t * vnet_main, + u32 sw_if_index, u32 enable); + +static void +vl_api_l2_interface_efp_filter_t_handler (vl_api_l2_interface_efp_filter_t * + mp) +{ + int rv; + vl_api_l2_interface_efp_filter_reply_t *rmp; + vnet_main_t *vnm = vnet_get_main (); + + // enable/disable the feature + l2_efp_filter_configure (vnm, mp->sw_if_index, mp->enable_disable); + rv = vnm->api_errno; + + REPLY_MACRO (VL_API_L2_INTERFACE_EFP_FILTER_REPLY); +} + +static void + vl_api_l2_interface_vlan_tag_rewrite_t_handler + (vl_api_l2_interface_vlan_tag_rewrite_t * mp) +{ + int rv = 0; + vl_api_l2_interface_vlan_tag_rewrite_reply_t *rmp; + vnet_main_t *vnm = vnet_get_main (); + vlib_main_t *vm = vlib_get_main (); + u32 vtr_op; + + VALIDATE_SW_IF_INDEX (mp); + + vtr_op = ntohl (mp->vtr_op); + + /* The L2 code is unsuspicious */ + switch (vtr_op) + { + case L2_VTR_DISABLED: + case L2_VTR_PUSH_1: + case L2_VTR_PUSH_2: + case L2_VTR_POP_1: + case L2_VTR_POP_2: + case L2_VTR_TRANSLATE_1_1: + case L2_VTR_TRANSLATE_1_2: + case L2_VTR_TRANSLATE_2_1: + case L2_VTR_TRANSLATE_2_2: + break; + + default: + rv = VNET_API_ERROR_INVALID_VALUE; + goto bad_sw_if_index; + } + + rv = l2vtr_configure (vm, vnm, ntohl (mp->sw_if_index), vtr_op, + ntohl (mp->push_dot1q), ntohl (mp->tag1), + ntohl (mp->tag2)); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_L2_INTERFACE_VLAN_TAG_REWRITE_REPLY); +} + +static void +vl_api_l2_fib_table_entry_t_handler (vl_api_l2_fib_table_entry_t * mp) +{ + clib_warning ("BUG"); +} + +static void +send_l2fib_table_entry (vpe_api_main_t * am, + unix_shared_memory_queue_t * q, + l2fib_entry_key_t * l2fe_key, + l2fib_entry_result_t * l2fe_res, u32 context) +{ + vl_api_l2_fib_table_entry_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs 
(VL_API_L2_FIB_TABLE_ENTRY); + + mp->bd_id = + ntohl (l2input_main.bd_configs[l2fe_key->fields.bd_index].bd_id); + + mp->mac = l2fib_make_key (l2fe_key->fields.mac, 0); + mp->sw_if_index = ntohl (l2fe_res->fields.sw_if_index); + mp->static_mac = l2fe_res->fields.static_mac; + mp->filter_mac = l2fe_res->fields.filter; + mp->bvi_mac = l2fe_res->fields.bvi; + mp->context = context; + + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +static void +vl_api_l2_fib_table_dump_t_handler (vl_api_l2_fib_table_dump_t * mp) +{ + vpe_api_main_t *am = &vpe_api_main; + bd_main_t *bdm = &bd_main; + l2fib_entry_key_t *l2fe_key = NULL; + l2fib_entry_result_t *l2fe_res = NULL; + u32 ni, bd_id = ntohl (mp->bd_id); + u32 bd_index; + unix_shared_memory_queue_t *q; + uword *p; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + return; + + /* see l2fib_table_dump: ~0 means "any" */ + if (bd_id == ~0) + bd_index = ~0; + else + { + p = hash_get (bdm->bd_index_by_bd_id, bd_id); + if (p == 0) + return; + + bd_index = p[0]; + } + + l2fib_table_dump (bd_index, &l2fe_key, &l2fe_res); + + vec_foreach_index (ni, l2fe_key) + { + send_l2fib_table_entry (am, q, vec_elt_at_index (l2fe_key, ni), + vec_elt_at_index (l2fe_res, ni), mp->context); + } + vec_free (l2fe_key); + vec_free (l2fe_res); +} + +static void +vl_api_show_version_t_handler (vl_api_show_version_t * mp) +{ + vl_api_show_version_reply_t *rmp; + int rv = 0; + char *vpe_api_get_build_directory (void); + char *vpe_api_get_version (void); + char *vpe_api_get_build_date (void); + + unix_shared_memory_queue_t *q = + vl_api_client_index_to_input_queue (mp->client_index); + + if (!q) + return; + + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_SHOW_VERSION_REPLY, + ({ + strncpy ((char *) rmp->program, "vpe", ARRAY_LEN(rmp->program)-1); + strncpy ((char *) rmp->build_directory, vpe_api_get_build_directory(), + ARRAY_LEN(rmp->build_directory)-1); + strncpy ((char *) rmp->version, vpe_api_get_version(), + ARRAY_LEN(rmp->version)-1); + strncpy ((char *) rmp->build_date, vpe_api_get_build_date(), + ARRAY_LEN(rmp->build_date)-1); + })); + /* *INDENT-ON* */ +} + +static void +vl_api_get_node_index_t_handler (vl_api_get_node_index_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + vl_api_get_node_index_reply_t *rmp; + vlib_node_t *n; + int rv = 0; + u32 node_index = ~0; + + n = vlib_get_node_by_name (vm, mp->node_name); + + if (n == 0) + rv = VNET_API_ERROR_NO_SUCH_NODE; + else + node_index = n->index; + + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_GET_NODE_INDEX_REPLY, + ({ + rmp->node_index = ntohl(node_index); + })); + /* *INDENT-ON* */ +} + +static void +vl_api_get_next_index_t_handler (vl_api_get_next_index_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + vl_api_get_next_index_reply_t *rmp; + vlib_node_t *node, *next_node; + int rv = 0; + u32 next_node_index = ~0, next_index = ~0; + uword *p; + + node = vlib_get_node_by_name (vm, mp->node_name); + + if (node == 0) + { + rv = VNET_API_ERROR_NO_SUCH_NODE; + goto out; + } + + next_node = vlib_get_node_by_name (vm, mp->next_name); + + if (next_node == 0) + { + rv = VNET_API_ERROR_NO_SUCH_NODE2; + goto out; + } + else + next_node_index = next_node->index; + + p = hash_get (node->next_slot_by_node, next_node_index); + + if (p == 0) + { + rv = VNET_API_ERROR_NO_SUCH_ENTRY; + goto out; + } + else + next_index = p[0]; + +out: + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_GET_NEXT_INDEX_REPLY, + ({ + rmp->next_index = ntohl(next_index); + })); + /* *INDENT-ON* */ +} + +static void +vl_api_add_node_next_t_handler 
(vl_api_add_node_next_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + vl_api_add_node_next_reply_t *rmp; + vlib_node_t *n, *next; + int rv = 0; + u32 next_index = ~0; + + n = vlib_get_node_by_name (vm, mp->node_name); + + if (n == 0) + { + rv = VNET_API_ERROR_NO_SUCH_NODE; + goto out; + } + + next = vlib_get_node_by_name (vm, mp->next_name); + + if (next == 0) + rv = VNET_API_ERROR_NO_SUCH_NODE2; + else + next_index = vlib_node_add_next (vm, n->index, next->index); + +out: + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_GET_NODE_INDEX_REPLY, + ({ + rmp->next_index = ntohl(next_index); + })); + /* *INDENT-ON* */ +} + +static void vl_api_vxlan_add_del_tunnel_t_handler + (vl_api_vxlan_add_del_tunnel_t * mp) +{ + vl_api_vxlan_add_del_tunnel_reply_t *rmp; + int rv = 0; + vnet_vxlan_add_del_tunnel_args_t _a, *a = &_a; + u32 encap_fib_index; + uword *p; + ip4_main_t *im = &ip4_main; + vnet_main_t *vnm = vnet_get_main (); + u32 sw_if_index = ~0; + + p = hash_get (im->fib_index_by_table_id, ntohl (mp->encap_vrf_id)); + if (!p) + { + rv = VNET_API_ERROR_NO_SUCH_FIB; + goto out; + } + encap_fib_index = p[0]; + memset (a, 0, sizeof (*a)); + + a->is_add = mp->is_add; + a->is_ip6 = mp->is_ipv6; + + /* ip addresses sent in network byte order */ + ip46_from_addr_buf (mp->is_ipv6, mp->dst_address, &a->dst); + ip46_from_addr_buf (mp->is_ipv6, mp->src_address, &a->src); + + /* Check src & dst are different */ + if (ip46_address_cmp (&a->dst, &a->src) == 0) + { + rv = VNET_API_ERROR_SAME_SRC_DST; + goto out; + } + a->mcast_sw_if_index = ntohl (mp->mcast_sw_if_index); + if (ip46_address_is_multicast (&a->dst) && + pool_is_free_index (vnm->interface_main.sw_interfaces, + a->mcast_sw_if_index)) + { + rv = VNET_API_ERROR_INVALID_SW_IF_INDEX; + goto out; + } + a->encap_fib_index = encap_fib_index; + a->decap_next_index = ntohl (mp->decap_next_index); + a->vni = ntohl (mp->vni); + rv = vnet_vxlan_add_del_tunnel (a, &sw_if_index); + +out: + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_VXLAN_ADD_DEL_TUNNEL_REPLY, + ({ + rmp->sw_if_index = ntohl (sw_if_index); + })); + /* *INDENT-ON* */ +} + +static void send_vxlan_tunnel_details + (vxlan_tunnel_t * t, unix_shared_memory_queue_t * q, u32 context) +{ + vl_api_vxlan_tunnel_details_t *rmp; + ip4_main_t *im4 = &ip4_main; + ip6_main_t *im6 = &ip6_main; + u8 is_ipv6 = !ip46_address_is_ip4 (&t->dst); + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_VXLAN_TUNNEL_DETAILS); + if (is_ipv6) + { + memcpy (rmp->src_address, t->src.ip6.as_u8, 16); + memcpy (rmp->dst_address, t->dst.ip6.as_u8, 16); + rmp->encap_vrf_id = htonl (im6->fibs[t->encap_fib_index].ft_table_id); + } + else + { + memcpy (rmp->src_address, t->src.ip4.as_u8, 4); + memcpy (rmp->dst_address, t->dst.ip4.as_u8, 4); + rmp->encap_vrf_id = htonl (im4->fibs[t->encap_fib_index].ft_table_id); + } + rmp->mcast_sw_if_index = htonl (t->mcast_sw_if_index); + rmp->vni = htonl (t->vni); + rmp->decap_next_index = htonl (t->decap_next_index); + rmp->sw_if_index = htonl (t->sw_if_index); + rmp->is_ipv6 = is_ipv6; + rmp->context = context; + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void vl_api_vxlan_tunnel_dump_t_handler + (vl_api_vxlan_tunnel_dump_t * mp) +{ + unix_shared_memory_queue_t *q; + vxlan_main_t *vxm = &vxlan_main; + vxlan_tunnel_t *t; + u32 sw_if_index; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + { + return; + } + + sw_if_index = ntohl (mp->sw_if_index); + + if (~0 == sw_if_index) + { + /* *INDENT-OFF* */ + 
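/* sw_if_index of ~0 requests a dump of every VXLAN tunnel in the pool */ +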
pool_foreach (t, vxm->tunnels, + ({ + send_vxlan_tunnel_details(t, q, mp->context); + })); + /* *INDENT-ON* */ + } + else + { + if ((sw_if_index >= vec_len (vxm->tunnel_index_by_sw_if_index)) || + (~0 == vxm->tunnel_index_by_sw_if_index[sw_if_index])) + { + return; + } + t = &vxm->tunnels[vxm->tunnel_index_by_sw_if_index[sw_if_index]]; + send_vxlan_tunnel_details (t, q, mp->context); + } +} + +static void +vl_api_l2_patch_add_del_t_handler (vl_api_l2_patch_add_del_t * mp) +{ + extern int vnet_l2_patch_add_del (u32 rx_sw_if_index, u32 tx_sw_if_index, + int is_add); + vl_api_l2_patch_add_del_reply_t *rmp; + int vnet_l2_patch_add_del (u32 rx_sw_if_index, u32 tx_sw_if_index, + int is_add); + int rv = 0; + + VALIDATE_RX_SW_IF_INDEX (mp); + VALIDATE_TX_SW_IF_INDEX (mp); + + rv = vnet_l2_patch_add_del (ntohl (mp->rx_sw_if_index), + ntohl (mp->tx_sw_if_index), + (int) (mp->is_add != 0)); + + BAD_RX_SW_IF_INDEX_LABEL; + BAD_TX_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_L2_PATCH_ADD_DEL_REPLY); +} + +static void + vl_api_vxlan_gpe_add_del_tunnel_t_handler + (vl_api_vxlan_gpe_add_del_tunnel_t * mp) +{ + vl_api_vxlan_gpe_add_del_tunnel_reply_t *rmp; + int rv = 0; + vnet_vxlan_gpe_add_del_tunnel_args_t _a, *a = &_a; + u32 encap_fib_index, decap_fib_index; + u8 protocol; + uword *p; + ip4_main_t *im = &ip4_main; + u32 sw_if_index = ~0; + + + p = hash_get (im->fib_index_by_table_id, ntohl (mp->encap_vrf_id)); + if (!p) + { + rv = VNET_API_ERROR_NO_SUCH_FIB; + goto out; + } + encap_fib_index = p[0]; + + protocol = mp->protocol; + + /* Interpret decap_vrf_id as an opaque if sending to other-than-ip4-input */ + if (protocol == VXLAN_GPE_INPUT_NEXT_IP4_INPUT) + { + p = hash_get (im->fib_index_by_table_id, ntohl (mp->decap_vrf_id)); + if (!p) + { + rv = VNET_API_ERROR_NO_SUCH_INNER_FIB; + goto out; + } + decap_fib_index = p[0]; + } + else + { + decap_fib_index = ntohl (mp->decap_vrf_id); + } + + /* Check src & dst are different */ + if ((mp->is_ipv6 && memcmp (mp->local, mp->remote, 16) == 0) || + (!mp->is_ipv6 && memcmp (mp->local, mp->remote, 4) == 0)) + { + rv = VNET_API_ERROR_SAME_SRC_DST; + goto out; + } + memset (a, 0, sizeof (*a)); + + a->is_add = mp->is_add; + a->is_ip6 = mp->is_ipv6; + /* ip addresses sent in network byte order */ + if (a->is_ip6) + { + clib_memcpy (&(a->local.ip6), mp->local, 16); + clib_memcpy (&(a->remote.ip6), mp->remote, 16); + } + else + { + clib_memcpy (&(a->local.ip4), mp->local, 4); + clib_memcpy (&(a->remote.ip4), mp->remote, 4); + } + a->encap_fib_index = encap_fib_index; + a->decap_fib_index = decap_fib_index; + a->protocol = protocol; + a->vni = ntohl (mp->vni); + rv = vnet_vxlan_gpe_add_del_tunnel (a, &sw_if_index); + +out: + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_VXLAN_GPE_ADD_DEL_TUNNEL_REPLY, + ({ + rmp->sw_if_index = ntohl (sw_if_index); + })); + /* *INDENT-ON* */ +} + +static void send_vxlan_gpe_tunnel_details + (vxlan_gpe_tunnel_t * t, unix_shared_memory_queue_t * q, u32 context) +{ + vl_api_vxlan_gpe_tunnel_details_t *rmp; + ip4_main_t *im4 = &ip4_main; + ip6_main_t *im6 = &ip6_main; + u8 is_ipv6 = !(t->flags & VXLAN_GPE_TUNNEL_IS_IPV4); + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_VXLAN_GPE_TUNNEL_DETAILS); + if (is_ipv6) + { + memcpy (rmp->local, &(t->local.ip6), 16); + memcpy (rmp->remote, &(t->remote.ip6), 16); + rmp->encap_vrf_id = htonl (im6->fibs[t->encap_fib_index].ft_table_id); + rmp->decap_vrf_id = htonl (im6->fibs[t->decap_fib_index].ft_table_id); + } + else + { + memcpy (rmp->local, 
&(t->local.ip4), 4); + memcpy (rmp->remote, &(t->remote.ip4), 4); + rmp->encap_vrf_id = htonl (im4->fibs[t->encap_fib_index].ft_table_id); + rmp->decap_vrf_id = htonl (im4->fibs[t->decap_fib_index].ft_table_id); + } + rmp->vni = htonl (t->vni); + rmp->protocol = t->protocol; + rmp->sw_if_index = htonl (t->sw_if_index); + rmp->is_ipv6 = is_ipv6; + rmp->context = context; + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void vl_api_vxlan_gpe_tunnel_dump_t_handler + (vl_api_vxlan_gpe_tunnel_dump_t * mp) +{ + unix_shared_memory_queue_t *q; + vxlan_gpe_main_t *vgm = &vxlan_gpe_main; + vxlan_gpe_tunnel_t *t; + u32 sw_if_index; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + { + return; + } + + sw_if_index = ntohl (mp->sw_if_index); + + if (~0 == sw_if_index) + { + /* *INDENT-OFF* */ + pool_foreach (t, vgm->tunnels, + ({ + send_vxlan_gpe_tunnel_details(t, q, mp->context); + })); + /* *INDENT-ON* */ + } + else + { + if ((sw_if_index >= vec_len (vgm->tunnel_index_by_sw_if_index)) || + (~0 == vgm->tunnel_index_by_sw_if_index[sw_if_index])) + { + return; + } + t = &vgm->tunnels[vgm->tunnel_index_by_sw_if_index[sw_if_index]]; + send_vxlan_gpe_tunnel_details (t, q, mp->context); + } +} + +static void +vl_api_interface_name_renumber_t_handler (vl_api_interface_name_renumber_t * + mp) +{ + vl_api_interface_name_renumber_reply_t *rmp; + int rv = 0; + + VALIDATE_SW_IF_INDEX (mp); + + rv = vnet_interface_name_renumber + (ntohl (mp->sw_if_index), ntohl (mp->new_show_dev_instance)); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_INTERFACE_NAME_RENUMBER_REPLY); +} + +static int +arp_change_data_callback (u32 pool_index, u8 * new_mac, + u32 sw_if_index, u32 address) +{ + vpe_api_main_t *am = &vpe_api_main; + vlib_main_t *vm = am->vlib_main; + vl_api_ip4_arp_event_t *event; + static f64 arp_event_last_time; + f64 now = vlib_time_now (vm); + + if (pool_is_free_index (am->arp_events, pool_index)) + return 1; + + event = pool_elt_at_index (am->arp_events, pool_index); + /* *INDENT-OFF* */ + if (memcmp (&event->new_mac, new_mac, sizeof (event->new_mac))) + { + clib_memcpy (event->new_mac, new_mac, sizeof (event->new_mac)); + } + else + { /* same mac */ + if (sw_if_index == event->sw_if_index && + (!event->mac_ip || + /* for BD case, also check IP address with 10 sec timeout */ + (address == event->address && + (now - arp_event_last_time) < 10.0))) + return 1; + } + /* *INDENT-ON* */ + + arp_event_last_time = now; + event->sw_if_index = sw_if_index; + if (event->mac_ip) + event->address = address; + return 0; +} + +static int +nd_change_data_callback (u32 pool_index, u8 * new_mac, + u32 sw_if_index, ip6_address_t * address) +{ + vpe_api_main_t *am = &vpe_api_main; + vlib_main_t *vm = am->vlib_main; + vl_api_ip6_nd_event_t *event; + static f64 nd_event_last_time; + f64 now = vlib_time_now (vm); + + if (pool_is_free_index (am->nd_events, pool_index)) + return 1; + + event = pool_elt_at_index (am->nd_events, pool_index); + + /* *INDENT-OFF* */ + if (memcmp (&event->new_mac, new_mac, sizeof (event->new_mac))) + { + clib_memcpy (event->new_mac, new_mac, sizeof (event->new_mac)); + } + else + { /* same mac */ + if (sw_if_index == event->sw_if_index && + (!event->mac_ip || + /* for BD case, also check IP address with 10 sec timeout */ + (ip6_address_is_equal (address, + (ip6_address_t *) event->address) && + (now - nd_event_last_time) < 10.0))) + return 1; + } + /* *INDENT-ON* */ + + nd_event_last_time = now; + event->sw_if_index = sw_if_index; + if (event->mac_ip) + clib_memcpy 
(event->address, address, sizeof (event->address)); + return 0; +} + +static int +arp_change_delete_callback (u32 pool_index, u8 * notused) +{ + vpe_api_main_t *am = &vpe_api_main; + + if (pool_is_free_index (am->arp_events, pool_index)) + return 1; + + pool_put_index (am->arp_events, pool_index); + return 0; +} + +static int +nd_change_delete_callback (u32 pool_index, u8 * notused) +{ + vpe_api_main_t *am = &vpe_api_main; + + if (pool_is_free_index (am->nd_events, pool_index)) + return 1; + + pool_put_index (am->nd_events, pool_index); + return 0; +} + +static void +vl_api_want_ip4_arp_events_t_handler (vl_api_want_ip4_arp_events_t * mp) +{ + vpe_api_main_t *am = &vpe_api_main; + vnet_main_t *vnm = vnet_get_main (); + vl_api_want_ip4_arp_events_reply_t *rmp; + vl_api_ip4_arp_event_t *event; + int rv; + + if (mp->enable_disable) + { + pool_get (am->arp_events, event); + memset (event, 0, sizeof (*event)); + + event->_vl_msg_id = ntohs (VL_API_IP4_ARP_EVENT); + event->client_index = mp->client_index; + event->context = mp->context; + event->address = mp->address; + event->pid = mp->pid; + if (mp->address == 0) + event->mac_ip = 1; + + rv = vnet_add_del_ip4_arp_change_event + (vnm, arp_change_data_callback, + mp->pid, &mp->address /* addr, in net byte order */ , + vpe_resolver_process_node.index, + IP4_ARP_EVENT, event - am->arp_events, 1 /* is_add */ ); + } + else + { + rv = vnet_add_del_ip4_arp_change_event + (vnm, arp_change_delete_callback, + mp->pid, &mp->address /* addr, in net byte order */ , + vpe_resolver_process_node.index, + IP4_ARP_EVENT, ~0 /* pool index */ , 0 /* is_add */ ); + } + REPLY_MACRO (VL_API_WANT_IP4_ARP_EVENTS_REPLY); +} + +static void +vl_api_want_ip6_nd_events_t_handler (vl_api_want_ip6_nd_events_t * mp) +{ + vpe_api_main_t *am = &vpe_api_main; + vnet_main_t *vnm = vnet_get_main (); + vl_api_want_ip6_nd_events_reply_t *rmp; + vl_api_ip6_nd_event_t *event; + int rv; + + if (mp->enable_disable) + { + pool_get (am->nd_events, event); + memset (event, 0, sizeof (*event)); + + event->_vl_msg_id = ntohs (VL_API_IP6_ND_EVENT); + event->client_index = mp->client_index; + event->context = mp->context; + clib_memcpy (event->address, mp->address, 16); + event->pid = mp->pid; + if (ip6_address_is_zero ((ip6_address_t *) mp->address)) + event->mac_ip = 1; + + rv = vnet_add_del_ip6_nd_change_event + (vnm, nd_change_data_callback, + mp->pid, mp->address /* addr, in net byte order */ , + vpe_resolver_process_node.index, + IP6_ND_EVENT, event - am->nd_events, 1 /* is_add */ ); + } + else + { + rv = vnet_add_del_ip6_nd_change_event + (vnm, nd_change_delete_callback, + mp->pid, mp->address /* addr, in net byte order */ , + vpe_resolver_process_node.index, + IP6_ND_EVENT, ~0 /* pool index */ , 0 /* is_add */ ); + } + REPLY_MACRO (VL_API_WANT_IP6_ND_EVENTS_REPLY); +} + +static void vl_api_input_acl_set_interface_t_handler + (vl_api_input_acl_set_interface_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + vl_api_input_acl_set_interface_reply_t *rmp; + int rv; + u32 sw_if_index, ip4_table_index, ip6_table_index, l2_table_index; + + ip4_table_index = ntohl (mp->ip4_table_index); + ip6_table_index = ntohl (mp->ip6_table_index); + l2_table_index = ntohl (mp->l2_table_index); + sw_if_index = ntohl (mp->sw_if_index); + + VALIDATE_SW_IF_INDEX (mp); + + rv = vnet_set_input_acl_intfc (vm, sw_if_index, ip4_table_index, + ip6_table_index, l2_table_index, mp->is_add); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_INPUT_ACL_SET_INTERFACE_REPLY); +} + +static void 
vl_api_cop_interface_enable_disable_t_handler + (vl_api_cop_interface_enable_disable_t * mp) +{ + vl_api_cop_interface_enable_disable_reply_t *rmp; + int rv; + u32 sw_if_index = ntohl (mp->sw_if_index); + int enable_disable; + + VALIDATE_SW_IF_INDEX (mp); + + enable_disable = (int) mp->enable_disable; + + rv = cop_interface_enable_disable (sw_if_index, enable_disable); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_COP_INTERFACE_ENABLE_DISABLE_REPLY); +} + +static void vl_api_cop_whitelist_enable_disable_t_handler + (vl_api_cop_whitelist_enable_disable_t * mp) +{ + vl_api_cop_whitelist_enable_disable_reply_t *rmp; + cop_whitelist_enable_disable_args_t _a, *a = &_a; + u32 sw_if_index = ntohl (mp->sw_if_index); + int rv; + + VALIDATE_SW_IF_INDEX (mp); + + a->sw_if_index = sw_if_index; + a->ip4 = mp->ip4; + a->ip6 = mp->ip6; + a->default_cop = mp->default_cop; + a->fib_id = ntohl (mp->fib_id); + + rv = cop_whitelist_enable_disable (a); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_COP_WHITELIST_ENABLE_DISABLE_REPLY); +} + +static void +vl_api_get_node_graph_t_handler (vl_api_get_node_graph_t * mp) +{ + int rv = 0; + u8 *vector = 0; + api_main_t *am = &api_main; + vlib_main_t *vm = vlib_get_main (); + void *oldheap; + vl_api_get_node_graph_reply_t *rmp; + + pthread_mutex_lock (&am->vlib_rp->mutex); + oldheap = svm_push_data_heap (am->vlib_rp); + + /* + * Keep the number of memcpy ops to a minimum (e.g. 1). + */ + vec_validate (vector, 16384); + vec_reset_length (vector); + + /* $$$$ FIXME */ + vector = vlib_node_serialize (&vm->node_main, vector, + (u32) ~ 0 /* all threads */ , + 1 /* include nexts */ , + 1 /* include stats */ ); + + svm_pop_heap (oldheap); + pthread_mutex_unlock (&am->vlib_rp->mutex); + + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_GET_NODE_GRAPH_REPLY, + ({ + rmp->reply_in_shmem = (uword) vector; + })); + /* *INDENT-ON* */ +} + +static void +vl_api_ioam_enable_t_handler (vl_api_ioam_enable_t * mp) +{ + int rv = 0; + vl_api_ioam_enable_reply_t *rmp; + clib_error_t *error; + + /* Ignoring the profile id as currently a single profile + * is supported */ + error = ip6_ioam_enable (mp->trace_enable, mp->pot_enable, + mp->seqno, mp->analyse); + if (error) + { + clib_error_report (error); + rv = clib_error_get_code (error); + } + + REPLY_MACRO (VL_API_IOAM_ENABLE_REPLY); +} + +static void +vl_api_ioam_disable_t_handler (vl_api_ioam_disable_t * mp) +{ + int rv = 0; + vl_api_ioam_disable_reply_t *rmp; + clib_error_t *error; + + error = clear_ioam_rewrite_fn (); + if (error) + { + clib_error_report (error); + rv = clib_error_get_code (error); + } + + REPLY_MACRO (VL_API_IOAM_DISABLE_REPLY); +} + +static void +vl_api_policer_add_del_t_handler (vl_api_policer_add_del_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + vl_api_policer_add_del_reply_t *rmp; + int rv = 0; + u8 *name = NULL; + sse2_qos_pol_cfg_params_st cfg; + clib_error_t *error; + u32 policer_index; + + name = format (0, "%s", mp->name); + + memset (&cfg, 0, sizeof (cfg)); + cfg.rfc = mp->type; + cfg.rnd_type = mp->round_type; + cfg.rate_type = mp->rate_type; + cfg.rb.kbps.cir_kbps = mp->cir; + cfg.rb.kbps.eir_kbps = mp->eir; + cfg.rb.kbps.cb_bytes = mp->cb; + cfg.rb.kbps.eb_bytes = mp->eb; + cfg.conform_action.action_type = mp->conform_action_type; + cfg.conform_action.dscp = mp->conform_dscp; + cfg.exceed_action.action_type = mp->exceed_action_type; + cfg.exceed_action.dscp = mp->exceed_dscp; + cfg.violate_action.action_type = mp->violate_action_type; + cfg.violate_action.dscp = mp->violate_dscp; + cfg.color_aware = 
mp->color_aware; + + error = policer_add_del (vm, name, &cfg, &policer_index, mp->is_add); + + if (error) + rv = VNET_API_ERROR_UNSPECIFIED; + + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_POLICER_ADD_DEL_REPLY, + ({ + if (rv == 0 && mp->is_add) + rmp->policer_index = ntohl(policer_index); + else + rmp->policer_index = ~0; + })); + /* *INDENT-ON* */ +} + +static void +send_policer_details (u8 * name, + sse2_qos_pol_cfg_params_st * config, + policer_read_response_type_st * templ, + unix_shared_memory_queue_t * q, u32 context) +{ + vl_api_policer_details_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_POLICER_DETAILS); + mp->context = context; + mp->cir = htonl (config->rb.kbps.cir_kbps); + mp->eir = htonl (config->rb.kbps.eir_kbps); + mp->cb = htonl (config->rb.kbps.cb_bytes); + mp->eb = htonl (config->rb.kbps.eb_bytes); + mp->rate_type = config->rate_type; + mp->round_type = config->rnd_type; + mp->type = config->rfc; + mp->conform_action_type = config->conform_action.action_type; + mp->conform_dscp = config->conform_action.dscp; + mp->exceed_action_type = config->exceed_action.action_type; + mp->exceed_dscp = config->exceed_action.dscp; + mp->violate_action_type = config->violate_action.action_type; + mp->violate_dscp = config->violate_action.dscp; + mp->single_rate = templ->single_rate ? 1 : 0; + mp->color_aware = templ->color_aware ? 1 : 0; + mp->scale = htonl (templ->scale); + mp->cir_tokens_per_period = htonl (templ->cir_tokens_per_period); + mp->pir_tokens_per_period = htonl (templ->pir_tokens_per_period); + mp->current_limit = htonl (templ->current_limit); + mp->current_bucket = htonl (templ->current_bucket); + mp->extended_limit = htonl (templ->extended_limit); + mp->extended_bucket = htonl (templ->extended_bucket); + mp->last_update_time = clib_host_to_net_u64 (templ->last_update_time); + + strncpy ((char *) mp->name, (char *) name, ARRAY_LEN (mp->name) - 1); + + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +static void +vl_api_policer_dump_t_handler (vl_api_policer_dump_t * mp) +{ + unix_shared_memory_queue_t *q; + vnet_policer_main_t *pm = &vnet_policer_main; + hash_pair_t *hp; + uword *p; + u32 pool_index; + u8 *match_name = 0; + u8 *name; + sse2_qos_pol_cfg_params_st *config; + policer_read_response_type_st *templ; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + return; + + if (mp->match_name_valid) + { + match_name = format (0, "%s%c", mp->match_name, 0); + } + + if (mp->match_name_valid) + { + p = hash_get_mem (pm->policer_config_by_name, match_name); + if (p) + { + pool_index = p[0]; + config = pool_elt_at_index (pm->configs, pool_index); + templ = pool_elt_at_index (pm->policer_templates, pool_index); + send_policer_details (match_name, config, templ, q, mp->context); + } + } + else + { + /* *INDENT-OFF* */ + hash_foreach_pair (hp, pm->policer_config_by_name, + ({ + name = (u8 *) hp->key; + pool_index = hp->value[0]; + config = pool_elt_at_index (pm->configs, pool_index); + templ = pool_elt_at_index (pm->policer_templates, pool_index); + send_policer_details(name, config, templ, q, mp->context); + })); + /* *INDENT-ON* */ + } +} + +static void + vl_api_policer_classify_set_interface_t_handler + (vl_api_policer_classify_set_interface_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + vl_api_policer_classify_set_interface_reply_t *rmp; + int rv; + u32 sw_if_index, ip4_table_index, ip6_table_index, l2_table_index; + + ip4_table_index = ntohl (mp->ip4_table_index); + ip6_table_index = 
ntohl (mp->ip6_table_index); + l2_table_index = ntohl (mp->l2_table_index); + sw_if_index = ntohl (mp->sw_if_index); + + VALIDATE_SW_IF_INDEX (mp); + + rv = vnet_set_policer_classify_intfc (vm, sw_if_index, ip4_table_index, + ip6_table_index, l2_table_index, + mp->is_add); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_POLICER_CLASSIFY_SET_INTERFACE_REPLY); +} + +static void +send_policer_classify_details (u32 sw_if_index, + u32 table_index, + unix_shared_memory_queue_t * q, u32 context) +{ + vl_api_policer_classify_details_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_POLICER_CLASSIFY_DETAILS); + mp->context = context; + mp->sw_if_index = htonl (sw_if_index); + mp->table_index = htonl (table_index); + + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +static void +vl_api_policer_classify_dump_t_handler (vl_api_policer_classify_dump_t * mp) +{ + unix_shared_memory_queue_t *q; + policer_classify_main_t *pcm = &policer_classify_main; + u32 *vec_tbl; + int i; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + return; + + vec_tbl = pcm->classify_table_index_by_sw_if_index[mp->type]; + + if (vec_len (vec_tbl)) + { + for (i = 0; i < vec_len (vec_tbl); i++) + { + if (vec_elt (vec_tbl, i) == ~0) + continue; + + send_policer_classify_details (i, vec_elt (vec_tbl, i), q, + mp->context); + } + } +} + +static void +vl_api_mpls_tunnel_details_t_handler (vl_api_mpls_fib_details_t * mp) +{ + clib_warning ("BUG"); +} + +typedef struct mpls_tunnel_send_walk_ctx_t_ +{ + unix_shared_memory_queue_t *q; + u32 index; + u32 context; +} mpls_tunnel_send_walk_ctx_t; + +static void +send_mpls_tunnel_entry (u32 mti, void *arg) +{ + mpls_tunnel_send_walk_ctx_t *ctx; + vl_api_mpls_tunnel_details_t *mp; + const mpls_tunnel_t *mt; + u32 nlabels; + + ctx = arg; + + if (~0 != ctx->index && mti != ctx->index) + return; + + mt = mpls_tunnel_get (mti); + nlabels = vec_len (mt->mt_label_stack); + + mp = vl_msg_api_alloc (sizeof (*mp) + nlabels * sizeof (u32)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_MPLS_TUNNEL_DETAILS); + mp->context = ctx->context; + + mp->tunnel_index = ntohl (mti); + memcpy (mp->mt_next_hop_out_labels, + mt->mt_label_stack, nlabels * sizeof (u32)); + + // FIXME + + vl_msg_api_send_shmem (ctx->q, (u8 *) & mp); +} + +static void +vl_api_mpls_tunnel_dump_t_handler (vl_api_mpls_tunnel_dump_t * mp) +{ + unix_shared_memory_queue_t *q; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + return; + + mpls_tunnel_send_walk_ctx_t ctx = { + .q = q, + .index = ntohl (mp->tunnel_index), + .context = mp->context, + }; + mpls_tunnel_walk (send_mpls_tunnel_entry, &ctx); +} + +static void +vl_api_mpls_fib_details_t_handler (vl_api_mpls_fib_details_t * mp) +{ + clib_warning ("BUG"); +} + +static void +vl_api_mpls_fib_details_t_endian (vl_api_mpls_fib_details_t * mp) +{ + clib_warning ("BUG"); +} + +static void +vl_api_mpls_fib_details_t_print (vl_api_mpls_fib_details_t * mp) +{ + clib_warning ("BUG"); +} + +static void +send_mpls_fib_details (vpe_api_main_t * am, + unix_shared_memory_queue_t * q, + u32 table_id, u32 label, u32 eos, + fib_route_path_encode_t * api_rpaths, u32 context) +{ + vl_api_mpls_fib_details_t *mp; + fib_route_path_encode_t *api_rpath; + vl_api_fib_path2_t *fp; + int path_count; + + path_count = vec_len (api_rpaths); + mp = vl_msg_api_alloc (sizeof (*mp) + path_count * sizeof (*fp)); + if (!mp) + return; + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = 
ntohs (VL_API_MPLS_FIB_DETAILS); + mp->context = context; + + mp->table_id = htonl (table_id); + mp->eos_bit = eos; + mp->label = htonl (label); + + mp->count = htonl (path_count); + fp = mp->path; + vec_foreach (api_rpath, api_rpaths) + { + memset (fp, 0, sizeof (*fp)); + fp->weight = htonl (api_rpath->rpath.frp_weight); + fp->sw_if_index = htonl (api_rpath->rpath.frp_sw_if_index); + copy_fib_next_hop (api_rpath, fp); + fp++; + } + + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +static void +vl_api_mpls_fib_dump_t_handler (vl_api_mpls_fib_dump_t * mp) +{ + vpe_api_main_t *am = &vpe_api_main; + unix_shared_memory_queue_t *q; + mpls_main_t *mm = &mpls_main; + fib_table_t *fib_table; + fib_node_index_t lfei, *lfeip, *lfeis = NULL; + mpls_label_t key; + fib_prefix_t pfx; + u32 fib_index; + fib_route_path_encode_t *api_rpaths; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + return; + + /* *INDENT-OFF* */ + pool_foreach (fib_table, mm->fibs, + ({ + hash_foreach(key, lfei, fib_table->mpls.mf_entries, + ({ + vec_add1(lfeis, lfei); + })); + })); + vec_sort_with_function(lfeis, fib_entry_cmp_for_sort); + + vec_foreach(lfeip, lfeis) + { + fib_entry_get_prefix(*lfeip, &pfx); + fib_index = fib_entry_get_fib_index(*lfeip); + fib_table = fib_table_get(fib_index, pfx.fp_proto); + api_rpaths = NULL; + fib_entry_encode(*lfeip, &api_rpaths); + send_mpls_fib_details (am, q, + fib_table->ft_table_id, + pfx.fp_label, + pfx.fp_eos, + api_rpaths, + mp->context); + vec_free(api_rpaths); + } + + vec_free (lfeis); +} + +static void +vl_api_classify_table_ids_t_handler (vl_api_classify_table_ids_t * mp) +{ + unix_shared_memory_queue_t *q; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + return; + + vnet_classify_main_t *cm = &vnet_classify_main; + vnet_classify_table_t *t; + u32 *table_ids = 0; + u32 count; + + /* *INDENT-OFF* */ + pool_foreach (t, cm->tables, + ({ + vec_add1 (table_ids, ntohl(t - cm->tables)); + })); + /* *INDENT-ON* */ + count = vec_len (table_ids); + + vl_api_classify_table_ids_reply_t *rmp; + rmp = vl_msg_api_alloc_as_if_client (sizeof (*rmp) + count * sizeof (u32)); + rmp->_vl_msg_id = ntohs (VL_API_CLASSIFY_TABLE_IDS_REPLY); + rmp->context = mp->context; + rmp->count = ntohl (count); + clib_memcpy (rmp->ids, table_ids, count * sizeof (u32)); + rmp->retval = 0; + + vl_msg_api_send_shmem (q, (u8 *) & rmp); + + vec_free (table_ids); +} + +static void + vl_api_classify_table_by_interface_t_handler + (vl_api_classify_table_by_interface_t * mp) +{ + vl_api_classify_table_by_interface_reply_t *rmp; + int rv = 0; + + u32 sw_if_index = ntohl (mp->sw_if_index); + u32 *acl = 0; + + vec_validate (acl, INPUT_ACL_N_TABLES - 1); + vec_set (acl, ~0); + + VALIDATE_SW_IF_INDEX (mp); + + input_acl_main_t *am = &input_acl_main; + + int if_idx; + u32 type; + + for (type = 0; type < INPUT_ACL_N_TABLES; type++) + { + u32 *vec_tbl = am->classify_table_index_by_sw_if_index[type]; + if (vec_len (vec_tbl)) + { + for (if_idx = 0; if_idx < vec_len (vec_tbl); if_idx++) + { + if (vec_elt (vec_tbl, if_idx) == ~0 || sw_if_index != if_idx) + { + continue; + } + acl[type] = vec_elt (vec_tbl, if_idx); + } + } + } + + BAD_SW_IF_INDEX_LABEL; + + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_CLASSIFY_TABLE_BY_INTERFACE_REPLY, + ({ + rmp->sw_if_index = ntohl(sw_if_index); + rmp->l2_table_id = ntohl(acl[INPUT_ACL_TABLE_L2]); + rmp->ip4_table_id = ntohl(acl[INPUT_ACL_TABLE_IP4]); + rmp->ip6_table_id = ntohl(acl[INPUT_ACL_TABLE_IP6]); + })); + /* *INDENT-ON* */ + vec_free (acl); 
+} + +static void +vl_api_classify_table_info_t_handler (vl_api_classify_table_info_t * mp) +{ + unix_shared_memory_queue_t *q; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + return; + + vl_api_classify_table_info_reply_t *rmp = 0; + + vnet_classify_main_t *cm = &vnet_classify_main; + u32 table_id = ntohl (mp->table_id); + vnet_classify_table_t *t; + + /* *INDENT-OFF* */ + pool_foreach (t, cm->tables, + ({ + if (table_id == t - cm->tables) + { + rmp = vl_msg_api_alloc_as_if_client + (sizeof (*rmp) + t->match_n_vectors * sizeof (u32x4)); + rmp->_vl_msg_id = ntohs (VL_API_CLASSIFY_TABLE_INFO_REPLY); + rmp->context = mp->context; + rmp->table_id = ntohl(table_id); + rmp->nbuckets = ntohl(t->nbuckets); + rmp->match_n_vectors = ntohl(t->match_n_vectors); + rmp->skip_n_vectors = ntohl(t->skip_n_vectors); + rmp->active_sessions = ntohl(t->active_elements); + rmp->next_table_index = ntohl(t->next_table_index); + rmp->miss_next_index = ntohl(t->miss_next_index); + rmp->mask_length = ntohl(t->match_n_vectors * sizeof (u32x4)); + clib_memcpy(rmp->mask, t->mask, t->match_n_vectors * sizeof(u32x4)); + rmp->retval = 0; + break; + } + })); + /* *INDENT-ON* */ + + if (rmp == 0) + { + rmp = vl_msg_api_alloc (sizeof (*rmp)); + rmp->_vl_msg_id = ntohs ((VL_API_CLASSIFY_TABLE_INFO_REPLY)); + rmp->context = mp->context; + rmp->retval = ntohl (VNET_API_ERROR_CLASSIFY_TABLE_NOT_FOUND); + } + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void +vl_api_classify_session_details_t_handler (vl_api_classify_session_details_t * + mp) +{ + clib_warning ("BUG"); +} + +static void +send_classify_session_details (unix_shared_memory_queue_t * q, + u32 table_id, + u32 match_length, + vnet_classify_entry_t * e, u32 context) +{ + vl_api_classify_session_details_t *rmp; + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_CLASSIFY_SESSION_DETAILS); + rmp->context = context; + rmp->table_id = ntohl (table_id); + rmp->hit_next_index = ntohl (e->next_index); + rmp->advance = ntohl (e->advance); + rmp->opaque_index = ntohl (e->opaque_index); + rmp->match_length = ntohl (match_length); + clib_memcpy (rmp->match, e->key, match_length); + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void +vl_api_classify_session_dump_t_handler (vl_api_classify_session_dump_t * mp) +{ + vnet_classify_main_t *cm = &vnet_classify_main; + unix_shared_memory_queue_t *q; + + u32 table_id = ntohl (mp->table_id); + vnet_classify_table_t *t; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (!q) + return; + + /* *INDENT-OFF* */ + pool_foreach (t, cm->tables, + ({ + if (table_id == t - cm->tables) + { + vnet_classify_bucket_t * b; + vnet_classify_entry_t * v, * save_v; + int i, j, k; + + for (i = 0; i < t->nbuckets; i++) + { + b = &t->buckets [i]; + if (b->offset == 0) + continue; + + save_v = vnet_classify_get_entry (t, b->offset); + for (j = 0; j < (1 << t->log2_pages); j++) + { + for (k = 0; k < t->entries_per_page; k++) + { + v = vnet_classify_entry_at_index + (t, save_v, j*t->entries_per_page + k); + if (vnet_classify_entry_is_free (v)) + continue; + + send_classify_session_details + (q, table_id, t->match_n_vectors * sizeof (u32x4), + v, mp->context); + } + } + } + break; + } + })); + /* *INDENT-ON* */ +} + +static void +vl_api_set_ipfix_exporter_t_handler (vl_api_set_ipfix_exporter_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + flow_report_main_t *frm = &flow_report_main; + vl_api_set_ipfix_exporter_reply_t *rmp; + ip4_address_t 
collector, src; + u16 collector_port = UDP_DST_PORT_ipfix; + u32 path_mtu; + u32 template_interval; + u8 udp_checksum; + u32 fib_id; + u32 fib_index = ~0; + int rv = 0; + + memcpy (collector.data, mp->collector_address, sizeof (collector.data)); + collector_port = ntohs (mp->collector_port); + if (collector_port == (u16) ~ 0) + collector_port = UDP_DST_PORT_ipfix; + memcpy (src.data, mp->src_address, sizeof (src.data)); + fib_id = ntohl (mp->vrf_id); + + ip4_main_t *im = &ip4_main; + if (fib_id == ~0) + { + fib_index = ~0; + } + else + { + uword *p = hash_get (im->fib_index_by_table_id, fib_id); + if (!p) + { + rv = VNET_API_ERROR_NO_SUCH_FIB; + goto out; + } + fib_index = p[0]; + } + + path_mtu = ntohl (mp->path_mtu); + if (path_mtu == ~0) + path_mtu = 512; // RFC 7011 section 10.3.3. + template_interval = ntohl (mp->template_interval); + if (template_interval == ~0) + template_interval = 20; + udp_checksum = mp->udp_checksum; + + if (collector.as_u32 == 0) + { + rv = VNET_API_ERROR_INVALID_VALUE; + goto out; + } + + if (src.as_u32 == 0) + { + rv = VNET_API_ERROR_INVALID_VALUE; + goto out; + } + + if (path_mtu > 1450 /* vpp does not support fragmentation */ ) + { + rv = VNET_API_ERROR_INVALID_VALUE; + goto out; + } + + if (path_mtu < 68) + { + rv = VNET_API_ERROR_INVALID_VALUE; + goto out; + } + + /* Reset report streams if we are reconfiguring IP addresses */ + if (frm->ipfix_collector.as_u32 != collector.as_u32 || + frm->src_address.as_u32 != src.as_u32 || + frm->collector_port != collector_port) + vnet_flow_reports_reset (frm); + + frm->ipfix_collector.as_u32 = collector.as_u32; + frm->collector_port = collector_port; + frm->src_address.as_u32 = src.as_u32; + frm->fib_index = fib_index; + frm->path_mtu = path_mtu; + frm->template_interval = template_interval; + frm->udp_checksum = udp_checksum; + + /* Turn on the flow reporting process */ + vlib_process_signal_event (vm, flow_report_process_node.index, 1, 0); + +out: + REPLY_MACRO (VL_API_SET_IPFIX_EXPORTER_REPLY); +} + +static void +vl_api_ipfix_exporter_dump_t_handler (vl_api_ipfix_exporter_dump_t * mp) +{ + flow_report_main_t *frm = &flow_report_main; + unix_shared_memory_queue_t *q; + vl_api_ipfix_exporter_details_t *rmp; + ip4_main_t *im = &ip4_main; + u32 vrf_id; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (!q) + return; + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_IPFIX_EXPORTER_DETAILS); + rmp->context = mp->context; + memcpy (rmp->collector_address, frm->ipfix_collector.data, + sizeof (frm->ipfix_collector.data)); + rmp->collector_port = htons (frm->collector_port); + memcpy (rmp->src_address, frm->src_address.data, + sizeof (frm->src_address.data)); + if (frm->fib_index == ~0) + vrf_id = ~0; + else + vrf_id = im->fibs[frm->fib_index].ft_table_id; + rmp->vrf_id = htonl (vrf_id); + rmp->path_mtu = htonl (frm->path_mtu); + rmp->template_interval = htonl (frm->template_interval); + rmp->udp_checksum = (frm->udp_checksum != 0); + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void + vl_api_set_ipfix_classify_stream_t_handler + (vl_api_set_ipfix_classify_stream_t * mp) +{ + vl_api_set_ipfix_classify_stream_reply_t *rmp; + flow_report_classify_main_t *fcm = &flow_report_classify_main; + flow_report_main_t *frm = &flow_report_main; + u32 domain_id = 0; + u32 src_port = UDP_DST_PORT_ipfix; + int rv = 0; + + domain_id = ntohl (mp->domain_id); + src_port = ntohs (mp->src_port); + + if (fcm->src_port != 0 && + (fcm->domain_id != domain_id 
|| fcm->src_port != (u16) src_port)) + { + int rv = vnet_stream_change (frm, fcm->domain_id, fcm->src_port, + domain_id, (u16) src_port); + ASSERT (rv == 0); + } + + fcm->domain_id = domain_id; + fcm->src_port = (u16) src_port; + + REPLY_MACRO (VL_API_SET_IPFIX_CLASSIFY_STREAM_REPLY); +} + +static void + vl_api_ipfix_classify_stream_dump_t_handler + (vl_api_ipfix_classify_stream_dump_t * mp) +{ + flow_report_classify_main_t *fcm = &flow_report_classify_main; + unix_shared_memory_queue_t *q; + vl_api_ipfix_classify_stream_details_t *rmp; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (!q) + return; + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_IPFIX_CLASSIFY_STREAM_DETAILS); + rmp->context = mp->context; + rmp->domain_id = htonl (fcm->domain_id); + rmp->src_port = htons (fcm->src_port); + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void + vl_api_ipfix_classify_table_add_del_t_handler + (vl_api_ipfix_classify_table_add_del_t * mp) +{ + vl_api_ipfix_classify_table_add_del_reply_t *rmp; + flow_report_classify_main_t *fcm = &flow_report_classify_main; + flow_report_main_t *frm = &flow_report_main; + vnet_flow_report_add_del_args_t args; + ipfix_classify_table_t *table; + int is_add; + u32 classify_table_index; + u8 ip_version; + u8 transport_protocol; + int rv = 0; + + classify_table_index = ntohl (mp->table_id); + ip_version = mp->ip_version; + transport_protocol = mp->transport_protocol; + is_add = mp->is_add; + + if (fcm->src_port == 0) + { + /* call set_ipfix_classify_stream first */ + rv = VNET_API_ERROR_UNSPECIFIED; + goto out; + } + + memset (&args, 0, sizeof (args)); + + table = 0; + int i; + for (i = 0; i < vec_len (fcm->tables); i++) + if (ipfix_classify_table_index_valid (i)) + if (fcm->tables[i].classify_table_index == classify_table_index) + { + table = &fcm->tables[i]; + break; + } + + if (is_add) + { + if (table) + { + rv = VNET_API_ERROR_VALUE_EXIST; + goto out; + } + table = ipfix_classify_add_table (); + table->classify_table_index = classify_table_index; + } + else + { + if (!table) + { + rv = VNET_API_ERROR_NO_SUCH_ENTRY; + goto out; + } + } + + table->ip_version = ip_version; + table->transport_protocol = transport_protocol; + + args.opaque.as_uword = table - fcm->tables; + args.rewrite_callback = ipfix_classify_template_rewrite; + args.flow_data_callback = ipfix_classify_send_flows; + args.is_add = is_add; + args.domain_id = fcm->domain_id; + args.src_port = fcm->src_port; + + rv = vnet_flow_report_add_del (frm, &args); + + /* If deleting, or add failed */ + if (is_add == 0 || (rv && is_add)) + ipfix_classify_delete_table (table - fcm->tables); + +out: + REPLY_MACRO (VL_API_SET_IPFIX_CLASSIFY_STREAM_REPLY); +} + +static void +send_ipfix_classify_table_details (u32 table_index, + unix_shared_memory_queue_t * q, + u32 context) +{ + flow_report_classify_main_t *fcm = &flow_report_classify_main; + vl_api_ipfix_classify_table_details_t *mp; + + ipfix_classify_table_t *table = &fcm->tables[table_index]; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_IPFIX_CLASSIFY_TABLE_DETAILS); + mp->context = context; + mp->table_id = htonl (table->classify_table_index); + mp->ip_version = table->ip_version; + mp->transport_protocol = table->transport_protocol; + + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +static void + vl_api_ipfix_classify_table_dump_t_handler + (vl_api_ipfix_classify_table_dump_t * mp) +{ + 
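/* Walk fcm->tables and send an ipfix_classify_table_details message for each valid entry */ +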
flow_report_classify_main_t *fcm = &flow_report_classify_main; + unix_shared_memory_queue_t *q; + u32 i; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (!q) + return; + + for (i = 0; i < vec_len (fcm->tables); i++) + if (ipfix_classify_table_index_valid (i)) + send_ipfix_classify_table_details (i, q, mp->context); +} + +static void +vl_api_pg_create_interface_t_handler (vl_api_pg_create_interface_t * mp) +{ + vl_api_pg_create_interface_reply_t *rmp; + int rv = 0; + + pg_main_t *pg = &pg_main; + u32 pg_if_id = pg_interface_add_or_get (pg, ntohl (mp->interface_id)); + pg_interface_t *pi = pool_elt_at_index (pg->interfaces, pg_if_id); + + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_PG_CREATE_INTERFACE_REPLY, + ({ + rmp->sw_if_index = ntohl(pi->sw_if_index); + })); + /* *INDENT-ON* */ +} + +static void +vl_api_pg_capture_t_handler (vl_api_pg_capture_t * mp) +{ + vl_api_pg_capture_reply_t *rmp; + int rv = 0; + + vnet_main_t *vnm = vnet_get_main (); + vnet_interface_main_t *im = &vnm->interface_main; + vnet_hw_interface_t *hi = 0; + + u8 *intf_name = format (0, "pg%d", ntohl (mp->interface_id), 0); + u32 hw_if_index = ~0; + uword *p = hash_get_mem (im->hw_interface_by_name, intf_name); + if (p) + hw_if_index = *p; + vec_free (intf_name); + + if (hw_if_index != ~0) + { + pg_capture_args_t _a, *a = &_a; + + u32 len = ntohl (mp->pcap_name_length); + u8 *pcap_file_name = vec_new (u8, len); + clib_memcpy (pcap_file_name, mp->pcap_file_name, len); + + hi = vnet_get_sup_hw_interface (vnm, hw_if_index); + a->hw_if_index = hw_if_index; + a->dev_instance = hi->dev_instance; + a->is_enabled = mp->is_enabled; + a->pcap_file_name = pcap_file_name; + a->count = ntohl (mp->count); + + clib_error_t *e = pg_capture (a); + if (e) + { + clib_error_report (e); + rv = VNET_API_ERROR_CANNOT_CREATE_PCAP_FILE; + } + + vec_free (pcap_file_name); + } + REPLY_MACRO (VL_API_PG_CAPTURE_REPLY); +} + +static void +vl_api_pg_enable_disable_t_handler (vl_api_pg_enable_disable_t * mp) +{ + vl_api_pg_enable_disable_reply_t *rmp; + int rv = 0; + + pg_main_t *pg = &pg_main; + u32 stream_index = ~0; + + int is_enable = mp->is_enabled != 0; + u32 len = ntohl (mp->stream_name_length) - 1; + + if (len > 0) + { + u8 *stream_name = vec_new (u8, len); + clib_memcpy (stream_name, mp->stream_name, len); + uword *p = hash_get_mem (pg->stream_index_by_name, stream_name); + if (p) + stream_index = *p; + vec_free (stream_name); + } + + pg_enable_disable (stream_index, is_enable); + + REPLY_MACRO (VL_API_PG_ENABLE_DISABLE_REPLY); +} + +static void + vl_api_ip_source_and_port_range_check_add_del_t_handler + (vl_api_ip_source_and_port_range_check_add_del_t * mp) +{ + vl_api_ip_source_and_port_range_check_add_del_reply_t *rmp; + int rv = 0; + + u8 is_ipv6 = mp->is_ipv6; + u8 is_add = mp->is_add; + u8 mask_length = mp->mask_length; + ip4_address_t ip4_addr; + ip6_address_t ip6_addr; + u16 *low_ports = 0; + u16 *high_ports = 0; + u32 vrf_id; + u16 tmp_low, tmp_high; + u8 num_ranges; + int i; + + // Validate port range + num_ranges = mp->number_of_ranges; + if (num_ranges > 32) + { // This is size of array in VPE.API + rv = VNET_API_ERROR_EXCEEDED_NUMBER_OF_RANGES_CAPACITY; + goto reply; + } + + vec_reset_length (low_ports); + vec_reset_length (high_ports); + + for (i = 0; i < num_ranges; i++) + { + tmp_low = mp->low_ports[i]; + tmp_high = mp->high_ports[i]; + // If tmp_low <= tmp_high then only need to check tmp_low = 0 + // If tmp_low <= tmp_high then only need to check tmp_high > 65535 + if (tmp_low > tmp_high || tmp_low == 0 || 
tmp_high > 65535) + { + rv = VNET_API_ERROR_INVALID_VALUE; + goto reply; + } + vec_add1 (low_ports, tmp_low); + vec_add1 (high_ports, tmp_high + 1); + } + + // Validate mask_length + if ((is_ipv6 && mask_length > 128) || (!is_ipv6 && mask_length > 32)) + { + rv = VNET_API_ERROR_ADDRESS_LENGTH_MISMATCH; + goto reply; + } + + vrf_id = ntohl (mp->vrf_id); + + if (vrf_id < 1) + { + rv = VNET_API_ERROR_INVALID_VALUE; + goto reply; + } + + + if (is_ipv6) + { + clib_memcpy (ip6_addr.as_u8, mp->address, sizeof (ip6_addr.as_u8)); + rv = ip6_source_and_port_range_check_add_del (&ip6_addr, + mask_length, + vrf_id, + low_ports, + high_ports, is_add); + } + else + { + clib_memcpy (ip4_addr.data, mp->address, sizeof (ip4_addr)); + rv = ip4_source_and_port_range_check_add_del (&ip4_addr, + mask_length, + vrf_id, + low_ports, + high_ports, is_add); + } + +reply: + vec_free (low_ports); + vec_free (high_ports); + REPLY_MACRO (VL_API_IP_SOURCE_AND_PORT_RANGE_CHECK_ADD_DEL_REPLY); +} + +static void + vl_api_ip_source_and_port_range_check_interface_add_del_t_handler + (vl_api_ip_source_and_port_range_check_interface_add_del_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + vl_api_ip_source_and_port_range_check_interface_add_del_reply_t *rmp; + ip4_main_t *im = &ip4_main; + int rv; + u32 sw_if_index; + u32 fib_index[IP_SOURCE_AND_PORT_RANGE_CHECK_N_PROTOCOLS]; + u32 vrf_id[IP_SOURCE_AND_PORT_RANGE_CHECK_N_PROTOCOLS]; + uword *p = 0; + int i; + + vrf_id[IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_TCP_OUT] = + ntohl (mp->tcp_out_vrf_id); + vrf_id[IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_UDP_OUT] = + ntohl (mp->udp_out_vrf_id); + vrf_id[IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_TCP_IN] = + ntohl (mp->tcp_in_vrf_id); + vrf_id[IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_UDP_IN] = + ntohl (mp->udp_in_vrf_id); + + + for (i = 0; i < IP_SOURCE_AND_PORT_RANGE_CHECK_N_PROTOCOLS; i++) + { + if (vrf_id[i] != 0 && vrf_id[i] != ~0) + { + p = hash_get (im->fib_index_by_table_id, vrf_id[i]); + + if (p == 0) + { + rv = VNET_API_ERROR_INVALID_VALUE; + goto reply; + } + + fib_index[i] = p[0]; + } + else + fib_index[i] = ~0; + } + sw_if_index = ntohl (mp->sw_if_index); + + VALIDATE_SW_IF_INDEX (mp); + + rv = + set_ip_source_and_port_range_check (vm, fib_index, sw_if_index, + mp->is_add); + + BAD_SW_IF_INDEX_LABEL; +reply: + + REPLY_MACRO (VL_API_IP_SOURCE_AND_PORT_RANGE_CHECK_INTERFACE_ADD_DEL_REPLY); +} + +static void +vl_api_delete_subif_t_handler (vl_api_delete_subif_t * mp) +{ + vl_api_delete_subif_reply_t *rmp; + int rv; + + rv = vnet_delete_sub_interface (ntohl (mp->sw_if_index)); + + REPLY_MACRO (VL_API_DELETE_SUBIF_REPLY); +} + +static void + vl_api_l2_interface_pbb_tag_rewrite_t_handler + (vl_api_l2_interface_pbb_tag_rewrite_t * mp) +{ + vl_api_l2_interface_pbb_tag_rewrite_reply_t *rmp; + vnet_main_t *vnm = vnet_get_main (); + vlib_main_t *vm = vlib_get_main (); + u32 vtr_op; + int rv = 0; + + VALIDATE_SW_IF_INDEX (mp); + + vtr_op = ntohl (mp->vtr_op); + + switch (vtr_op) + { + case L2_VTR_DISABLED: + case L2_VTR_PUSH_2: + case L2_VTR_POP_2: + case L2_VTR_TRANSLATE_2_1: + break; + + default: + rv = VNET_API_ERROR_INVALID_VALUE; + goto bad_sw_if_index; + } + + rv = l2pbb_configure (vm, vnm, ntohl (mp->sw_if_index), vtr_op, + mp->b_dmac, mp->b_smac, ntohs (mp->b_vlanid), + ntohl (mp->i_sid), ntohs (mp->outer_tag)); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_L2_INTERFACE_PBB_TAG_REWRITE_REPLY); + +} + +static void +vl_api_punt_t_handler (vl_api_punt_t * mp) +{ + vl_api_punt_reply_t *rmp; + vlib_main_t *vm = vlib_get_main (); + 
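/* vnet_punt_add_del registers or removes a local punt for the given IP version, L4 protocol and port */ +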
int rv = 0; + clib_error_t *error; + + error = vnet_punt_add_del (vm, mp->ipv, mp->l4_protocol, + ntohs (mp->l4_port), mp->is_add); + if (error) + { + rv = -1; + clib_error_report (error); + } + + REPLY_MACRO (VL_API_PUNT_REPLY); +} + +static void + vl_api_flow_classify_set_interface_t_handler + (vl_api_flow_classify_set_interface_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + vl_api_flow_classify_set_interface_reply_t *rmp; + int rv; + u32 sw_if_index, ip4_table_index, ip6_table_index; + + ip4_table_index = ntohl (mp->ip4_table_index); + ip6_table_index = ntohl (mp->ip6_table_index); + sw_if_index = ntohl (mp->sw_if_index); + + VALIDATE_SW_IF_INDEX (mp); + + rv = vnet_set_flow_classify_intfc (vm, sw_if_index, ip4_table_index, + ip6_table_index, mp->is_add); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_FLOW_CLASSIFY_SET_INTERFACE_REPLY); +} + +static void +send_flow_classify_details (u32 sw_if_index, + u32 table_index, + unix_shared_memory_queue_t * q, u32 context) +{ + vl_api_flow_classify_details_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_FLOW_CLASSIFY_DETAILS); + mp->context = context; + mp->sw_if_index = htonl (sw_if_index); + mp->table_index = htonl (table_index); + + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +static void +vl_api_flow_classify_dump_t_handler (vl_api_flow_classify_dump_t * mp) +{ + unix_shared_memory_queue_t *q; + flow_classify_main_t *pcm = &flow_classify_main; + u32 *vec_tbl; + int i; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + return; + + vec_tbl = pcm->classify_table_index_by_sw_if_index[mp->type]; + + if (vec_len (vec_tbl)) + { + for (i = 0; i < vec_len (vec_tbl); i++) + { + if (vec_elt (vec_tbl, i) == ~0) + continue; + + send_flow_classify_details (i, vec_elt (vec_tbl, i), q, + mp->context); + } + } +} + +static void +vl_api_feature_enable_disable_t_handler (vl_api_feature_enable_disable_t * mp) +{ + vl_api_feature_enable_disable_reply_t *rmp; + int rv = 0; + u8 *arc_name, *feature_name; + + VALIDATE_SW_IF_INDEX (mp); + + arc_name = format (0, "%s%c", mp->arc_name, 0); + feature_name = format (0, "%s%c", mp->feature_name, 0); + + vnet_feature_registration_t *reg; + reg = + vnet_get_feature_reg ((const char *) arc_name, + (const char *) feature_name); + if (reg == 0) + rv = VNET_API_ERROR_INVALID_VALUE; + else + { + u32 sw_if_index; + clib_error_t *error = 0; + + sw_if_index = ntohl (mp->sw_if_index); + if (reg->enable_disable_cb) + error = reg->enable_disable_cb (sw_if_index, mp->enable); + if (!error) + vnet_feature_enable_disable ((const char *) arc_name, + (const char *) feature_name, + sw_if_index, mp->enable, 0, 0); + else + { + clib_error_report (error); + rv = VNET_API_ERROR_CANNOT_ENABLE_DISABLE_FEATURE; + } + } + + vec_free (feature_name); + vec_free (arc_name); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_FEATURE_ENABLE_DISABLE_REPLY); +} + +#define BOUNCE_HANDLER(nn) \ +static void vl_api_##nn##_t_handler ( \ + vl_api_##nn##_t *mp) \ +{ \ + vpe_client_registration_t *reg; \ + vpe_api_main_t * vam = &vpe_api_main; \ + unix_shared_memory_queue_t * q; \ + \ + /* One registration only... */ \ + pool_foreach(reg, vam->nn##_registrations, \ + ({ \ + q = vl_api_client_index_to_input_queue (reg->client_index); \ + if (q) { \ + /* \ + * If the queue is stuffed, turf the msg and complain \ + * It's unlikely that the intended recipient is \ + * alive; avoid deadlock at all costs. 
\ + */ \ + if (q->cursize == q->maxsize) { \ + clib_warning ("ERROR: receiver queue full, drop msg"); \ + vl_msg_api_free (mp); \ + return; \ + } \ + vl_msg_api_send_shmem (q, (u8 *)&mp); \ + return; \ + } \ + })); \ + vl_msg_api_free (mp); \ +} + +static void setup_message_id_table (api_main_t * am); + +/* + * vpe_api_hookup + * Add vpe's API message handlers to the table. + * vlib has alread mapped shared memory and + * added the client registration handlers. + * See .../open-repo/vlib/memclnt_vlib.c:memclnt_process() + */ +static clib_error_t * +vpe_api_hookup (vlib_main_t * vm) +{ + api_main_t *am = &api_main; + +#define _(N,n) \ + vl_msg_api_set_handlers(VL_API_##N, #n, \ + vl_api_##n##_t_handler, \ + vl_noop_handler, \ + vl_api_##n##_t_endian, \ + vl_api_##n##_t_print, \ + sizeof(vl_api_##n##_t), 1); + foreach_vpe_api_msg; +#undef _ + + /* + * Manually register the sr tunnel add del msg, so we trace + * enough bytes to capture a typical segment list + */ + vl_msg_api_set_handlers (VL_API_SR_TUNNEL_ADD_DEL, + "sr_tunnel_add_del", + vl_api_sr_tunnel_add_del_t_handler, + vl_noop_handler, + vl_api_sr_tunnel_add_del_t_endian, + vl_api_sr_tunnel_add_del_t_print, 256, 1); + + + /* + * Manually register the sr policy add del msg, so we trace + * enough bytes to capture a typical tunnel name list + */ + vl_msg_api_set_handlers (VL_API_SR_POLICY_ADD_DEL, + "sr_policy_add_del", + vl_api_sr_policy_add_del_t_handler, + vl_noop_handler, + vl_api_sr_policy_add_del_t_endian, + vl_api_sr_policy_add_del_t_print, 256, 1); + + /* + * Trace space for 8 MPLS encap labels, classifier mask+match + */ + am->api_trace_cfg[VL_API_MPLS_TUNNEL_ADD_DEL].size += 8 * sizeof (u32); + am->api_trace_cfg[VL_API_CLASSIFY_ADD_DEL_TABLE].size += 5 * sizeof (u32x4); + am->api_trace_cfg[VL_API_CLASSIFY_ADD_DEL_SESSION].size + += 5 * sizeof (u32x4); + am->api_trace_cfg[VL_API_VXLAN_ADD_DEL_TUNNEL].size += 16 * sizeof (u32); + + /* + * Thread-safe API messages + */ + am->is_mp_safe[VL_API_IP_ADD_DEL_ROUTE] = 1; + am->is_mp_safe[VL_API_GET_NODE_GRAPH] = 1; + + /* + * Set up the (msg_name, crc, message-id) table + */ + setup_message_id_table (am); + + return 0; +} + +VLIB_API_INIT_FUNCTION (vpe_api_hookup); + +static clib_error_t * +vpe_api_init (vlib_main_t * vm) +{ + vpe_api_main_t *am = &vpe_api_main; + + am->vlib_main = vm; + am->vnet_main = vnet_get_main (); + am->interface_events_registration_hash = hash_create (0, sizeof (uword)); + am->to_netconf_server_registration_hash = hash_create (0, sizeof (uword)); + am->from_netconf_server_registration_hash = hash_create (0, sizeof (uword)); + am->to_netconf_client_registration_hash = hash_create (0, sizeof (uword)); + am->from_netconf_client_registration_hash = hash_create (0, sizeof (uword)); + am->oam_events_registration_hash = hash_create (0, sizeof (uword)); + am->bfd_events_registration_hash = hash_create (0, sizeof (uword)); + + vl_api_init (vm); + vl_set_memory_region_name ("/vpe-api"); + vl_enable_disable_memory_api (vm, 1 /* enable it */ ); + + return 0; +} + +VLIB_INIT_FUNCTION (vpe_api_init); + + +static clib_error_t * +api_segment_config (vlib_main_t * vm, unformat_input_t * input) +{ + u8 *chroot_path; + u64 baseva, size, pvt_heap_size; + int uid, gid, rv; + const int max_buf_size = 4096; + char *s, *buf; + struct passwd _pw, *pw; + struct group _grp, *grp; + clib_error_t *e; + buf = vec_new (char, 128); + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "prefix %s", &chroot_path)) + { + vec_add1 (chroot_path, 0); + 
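/* NUL-terminate the unformat result so it can be used as a C string */ +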
vl_set_memory_root_path ((char *) chroot_path); + } + else if (unformat (input, "uid %d", &uid)) + vl_set_memory_uid (uid); + else if (unformat (input, "gid %d", &gid)) + vl_set_memory_gid (gid); + else if (unformat (input, "baseva %llx", &baseva)) + vl_set_global_memory_baseva (baseva); + else if (unformat (input, "global-size %lldM", &size)) + vl_set_global_memory_size (size * (1ULL << 20)); + else if (unformat (input, "global-size %lldG", &size)) + vl_set_global_memory_size (size * (1ULL << 30)); + else if (unformat (input, "global-size %lld", &size)) + vl_set_global_memory_size (size); + else if (unformat (input, "global-pvt-heap-size %lldM", &pvt_heap_size)) + vl_set_global_pvt_heap_size (pvt_heap_size * (1ULL << 20)); + else if (unformat (input, "global-pvt-heap-size size %lld", + &pvt_heap_size)) + vl_set_global_pvt_heap_size (pvt_heap_size); + else if (unformat (input, "api-pvt-heap-size %lldM", &pvt_heap_size)) + vl_set_api_pvt_heap_size (pvt_heap_size * (1ULL << 20)); + else if (unformat (input, "api-pvt-heap-size size %lld", + &pvt_heap_size)) + vl_set_api_pvt_heap_size (pvt_heap_size); + else if (unformat (input, "api-size %lldM", &size)) + vl_set_api_memory_size (size * (1ULL << 20)); + else if (unformat (input, "api-size %lldG", &size)) + vl_set_api_memory_size (size * (1ULL << 30)); + else if (unformat (input, "api-size %lld", &size)) + vl_set_api_memory_size (size); + else if (unformat (input, "uid %s", &s)) + { + /* lookup the username */ + pw = NULL; + while (((rv = + getpwnam_r (s, &_pw, buf, vec_len (buf), &pw)) == ERANGE) + && (vec_len (buf) <= max_buf_size)) + { + vec_resize (buf, vec_len (buf) * 2); + } + if (rv < 0) + { + e = clib_error_return_code (0, rv, + CLIB_ERROR_ERRNO_VALID | + CLIB_ERROR_FATAL, + "cannot fetch username %s", s); + vec_free (s); + vec_free (buf); + return e; + } + if (pw == NULL) + { + e = + clib_error_return_fatal (0, "username %s does not exist", s); + vec_free (s); + vec_free (buf); + return e; + } + vec_free (s); + vl_set_memory_uid (pw->pw_uid); + } + else if (unformat (input, "gid %s", &s)) + { + /* lookup the group name */ + grp = NULL; + while (((rv = + getgrnam_r (s, &_grp, buf, vec_len (buf), &grp)) == ERANGE) + && (vec_len (buf) <= max_buf_size)) + { + vec_resize (buf, vec_len (buf) * 2); + } + if (rv != 0) + { + e = clib_error_return_code (0, rv, + CLIB_ERROR_ERRNO_VALID | + CLIB_ERROR_FATAL, + "cannot fetch group %s", s); + vec_free (s); + vec_free (buf); + return e; + } + if (grp == NULL) + { + e = clib_error_return_fatal (0, "group %s does not exist", s); + vec_free (s); + vec_free (buf); + return e; + } + vec_free (s); + vec_free (buf); + vl_set_memory_gid (grp->gr_gid); + } + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + return 0; +} + +VLIB_EARLY_CONFIG_FUNCTION (api_segment_config, "api-segment"); + +void * +get_unformat_vnet_sw_interface (void) +{ + return (void *) &unformat_vnet_sw_interface; +} + +static u8 * +format_arp_event (u8 * s, va_list * args) +{ + vl_api_ip4_arp_event_t *event = va_arg (*args, vl_api_ip4_arp_event_t *); + + s = format (s, "pid %d: ", event->pid); + if (event->mac_ip) + s = format (s, "bd mac/ip4 binding events"); + else + s = format (s, "resolution for %U", format_ip4_address, &event->address); + return s; +} + +static u8 * +format_nd_event (u8 * s, va_list * args) +{ + vl_api_ip6_nd_event_t *event = va_arg (*args, vl_api_ip6_nd_event_t *); + + s = format (s, "pid %d: ", event->pid); + if (event->mac_ip) + s = format (s, "bd mac/ip6 binding 
events"); + else + s = format (s, "resolution for %U", format_ip6_address, event->address); + return s; +} + +static clib_error_t * +show_ip_arp_nd_events_fn (vlib_main_t * vm, + unformat_input_t * input, vlib_cli_command_t * cmd) +{ + vpe_api_main_t *am = &vpe_api_main; + vl_api_ip4_arp_event_t *arp_event; + vl_api_ip6_nd_event_t *nd_event; + + if ((pool_elts (am->arp_events) == 0) && (pool_elts (am->nd_events) == 0)) + { + vlib_cli_output (vm, "No active arp or nd event registrations"); + return 0; + } + + /* *INDENT-OFF* */ + pool_foreach (arp_event, am->arp_events, + ({ + vlib_cli_output (vm, "%U", format_arp_event, arp_event); + })); + + pool_foreach (nd_event, am->nd_events, + ({ + vlib_cli_output (vm, "%U", format_nd_event, nd_event); + })); + /* *INDENT-ON* */ + + return 0; +} + +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (show_ip_arp_nd_events, static) = { + .path = "show arp-nd-event registrations", + .function = show_ip_arp_nd_events_fn, + .short_help = "Show ip4 arp and ip6 nd event registrations", +}; +/* *INDENT-ON* */ + +#define vl_msg_name_crc_list +#include +#undef vl_msg_name_crc_list + +static void +setup_message_id_table (api_main_t * am) +{ +#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id); + foreach_vl_msg_name_crc_memclnt; + foreach_vl_msg_name_crc_vpe; +#undef _ +} + + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ -- cgit 1.2.3-korg From 738f3f2a170bace45180bc8718d5a7e75939b275 Mon Sep 17 00:00:00 2001 From: Pavel Kotucek Date: Mon, 9 Jan 2017 15:11:03 +0100 Subject: API refactoring : dpdk Change-Id: If2541be803a0303401b013390e117c26fd1d9739 Signed-off-by: Pavel Kotucek --- src/vat/api_format.c | 119 ++++++++++++++++--- src/vnet.am | 8 +- src/vnet/devices/dpdk/dpdk.api | 103 ++++++++++++++++ src/vnet/devices/dpdk/dpdk_api.c | 246 +++++++++++++++++++++++++++++++++++++++ src/vnet/vnet_all_api_h.h | 3 + src/vpp/api/api.c | 153 ------------------------ src/vpp/api/custom_dump.c | 17 ++- src/vpp/api/vpe.api | 83 +------------ 8 files changed, 475 insertions(+), 257 deletions(-) create mode 100644 src/vnet/devices/dpdk/dpdk.api create mode 100644 src/vnet/devices/dpdk/dpdk_api.c (limited to 'src/vpp/api/api.c') diff --git a/src/vat/api_format.c b/src/vat/api_format.c index e6c0f244..c00104de 100644 --- a/src/vat/api_format.c +++ b/src/vat/api_format.c @@ -3569,9 +3569,6 @@ _(sw_interface_set_mpls_enable_reply) \ _(sw_interface_set_vpath_reply) \ _(sw_interface_set_vxlan_bypass_reply) \ _(sw_interface_set_l2_bridge_reply) \ -_(sw_interface_set_dpdk_hqos_pipe_reply) \ -_(sw_interface_set_dpdk_hqos_subport_reply) \ -_(sw_interface_set_dpdk_hqos_tctbl_reply) \ _(bridge_domain_add_del_reply) \ _(sw_interface_set_l2_xconnect_reply) \ _(l2fib_add_del_reply) \ @@ -3671,6 +3668,13 @@ _(feature_enable_disable_reply) \ _(sw_interface_tag_add_del_reply) \ _(sw_interface_set_mtu_reply) +#if DPDK > 0 +#define foreach_standard_dpdk_reply_retval_handler \ +_(sw_interface_set_dpdk_hqos_pipe_reply) \ +_(sw_interface_set_dpdk_hqos_subport_reply) \ +_(sw_interface_set_dpdk_hqos_tctbl_reply) +#endif + #define _(n) \ static void vl_api_##n##_t_handler \ (vl_api_##n##_t * mp) \ @@ -3702,6 +3706,39 @@ foreach_standard_reply_retval_handler; foreach_standard_reply_retval_handler; #undef _ +#if DPDK > 0 +#define _(n) \ + static void vl_api_##n##_t_handler \ + (vl_api_##n##_t * mp) \ + { \ + vat_main_t * vam = &vat_main; \ + i32 retval = ntohl(mp->retval); \ + if (vam->async_mode) { \ + vam->async_errors += 
(retval < 0); \ + } else { \ + vam->retval = retval; \ + vam->result_ready = 1; \ + } \ + } +foreach_standard_dpdk_reply_retval_handler; +#undef _ + +#define _(n) \ + static void vl_api_##n##_t_handler_json \ + (vl_api_##n##_t * mp) \ + { \ + vat_main_t * vam = &vat_main; \ + vat_json_node_t node; \ + vat_json_init_object(&node); \ + vat_json_object_add_int(&node, "retval", ntohl(mp->retval)); \ + vat_json_print(vam->ofp, &node); \ + vam->retval = ntohl(mp->retval); \ + vam->result_ready = 1; \ + } +foreach_standard_dpdk_reply_retval_handler; +#undef _ +#endif + /* * Table of message reply handlers, must include boilerplate handlers * we just generated @@ -3725,12 +3762,6 @@ _(SW_INTERFACE_SET_L2_XCONNECT_REPLY, \ sw_interface_set_l2_xconnect_reply) \ _(SW_INTERFACE_SET_L2_BRIDGE_REPLY, \ sw_interface_set_l2_bridge_reply) \ -_(SW_INTERFACE_SET_DPDK_HQOS_PIPE_REPLY, \ - sw_interface_set_dpdk_hqos_pipe_reply) \ -_(SW_INTERFACE_SET_DPDK_HQOS_SUBPORT_REPLY, \ - sw_interface_set_dpdk_hqos_subport_reply) \ -_(SW_INTERFACE_SET_DPDK_HQOS_TCTBL_REPLY, \ - sw_interface_set_dpdk_hqos_tctbl_reply) \ _(BRIDGE_DOMAIN_ADD_DEL_REPLY, bridge_domain_add_del_reply) \ _(BRIDGE_DOMAIN_DETAILS, bridge_domain_details) \ _(BRIDGE_DOMAIN_SW_IF_DETAILS, bridge_domain_sw_if_details) \ @@ -3924,6 +3955,16 @@ _(SW_INTERFACE_SET_MTU_REPLY, sw_interface_set_mtu_reply) \ _(IP_NEIGHBOR_DETAILS, ip_neighbor_details) \ _(SW_INTERFACE_GET_TABLE_REPLY, sw_interface_get_table_reply) +#if DPDK > 0 +#define foreach_vpe_dpdk_api_reply_msg \ +_(SW_INTERFACE_SET_DPDK_HQOS_PIPE_REPLY, \ + sw_interface_set_dpdk_hqos_pipe_reply) \ +_(SW_INTERFACE_SET_DPDK_HQOS_SUBPORT_REPLY, \ + sw_interface_set_dpdk_hqos_subport_reply) \ +_(SW_INTERFACE_SET_DPDK_HQOS_TCTBL_REPLY, \ + sw_interface_set_dpdk_hqos_tctbl_reply) +#endif + /* M: construct, but don't yet send a message */ #define M(T,t) \ @@ -4724,6 +4765,7 @@ api_sw_interface_clear_stats (vat_main_t * vam) W; } +#if DPDK >0 static int api_sw_interface_set_dpdk_hqos_pipe (vat_main_t * vam) { @@ -4944,6 +4986,7 @@ api_sw_interface_set_dpdk_hqos_tctbl (vat_main_t * vam) /* NOTREACHED */ return 0; } +#endif static int api_sw_interface_add_del_address (vat_main_t * vam) @@ -17434,14 +17477,6 @@ _(sw_interface_set_l2_bridge, \ " | sw_if_index bd_id \n" \ "[shg ] [bvi]\n" \ "enable | disable") \ -_(sw_interface_set_dpdk_hqos_pipe, \ - "rx | sw_if_index subport pipe \n" \ - "profile \n") \ -_(sw_interface_set_dpdk_hqos_subport, \ - "rx | sw_if_index subport [rate ]\n" \ - "[bktsize ] [tc0 ] [tc1 ] [tc2 ] [tc3 ] [period ]\n") \ -_(sw_interface_set_dpdk_hqos_tctbl, \ - "rx | sw_if_index entry tc queue \n") \ _(bridge_domain_add_del, \ "bd_id [flood 1|0] [uu-flood 1|0] [forward 1|0] [learn 1|0] [arp-term 1|0] [del]\n") \ _(bridge_domain_dump, "[bd_id ]\n") \ @@ -17739,6 +17774,18 @@ _(sw_interface_set_mtu, " | sw_if_index mtu ") \ _(ip_neighbor_dump, "[ip6] | sw_if_index ") \ _(sw_interface_get_table, " | sw_if_index [ipv6]") +#if DPDK > 0 +#define foreach_vpe_dpdk_api_msg \ +_(sw_interface_set_dpdk_hqos_pipe, \ + "rx | sw_if_index subport pipe \n" \ + "profile \n") \ +_(sw_interface_set_dpdk_hqos_subport, \ + "rx | sw_if_index subport [rate ]\n" \ + "[bktsize ] [tc0 ] [tc1 ] [tc2 ] [tc3 ] [period ]\n") \ +_(sw_interface_set_dpdk_hqos_tctbl, \ + "rx | sw_if_index entry tc queue \n") +#endif + /* List of command functions, CLI names map directly to functions */ #define foreach_cli_function \ _(comment, "usage: comment ") \ @@ -17776,6 +17823,22 @@ _(unset, "usage: unset ") foreach_vpe_api_reply_msg; 
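/*
 * For illustration: for one of the DPDK replies listed in
 * foreach_vpe_dpdk_api_reply_msg, the dispatcher generated by the block
 * added just below reduces to a plain json/non-json switch, roughly:
 *
 *   static void vl_api_sw_interface_set_dpdk_hqos_pipe_reply_t_handler_uni
 *     (vl_api_sw_interface_set_dpdk_hqos_pipe_reply_t * mp)
 *   {
 *     vat_main_t *vam = &vat_main;
 *     if (vam->json_output)
 *       vl_api_sw_interface_set_dpdk_hqos_pipe_reply_t_handler_json (mp);
 *     else
 *       vl_api_sw_interface_set_dpdk_hqos_pipe_reply_t_handler (mp);
 *   }
 */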
#undef _ +#if DPDK > 0 +#define _(N,n) \ + static void vl_api_##n##_t_handler_uni \ + (vl_api_##n##_t * mp) \ + { \ + vat_main_t * vam = &vat_main; \ + if (vam->json_output) { \ + vl_api_##n##_t_handler_json(mp); \ + } else { \ + vl_api_##n##_t_handler(mp); \ + } \ + } +foreach_vpe_dpdk_api_reply_msg; +#undef _ +#endif + void vat_api_hookup (vat_main_t * vam) { @@ -17789,6 +17852,18 @@ vat_api_hookup (vat_main_t * vam) foreach_vpe_api_reply_msg; #undef _ +#if DPDK > 0 +#define _(N,n) \ + vl_msg_api_set_handlers(VL_API_##N, #n, \ + vl_api_##n##_t_handler_uni, \ + vl_noop_handler, \ + vl_api_##n##_t_endian, \ + vl_api_##n##_t_print, \ + sizeof(vl_api_##n##_t), 1); + foreach_vpe_dpdk_api_reply_msg; +#undef _ +#endif + #if (VPP_API_TEST_BUILTIN==0) vl_msg_api_set_first_available_msg_id (VL_MSG_FIRST_AVAILABLE); #endif @@ -17803,11 +17878,21 @@ vat_api_hookup (vat_main_t * vam) #define _(n,h) hash_set_mem (vam->function_by_name, #n, api_##n); foreach_vpe_api_msg; #undef _ +#if DPDK >0 +#define _(n,h) hash_set_mem (vam->function_by_name, #n, api_##n); + foreach_vpe_dpdk_api_msg; +#undef _ +#endif /* Help strings */ #define _(n,h) hash_set_mem (vam->help_by_name, #n, h); foreach_vpe_api_msg; #undef _ +#if DPDK >0 +#define _(n,h) hash_set_mem (vam->help_by_name, #n, h); + foreach_vpe_dpdk_api_msg; +#undef _ +#endif /* CLI functions */ #define _(n,h) hash_set_mem (vam->function_by_name, #n, n); diff --git a/src/vnet.am b/src/vnet.am index 16ade4d1..bc0820a3 100644 --- a/src/vnet.am +++ b/src/vnet.am @@ -759,10 +759,14 @@ libvnet_la_SOURCES += \ vnet/devices/dpdk/init.c \ vnet/devices/dpdk/node.c \ vnet/devices/dpdk/hqos.c \ - vnet/devices/dpdk/cli.c + vnet/devices/dpdk/cli.c \ + vnet/devices/dpdk/dpdk_api.c nobase_include_HEADERS += \ - vnet/devices/dpdk/dpdk.h + vnet/devices/dpdk/dpdk.h \ + vnet/devices/dpdk/dpdk.api.h + +API_FILES += vnet/devices/dpdk/dpdk.api else libvnet_la_SOURCES += \ vnet/devices/nic/ixge.c \ diff --git a/src/vnet/devices/dpdk/dpdk.api b/src/vnet/devices/dpdk/dpdk.api new file mode 100644 index 00000000..21215d45 --- /dev/null +++ b/src/vnet/devices/dpdk/dpdk.api @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2015-2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** \brief DPDK interface HQoS pipe profile set request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - the interface + @param subport - subport ID + @param pipe - pipe ID within its subport + @param profile - pipe profile ID +*/ +define sw_interface_set_dpdk_hqos_pipe { + u32 client_index; + u32 context; + u32 sw_if_index; + u32 subport; + u32 pipe; + u32 profile; +}; + +/** \brief DPDK interface HQoS pipe profile set reply + @param context - sender context, to match reply w/ request + @param retval - request return code +*/ +define sw_interface_set_dpdk_hqos_pipe_reply { + u32 context; + i32 retval; +}; + +/** \brief DPDK interface HQoS subport parameters set request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - the interface + @param subport - subport ID + @param tb_rate - subport token bucket rate (measured in bytes/second) + @param tb_size - subport token bucket size (measured in credits) + @param tc_rate - subport traffic class 0 .. 3 rates (measured in bytes/second) + @param tc_period - enforcement period for rates (measured in milliseconds) +*/ +define sw_interface_set_dpdk_hqos_subport { + u32 client_index; + u32 context; + u32 sw_if_index; + u32 subport; + u32 tb_rate; + u32 tb_size; + u32 tc_rate[4]; + u32 tc_period; +}; + +/** \brief DPDK interface HQoS subport parameters set reply + @param context - sender context, to match reply w/ request + @param retval - request return code +*/ +define sw_interface_set_dpdk_hqos_subport_reply { + u32 context; + i32 retval; +}; + +/** \brief DPDK interface HQoS tctbl entry set request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - the interface + @param entry - entry index ID + @param tc - traffic class (0 .. 3) + @param queue - traffic class queue (0 .. 3) +*/ +define sw_interface_set_dpdk_hqos_tctbl { + u32 client_index; + u32 context; + u32 sw_if_index; + u32 entry; + u32 tc; + u32 queue; +}; + +/** \brief DPDK interface HQoS tctbl entry set reply + @param context - sender context, to match reply w/ request + @param retval - request return code +*/ +define sw_interface_set_dpdk_hqos_tctbl_reply { + u32 context; + i32 retval; +}; + +/* + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ + \ No newline at end of file diff --git a/src/vnet/devices/dpdk/dpdk_api.c b/src/vnet/devices/dpdk/dpdk_api.c new file mode 100644 index 00000000..8faf5c2c --- /dev/null +++ b/src/vnet/devices/dpdk/dpdk_api.c @@ -0,0 +1,246 @@ +/* + *------------------------------------------------------------------ + * dpdk_api.c - dpdk interface api + * + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *------------------------------------------------------------------ + */ + +#include +#include + +#if DPDK > 0 +#include +#endif + +#include + +#define vl_typedefs /* define message structures */ +#include +#undef vl_typedefs + +#define vl_endianfun /* define message structures */ +#include +#undef vl_endianfun + +/* instantiate all the print functions we know about */ +#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__) +#define vl_printfun +#include +#undef vl_printfun + +#include + +#define foreach_vpe_api_msg \ +_(SW_INTERFACE_SET_DPDK_HQOS_PIPE, sw_interface_set_dpdk_hqos_pipe) \ +_(SW_INTERFACE_SET_DPDK_HQOS_SUBPORT, sw_interface_set_dpdk_hqos_subport) \ +_(SW_INTERFACE_SET_DPDK_HQOS_TCTBL, sw_interface_set_dpdk_hqos_tctbl) + +static void + vl_api_sw_interface_set_dpdk_hqos_pipe_t_handler + (vl_api_sw_interface_set_dpdk_hqos_pipe_t * mp) +{ + vl_api_sw_interface_set_dpdk_hqos_pipe_reply_t *rmp; + int rv = 0; + +#if DPDK > 0 + dpdk_main_t *dm = &dpdk_main; + dpdk_device_t *xd; + + u32 sw_if_index = ntohl (mp->sw_if_index); + u32 subport = ntohl (mp->subport); + u32 pipe = ntohl (mp->pipe); + u32 profile = ntohl (mp->profile); + vnet_hw_interface_t *hw; + + VALIDATE_SW_IF_INDEX (mp); + + /* hw_if & dpdk device */ + hw = vnet_get_sup_hw_interface (dm->vnet_main, sw_if_index); + + xd = vec_elt_at_index (dm->devices, hw->dev_instance); + + rv = rte_sched_pipe_config (xd->hqos_ht->hqos, subport, pipe, profile); + + BAD_SW_IF_INDEX_LABEL; +#else + clib_warning ("setting HQoS pipe parameters without DPDK not implemented"); + rv = VNET_API_ERROR_UNIMPLEMENTED; +#endif /* DPDK */ + + REPLY_MACRO (VL_API_SW_INTERFACE_SET_DPDK_HQOS_PIPE_REPLY); +} + +static void + vl_api_sw_interface_set_dpdk_hqos_subport_t_handler + (vl_api_sw_interface_set_dpdk_hqos_subport_t * mp) +{ + vl_api_sw_interface_set_dpdk_hqos_subport_reply_t *rmp; + int rv = 0; + +#if DPDK > 0 + dpdk_main_t *dm = &dpdk_main; + dpdk_device_t *xd; + struct rte_sched_subport_params p; + + u32 sw_if_index = ntohl (mp->sw_if_index); + u32 subport = ntohl (mp->subport); + p.tb_rate = ntohl (mp->tb_rate); + p.tb_size = ntohl (mp->tb_size); + p.tc_rate[0] = ntohl (mp->tc_rate[0]); + p.tc_rate[1] = ntohl (mp->tc_rate[1]); + p.tc_rate[2] = ntohl (mp->tc_rate[2]); + p.tc_rate[3] = ntohl (mp->tc_rate[3]); + p.tc_period = ntohl (mp->tc_period); + + vnet_hw_interface_t *hw; + + VALIDATE_SW_IF_INDEX (mp); + + /* hw_if & dpdk device */ + hw = vnet_get_sup_hw_interface (dm->vnet_main, sw_if_index); + + xd = vec_elt_at_index (dm->devices, hw->dev_instance); + + rv = rte_sched_subport_config (xd->hqos_ht->hqos, subport, &p); + + BAD_SW_IF_INDEX_LABEL; +#else + clib_warning + ("setting HQoS subport parameters without DPDK not implemented"); + rv = VNET_API_ERROR_UNIMPLEMENTED; +#endif /* DPDK */ + + REPLY_MACRO (VL_API_SW_INTERFACE_SET_DPDK_HQOS_SUBPORT_REPLY); +} + +static void + vl_api_sw_interface_set_dpdk_hqos_tctbl_t_handler + (vl_api_sw_interface_set_dpdk_hqos_tctbl_t * mp) +{ + vl_api_sw_interface_set_dpdk_hqos_tctbl_reply_t *rmp; + int rv = 0; + +#if DPDK > 0 + dpdk_main_t *dm = &dpdk_main; + vlib_thread_main_t *tm = vlib_get_thread_main (); + dpdk_device_t *xd; + + u32 sw_if_index = ntohl (mp->sw_if_index); + u32 entry = ntohl (mp->entry); + u32 tc = ntohl (mp->tc); + u32 queue = ntohl (mp->queue); + u32 val, i; + + vnet_hw_interface_t *hw; + + VALIDATE_SW_IF_INDEX (mp); + + /* hw_if & dpdk device */ + hw = vnet_get_sup_hw_interface (dm->vnet_main, sw_if_index); + + xd = vec_elt_at_index (dm->devices, hw->dev_instance); + + 
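  /* Worked example of the mapping computed below: assuming DPDK's usual
   * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS of 4, tc = 2 and queue = 1 give
   * val = 2 * 4 + 1 = 9, and that value is written into
   * hqos_tc_table[entry] of every worker thread found via the "workers"
   * registration. */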
if (tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE) + { + clib_warning ("invalid traffic class !!"); + rv = VNET_API_ERROR_INVALID_VALUE; + goto done; + } + if (queue >= RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS) + { + clib_warning ("invalid queue !!"); + rv = VNET_API_ERROR_INVALID_VALUE; + goto done; + } + + /* Detect the set of worker threads */ + uword *p = hash_get_mem (tm->thread_registrations_by_name, "workers"); + + if (p == 0) + { + clib_warning ("worker thread registration AWOL !!"); + rv = VNET_API_ERROR_INVALID_VALUE_2; + goto done; + } + + vlib_thread_registration_t *tr = (vlib_thread_registration_t *) p[0]; + int worker_thread_first = tr->first_index; + int worker_thread_count = tr->count; + + val = tc * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue; + for (i = 0; i < worker_thread_count; i++) + xd->hqos_wt[worker_thread_first + i].hqos_tc_table[entry] = val; + + BAD_SW_IF_INDEX_LABEL; +done: +#else + clib_warning ("setting HQoS DSCP table entry without DPDK not implemented"); + rv = VNET_API_ERROR_UNIMPLEMENTED; +#endif /* DPDK */ + + REPLY_MACRO (VL_API_SW_INTERFACE_SET_DPDK_HQOS_TCTBL_REPLY); +} + +/* + * dpdk_api_hookup + * Add vpe's API message handlers to the table. + * vlib has alread mapped shared memory and + * added the client registration handlers. + * See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process() + */ +#define vl_msg_name_crc_list +#include +#undef vl_msg_name_crc_list + +static void +setup_message_id_table (api_main_t * am) +{ +#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id); + foreach_vl_msg_name_crc_dpdk; +#undef _ +} + +static clib_error_t * +dpdk_api_hookup (vlib_main_t * vm) +{ + api_main_t *am = &api_main; + +#define _(N,n) \ + vl_msg_api_set_handlers(VL_API_##N, #n, \ + vl_api_##n##_t_handler, \ + vl_noop_handler, \ + vl_api_##n##_t_endian, \ + vl_api_##n##_t_print, \ + sizeof(vl_api_##n##_t), 1); + foreach_vpe_api_msg; +#undef _ + + /* + * Set up the (msg_name, crc, message-id) table + */ + setup_message_id_table (am); + + return 0; +} + +VLIB_API_INIT_FUNCTION (dpdk_api_hookup); + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/vnet_all_api_h.h b/src/vnet/vnet_all_api_h.h index 1b4d6c45..d48e1540 100644 --- a/src/vnet/vnet_all_api_h.h +++ b/src/vnet/vnet_all_api_h.h @@ -30,6 +30,9 @@ #endif /* included_from_layer_3 */ #include +#if DPDK > 0 +#include +#endif #include #include #include diff --git a/src/vpp/api/api.c b/src/vpp/api/api.c index 6289249c..46e28e9d 100644 --- a/src/vpp/api/api.c +++ b/src/vpp/api/api.c @@ -86,10 +86,6 @@ #undef __included_bihash_template_h__ #include -#if DPDK > 0 -#include -#endif - #include #include @@ -131,9 +127,6 @@ _(SW_INTERFACE_SET_VPATH, sw_interface_set_vpath) \ _(SW_INTERFACE_SET_VXLAN_BYPASS, sw_interface_set_vxlan_bypass) \ _(SW_INTERFACE_SET_L2_XCONNECT, sw_interface_set_l2_xconnect) \ _(SW_INTERFACE_SET_L2_BRIDGE, sw_interface_set_l2_bridge) \ -_(SW_INTERFACE_SET_DPDK_HQOS_PIPE, sw_interface_set_dpdk_hqos_pipe) \ -_(SW_INTERFACE_SET_DPDK_HQOS_SUBPORT, sw_interface_set_dpdk_hqos_subport) \ -_(SW_INTERFACE_SET_DPDK_HQOS_TCTBL, sw_interface_set_dpdk_hqos_tctbl) \ _(BRIDGE_DOMAIN_ADD_DEL, bridge_domain_add_del) \ _(BRIDGE_DOMAIN_DUMP, bridge_domain_dump) \ _(BRIDGE_DOMAIN_DETAILS, bridge_domain_details) \ @@ -697,152 +690,6 @@ static void REPLY_MACRO (VL_API_SW_INTERFACE_SET_L2_BRIDGE_REPLY); } -static void - vl_api_sw_interface_set_dpdk_hqos_pipe_t_handler - (vl_api_sw_interface_set_dpdk_hqos_pipe_t * 
mp) -{ - vl_api_sw_interface_set_dpdk_hqos_pipe_reply_t *rmp; - int rv = 0; - -#if DPDK > 0 - dpdk_main_t *dm = &dpdk_main; - dpdk_device_t *xd; - - u32 sw_if_index = ntohl (mp->sw_if_index); - u32 subport = ntohl (mp->subport); - u32 pipe = ntohl (mp->pipe); - u32 profile = ntohl (mp->profile); - vnet_hw_interface_t *hw; - - VALIDATE_SW_IF_INDEX (mp); - - /* hw_if & dpdk device */ - hw = vnet_get_sup_hw_interface (dm->vnet_main, sw_if_index); - - xd = vec_elt_at_index (dm->devices, hw->dev_instance); - - rv = rte_sched_pipe_config (xd->hqos_ht->hqos, subport, pipe, profile); - - BAD_SW_IF_INDEX_LABEL; -#else - clib_warning ("setting HQoS pipe parameters without DPDK not implemented"); - rv = VNET_API_ERROR_UNIMPLEMENTED; -#endif /* DPDK */ - - REPLY_MACRO (VL_API_SW_INTERFACE_SET_DPDK_HQOS_PIPE_REPLY); -} - -static void - vl_api_sw_interface_set_dpdk_hqos_subport_t_handler - (vl_api_sw_interface_set_dpdk_hqos_subport_t * mp) -{ - vl_api_sw_interface_set_dpdk_hqos_subport_reply_t *rmp; - int rv = 0; - -#if DPDK > 0 - dpdk_main_t *dm = &dpdk_main; - dpdk_device_t *xd; - struct rte_sched_subport_params p; - - u32 sw_if_index = ntohl (mp->sw_if_index); - u32 subport = ntohl (mp->subport); - p.tb_rate = ntohl (mp->tb_rate); - p.tb_size = ntohl (mp->tb_size); - p.tc_rate[0] = ntohl (mp->tc_rate[0]); - p.tc_rate[1] = ntohl (mp->tc_rate[1]); - p.tc_rate[2] = ntohl (mp->tc_rate[2]); - p.tc_rate[3] = ntohl (mp->tc_rate[3]); - p.tc_period = ntohl (mp->tc_period); - - vnet_hw_interface_t *hw; - - VALIDATE_SW_IF_INDEX (mp); - - /* hw_if & dpdk device */ - hw = vnet_get_sup_hw_interface (dm->vnet_main, sw_if_index); - - xd = vec_elt_at_index (dm->devices, hw->dev_instance); - - rv = rte_sched_subport_config (xd->hqos_ht->hqos, subport, &p); - - BAD_SW_IF_INDEX_LABEL; -#else - clib_warning - ("setting HQoS subport parameters without DPDK not implemented"); - rv = VNET_API_ERROR_UNIMPLEMENTED; -#endif /* DPDK */ - - REPLY_MACRO (VL_API_SW_INTERFACE_SET_DPDK_HQOS_SUBPORT_REPLY); -} - -static void - vl_api_sw_interface_set_dpdk_hqos_tctbl_t_handler - (vl_api_sw_interface_set_dpdk_hqos_tctbl_t * mp) -{ - vl_api_sw_interface_set_dpdk_hqos_tctbl_reply_t *rmp; - int rv = 0; - -#if DPDK > 0 - dpdk_main_t *dm = &dpdk_main; - vlib_thread_main_t *tm = vlib_get_thread_main (); - dpdk_device_t *xd; - - u32 sw_if_index = ntohl (mp->sw_if_index); - u32 entry = ntohl (mp->entry); - u32 tc = ntohl (mp->tc); - u32 queue = ntohl (mp->queue); - u32 val, i; - - vnet_hw_interface_t *hw; - - VALIDATE_SW_IF_INDEX (mp); - - /* hw_if & dpdk device */ - hw = vnet_get_sup_hw_interface (dm->vnet_main, sw_if_index); - - xd = vec_elt_at_index (dm->devices, hw->dev_instance); - - if (tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE) - { - clib_warning ("invalid traffic class !!"); - rv = VNET_API_ERROR_INVALID_VALUE; - goto done; - } - if (queue >= RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS) - { - clib_warning ("invalid queue !!"); - rv = VNET_API_ERROR_INVALID_VALUE; - goto done; - } - - /* Detect the set of worker threads */ - uword *p = hash_get_mem (tm->thread_registrations_by_name, "workers"); - - if (p == 0) - { - clib_warning ("worker thread registration AWOL !!"); - rv = VNET_API_ERROR_INVALID_VALUE_2; - goto done; - } - - vlib_thread_registration_t *tr = (vlib_thread_registration_t *) p[0]; - int worker_thread_first = tr->first_index; - int worker_thread_count = tr->count; - - val = tc * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue; - for (i = 0; i < worker_thread_count; i++) - xd->hqos_wt[worker_thread_first + i].hqos_tc_table[entry] = val; - 
- BAD_SW_IF_INDEX_LABEL; -done: -#else - clib_warning ("setting HQoS DSCP table entry without DPDK not implemented"); - rv = VNET_API_ERROR_UNIMPLEMENTED; -#endif /* DPDK */ - - REPLY_MACRO (VL_API_SW_INTERFACE_SET_DPDK_HQOS_TCTBL_REPLY); -} - static void vl_api_bridge_domain_add_del_t_handler (vl_api_bridge_domain_add_del_t * mp) { diff --git a/src/vpp/api/custom_dump.c b/src/vpp/api/custom_dump.c index 1964533e..c2cd3d15 100644 --- a/src/vpp/api/custom_dump.c +++ b/src/vpp/api/custom_dump.c @@ -238,6 +238,7 @@ static void *vl_api_sw_interface_set_l2_bridge_t_print FINISH; } +#if DPDK > 0 static void *vl_api_sw_interface_set_dpdk_hqos_pipe_t_print (vl_api_sw_interface_set_dpdk_hqos_pipe_t * mp, void *handle) { @@ -287,6 +288,7 @@ static void *vl_api_sw_interface_set_dpdk_hqos_tctbl_t_print FINISH; } +#endif static void *vl_api_bridge_domain_add_del_t_print (vl_api_bridge_domain_add_del_t * mp, void *handle) @@ -3002,9 +3004,6 @@ _(BRIDGE_FLAGS, bridge_flags) \ _(CLASSIFY_ADD_DEL_TABLE, classify_add_del_table) \ _(CLASSIFY_ADD_DEL_SESSION, classify_add_del_session) \ _(SW_INTERFACE_SET_L2_BRIDGE, sw_interface_set_l2_bridge) \ -_(SW_INTERFACE_SET_DPDK_HQOS_PIPE, sw_interface_set_dpdk_hqos_pipe) \ -_(SW_INTERFACE_SET_DPDK_HQOS_SUBPORT, sw_interface_set_dpdk_hqos_subport)\ -_(SW_INTERFACE_SET_DPDK_HQOS_TCTBL, sw_interface_set_dpdk_hqos_tctbl) \ _(BRIDGE_DOMAIN_ADD_DEL, bridge_domain_add_del) \ _(BRIDGE_DOMAIN_DUMP, bridge_domain_dump) \ _(CLASSIFY_SET_INTERFACE_IP_TABLE, classify_set_interface_ip_table) \ @@ -3128,6 +3127,18 @@ vl_msg_api_custom_dump_configure (api_main_t * am) = (void *) vl_api_##f##_t_print; foreach_custom_print_function; #undef _ + +#if DPDK > 0 + /* + * manually add DPDK hqos print handlers + */ + am->msg_print_handlers[VL_API_SW_INTERFACE_SET_DPDK_HQOS_PIPE] = + (void *) vl_api_sw_interface_set_dpdk_hqos_pipe_t_print; + am->msg_print_handlers[VL_API_SW_INTERFACE_SET_DPDK_HQOS_SUBPORT] = + (void *) vl_api_sw_interface_set_dpdk_hqos_subport_t_print; + am->msg_print_handlers[VL_API_SW_INTERFACE_SET_DPDK_HQOS_TCTBL] = + (void *) vl_api_sw_interface_set_dpdk_hqos_tctbl_t_print; +#endif } /* diff --git a/src/vpp/api/vpe.api b/src/vpp/api/vpe.api index abd0e8f1..3e4bcdf9 100644 --- a/src/vpp/api/vpe.api +++ b/src/vpp/api/vpe.api @@ -37,6 +37,7 @@ * IPSEC-GRE APIs: see .../vnet/vnet/ipsec-gre/{ipsec_gre.api, ipsec_gre_api.c} * LISP APIs: see .../vnet/vnet/lisp/{lisp.api, lisp_api.c} * LISP-GPE APIs: see .../vnet/vnet/lisp-gpe/{lisp_gpe.api, lisp_gpe_api.c} + * DPDK APIs: ... 
see /src/vnet/devices/dpdk/{dpdk.api, dpdk_api.c} */ /** \brief Create a new subinterface with the given vlan id @@ -2606,88 +2607,6 @@ define delete_subif_reply { i32 retval; }; -/** \brief DPDK interface HQoS pipe profile set request - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param sw_if_index - the interface - @param subport - subport ID - @param pipe - pipe ID within its subport - @param profile - pipe profile ID -*/ -define sw_interface_set_dpdk_hqos_pipe { - u32 client_index; - u32 context; - u32 sw_if_index; - u32 subport; - u32 pipe; - u32 profile; -}; - -/** \brief DPDK interface HQoS pipe profile set reply - @param context - sender context, to match reply w/ request - @param retval - request return code -*/ -define sw_interface_set_dpdk_hqos_pipe_reply { - u32 context; - i32 retval; -}; - -/** \brief DPDK interface HQoS subport parameters set request - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param sw_if_index - the interface - @param subport - subport ID - @param tb_rate - subport token bucket rate (measured in bytes/second) - @param tb_size - subport token bucket size (measured in credits) - @param tc_rate - subport traffic class 0 .. 3 rates (measured in bytes/second) - @param tc_period - enforcement period for rates (measured in milliseconds) -*/ -define sw_interface_set_dpdk_hqos_subport { - u32 client_index; - u32 context; - u32 sw_if_index; - u32 subport; - u32 tb_rate; - u32 tb_size; - u32 tc_rate[4]; - u32 tc_period; -}; - -/** \brief DPDK interface HQoS subport parameters set reply - @param context - sender context, to match reply w/ request - @param retval - request return code -*/ -define sw_interface_set_dpdk_hqos_subport_reply { - u32 context; - i32 retval; -}; - -/** \brief DPDK interface HQoS tctbl entry set request - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param sw_if_index - the interface - @param entry - entry index ID - @param tc - traffic class (0 .. 3) - @param queue - traffic class queue (0 .. 
3) -*/ -define sw_interface_set_dpdk_hqos_tctbl { - u32 client_index; - u32 context; - u32 sw_if_index; - u32 entry; - u32 tc; - u32 queue; -}; - -/** \brief DPDK interface HQoS tctbl entry set reply - @param context - sender context, to match reply w/ request - @param retval - request return code -*/ -define sw_interface_set_dpdk_hqos_tctbl_reply { - u32 context; - i32 retval; -}; - /** \brief L2 interface pbb tag rewrite configure request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request -- cgit 1.2.3-korg From 0f971d8c22adf89d3f8592ac0d207727f2b1a23a Mon Sep 17 00:00:00 2001 From: Pavel Kotucek Date: Tue, 3 Jan 2017 10:48:54 +0100 Subject: API refactoring : l2, mpls, sr Change-Id: Ic5f273dae607a1d3902489e65734c76f027dc30f Signed-off-by: Pavel Kotucek --- src/vnet.am | 15 +- src/vnet/l2/l2.api | 229 +++++++++++ src/vnet/l2/l2_api.c | 373 +++++++++++++++++- src/vnet/mpls/mpls.api | 246 ++++++++++++ src/vnet/mpls/mpls_api.c | 497 ++++++++++++++++++++++++ src/vnet/sr/sr.api | 119 ++++++ src/vnet/sr/sr_api.c | 279 ++++++++++++++ src/vnet/vnet_all_api_h.h | 2 + src/vpp/api/api.c | 947 +--------------------------------------------- src/vpp/api/vpe.api | 574 +--------------------------- 10 files changed, 1770 insertions(+), 1511 deletions(-) create mode 100644 src/vnet/mpls/mpls.api create mode 100644 src/vnet/mpls/mpls_api.c create mode 100644 src/vnet/sr/sr.api create mode 100644 src/vnet/sr/sr_api.c (limited to 'src/vpp/api/api.c') diff --git a/src/vnet.am b/src/vnet.am index bc0820a3..bca56227 100644 --- a/src/vnet.am +++ b/src/vnet.am @@ -498,15 +498,18 @@ libvnet_la_SOURCES += \ vnet/mpls/node.c \ vnet/mpls/interface.c \ vnet/mpls/mpls_tunnel.c \ - vnet/mpls/pg.c + vnet/mpls/pg.c \ + vnet/mpls/mpls_api.c nobase_include_HEADERS += \ vnet/mpls/mpls.h \ vnet/mpls/mpls_types.h \ vnet/mpls/mpls_tunnel.h \ vnet/mpls/packet.h \ - vnet/mpls/error.def + vnet/mpls/error.def \ + vnet/mpls/mpls.api.h +API_FILES += vnet/mpls/mpls.api ######################################## # Tunnel protocol: vxlan-gpe @@ -666,13 +669,17 @@ nobase_include_HEADERS += \ if WITH_IPV6SR libvnet_la_SOURCES += \ vnet/sr/sr.c \ - vnet/sr/sr_replicate.c + vnet/sr/sr_replicate.c \ + vnet/sr/sr_api.c endif nobase_include_HEADERS += \ vnet/sr/sr_packet.h \ vnet/sr/sr_error.def \ - vnet/sr/sr.h + vnet/sr/sr.h \ + vnet/sr/sr.api.h + +API_FILES += vnet/sr/sr.api ######################################## # DHCPv6 proxy diff --git a/src/vnet/l2/l2.api b/src/vnet/l2/l2.api index 5fce7944..5b24f259 100644 --- a/src/vnet/l2/l2.api +++ b/src/vnet/l2/l2.api @@ -36,3 +36,232 @@ define l2_xconnect_dump u32 context; }; +/** \brief l2 fib table entry structure + @param bd_id - the l2 fib / bridge domain table id + @param mac - the entry's mac address + @param sw_if_index - index of the interface + @param static_mac - the entry is statically configured. + @param filter_mac - the entry is a mac filter entry. 
+ @param bvi_mac - the mac address is a bridge virtual interface +*/ +define l2_fib_table_entry +{ + u32 context; + u32 bd_id; + u64 mac; + u32 sw_if_index; + u8 static_mac; + u8 filter_mac; + u8 bvi_mac; +}; + +/** \brief Dump l2 fib (aka bridge domain) table + @param client_index - opaque cookie to identify the sender + @param bd_id - the l2 fib / bridge domain table identifier +*/ +define l2_fib_table_dump +{ + u32 client_index; + u32 context; + u32 bd_id; +}; + +/** \brief L2 fib clear table request, clear all mac entries in the l2 fib + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request +*/ +define l2_fib_clear_table +{ + u32 client_index; + u32 context; +}; + +/** \brief L2 fib clear table response + @param context - sender context, to match reply w/ request + @param retval - return code for the request +*/ +define l2_fib_clear_table_reply +{ + u32 context; + i32 retval; +}; + +/** \brief L2 FIB add entry request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param mac - the entry's mac address + @param bd_id - the entry's bridge domain id + @param sw_if_index - the interface + @param is_add - If non zero add the entry, else delete it + @param static_mac - + @param filter_mac - +*/ +define l2fib_add_del +{ + u32 client_index; + u32 context; + u64 mac; + u32 bd_id; + u32 sw_if_index; + u8 is_add; + u8 static_mac; + u8 filter_mac; + u8 bvi_mac; +}; + +/** \brief L2 FIB add entry response + @param context - sender context, to match reply w/ request + @param retval - return code for the add l2fib entry request +*/ +define l2fib_add_del_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Set L2 flags request !!! 
TODO - need more info, feature bits in l2_input.h + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - interface + @param is_set - if non-zero, set the bits, else clear them + @param feature_bitmap - non-zero bits to set or clear +*/ +define l2_flags +{ + u32 client_index; + u32 context; + u32 sw_if_index; + u8 is_set; + u32 feature_bitmap; +}; + +/** \brief Set L2 bits response + @param context - sender context, to match reply w/ request + @param retval - return code for the set l2 bits request +*/ +define l2_flags_reply +{ + u32 context; + i32 retval; + u32 resulting_feature_bitmap; +}; + +/** \brief L2 bridge domain add or delete request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param bd_id - the bridge domain to create + @param flood - enable/disable bcast/mcast flooding in the bd + @param uu_flood - enable/disable uknown unicast flood in the bd + @param forward - enable/disable forwarding on all interfaces in the bd + @param learn - enable/disable learning on all interfaces in the bd + @param arp_term - enable/disable arp termination in the bd + @param mac_age - mac aging time in min, 0 for disabled + @param is_add - add or delete flag +*/ +define bridge_domain_add_del +{ + u32 client_index; + u32 context; + u32 bd_id; + u8 flood; + u8 uu_flood; + u8 forward; + u8 learn; + u8 arp_term; + u8 mac_age; + u8 is_add; +}; + +/** \brief L2 bridge domain add or delete response + @param context - sender context, to match reply w/ request + @param retval - return code for the set bridge flags request +*/ +define bridge_domain_add_del_reply +{ + u32 context; + i32 retval; +}; + +/** \brief L2 bridge domain request operational state details + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param bd_id - the bridge domain id desired or ~0 to request all bds +*/ +define bridge_domain_dump +{ + u32 client_index; + u32 context; + u32 bd_id; +}; + +/** \brief L2 bridge domain operational state response + @param bd_id - the bridge domain id + @param flood - bcast/mcast flooding state on all interfaces in the bd + @param uu_flood - uknown unicast flooding state on all interfaces in the bd + @param forward - forwarding state on all interfaces in the bd + @param learn - learning state on all interfaces in the bd + @param arp_term - arp termination state on all interfaces in the bd + @param mac_age - mac aging time in min, 0 for disabled + @param n_sw_ifs - number of sw_if_index's in the domain +*/ +define bridge_domain_details +{ + u32 context; + u32 bd_id; + u8 flood; + u8 uu_flood; + u8 forward; + u8 learn; + u8 arp_term; + u8 mac_age; + u32 bvi_sw_if_index; + u32 n_sw_ifs; +}; + +/** \brief L2 bridge domain sw interface operational state response + @param bd_id - the bridge domain id + @param sw_if_index - sw_if_index in the domain + @param shg - split horizon group for the interface +*/ +define bridge_domain_sw_if_details +{ + u32 context; + u32 bd_id; + u32 sw_if_index; + u8 shg; +}; + +/** \brief Set bridge flags (such as L2_LEARN, L2_FWD, L2_FLOOD, + L2_UU_FLOOD, or L2_ARP_TERM) request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param bd_id - the bridge domain to set the flags for + @param is_set - if non-zero, set the flags, else clear them + @param feature_bitmap - bits 
that are non-zero to set or clear +*/ +define bridge_flags +{ + u32 client_index; + u32 context; + u32 bd_id; + u8 is_set; + u32 feature_bitmap; +}; + +/** \brief Set bridge flags response + @param context - sender context, to match reply w/ request + @param retval - return code for the set bridge flags request + @param resulting_feature_bitmap - the feature bitmap value after the request is implemented +*/ +define bridge_flags_reply +{ + u32 context; + i32 retval; + u32 resulting_feature_bitmap; +}; + +/* + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/l2/l2_api.c b/src/vnet/l2/l2_api.c index ca4f593f..ef33509c 100644 --- a/src/vnet/l2/l2_api.c +++ b/src/vnet/l2/l2_api.c @@ -23,6 +23,7 @@ #include #include #include +#include #include @@ -42,8 +43,18 @@ #include -#define foreach_vpe_api_msg \ -_(L2_XCONNECT_DUMP, l2_xconnect_dump) +#define foreach_vpe_api_msg \ +_(L2_XCONNECT_DUMP, l2_xconnect_dump) \ +_(L2_FIB_CLEAR_TABLE, l2_fib_clear_table) \ +_(L2_FIB_TABLE_DUMP, l2_fib_table_dump) \ +_(L2_FIB_TABLE_ENTRY, l2_fib_table_entry) \ +_(L2FIB_ADD_DEL, l2fib_add_del) \ +_(L2_FLAGS, l2_flags) \ +_(BRIDGE_DOMAIN_ADD_DEL, bridge_domain_add_del) \ +_(BRIDGE_DOMAIN_DUMP, bridge_domain_dump) \ +_(BRIDGE_DOMAIN_DETAILS, bridge_domain_details) \ +_(BRIDGE_DOMAIN_SW_IF_DETAILS, bridge_domain_sw_if_details) \ +_(BRIDGE_FLAGS, bridge_flags) static void send_l2_xconnect_details (unix_shared_memory_queue_t * q, u32 context, @@ -86,9 +97,365 @@ vl_api_l2_xconnect_dump_t_handler (vl_api_l2_xconnect_dump_t * mp) /* *INDENT-ON* */ } +static void +vl_api_l2_fib_clear_table_t_handler (vl_api_l2_fib_clear_table_t * mp) +{ + int rv = 0; + vl_api_l2_fib_clear_table_reply_t *rmp; + + /* DAW-FIXME: This API should only clear non-static l2fib entries, but + * that is not currently implemented. When that TODO is fixed + * this call should be changed to pass 1 instead of 0. 
+ */ + l2fib_clear_table (0); + + REPLY_MACRO (VL_API_L2_FIB_CLEAR_TABLE_REPLY); +} + +static void +send_l2fib_table_entry (vpe_api_main_t * am, + unix_shared_memory_queue_t * q, + l2fib_entry_key_t * l2fe_key, + l2fib_entry_result_t * l2fe_res, u32 context) +{ + vl_api_l2_fib_table_entry_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_L2_FIB_TABLE_ENTRY); + + mp->bd_id = + ntohl (l2input_main.bd_configs[l2fe_key->fields.bd_index].bd_id); + + mp->mac = l2fib_make_key (l2fe_key->fields.mac, 0); + mp->sw_if_index = ntohl (l2fe_res->fields.sw_if_index); + mp->static_mac = l2fe_res->fields.static_mac; + mp->filter_mac = l2fe_res->fields.filter; + mp->bvi_mac = l2fe_res->fields.bvi; + mp->context = context; + + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +static void +vl_api_l2_fib_table_entry_t_handler (vl_api_l2_fib_table_entry_t * mp) +{ + clib_warning ("BUG"); +} + +static void +vl_api_l2_fib_table_dump_t_handler (vl_api_l2_fib_table_dump_t * mp) +{ + vpe_api_main_t *am = &vpe_api_main; + bd_main_t *bdm = &bd_main; + l2fib_entry_key_t *l2fe_key = NULL; + l2fib_entry_result_t *l2fe_res = NULL; + u32 ni, bd_id = ntohl (mp->bd_id); + u32 bd_index; + unix_shared_memory_queue_t *q; + uword *p; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + return; + + /* see l2fib_table_dump: ~0 means "any" */ + if (bd_id == ~0) + bd_index = ~0; + else + { + p = hash_get (bdm->bd_index_by_bd_id, bd_id); + if (p == 0) + return; + + bd_index = p[0]; + } + + l2fib_table_dump (bd_index, &l2fe_key, &l2fe_res); + + vec_foreach_index (ni, l2fe_key) + { + send_l2fib_table_entry (am, q, vec_elt_at_index (l2fe_key, ni), + vec_elt_at_index (l2fe_res, ni), mp->context); + } + vec_free (l2fe_key); + vec_free (l2fe_res); +} + +static void +vl_api_l2fib_add_del_t_handler (vl_api_l2fib_add_del_t * mp) +{ + bd_main_t *bdm = &bd_main; + l2input_main_t *l2im = &l2input_main; + vl_api_l2fib_add_del_reply_t *rmp; + int rv = 0; + u64 mac = 0; + u32 sw_if_index = ntohl (mp->sw_if_index); + u32 bd_id = ntohl (mp->bd_id); + u32 bd_index; + u32 static_mac; + u32 filter_mac; + u32 bvi_mac; + uword *p; + + mac = mp->mac; + + p = hash_get (bdm->bd_index_by_bd_id, bd_id); + if (!p) + { + rv = VNET_API_ERROR_NO_SUCH_ENTRY; + goto bad_sw_if_index; + } + bd_index = p[0]; + + if (mp->is_add) + { + filter_mac = mp->filter_mac ? 1 : 0; + if (filter_mac == 0) + { + VALIDATE_SW_IF_INDEX (mp); + if (vec_len (l2im->configs) <= sw_if_index) + { + rv = VNET_API_ERROR_INVALID_SW_IF_INDEX; + goto bad_sw_if_index; + } + else + { + l2_input_config_t *config; + config = vec_elt_at_index (l2im->configs, sw_if_index); + if (config->bridge == 0) + { + rv = VNET_API_ERROR_INVALID_SW_IF_INDEX; + goto bad_sw_if_index; + } + } + } + static_mac = mp->static_mac ? 1 : 0; + bvi_mac = mp->bvi_mac ? 
1 : 0; + l2fib_add_entry (mac, bd_index, sw_if_index, static_mac, filter_mac, + bvi_mac); + } + else + { + l2fib_del_entry (mac, bd_index); + } + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_L2FIB_ADD_DEL_REPLY); +} + +static void +vl_api_l2_flags_t_handler (vl_api_l2_flags_t * mp) +{ + vl_api_l2_flags_reply_t *rmp; + int rv = 0; + u32 sw_if_index = ntohl (mp->sw_if_index); + u32 flags = ntohl (mp->feature_bitmap); + u32 rbm = 0; + + VALIDATE_SW_IF_INDEX (mp); + +#define _(a,b) \ + if (flags & L2INPUT_FEAT_ ## a) \ + rbm = l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_ ## a, mp->is_set); + foreach_l2input_feat; +#undef _ + + BAD_SW_IF_INDEX_LABEL; + + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_L2_FLAGS_REPLY, + ({ + rmp->resulting_feature_bitmap = ntohl(rbm); + })); + /* *INDENT-ON* */ +} + +static void +vl_api_bridge_domain_add_del_t_handler (vl_api_bridge_domain_add_del_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + bd_main_t *bdm = &bd_main; + vl_api_bridge_domain_add_del_reply_t *rmp; + int rv = 0; + u32 enable_flags = 0, disable_flags = 0; + u32 bd_id = ntohl (mp->bd_id); + u32 bd_index; + + if (mp->is_add) + { + bd_index = bd_find_or_add_bd_index (bdm, bd_id); + + if (mp->flood) + enable_flags |= L2_FLOOD; + else + disable_flags |= L2_FLOOD; + + if (mp->uu_flood) + enable_flags |= L2_UU_FLOOD; + else + disable_flags |= L2_UU_FLOOD; + + if (mp->forward) + enable_flags |= L2_FWD; + else + disable_flags |= L2_FWD; + + if (mp->arp_term) + enable_flags |= L2_ARP_TERM; + else + disable_flags |= L2_ARP_TERM; + + if (mp->learn) + enable_flags |= L2_LEARN; + else + disable_flags |= L2_LEARN; + + if (enable_flags) + bd_set_flags (vm, bd_index, enable_flags, 1 /* enable */ ); + + if (disable_flags) + bd_set_flags (vm, bd_index, disable_flags, 0 /* disable */ ); + + bd_set_mac_age (vm, bd_index, mp->mac_age); + } + else + rv = bd_delete_bd_index (bdm, bd_id); + + REPLY_MACRO (VL_API_BRIDGE_DOMAIN_ADD_DEL_REPLY); +} + +static void +vl_api_bridge_domain_details_t_handler (vl_api_bridge_domain_details_t * mp) +{ + clib_warning ("BUG"); +} + +static void + vl_api_bridge_domain_sw_if_details_t_handler + (vl_api_bridge_domain_sw_if_details_t * mp) +{ + clib_warning ("BUG"); +} + +static void +send_bridge_domain_details (unix_shared_memory_queue_t * q, + l2_bridge_domain_t * bd_config, + u32 n_sw_ifs, u32 context) +{ + vl_api_bridge_domain_details_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_BRIDGE_DOMAIN_DETAILS); + mp->bd_id = ntohl (bd_config->bd_id); + mp->flood = bd_feature_flood (bd_config); + mp->uu_flood = bd_feature_uu_flood (bd_config); + mp->forward = bd_feature_forward (bd_config); + mp->learn = bd_feature_learn (bd_config); + mp->arp_term = bd_feature_arp_term (bd_config); + mp->bvi_sw_if_index = ntohl (bd_config->bvi_sw_if_index); + mp->mac_age = bd_config->mac_age; + mp->n_sw_ifs = ntohl (n_sw_ifs); + mp->context = context; + + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +static void +send_bd_sw_if_details (l2input_main_t * l2im, + unix_shared_memory_queue_t * q, + l2_flood_member_t * member, u32 bd_id, u32 context) +{ + vl_api_bridge_domain_sw_if_details_t *mp; + l2_input_config_t *input_cfg; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_BRIDGE_DOMAIN_SW_IF_DETAILS); + mp->bd_id = ntohl (bd_id); + mp->sw_if_index = ntohl (member->sw_if_index); + input_cfg = vec_elt_at_index (l2im->configs, member->sw_if_index); + mp->shg = input_cfg->shg; + 
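  /* Sketch of the dump flow these helpers implement: a bridge_domain_dump
   * request with bd_id == ~0 walks every configured bridge domain; for each
   * one the handler below sends a single bridge_domain_details message
   * followed by one bridge_domain_sw_if_details per member interface, all
   * carrying the caller's context so the client can correlate replies. */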
mp->context = context; + + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +static void +vl_api_bridge_domain_dump_t_handler (vl_api_bridge_domain_dump_t * mp) +{ + bd_main_t *bdm = &bd_main; + l2input_main_t *l2im = &l2input_main; + unix_shared_memory_queue_t *q; + l2_bridge_domain_t *bd_config; + u32 bd_id, bd_index; + u32 end; + + q = vl_api_client_index_to_input_queue (mp->client_index); + + if (q == 0) + return; + + bd_id = ntohl (mp->bd_id); + + bd_index = (bd_id == ~0) ? 0 : bd_find_or_add_bd_index (bdm, bd_id); + end = (bd_id == ~0) ? vec_len (l2im->bd_configs) : bd_index + 1; + for (; bd_index < end; bd_index++) + { + bd_config = l2input_bd_config_from_index (l2im, bd_index); + /* skip dummy bd_id 0 */ + if (bd_config && (bd_config->bd_id > 0)) + { + u32 n_sw_ifs; + l2_flood_member_t *m; + + n_sw_ifs = vec_len (bd_config->members); + send_bridge_domain_details (q, bd_config, n_sw_ifs, mp->context); + + vec_foreach (m, bd_config->members) + { + send_bd_sw_if_details (l2im, q, m, bd_config->bd_id, mp->context); + } + } + } +} + +static void +vl_api_bridge_flags_t_handler (vl_api_bridge_flags_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + bd_main_t *bdm = &bd_main; + vl_api_bridge_flags_reply_t *rmp; + int rv = 0; + u32 bd_id = ntohl (mp->bd_id); + u32 bd_index; + u32 flags = ntohl (mp->feature_bitmap); + uword *p; + + p = hash_get (bdm->bd_index_by_bd_id, bd_id); + if (p == 0) + { + rv = VNET_API_ERROR_NO_SUCH_ENTRY; + goto out; + } + + bd_index = p[0]; + + bd_set_flags (vm, bd_index, flags, mp->is_set); + +out: + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_BRIDGE_FLAGS_REPLY, + ({ + rmp->resulting_feature_bitmap = ntohl(flags); + })); + /* *INDENT-ON* */ +} /* - * vpe_api_hookup + * l2_api_hookup * Add vpe's API message handlers to the table. * vlib has alread mapped shared memory and * added the client registration handlers. diff --git a/src/vnet/mpls/mpls.api b/src/vnet/mpls/mpls.api new file mode 100644 index 00000000..2e3bfaf5 --- /dev/null +++ b/src/vnet/mpls/mpls.api @@ -0,0 +1,246 @@ +/* + * Copyright (c) 2015-2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** \brief Bind/Unbind an MPLS local label to an IP prefix. i.e. create + a per-prefix label entry. + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param mb_mpls_table_id - The MPLS table-id the MPLS entry will be added in + @param mb_label - The MPLS label value to bind + @param mb_ip_table_id - The IP table-id of the IP prefix to bind to. + @param mb_create_table_if_needed - Create either/both tables if required. 
+ @param mb_is_bind - Bind or unbind + @param mb_is_ip4 - The prefix to bind to is IPv4 + @param mb_address_length - Length of IP prefix + @param mb_address[16] - IP prefix/ +*/ +define mpls_ip_bind_unbind +{ + u32 client_index; + u32 context; + u32 mb_mpls_table_id; + u32 mb_label; + u32 mb_ip_table_id; + u8 mb_create_table_if_needed; + u8 mb_is_bind; + u8 mb_is_ip4; + u8 mb_address_length; + u8 mb_address[16]; +}; + +/** \brief Reply for MPLS IP bind/unbind request + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define mpls_ip_bind_unbind_reply +{ + u32 context; + i32 retval; +}; + +/** \brief MPLS tunnel Add / del route + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param mt_is_add - Is this a route add or delete + @param mt_sw_if_index - The SW interface index of the tunnel to delete + @param mt_next_hop_proto_is_ip4 - The next-hop is IPV4 + @param mt_next_hop_weight - The weight, for UCMP + @param mt_next_hop[16] - the nextop address + @param mt_next_hop_sw_if_index - the next-hop SW interface + @param mt_next_hop_table_id - the next-hop table-id (if appropriate) + @param mt_next_hop_n_out_labels - the number of next-hop output labels + @param mt_next_hop_out_label_stack - the next-hop output label stack, outer most first +*/ +define mpls_tunnel_add_del +{ + u32 client_index; + u32 context; + u32 mt_sw_if_index; + u8 mt_is_add; + u8 mt_l2_only; + u8 mt_next_hop_proto_is_ip4; + u8 mt_next_hop_weight; + u8 mt_next_hop[16]; + u8 mt_next_hop_n_out_labels; + u32 mt_next_hop_sw_if_index; + u32 mt_next_hop_table_id; + u32 mt_next_hop_out_label_stack[mt_next_hop_n_out_labels]; +}; + +/** \brief Reply for MPLS tunnel add / del request + @param context - returned sender context, to match reply w/ request + @param retval - return code + @param sw_if_index - SW interface index of the tunnel created +*/ +define mpls_tunnel_add_del_reply +{ + u32 context; + i32 retval; + u32 sw_if_index; +}; + +/** \brief Dump mpls eth tunnel table + @param client_index - opaque cookie to identify the sender + @param tunnel_index - eth tunnel identifier or -1 in case of all tunnels +*/ +define mpls_tunnel_dump +{ + u32 client_index; + u32 context; + i32 tunnel_index; +}; + +/** \brief mpls eth tunnel operational state response + @param tunnel_index - eth tunnel identifier + @param intfc_address - interface ipv4 addr + @param mask_width - interface ipv4 addr mask + @param hw_if_index - interface id + @param l2_only - + @param tunnel_dst_mac - + @param tx_sw_if_index - + @param encap_index - reference to mpls label table + @param nlabels - number of resolved labels + @param labels - resolved labels +*/ +define mpls_tunnel_details +{ + u32 context; + u32 tunnel_index; + u8 mt_l2_only; + u8 mt_sw_if_index; + u8 mt_next_hop_proto_is_ip4; + u8 mt_next_hop[16]; + u32 mt_next_hop_sw_if_index; + u32 mt_next_hop_table_id; + u32 mt_next_hop_n_labels; + u32 mt_next_hop_out_labels[mt_next_hop_n_labels]; +}; + +/** \brief MPLS Route Add / del route + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param mr_label - The MPLS label value + @param mr_eos - The End of stack bit + @param mr_table_id - The MPLS table-id the route is added in + @param mr_classify_table_index - If this is a classify route, + this is the classify table index + @param mr_create_table_if_needed - If the MPLS or IP tables do not exist, + create them + 
@param mr_is_add - Is this a route add or delete + @param mr_is_classify - Is this route result a classify + @param mr_is_multipath - Is this route update a multipath - i.e. is this + a path addition to an existing route + @param mr_is_resolve_host - Recurse resolution constraint via a host prefix + @param mr_is_resolve_attached - Recurse resolution constraint via attached prefix + @param mr_next_hop_proto_is_ip4 - The next-hop is IPV4 + @param mr_next_hop_weight - The weight, for UCMP + @param mr_next_hop[16] - the nextop address + @param mr_next_hop_sw_if_index - the next-hop SW interface + @param mr_next_hop_table_id - the next-hop table-id (if appropriate) + @param mr_next_hop_n_out_labels - the number of labels in the label stack + @param mr_next_hop_out_label_stack - the next-hop output label stack, outer most first + @param next_hop_via_label - The next-hop is a resolved via a local label +*/ +define mpls_route_add_del +{ + u32 client_index; + u32 context; + u32 mr_label; + u8 mr_eos; + u32 mr_table_id; + u32 mr_classify_table_index; + u8 mr_create_table_if_needed; + u8 mr_is_add; + u8 mr_is_classify; + u8 mr_is_multipath; + u8 mr_is_resolve_host; + u8 mr_is_resolve_attached; + u8 mr_next_hop_proto_is_ip4; + u8 mr_next_hop_weight; + u8 mr_next_hop[16]; + u8 mr_next_hop_n_out_labels; + u32 mr_next_hop_sw_if_index; + u32 mr_next_hop_table_id; + u32 mr_next_hop_via_label; + u32 mr_next_hop_out_label_stack[mr_next_hop_n_out_labels]; +}; + +/** \brief Reply for MPLS route add / del request + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define mpls_route_add_del_reply +{ + u32 context; + i32 retval; +}; + +/** \brief FIB path + @param sw_if_index - index of the interface + @param weight - The weight, for UCMP + @param is_local - local if non-zero, else remote + @param is_drop - Drop the packet + @param is_unreach - Drop the packet and rate limit send ICMP unreachable + @param is_prohibit - Drop the packet and rate limit send ICMP prohibited + @param afi - the afi of the next hop, IP46_TYPE_IP4=1, IP46_TYPE_IP6=2 + @param next_hop[16] - the next hop address + + WARNING: this type is replicated, pending cleanup completion + +*/ +typeonly manual_print manual_endian define fib_path2 +{ + u32 sw_if_index; + u32 weight; + u8 is_local; + u8 is_drop; + u8 is_unreach; + u8 is_prohibit; + u8 afi; + u8 next_hop[16]; +}; + +/** \brief Dump MPLS fib table + @param client_index - opaque cookie to identify the sender +*/ +define mpls_fib_dump +{ + u32 client_index; + u32 context; +}; + +/** \brief mpls FIB table response + @param table_id - MPLS fib table id + @param s_bit - End-of-stack bit + @param label - MPLS label value + @param count - the number of fib_path in path + @param path - array of of fib_path structures +*/ +manual_endian manual_print define mpls_fib_details +{ + u32 context; + u32 table_id; + u8 eos_bit; + u32 label; + u32 count; + vl_api_fib_path2_t path[count]; +}; + +/* + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ + \ No newline at end of file diff --git a/src/vnet/mpls/mpls_api.c b/src/vnet/mpls/mpls_api.c new file mode 100644 index 00000000..ebbeba69 --- /dev/null +++ b/src/vnet/mpls/mpls_api.c @@ -0,0 +1,497 @@ +/* + *------------------------------------------------------------------ + * mpls_api.c - mpls api + * + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + *------------------------------------------------------------------ + */ + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +#define vl_typedefs /* define message structures */ +#include +#undef vl_typedefs + +#define vl_endianfun /* define message structures */ +#include +#undef vl_endianfun + +/* instantiate all the print functions we know about */ +#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__) +#define vl_printfun +#include +#undef vl_printfun + +#include + +#define foreach_vpe_api_msg \ +_(MPLS_IP_BIND_UNBIND, mpls_ip_bind_unbind) \ +_(MPLS_ROUTE_ADD_DEL, mpls_route_add_del) \ +_(MPLS_TUNNEL_ADD_DEL, mpls_tunnel_add_del) \ +_(MPLS_TUNNEL_DUMP, mpls_tunnel_dump) \ +_(MPLS_TUNNEL_DETAILS, mpls_tunnel_details) \ +_(MPLS_FIB_DUMP, mpls_fib_dump) \ +_(MPLS_FIB_DETAILS, mpls_fib_details) + +extern void stats_dslock_with_hint (int hint, int tag); +extern void stats_dsunlock (void); + +static int +mpls_ip_bind_unbind_handler (vnet_main_t * vnm, + vl_api_mpls_ip_bind_unbind_t * mp) +{ + u32 mpls_fib_index, ip_fib_index; + + mpls_fib_index = + fib_table_find (FIB_PROTOCOL_MPLS, ntohl (mp->mb_mpls_table_id)); + + if (~0 == mpls_fib_index) + { + if (mp->mb_create_table_if_needed) + { + mpls_fib_index = + fib_table_find_or_create_and_lock (FIB_PROTOCOL_MPLS, + ntohl (mp->mb_mpls_table_id)); + } + else + return VNET_API_ERROR_NO_SUCH_FIB; + } + + ip_fib_index = fib_table_find ((mp->mb_is_ip4 ? + FIB_PROTOCOL_IP4 : + FIB_PROTOCOL_IP6), + ntohl (mp->mb_ip_table_id)); + if (~0 == ip_fib_index) + return VNET_API_ERROR_NO_SUCH_FIB; + + fib_prefix_t pfx = { + .fp_len = mp->mb_address_length, + }; + + if (mp->mb_is_ip4) + { + pfx.fp_proto = FIB_PROTOCOL_IP4; + clib_memcpy (&pfx.fp_addr.ip4, mp->mb_address, + sizeof (pfx.fp_addr.ip4)); + } + else + { + pfx.fp_proto = FIB_PROTOCOL_IP6; + clib_memcpy (&pfx.fp_addr.ip6, mp->mb_address, + sizeof (pfx.fp_addr.ip6)); + } + + if (mp->mb_is_bind) + fib_table_entry_local_label_add (ip_fib_index, &pfx, + ntohl (mp->mb_label)); + else + fib_table_entry_local_label_remove (ip_fib_index, &pfx, + ntohl (mp->mb_label)); + + return (0); +} + +void +vl_api_mpls_ip_bind_unbind_t_handler (vl_api_mpls_ip_bind_unbind_t * mp) +{ + vl_api_mpls_ip_bind_unbind_reply_t *rmp; + vnet_main_t *vnm; + int rv; + + vnm = vnet_get_main (); + vnm->api_errno = 0; + + rv = mpls_ip_bind_unbind_handler (vnm, mp); + rv = (rv == 0) ? 
vnm->api_errno : rv; + + REPLY_MACRO (VL_API_MPLS_IP_BIND_UNBIND_REPLY); +} + +static int +mpls_route_add_del_t_handler (vnet_main_t * vnm, + vl_api_mpls_route_add_del_t * mp) +{ + u32 fib_index, next_hop_fib_index; + mpls_label_t *label_stack = NULL; + int rv, ii, n_labels;; + + fib_prefix_t pfx = { + .fp_len = 21, + .fp_proto = FIB_PROTOCOL_MPLS, + .fp_eos = mp->mr_eos, + .fp_label = ntohl (mp->mr_label), + }; + if (pfx.fp_eos) + { + if (mp->mr_next_hop_proto_is_ip4) + { + pfx.fp_payload_proto = DPO_PROTO_IP4; + } + else + { + pfx.fp_payload_proto = DPO_PROTO_IP6; + } + } + else + { + pfx.fp_payload_proto = DPO_PROTO_MPLS; + } + + rv = add_del_route_check (FIB_PROTOCOL_MPLS, + mp->mr_table_id, + mp->mr_next_hop_sw_if_index, + dpo_proto_to_fib (pfx.fp_payload_proto), + mp->mr_next_hop_table_id, + mp->mr_create_table_if_needed, + &fib_index, &next_hop_fib_index); + + if (0 != rv) + return (rv); + + ip46_address_t nh; + memset (&nh, 0, sizeof (nh)); + + if (mp->mr_next_hop_proto_is_ip4) + memcpy (&nh.ip4, mp->mr_next_hop, sizeof (nh.ip4)); + else + memcpy (&nh.ip6, mp->mr_next_hop, sizeof (nh.ip6)); + + n_labels = mp->mr_next_hop_n_out_labels; + if (n_labels == 0) + ; + else if (1 == n_labels) + vec_add1 (label_stack, ntohl (mp->mr_next_hop_out_label_stack[0])); + else + { + vec_validate (label_stack, n_labels - 1); + for (ii = 0; ii < n_labels; ii++) + label_stack[ii] = ntohl (mp->mr_next_hop_out_label_stack[ii]); + } + + return (add_del_route_t_handler (mp->mr_is_multipath, mp->mr_is_add, 0, // mp->is_drop, + 0, // mp->is_unreach, + 0, // mp->is_prohibit, + 0, // mp->is_local, + mp->mr_is_classify, + mp->mr_classify_table_index, + mp->mr_is_resolve_host, + mp->mr_is_resolve_attached, + fib_index, &pfx, + mp->mr_next_hop_proto_is_ip4, + &nh, ntohl (mp->mr_next_hop_sw_if_index), + next_hop_fib_index, + mp->mr_next_hop_weight, + ntohl (mp->mr_next_hop_via_label), + label_stack)); +} + +void +vl_api_mpls_route_add_del_t_handler (vl_api_mpls_route_add_del_t * mp) +{ + vl_api_mpls_route_add_del_reply_t *rmp; + vnet_main_t *vnm; + int rv; + + vnm = vnet_get_main (); + vnm->api_errno = 0; + + rv = mpls_route_add_del_t_handler (vnm, mp); + + rv = (rv == 0) ? 
vnm->api_errno : rv; + + REPLY_MACRO (VL_API_MPLS_ROUTE_ADD_DEL_REPLY); +} + +static void +vl_api_mpls_tunnel_add_del_t_handler (vl_api_mpls_tunnel_add_del_t * mp) +{ + vl_api_mpls_tunnel_add_del_reply_t *rmp; + int rv = 0; + u32 tunnel_sw_if_index; + int ii; + + stats_dslock_with_hint (1 /* release hint */ , 5 /* tag */ ); + + if (mp->mt_is_add) + { + fib_route_path_t rpath, *rpaths = NULL; + mpls_label_t *label_stack = NULL; + + memset (&rpath, 0, sizeof (rpath)); + + if (mp->mt_next_hop_proto_is_ip4) + { + rpath.frp_proto = FIB_PROTOCOL_IP4; + clib_memcpy (&rpath.frp_addr.ip4, + mp->mt_next_hop, sizeof (rpath.frp_addr.ip4)); + } + else + { + rpath.frp_proto = FIB_PROTOCOL_IP6; + clib_memcpy (&rpath.frp_addr.ip6, + mp->mt_next_hop, sizeof (rpath.frp_addr.ip6)); + } + rpath.frp_sw_if_index = ntohl (mp->mt_next_hop_sw_if_index); + + for (ii = 0; ii < mp->mt_next_hop_n_out_labels; ii++) + vec_add1 (label_stack, ntohl (mp->mt_next_hop_out_label_stack[ii])); + + vec_add1 (rpaths, rpath); + + vnet_mpls_tunnel_add (rpaths, label_stack, + mp->mt_l2_only, &tunnel_sw_if_index); + vec_free (rpaths); + vec_free (label_stack); + } + else + { + tunnel_sw_if_index = ntohl (mp->mt_sw_if_index); + vnet_mpls_tunnel_del (tunnel_sw_if_index); + } + + stats_dsunlock (); + + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_MPLS_TUNNEL_ADD_DEL_REPLY, + ({ + rmp->sw_if_index = ntohl(tunnel_sw_if_index); + })); + /* *INDENT-ON* */ +} + +static void +vl_api_mpls_tunnel_details_t_handler (vl_api_mpls_tunnel_details_t * mp) +{ + clib_warning ("BUG"); +} + +typedef struct mpls_tunnel_send_walk_ctx_t_ +{ + unix_shared_memory_queue_t *q; + u32 index; + u32 context; +} mpls_tunnel_send_walk_ctx_t; + +static void +send_mpls_tunnel_entry (u32 mti, void *arg) +{ + mpls_tunnel_send_walk_ctx_t *ctx; + vl_api_mpls_tunnel_details_t *mp; + const mpls_tunnel_t *mt; + u32 nlabels; + + ctx = arg; + + if (~0 != ctx->index && mti != ctx->index) + return; + + mt = mpls_tunnel_get (mti); + nlabels = vec_len (mt->mt_label_stack); + + mp = vl_msg_api_alloc (sizeof (*mp) + nlabels * sizeof (u32)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_MPLS_TUNNEL_DETAILS); + mp->context = ctx->context; + + mp->tunnel_index = ntohl (mti); + memcpy (mp->mt_next_hop_out_labels, + mt->mt_label_stack, nlabels * sizeof (u32)); + + // FIXME + + vl_msg_api_send_shmem (ctx->q, (u8 *) & mp); +} + +static void +vl_api_mpls_tunnel_dump_t_handler (vl_api_mpls_tunnel_dump_t * mp) +{ + unix_shared_memory_queue_t *q; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + return; + + mpls_tunnel_send_walk_ctx_t ctx = { + .q = q, + .index = ntohl (mp->tunnel_index), + .context = mp->context, + }; + mpls_tunnel_walk (send_mpls_tunnel_entry, &ctx); +} + +static void +vl_api_mpls_fib_details_t_handler (vl_api_mpls_fib_details_t * mp) +{ + clib_warning ("BUG"); +} + +static void +vl_api_mpls_fib_details_t_endian (vl_api_mpls_fib_details_t * mp) +{ + clib_warning ("BUG"); +} + +static void +vl_api_mpls_fib_details_t_print (vl_api_mpls_fib_details_t * mp) +{ + clib_warning ("BUG"); +} + +static void +send_mpls_fib_details (vpe_api_main_t * am, + unix_shared_memory_queue_t * q, + u32 table_id, u32 label, u32 eos, + fib_route_path_encode_t * api_rpaths, u32 context) +{ + vl_api_mpls_fib_details_t *mp; + fib_route_path_encode_t *api_rpath; + vl_api_fib_path2_t *fp; + int path_count; + + path_count = vec_len (api_rpaths); + mp = vl_msg_api_alloc (sizeof (*mp) + path_count * sizeof (*fp)); + if (!mp) + return; + memset (mp, 0, sizeof 
(*mp)); + mp->_vl_msg_id = ntohs (VL_API_MPLS_FIB_DETAILS); + mp->context = context; + + mp->table_id = htonl (table_id); + mp->eos_bit = eos; + mp->label = htonl (label); + + mp->count = htonl (path_count); + fp = mp->path; + vec_foreach (api_rpath, api_rpaths) + { + memset (fp, 0, sizeof (*fp)); + fp->weight = htonl (api_rpath->rpath.frp_weight); + fp->sw_if_index = htonl (api_rpath->rpath.frp_sw_if_index); + copy_fib_next_hop (api_rpath, fp); + fp++; + } + + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +static void +vl_api_mpls_fib_dump_t_handler (vl_api_mpls_fib_dump_t * mp) +{ + vpe_api_main_t *am = &vpe_api_main; + unix_shared_memory_queue_t *q; + mpls_main_t *mm = &mpls_main; + fib_table_t *fib_table; + fib_node_index_t lfei, *lfeip, *lfeis = NULL; + mpls_label_t key; + fib_prefix_t pfx; + u32 fib_index; + fib_route_path_encode_t *api_rpaths; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + return; + + /* *INDENT-OFF* */ + pool_foreach (fib_table, mm->fibs, + ({ + hash_foreach(key, lfei, fib_table->mpls.mf_entries, + ({ + vec_add1(lfeis, lfei); + })); + })); + /* *INDENT-ON* */ + vec_sort_with_function (lfeis, fib_entry_cmp_for_sort); + + vec_foreach (lfeip, lfeis) + { + fib_entry_get_prefix (*lfeip, &pfx); + fib_index = fib_entry_get_fib_index (*lfeip); + fib_table = fib_table_get (fib_index, pfx.fp_proto); + api_rpaths = NULL; + fib_entry_encode (*lfeip, &api_rpaths); + send_mpls_fib_details (am, q, + fib_table->ft_table_id, + pfx.fp_label, pfx.fp_eos, api_rpaths, mp->context); + vec_free (api_rpaths); + } + + vec_free (lfeis); +} + +/* + * mpls_api_hookup + * Add vpe's API message handlers to the table. + * vlib has alread mapped shared memory and + * added the client registration handlers. + * See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process() + */ +#define vl_msg_name_crc_list +#include +#undef vl_msg_name_crc_list + +static void +setup_message_id_table (api_main_t * am) +{ +#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id); + foreach_vl_msg_name_crc_mpls; +#undef _ +} + +static clib_error_t * +mpls_api_hookup (vlib_main_t * vm) +{ + api_main_t *am = &api_main; + +#define _(N,n) \ + vl_msg_api_set_handlers(VL_API_##N, #n, \ + vl_api_##n##_t_handler, \ + vl_noop_handler, \ + vl_api_##n##_t_endian, \ + vl_api_##n##_t_print, \ + sizeof(vl_api_##n##_t), 1); + foreach_vpe_api_msg; +#undef _ + + /* + * Trace space for 8 MPLS encap labels + */ + am->api_trace_cfg[VL_API_MPLS_TUNNEL_ADD_DEL].size += 8 * sizeof (u32); + + /* + * Set up the (msg_name, crc, message-id) table + */ + setup_message_id_table (am); + + return 0; +} + +VLIB_API_INIT_FUNCTION (mpls_api_hookup); + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/sr/sr.api b/src/vnet/sr/sr.api new file mode 100644 index 00000000..3d017ce5 --- /dev/null +++ b/src/vnet/sr/sr.api @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2015-2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** \brief IPv6 segment routing tunnel add / del request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_add - add the tunnel if non-zero, else delete it + @param name[] - tunnel name (len. 64) + @param src_address[] - + @param dst_address[] - + @param dst_mask_width - + @param inner_vrf_id - + @param outer_vrf_id - + @param flags_net_byte_order - + @param n_segments - + @param n_tags - + @param segs_and_tags[] - + @param policy_name[] - name of policy to associate this tunnel to (len. 64) +*/ +define sr_tunnel_add_del +{ + u32 client_index; + u32 context; + u8 is_add; + u8 name[64]; + u8 src_address[16]; + u8 dst_address[16]; + u8 dst_mask_width; + u32 inner_vrf_id; + u32 outer_vrf_id; + u16 flags_net_byte_order; + u8 n_segments; + u8 n_tags; + u8 policy_name[64]; + u8 segs_and_tags[0]; +}; + +/** \brief IPv6 segment routing tunnel add / del response + @param context - sender context, to match reply w/ request + @param retval - return value for request +*/ +define sr_tunnel_add_del_reply +{ + u32 context; + i32 retval; +}; + +/** \brief IPv6 segment routing policy add / del request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_add - add the tunnel if non-zero, else delete it + @param name[] - policy name (len. 64) + @param tunnel_names[] - +*/ +define sr_policy_add_del +{ + u32 client_index; + u32 context; + u8 is_add; + u8 name[64]; + u8 tunnel_names[0]; +}; + +/** \brief IPv6 segment routing policy add / del response + @param context - sender context, to match reply w/ request + @param retval - return value for request + + +*/ +define sr_policy_add_del_reply +{ + u32 context; + i32 retval; +}; + +/** \brief IPv6 segment routing multicast map to policy add / del request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_add - add the tunnel if non-zero, else delete it + @param multicast_address[] - IP6 multicast address + @param policy_name[] = policy name (len.64) +*/ +define sr_multicast_map_add_del +{ + u32 client_index; + u32 context; + u8 is_add; + u8 multicast_address[16]; + u8 policy_name[64]; +}; + +/** \brief IPv6 segment routing multicast map to policy add / del response + @param context - sender context, to match reply w/ request + @param retval - return value for request +*/ +define sr_multicast_map_add_del_reply +{ + u32 context; + i32 retval; +}; + +/* + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ + \ No newline at end of file diff --git a/src/vnet/sr/sr_api.c b/src/vnet/sr/sr_api.c new file mode 100644 index 00000000..6c6eb9b6 --- /dev/null +++ b/src/vnet/sr/sr_api.c @@ -0,0 +1,279 @@ +/* + *------------------------------------------------------------------ + * sr_api.c - ipv6 segment routing api + * + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + *------------------------------------------------------------------ + */ + +#include +#include + +#include +#include +#include + +#include + +#define vl_typedefs /* define message structures */ +#include +#undef vl_typedefs + +#define vl_endianfun /* define message structures */ +#include +#undef vl_endianfun + +/* instantiate all the print functions we know about */ +#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__) +#define vl_printfun +#include +#undef vl_printfun + +#include + +#define foreach_vpe_api_msg \ +_(SR_MULTICAST_MAP_ADD_DEL, sr_multicast_map_add_del) + +static void vl_api_sr_tunnel_add_del_t_handler + (vl_api_sr_tunnel_add_del_t * mp) +{ +#if IP6SR == 0 + clib_warning ("unimplemented"); +#else + ip6_sr_add_del_tunnel_args_t _a, *a = &_a; + int rv = 0; + vl_api_sr_tunnel_add_del_reply_t *rmp; + ip6_address_t *segments = 0, *seg; + ip6_address_t *tags = 0, *tag; + ip6_address_t *this_address; + int i; + + if (mp->n_segments == 0) + { + rv = -11; + goto out; + } + + memset (a, 0, sizeof (*a)); + a->src_address = (ip6_address_t *) & mp->src_address; + a->dst_address = (ip6_address_t *) & mp->dst_address; + a->dst_mask_width = mp->dst_mask_width; + a->flags_net_byte_order = mp->flags_net_byte_order; + a->is_del = (mp->is_add == 0); + a->rx_table_id = ntohl (mp->outer_vrf_id); + a->tx_table_id = ntohl (mp->inner_vrf_id); + + a->name = format (0, "%s", mp->name); + if (!(vec_len (a->name))) + a->name = 0; + + a->policy_name = format (0, "%s", mp->policy_name); + if (!(vec_len (a->policy_name))) + a->policy_name = 0; + + /* Yank segments and tags out of the API message */ + this_address = (ip6_address_t *) mp->segs_and_tags; + for (i = 0; i < mp->n_segments; i++) + { + vec_add2 (segments, seg, 1); + clib_memcpy (seg->as_u8, this_address->as_u8, sizeof (*this_address)); + this_address++; + } + for (i = 0; i < mp->n_tags; i++) + { + vec_add2 (tags, tag, 1); + clib_memcpy (tag->as_u8, this_address->as_u8, sizeof (*this_address)); + this_address++; + } + + a->segments = segments; + a->tags = tags; + + rv = ip6_sr_add_del_tunnel (a); + +out: + + REPLY_MACRO (VL_API_SR_TUNNEL_ADD_DEL_REPLY); +#endif +} + +static void vl_api_sr_policy_add_del_t_handler + (vl_api_sr_policy_add_del_t * mp) +{ +#if IP6SR == 0 + clib_warning ("unimplemented"); +#else + ip6_sr_add_del_policy_args_t _a, *a = &_a; + int rv = 0; + vl_api_sr_policy_add_del_reply_t *rmp; + int i; + + memset (a, 0, sizeof (*a)); + a->is_del = (mp->is_add == 0); + + a->name = format (0, "%s", mp->name); + if (!(vec_len (a->name))) + { + rv = VNET_API_ERROR_NO_SUCH_NODE2; + goto out; + } + + if (!(mp->tunnel_names[0])) + { + rv = VNET_API_ERROR_NO_SUCH_NODE2; + goto out; + } + + // start deserializing tunnel_names + int num_tunnels = mp->tunnel_names[0]; //number of tunnels + u8 *deser_tun_names = mp->tunnel_names; + deser_tun_names += 1; //moving along + + u8 *tun_name = 0; + int tun_name_len = 0; + + for (i = 0; i < num_tunnels; i++) + { + tun_name_len = *deser_tun_names; + deser_tun_names += 1; + vec_resize (tun_name, tun_name_len); + memcpy (tun_name, deser_tun_names, tun_name_len); + vec_add1 
(a->tunnel_names, tun_name); + deser_tun_names += tun_name_len; + tun_name = 0; + } + + rv = ip6_sr_add_del_policy (a); + +out: + + REPLY_MACRO (VL_API_SR_POLICY_ADD_DEL_REPLY); +#endif +} + +static void vl_api_sr_multicast_map_add_del_t_handler + (vl_api_sr_multicast_map_add_del_t * mp) +{ +#if IP6SR == 0 + clib_warning ("unimplemented"); +#else + ip6_sr_add_del_multicastmap_args_t _a, *a = &_a; + int rv = 0; + vl_api_sr_multicast_map_add_del_reply_t *rmp; + + memset (a, 0, sizeof (*a)); + a->is_del = (mp->is_add == 0); + + a->multicast_address = (ip6_address_t *) & mp->multicast_address; + a->policy_name = format (0, "%s", mp->policy_name); + + if (a->multicast_address == 0) + { + rv = -1; + goto out; + } + + if (!(a->policy_name)) + { + rv = -2; + goto out; + } + +#if DPDK > 0 /* Cannot call replicate without DPDK */ + rv = ip6_sr_add_del_multicastmap (a); +#else + clib_warning ("multicast replication without DPDK not implemented"); + rv = VNET_API_ERROR_UNIMPLEMENTED; +#endif /* DPDK */ + +out: + + REPLY_MACRO (VL_API_SR_MULTICAST_MAP_ADD_DEL_REPLY); +#endif +} + +/* + * sr_api_hookup + * Add vpe's API message handlers to the table. + * vlib has alread mapped shared memory and + * added the client registration handlers. + * See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process() + */ +#define vl_msg_name_crc_list +#include +#undef vl_msg_name_crc_list + +static void +setup_message_id_table (api_main_t * am) +{ +#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id); + foreach_vl_msg_name_crc_sr; +#undef _ +} + +static clib_error_t * +sr_api_hookup (vlib_main_t * vm) +{ + api_main_t *am = &api_main; + +#define _(N,n) \ + vl_msg_api_set_handlers(VL_API_##N, #n, \ + vl_api_##n##_t_handler, \ + vl_noop_handler, \ + vl_api_##n##_t_endian, \ + vl_api_##n##_t_print, \ + sizeof(vl_api_##n##_t), 1); + foreach_vpe_api_msg; +#undef _ + + /* + * Manually register the sr tunnel add del msg, so we trace + * enough bytes to capture a typical segment list + */ + vl_msg_api_set_handlers (VL_API_SR_TUNNEL_ADD_DEL, + "sr_tunnel_add_del", + vl_api_sr_tunnel_add_del_t_handler, + vl_noop_handler, + vl_api_sr_tunnel_add_del_t_endian, + vl_api_sr_tunnel_add_del_t_print, 256, 1); + + + /* + * Manually register the sr policy add del msg, so we trace + * enough bytes to capture a typical tunnel name list + */ + vl_msg_api_set_handlers (VL_API_SR_POLICY_ADD_DEL, + "sr_policy_add_del", + vl_api_sr_policy_add_del_t_handler, + vl_noop_handler, + vl_api_sr_policy_add_del_t_endian, + vl_api_sr_policy_add_del_t_print, 256, 1); + + /* + * Set up the (msg_name, crc, message-id) table + */ + setup_message_id_table (am); + + return 0; +} + +VLIB_API_INIT_FUNCTION (sr_api_hookup); + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/vnet_all_api_h.h b/src/vnet/vnet_all_api_h.h index d48e1540..1024f92c 100644 --- a/src/vnet/vnet_all_api_h.h +++ b/src/vnet/vnet_all_api_h.h @@ -50,6 +50,8 @@ #include #include #include +#include +#include /* * fd.io coding-style-patch-verification: ON diff --git a/src/vpp/api/api.c b/src/vpp/api/api.c index 46e28e9d..3d6905dd 100644 --- a/src/vpp/api/api.c +++ b/src/vpp/api/api.c @@ -52,8 +52,6 @@ #include #include #include -#include -#include #include #include #if IPV6SR > 0 @@ -119,24 +117,14 @@ #define foreach_vpe_api_msg \ _(WANT_OAM_EVENTS, want_oam_events) \ _(OAM_ADD_DEL, oam_add_del) \ -_(MPLS_ROUTE_ADD_DEL, mpls_route_add_del) \ -_(MPLS_IP_BIND_UNBIND, mpls_ip_bind_unbind) \ 
_(IS_ADDRESS_REACHABLE, is_address_reachable) \ _(SW_INTERFACE_SET_MPLS_ENABLE, sw_interface_set_mpls_enable) \ _(SW_INTERFACE_SET_VPATH, sw_interface_set_vpath) \ _(SW_INTERFACE_SET_VXLAN_BYPASS, sw_interface_set_vxlan_bypass) \ _(SW_INTERFACE_SET_L2_XCONNECT, sw_interface_set_l2_xconnect) \ _(SW_INTERFACE_SET_L2_BRIDGE, sw_interface_set_l2_bridge) \ -_(BRIDGE_DOMAIN_ADD_DEL, bridge_domain_add_del) \ -_(BRIDGE_DOMAIN_DUMP, bridge_domain_dump) \ -_(BRIDGE_DOMAIN_DETAILS, bridge_domain_details) \ -_(BRIDGE_DOMAIN_SW_IF_DETAILS, bridge_domain_sw_if_details) \ -_(L2FIB_ADD_DEL, l2fib_add_del) \ -_(L2_FLAGS, l2_flags) \ -_(BRIDGE_FLAGS, bridge_flags) \ _(CREATE_VLAN_SUBIF, create_vlan_subif) \ _(CREATE_SUBIF, create_subif) \ -_(MPLS_TUNNEL_ADD_DEL, mpls_tunnel_add_del) \ _(PROXY_ARP_ADD_DEL, proxy_arp_add_del) \ _(PROXY_ARP_INTFC_ENABLE_DISABLE, proxy_arp_intfc_enable_disable) \ _(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats) \ @@ -159,12 +147,9 @@ _(GET_NODE_INDEX, get_node_index) \ _(ADD_NODE_NEXT, add_node_next) \ _(VXLAN_ADD_DEL_TUNNEL, vxlan_add_del_tunnel) \ _(VXLAN_TUNNEL_DUMP, vxlan_tunnel_dump) \ -_(L2_FIB_CLEAR_TABLE, l2_fib_clear_table) \ _(L2_INTERFACE_EFP_FILTER, l2_interface_efp_filter) \ _(L2_INTERFACE_VLAN_TAG_REWRITE, l2_interface_vlan_tag_rewrite) \ _(SHOW_VERSION, show_version) \ -_(L2_FIB_TABLE_DUMP, l2_fib_table_dump) \ -_(L2_FIB_TABLE_ENTRY, l2_fib_table_entry) \ _(VXLAN_GPE_ADD_DEL_TUNNEL, vxlan_gpe_add_del_tunnel) \ _(VXLAN_GPE_TUNNEL_DUMP, vxlan_gpe_tunnel_dump) \ _(INTERFACE_NAME_RENUMBER, interface_name_renumber) \ @@ -178,15 +163,10 @@ _(COP_WHITELIST_ENABLE_DISABLE, cop_whitelist_enable_disable) \ _(GET_NODE_GRAPH, get_node_graph) \ _(IOAM_ENABLE, ioam_enable) \ _(IOAM_DISABLE, ioam_disable) \ -_(SR_MULTICAST_MAP_ADD_DEL, sr_multicast_map_add_del) \ _(POLICER_ADD_DEL, policer_add_del) \ _(POLICER_DUMP, policer_dump) \ _(POLICER_CLASSIFY_SET_INTERFACE, policer_classify_set_interface) \ _(POLICER_CLASSIFY_DUMP, policer_classify_dump) \ -_(MPLS_TUNNEL_DUMP, mpls_tunnel_dump) \ -_(MPLS_TUNNEL_DETAILS, mpls_tunnel_details) \ -_(MPLS_FIB_DUMP, mpls_fib_dump) \ -_(MPLS_FIB_DETAILS, mpls_fib_details) \ _(CLASSIFY_TABLE_IDS,classify_table_ids) \ _(CLASSIFY_TABLE_BY_INTERFACE, classify_table_by_interface) \ _(CLASSIFY_TABLE_INFO,classify_table_info) \ @@ -411,173 +391,6 @@ VLIB_REGISTER_NODE (vpe_resolver_process_node,static) = { }; /* *INDENT-ON* */ -static int -mpls_route_add_del_t_handler (vnet_main_t * vnm, - vl_api_mpls_route_add_del_t * mp) -{ - u32 fib_index, next_hop_fib_index; - mpls_label_t *label_stack = NULL; - int rv, ii, n_labels;; - - fib_prefix_t pfx = { - .fp_len = 21, - .fp_proto = FIB_PROTOCOL_MPLS, - .fp_eos = mp->mr_eos, - .fp_label = ntohl (mp->mr_label), - }; - if (pfx.fp_eos) - { - if (mp->mr_next_hop_proto_is_ip4) - { - pfx.fp_payload_proto = DPO_PROTO_IP4; - } - else - { - pfx.fp_payload_proto = DPO_PROTO_IP6; - } - } - else - { - pfx.fp_payload_proto = DPO_PROTO_MPLS; - } - - rv = add_del_route_check (FIB_PROTOCOL_MPLS, - mp->mr_table_id, - mp->mr_next_hop_sw_if_index, - dpo_proto_to_fib (pfx.fp_payload_proto), - mp->mr_next_hop_table_id, - mp->mr_create_table_if_needed, - &fib_index, &next_hop_fib_index); - - if (0 != rv) - return (rv); - - ip46_address_t nh; - memset (&nh, 0, sizeof (nh)); - - if (mp->mr_next_hop_proto_is_ip4) - memcpy (&nh.ip4, mp->mr_next_hop, sizeof (nh.ip4)); - else - memcpy (&nh.ip6, mp->mr_next_hop, sizeof (nh.ip6)); - - n_labels = mp->mr_next_hop_n_out_labels; - if (n_labels == 0) - ; - else if (1 == n_labels) - vec_add1 
(label_stack, ntohl (mp->mr_next_hop_out_label_stack[0])); - else - { - vec_validate (label_stack, n_labels - 1); - for (ii = 0; ii < n_labels; ii++) - label_stack[ii] = ntohl (mp->mr_next_hop_out_label_stack[ii]); - } - - return (add_del_route_t_handler (mp->mr_is_multipath, mp->mr_is_add, 0, // mp->is_drop, - 0, // mp->is_unreach, - 0, // mp->is_prohibit, - 0, // mp->is_local, - mp->mr_is_classify, - mp->mr_classify_table_index, - mp->mr_is_resolve_host, - mp->mr_is_resolve_attached, - fib_index, &pfx, - mp->mr_next_hop_proto_is_ip4, - &nh, ntohl (mp->mr_next_hop_sw_if_index), - next_hop_fib_index, - mp->mr_next_hop_weight, - ntohl (mp->mr_next_hop_via_label), - label_stack)); -} - -void -vl_api_mpls_route_add_del_t_handler (vl_api_mpls_route_add_del_t * mp) -{ - vl_api_mpls_route_add_del_reply_t *rmp; - vnet_main_t *vnm; - int rv; - - vnm = vnet_get_main (); - vnm->api_errno = 0; - - rv = mpls_route_add_del_t_handler (vnm, mp); - - rv = (rv == 0) ? vnm->api_errno : rv; - - REPLY_MACRO (VL_API_MPLS_ROUTE_ADD_DEL_REPLY); -} - -static int -mpls_ip_bind_unbind_handler (vnet_main_t * vnm, - vl_api_mpls_ip_bind_unbind_t * mp) -{ - u32 mpls_fib_index, ip_fib_index; - - mpls_fib_index = - fib_table_find (FIB_PROTOCOL_MPLS, ntohl (mp->mb_mpls_table_id)); - - if (~0 == mpls_fib_index) - { - if (mp->mb_create_table_if_needed) - { - mpls_fib_index = - fib_table_find_or_create_and_lock (FIB_PROTOCOL_MPLS, - ntohl (mp->mb_mpls_table_id)); - } - else - return VNET_API_ERROR_NO_SUCH_FIB; - } - - ip_fib_index = fib_table_find ((mp->mb_is_ip4 ? - FIB_PROTOCOL_IP4 : - FIB_PROTOCOL_IP6), - ntohl (mp->mb_ip_table_id)); - if (~0 == ip_fib_index) - return VNET_API_ERROR_NO_SUCH_FIB; - - fib_prefix_t pfx = { - .fp_len = mp->mb_address_length, - }; - - if (mp->mb_is_ip4) - { - pfx.fp_proto = FIB_PROTOCOL_IP4; - clib_memcpy (&pfx.fp_addr.ip4, mp->mb_address, - sizeof (pfx.fp_addr.ip4)); - } - else - { - pfx.fp_proto = FIB_PROTOCOL_IP6; - clib_memcpy (&pfx.fp_addr.ip6, mp->mb_address, - sizeof (pfx.fp_addr.ip6)); - } - - if (mp->mb_is_bind) - fib_table_entry_local_label_add (ip_fib_index, &pfx, - ntohl (mp->mb_label)); - else - fib_table_entry_local_label_remove (ip_fib_index, &pfx, - ntohl (mp->mb_label)); - - return (0); -} - -void -vl_api_mpls_ip_bind_unbind_t_handler (vl_api_mpls_ip_bind_unbind_t * mp) -{ - vl_api_mpls_route_add_del_reply_t *rmp; - vnet_main_t *vnm; - int rv; - - vnm = vnet_get_main (); - vnm->api_errno = 0; - - rv = mpls_ip_bind_unbind_handler (vnm, mp); - - rv = (rv == 0) ? 
vnm->api_errno : rv; - - REPLY_MACRO (VL_API_MPLS_ROUTE_ADD_DEL_REPLY); -} - static void vl_api_sw_interface_set_vpath_t_handler (vl_api_sw_interface_set_vpath_t * mp) { @@ -690,278 +503,6 @@ static void REPLY_MACRO (VL_API_SW_INTERFACE_SET_L2_BRIDGE_REPLY); } -static void -vl_api_bridge_domain_add_del_t_handler (vl_api_bridge_domain_add_del_t * mp) -{ - vlib_main_t *vm = vlib_get_main (); - bd_main_t *bdm = &bd_main; - vl_api_bridge_domain_add_del_reply_t *rmp; - int rv = 0; - u32 enable_flags = 0, disable_flags = 0; - u32 bd_id = ntohl (mp->bd_id); - u32 bd_index; - - if (mp->is_add) - { - bd_index = bd_find_or_add_bd_index (bdm, bd_id); - - if (mp->flood) - enable_flags |= L2_FLOOD; - else - disable_flags |= L2_FLOOD; - - if (mp->uu_flood) - enable_flags |= L2_UU_FLOOD; - else - disable_flags |= L2_UU_FLOOD; - - if (mp->forward) - enable_flags |= L2_FWD; - else - disable_flags |= L2_FWD; - - if (mp->arp_term) - enable_flags |= L2_ARP_TERM; - else - disable_flags |= L2_ARP_TERM; - - if (mp->learn) - enable_flags |= L2_LEARN; - else - disable_flags |= L2_LEARN; - - if (enable_flags) - bd_set_flags (vm, bd_index, enable_flags, 1 /* enable */ ); - - if (disable_flags) - bd_set_flags (vm, bd_index, disable_flags, 0 /* disable */ ); - - bd_set_mac_age (vm, bd_index, mp->mac_age); - } - else - rv = bd_delete_bd_index (bdm, bd_id); - - REPLY_MACRO (VL_API_BRIDGE_DOMAIN_ADD_DEL_REPLY); -} - -static void -vl_api_bridge_domain_details_t_handler (vl_api_bridge_domain_details_t * mp) -{ - clib_warning ("BUG"); -} - -static void - vl_api_bridge_domain_sw_if_details_t_handler - (vl_api_bridge_domain_sw_if_details_t * mp) -{ - clib_warning ("BUG"); -} - -static void -send_bridge_domain_details (unix_shared_memory_queue_t * q, - l2_bridge_domain_t * bd_config, - u32 n_sw_ifs, u32 context) -{ - vl_api_bridge_domain_details_t *mp; - - mp = vl_msg_api_alloc (sizeof (*mp)); - memset (mp, 0, sizeof (*mp)); - mp->_vl_msg_id = ntohs (VL_API_BRIDGE_DOMAIN_DETAILS); - mp->bd_id = ntohl (bd_config->bd_id); - mp->flood = bd_feature_flood (bd_config); - mp->uu_flood = bd_feature_uu_flood (bd_config); - mp->forward = bd_feature_forward (bd_config); - mp->learn = bd_feature_learn (bd_config); - mp->arp_term = bd_feature_arp_term (bd_config); - mp->bvi_sw_if_index = ntohl (bd_config->bvi_sw_if_index); - mp->mac_age = bd_config->mac_age; - mp->n_sw_ifs = ntohl (n_sw_ifs); - mp->context = context; - - vl_msg_api_send_shmem (q, (u8 *) & mp); -} - -static void -send_bd_sw_if_details (l2input_main_t * l2im, - unix_shared_memory_queue_t * q, - l2_flood_member_t * member, u32 bd_id, u32 context) -{ - vl_api_bridge_domain_sw_if_details_t *mp; - l2_input_config_t *input_cfg; - - mp = vl_msg_api_alloc (sizeof (*mp)); - memset (mp, 0, sizeof (*mp)); - mp->_vl_msg_id = ntohs (VL_API_BRIDGE_DOMAIN_SW_IF_DETAILS); - mp->bd_id = ntohl (bd_id); - mp->sw_if_index = ntohl (member->sw_if_index); - input_cfg = vec_elt_at_index (l2im->configs, member->sw_if_index); - mp->shg = input_cfg->shg; - mp->context = context; - - vl_msg_api_send_shmem (q, (u8 *) & mp); -} - -static void -vl_api_bridge_domain_dump_t_handler (vl_api_bridge_domain_dump_t * mp) -{ - bd_main_t *bdm = &bd_main; - l2input_main_t *l2im = &l2input_main; - unix_shared_memory_queue_t *q; - l2_bridge_domain_t *bd_config; - u32 bd_id, bd_index; - u32 end; - - q = vl_api_client_index_to_input_queue (mp->client_index); - - if (q == 0) - return; - - bd_id = ntohl (mp->bd_id); - - bd_index = (bd_id == ~0) ? 0 : bd_find_or_add_bd_index (bdm, bd_id); - end = (bd_id == ~0) ? 
vec_len (l2im->bd_configs) : bd_index + 1; - for (; bd_index < end; bd_index++) - { - bd_config = l2input_bd_config_from_index (l2im, bd_index); - /* skip dummy bd_id 0 */ - if (bd_config && (bd_config->bd_id > 0)) - { - u32 n_sw_ifs; - l2_flood_member_t *m; - - n_sw_ifs = vec_len (bd_config->members); - send_bridge_domain_details (q, bd_config, n_sw_ifs, mp->context); - - vec_foreach (m, bd_config->members) - { - send_bd_sw_if_details (l2im, q, m, bd_config->bd_id, mp->context); - } - } - } -} - -static void -vl_api_l2fib_add_del_t_handler (vl_api_l2fib_add_del_t * mp) -{ - bd_main_t *bdm = &bd_main; - l2input_main_t *l2im = &l2input_main; - vl_api_l2fib_add_del_reply_t *rmp; - int rv = 0; - u64 mac = 0; - u32 sw_if_index = ntohl (mp->sw_if_index); - u32 bd_id = ntohl (mp->bd_id); - u32 bd_index; - u32 static_mac; - u32 filter_mac; - u32 bvi_mac; - uword *p; - - mac = mp->mac; - - p = hash_get (bdm->bd_index_by_bd_id, bd_id); - if (!p) - { - rv = VNET_API_ERROR_NO_SUCH_ENTRY; - goto bad_sw_if_index; - } - bd_index = p[0]; - - if (mp->is_add) - { - filter_mac = mp->filter_mac ? 1 : 0; - if (filter_mac == 0) - { - VALIDATE_SW_IF_INDEX (mp); - if (vec_len (l2im->configs) <= sw_if_index) - { - rv = VNET_API_ERROR_INVALID_SW_IF_INDEX; - goto bad_sw_if_index; - } - else - { - l2_input_config_t *config; - config = vec_elt_at_index (l2im->configs, sw_if_index); - if (config->bridge == 0) - { - rv = VNET_API_ERROR_INVALID_SW_IF_INDEX; - goto bad_sw_if_index; - } - } - } - static_mac = mp->static_mac ? 1 : 0; - bvi_mac = mp->bvi_mac ? 1 : 0; - l2fib_add_entry (mac, bd_index, sw_if_index, static_mac, filter_mac, - bvi_mac); - } - else - { - l2fib_del_entry (mac, bd_index); - } - - BAD_SW_IF_INDEX_LABEL; - - REPLY_MACRO (VL_API_L2FIB_ADD_DEL_REPLY); -} - -static void -vl_api_l2_flags_t_handler (vl_api_l2_flags_t * mp) -{ - vl_api_l2_flags_reply_t *rmp; - int rv = 0; - u32 sw_if_index = ntohl (mp->sw_if_index); - u32 flags = ntohl (mp->feature_bitmap); - u32 rbm = 0; - - VALIDATE_SW_IF_INDEX (mp); - -#define _(a,b) \ - if (flags & L2INPUT_FEAT_ ## a) \ - rbm = l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_ ## a, mp->is_set); - foreach_l2input_feat; -#undef _ - - BAD_SW_IF_INDEX_LABEL; - - /* *INDENT-OFF* */ - REPLY_MACRO2(VL_API_L2_FLAGS_REPLY, - ({ - rmp->resulting_feature_bitmap = ntohl(rbm); - })); - /* *INDENT-ON* */ -} - -static void -vl_api_bridge_flags_t_handler (vl_api_bridge_flags_t * mp) -{ - vlib_main_t *vm = vlib_get_main (); - bd_main_t *bdm = &bd_main; - vl_api_bridge_flags_reply_t *rmp; - int rv = 0; - u32 bd_id = ntohl (mp->bd_id); - u32 bd_index; - u32 flags = ntohl (mp->feature_bitmap); - uword *p; - - p = hash_get (bdm->bd_index_by_bd_id, bd_id); - if (p == 0) - { - rv = VNET_API_ERROR_NO_SUCH_ENTRY; - goto out; - } - - bd_index = p[0]; - - bd_set_flags (vm, bd_index, flags, mp->is_set); - -out: - /* *INDENT-OFF* */ - REPLY_MACRO2(VL_API_BRIDGE_FLAGS_REPLY, - ({ - rmp->resulting_feature_bitmap = ntohl(flags); - })); - /* *INDENT-ON* */ -} - static void vl_api_bd_ip_mac_add_del_t_handler (vl_api_bd_ip_mac_add_del_t * mp) { @@ -1147,64 +688,6 @@ out: /* *INDENT-ON* */ } -static void -vl_api_mpls_tunnel_add_del_t_handler (vl_api_mpls_tunnel_add_del_t * mp) -{ - vl_api_mpls_tunnel_add_del_reply_t *rmp; - int rv = 0; - stats_main_t *sm = &stats_main; - u32 tunnel_sw_if_index; - int ii; - - dslock (sm, 1 /* release hint */ , 5 /* tag */ ); - - if (mp->mt_is_add) - { - fib_route_path_t rpath, *rpaths = NULL; - mpls_label_t *label_stack = NULL; - - memset (&rpath, 0, sizeof 
(rpath)); - - if (mp->mt_next_hop_proto_is_ip4) - { - rpath.frp_proto = FIB_PROTOCOL_IP4; - clib_memcpy (&rpath.frp_addr.ip4, - mp->mt_next_hop, sizeof (rpath.frp_addr.ip4)); - } - else - { - rpath.frp_proto = FIB_PROTOCOL_IP6; - clib_memcpy (&rpath.frp_addr.ip6, - mp->mt_next_hop, sizeof (rpath.frp_addr.ip6)); - } - rpath.frp_sw_if_index = ntohl (mp->mt_next_hop_sw_if_index); - - for (ii = 0; ii < mp->mt_next_hop_n_out_labels; ii++) - vec_add1 (label_stack, ntohl (mp->mt_next_hop_out_label_stack[ii])); - - vec_add1 (rpaths, rpath); - - vnet_mpls_tunnel_add (rpaths, label_stack, - mp->mt_l2_only, &tunnel_sw_if_index); - vec_free (rpaths); - vec_free (label_stack); - } - else - { - tunnel_sw_if_index = ntohl (mp->mt_sw_if_index); - vnet_mpls_tunnel_del (tunnel_sw_if_index); - } - - dsunlock (sm); - - /* *INDENT-OFF* */ - REPLY_MACRO2(VL_API_MPLS_TUNNEL_ADD_DEL_REPLY, - ({ - rmp->sw_if_index = ntohl(tunnel_sw_if_index); - })); - /* *INDENT-ON* */ -} - static void vl_api_proxy_arp_add_del_t_handler (vl_api_proxy_arp_add_del_t * mp) { @@ -1929,164 +1412,6 @@ vl_api_set_arp_neighbor_limit_t_handler (vl_api_set_arp_neighbor_limit_t * mp) REPLY_MACRO (VL_API_SET_ARP_NEIGHBOR_LIMIT_REPLY); } -static void vl_api_sr_tunnel_add_del_t_handler - (vl_api_sr_tunnel_add_del_t * mp) -{ -#if IP6SR == 0 - clib_warning ("unimplemented"); -#else - ip6_sr_add_del_tunnel_args_t _a, *a = &_a; - int rv = 0; - vl_api_sr_tunnel_add_del_reply_t *rmp; - ip6_address_t *segments = 0, *seg; - ip6_address_t *tags = 0, *tag; - ip6_address_t *this_address; - int i; - - if (mp->n_segments == 0) - { - rv = -11; - goto out; - } - - memset (a, 0, sizeof (*a)); - a->src_address = (ip6_address_t *) & mp->src_address; - a->dst_address = (ip6_address_t *) & mp->dst_address; - a->dst_mask_width = mp->dst_mask_width; - a->flags_net_byte_order = mp->flags_net_byte_order; - a->is_del = (mp->is_add == 0); - a->rx_table_id = ntohl (mp->outer_vrf_id); - a->tx_table_id = ntohl (mp->inner_vrf_id); - - a->name = format (0, "%s", mp->name); - if (!(vec_len (a->name))) - a->name = 0; - - a->policy_name = format (0, "%s", mp->policy_name); - if (!(vec_len (a->policy_name))) - a->policy_name = 0; - - /* Yank segments and tags out of the API message */ - this_address = (ip6_address_t *) mp->segs_and_tags; - for (i = 0; i < mp->n_segments; i++) - { - vec_add2 (segments, seg, 1); - clib_memcpy (seg->as_u8, this_address->as_u8, sizeof (*this_address)); - this_address++; - } - for (i = 0; i < mp->n_tags; i++) - { - vec_add2 (tags, tag, 1); - clib_memcpy (tag->as_u8, this_address->as_u8, sizeof (*this_address)); - this_address++; - } - - a->segments = segments; - a->tags = tags; - - rv = ip6_sr_add_del_tunnel (a); - -out: - - REPLY_MACRO (VL_API_SR_TUNNEL_ADD_DEL_REPLY); -#endif -} - -static void vl_api_sr_policy_add_del_t_handler - (vl_api_sr_policy_add_del_t * mp) -{ -#if IP6SR == 0 - clib_warning ("unimplemented"); -#else - ip6_sr_add_del_policy_args_t _a, *a = &_a; - int rv = 0; - vl_api_sr_policy_add_del_reply_t *rmp; - int i; - - memset (a, 0, sizeof (*a)); - a->is_del = (mp->is_add == 0); - - a->name = format (0, "%s", mp->name); - if (!(vec_len (a->name))) - { - rv = VNET_API_ERROR_NO_SUCH_NODE2; - goto out; - } - - if (!(mp->tunnel_names[0])) - { - rv = VNET_API_ERROR_NO_SUCH_NODE2; - goto out; - } - - // start deserializing tunnel_names - int num_tunnels = mp->tunnel_names[0]; //number of tunnels - u8 *deser_tun_names = mp->tunnel_names; - deser_tun_names += 1; //moving along - - u8 *tun_name = 0; - int tun_name_len = 0; - - for (i = 0; i < 
num_tunnels; i++) - { - tun_name_len = *deser_tun_names; - deser_tun_names += 1; - vec_resize (tun_name, tun_name_len); - memcpy (tun_name, deser_tun_names, tun_name_len); - vec_add1 (a->tunnel_names, tun_name); - deser_tun_names += tun_name_len; - tun_name = 0; - } - - rv = ip6_sr_add_del_policy (a); - -out: - - REPLY_MACRO (VL_API_SR_POLICY_ADD_DEL_REPLY); -#endif -} - -static void vl_api_sr_multicast_map_add_del_t_handler - (vl_api_sr_multicast_map_add_del_t * mp) -{ -#if IP6SR == 0 - clib_warning ("unimplemented"); -#else - ip6_sr_add_del_multicastmap_args_t _a, *a = &_a; - int rv = 0; - vl_api_sr_multicast_map_add_del_reply_t *rmp; - - memset (a, 0, sizeof (*a)); - a->is_del = (mp->is_add == 0); - - a->multicast_address = (ip6_address_t *) & mp->multicast_address; - a->policy_name = format (0, "%s", mp->policy_name); - - if (a->multicast_address == 0) - { - rv = -1; - goto out; - } - - if (!(a->policy_name)) - { - rv = -2; - goto out; - } - -#if DPDK > 0 /* Cannot call replicate without DPDK */ - rv = ip6_sr_add_del_multicastmap (a); -#else - clib_warning ("multicast replication without DPDK not implemented"); - rv = VNET_API_ERROR_UNIMPLEMENTED; -#endif /* DPDK */ - -out: - - REPLY_MACRO (VL_API_SR_MULTICAST_MAP_ADD_DEL_REPLY); -#endif -} - #define foreach_classify_add_del_table_field \ _(table_index) \ _(nbuckets) \ @@ -2246,21 +1571,6 @@ static void vl_api_classify_set_interface_l2_tables_t_handler REPLY_MACRO (VL_API_CLASSIFY_SET_INTERFACE_L2_TABLES_REPLY); } -static void -vl_api_l2_fib_clear_table_t_handler (vl_api_l2_fib_clear_table_t * mp) -{ - int rv = 0; - vl_api_l2_fib_clear_table_reply_t *rmp; - - /* DAW-FIXME: This API should only clear non-static l2fib entries, but - * that is not currently implemented. When that TODO is fixed - * this call should be changed to pass 1 instead of 0. 
- */ - l2fib_clear_table (0); - - REPLY_MACRO (VL_API_L2_FIB_CLEAR_TABLE_REPLY); -} - extern void l2_efp_filter_configure (vnet_main_t * vnet_main, u32 sw_if_index, u32 enable); @@ -2321,76 +1631,6 @@ static void REPLY_MACRO (VL_API_L2_INTERFACE_VLAN_TAG_REWRITE_REPLY); } -static void -vl_api_l2_fib_table_entry_t_handler (vl_api_l2_fib_table_entry_t * mp) -{ - clib_warning ("BUG"); -} - -static void -send_l2fib_table_entry (vpe_api_main_t * am, - unix_shared_memory_queue_t * q, - l2fib_entry_key_t * l2fe_key, - l2fib_entry_result_t * l2fe_res, u32 context) -{ - vl_api_l2_fib_table_entry_t *mp; - - mp = vl_msg_api_alloc (sizeof (*mp)); - memset (mp, 0, sizeof (*mp)); - mp->_vl_msg_id = ntohs (VL_API_L2_FIB_TABLE_ENTRY); - - mp->bd_id = - ntohl (l2input_main.bd_configs[l2fe_key->fields.bd_index].bd_id); - - mp->mac = l2fib_make_key (l2fe_key->fields.mac, 0); - mp->sw_if_index = ntohl (l2fe_res->fields.sw_if_index); - mp->static_mac = l2fe_res->fields.static_mac; - mp->filter_mac = l2fe_res->fields.filter; - mp->bvi_mac = l2fe_res->fields.bvi; - mp->context = context; - - vl_msg_api_send_shmem (q, (u8 *) & mp); -} - -static void -vl_api_l2_fib_table_dump_t_handler (vl_api_l2_fib_table_dump_t * mp) -{ - vpe_api_main_t *am = &vpe_api_main; - bd_main_t *bdm = &bd_main; - l2fib_entry_key_t *l2fe_key = NULL; - l2fib_entry_result_t *l2fe_res = NULL; - u32 ni, bd_id = ntohl (mp->bd_id); - u32 bd_index; - unix_shared_memory_queue_t *q; - uword *p; - - q = vl_api_client_index_to_input_queue (mp->client_index); - if (q == 0) - return; - - /* see l2fib_table_dump: ~0 means "any" */ - if (bd_id == ~0) - bd_index = ~0; - else - { - p = hash_get (bdm->bd_index_by_bd_id, bd_id); - if (p == 0) - return; - - bd_index = p[0]; - } - - l2fib_table_dump (bd_index, &l2fe_key, &l2fe_res); - - vec_foreach_index (ni, l2fe_key) - { - send_l2fib_table_entry (am, q, vec_elt_at_index (l2fe_key, ni), - vec_elt_at_index (l2fe_res, ni), mp->context); - } - vec_free (l2fe_key); - vec_free (l2fe_res); -} - static void vl_api_show_version_t_handler (vl_api_show_version_t * mp) { @@ -3357,167 +2597,6 @@ vl_api_policer_classify_dump_t_handler (vl_api_policer_classify_dump_t * mp) } } -static void -vl_api_mpls_tunnel_details_t_handler (vl_api_mpls_fib_details_t * mp) -{ - clib_warning ("BUG"); -} - -typedef struct mpls_tunnel_send_walk_ctx_t_ -{ - unix_shared_memory_queue_t *q; - u32 index; - u32 context; -} mpls_tunnel_send_walk_ctx_t; - -static void -send_mpls_tunnel_entry (u32 mti, void *arg) -{ - mpls_tunnel_send_walk_ctx_t *ctx; - vl_api_mpls_tunnel_details_t *mp; - const mpls_tunnel_t *mt; - u32 nlabels; - - ctx = arg; - - if (~0 != ctx->index && mti != ctx->index) - return; - - mt = mpls_tunnel_get (mti); - nlabels = vec_len (mt->mt_label_stack); - - mp = vl_msg_api_alloc (sizeof (*mp) + nlabels * sizeof (u32)); - memset (mp, 0, sizeof (*mp)); - mp->_vl_msg_id = ntohs (VL_API_MPLS_TUNNEL_DETAILS); - mp->context = ctx->context; - - mp->tunnel_index = ntohl (mti); - memcpy (mp->mt_next_hop_out_labels, - mt->mt_label_stack, nlabels * sizeof (u32)); - - // FIXME - - vl_msg_api_send_shmem (ctx->q, (u8 *) & mp); -} - -static void -vl_api_mpls_tunnel_dump_t_handler (vl_api_mpls_tunnel_dump_t * mp) -{ - unix_shared_memory_queue_t *q; - - q = vl_api_client_index_to_input_queue (mp->client_index); - if (q == 0) - return; - - mpls_tunnel_send_walk_ctx_t ctx = { - .q = q, - .index = ntohl (mp->tunnel_index), - .context = mp->context, - }; - mpls_tunnel_walk (send_mpls_tunnel_entry, &ctx); -} - -static void 
-vl_api_mpls_fib_details_t_handler (vl_api_mpls_fib_details_t * mp) -{ - clib_warning ("BUG"); -} - -static void -vl_api_mpls_fib_details_t_endian (vl_api_mpls_fib_details_t * mp) -{ - clib_warning ("BUG"); -} - -static void -vl_api_mpls_fib_details_t_print (vl_api_mpls_fib_details_t * mp) -{ - clib_warning ("BUG"); -} - -static void -send_mpls_fib_details (vpe_api_main_t * am, - unix_shared_memory_queue_t * q, - u32 table_id, u32 label, u32 eos, - fib_route_path_encode_t * api_rpaths, u32 context) -{ - vl_api_mpls_fib_details_t *mp; - fib_route_path_encode_t *api_rpath; - vl_api_fib_path2_t *fp; - int path_count; - - path_count = vec_len (api_rpaths); - mp = vl_msg_api_alloc (sizeof (*mp) + path_count * sizeof (*fp)); - if (!mp) - return; - memset (mp, 0, sizeof (*mp)); - mp->_vl_msg_id = ntohs (VL_API_MPLS_FIB_DETAILS); - mp->context = context; - - mp->table_id = htonl (table_id); - mp->eos_bit = eos; - mp->label = htonl (label); - - mp->count = htonl (path_count); - fp = mp->path; - vec_foreach (api_rpath, api_rpaths) - { - memset (fp, 0, sizeof (*fp)); - fp->weight = htonl (api_rpath->rpath.frp_weight); - fp->sw_if_index = htonl (api_rpath->rpath.frp_sw_if_index); - copy_fib_next_hop (api_rpath, fp); - fp++; - } - - vl_msg_api_send_shmem (q, (u8 *) & mp); -} - -static void -vl_api_mpls_fib_dump_t_handler (vl_api_mpls_fib_dump_t * mp) -{ - vpe_api_main_t *am = &vpe_api_main; - unix_shared_memory_queue_t *q; - mpls_main_t *mm = &mpls_main; - fib_table_t *fib_table; - fib_node_index_t lfei, *lfeip, *lfeis = NULL; - mpls_label_t key; - fib_prefix_t pfx; - u32 fib_index; - fib_route_path_encode_t *api_rpaths; - - q = vl_api_client_index_to_input_queue (mp->client_index); - if (q == 0) - return; - - /* *INDENT-OFF* */ - pool_foreach (fib_table, mm->fibs, - ({ - hash_foreach(key, lfei, fib_table->mpls.mf_entries, - ({ - vec_add1(lfeis, lfei); - })); - })); - vec_sort_with_function(lfeis, fib_entry_cmp_for_sort); - - vec_foreach(lfeip, lfeis) - { - fib_entry_get_prefix(*lfeip, &pfx); - fib_index = fib_entry_get_fib_index(*lfeip); - fib_table = fib_table_get(fib_index, pfx.fp_proto); - api_rpaths = NULL; - fib_entry_encode(*lfeip, &api_rpaths); - send_mpls_fib_details (am, q, - fib_table->ft_table_id, - pfx.fp_label, - pfx.fp_eos, - api_rpaths, - mp->context); - vec_free(api_rpaths); - } - - vec_free (lfeis); -} - static void vl_api_classify_table_ids_t_handler (vl_api_classify_table_ids_t * mp) { @@ -4487,32 +3566,8 @@ vpe_api_hookup (vlib_main_t * vm) #undef _ /* - * Manually register the sr tunnel add del msg, so we trace - * enough bytes to capture a typical segment list - */ - vl_msg_api_set_handlers (VL_API_SR_TUNNEL_ADD_DEL, - "sr_tunnel_add_del", - vl_api_sr_tunnel_add_del_t_handler, - vl_noop_handler, - vl_api_sr_tunnel_add_del_t_endian, - vl_api_sr_tunnel_add_del_t_print, 256, 1); - - - /* - * Manually register the sr policy add del msg, so we trace - * enough bytes to capture a typical tunnel name list - */ - vl_msg_api_set_handlers (VL_API_SR_POLICY_ADD_DEL, - "sr_policy_add_del", - vl_api_sr_policy_add_del_t_handler, - vl_noop_handler, - vl_api_sr_policy_add_del_t_endian, - vl_api_sr_policy_add_del_t_print, 256, 1); - - /* - * Trace space for 8 MPLS encap labels, classifier mask+match + * Trace space for classifier mask+match */ - am->api_trace_cfg[VL_API_MPLS_TUNNEL_ADD_DEL].size += 8 * sizeof (u32); am->api_trace_cfg[VL_API_CLASSIFY_ADD_DEL_TABLE].size += 5 * sizeof (u32x4); am->api_trace_cfg[VL_API_CLASSIFY_ADD_DEL_SESSION].size += 5 * sizeof (u32x4); diff --git 
a/src/vpp/api/vpe.api b/src/vpp/api/vpe.api index 3e4bcdf9..e784fa01 100644 --- a/src/vpp/api/vpe.api +++ b/src/vpp/api/vpe.api @@ -22,21 +22,24 @@ /* * Note: API placement cleanup in progress * If you're looking for interface APIs, please - * see .../vnet/vnet/{interface.api,interface_api.c} - * IP APIs: see .../vnet/vnet/ip/{ip.api, ip_api.c} - * TAP APIs: see .../vnet/vnet/unix/{tap.api, tap_api.c} - * VXLAN APIs: see .../vnet/vnet/vxlan/{vxlan.api, vxlan_api.c} + * see .../src/vnet/{interface.api,interface_api.c} + * IP APIs: see .../src/vnet/ip/{ip.api, ip_api.c} + * TAP APIs: see .../src/vnet/unix/{tap.api, tap_api.c} + * VXLAN APIs: see .../src/vnet/vxlan/{vxlan.api, vxlan_api.c} * AF-PACKET APIs: ... see /vnet/devices/af_packet/{af_packet.api, af_packet_api.c} - * NETMAP APIs: see ... /vnet/vnet/devices/netmap/{netmap.api, netmap_api.c} + * NETMAP APIs: see ... /src/vnet/devices/netmap/{netmap.api, netmap_api.c} * VHOST-USER APIs: see .../vnet/devices/virtio/{vhost_user.api, vhost_user_api.c} - * VXLAN GPE APIs: see .../vnet/vnet/vxlan-gpe/{vxlan_gpe.api, vxlan_gpe_api.c} - * GRE APIs: see .../vnet/vnet/gre/{gre.api, gre_api.c} - * L2TP APIs: see .../vnet/vnet/l2tp/{l2tp.api, l2tp_api.c} - * BFD APIs: see .../vnet/vnet/bfd/{bfd.api, bfd_api.c} - * IPSEC APIs: see .../vnet/vnet/ipsec/{ipsec.api, ipsec_api.c} - * IPSEC-GRE APIs: see .../vnet/vnet/ipsec-gre/{ipsec_gre.api, ipsec_gre_api.c} - * LISP APIs: see .../vnet/vnet/lisp/{lisp.api, lisp_api.c} - * LISP-GPE APIs: see .../vnet/vnet/lisp-gpe/{lisp_gpe.api, lisp_gpe_api.c} + * VXLAN GPE APIs: see .../src/vnet/vxlan-gpe/{vxlan_gpe.api, vxlan_gpe_api.c} + * GRE APIs: see .../src/vnet/gre/{gre.api, gre_api.c} + * L2 APIs: see .../src/vnet/l2/{l2.api, l2_api.c} + * L2TP APIs: see .../src/vnet/l2tp/{l2tp.api, l2tp_api.c} + * BFD APIs: see .../src/vnet/bfd/{bfd.api, bfd_api.c} + * IPSEC APIs: see .../src/vnet/ipsec/{ipsec.api, ipsec_api.c} + * IPSEC-GRE APIs: see .../src/vnet/ipsec-gre/{ipsec_gre.api, ipsec_gre_api.c} + * LISP APIs: see .../src/vnet/lisp/{lisp.api, lisp_api.c} + * LISP-GPE APIs: see .../src/vnet/lisp-gpe/{lisp_gpe.api, lisp_gpe_api.c} + * MPLS APIs: see .../src/vnet/mpls/{mpls.api, mpls_api.c} + * SR APIs: see .../src/vnet/sr/{sr.api, sr_api.c} * DPDK APIs: ... see /src/vnet/devices/dpdk/{dpdk.api, dpdk_api.c} */ @@ -90,231 +93,6 @@ define sw_interface_set_mpls_enable_reply i32 retval; }; -/** \brief MPLS Route Add / del route - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param mr_label - The MPLS label value - @param mr_eos - The End of stack bit - @param mr_table_id - The MPLS table-id the route is added in - @param mr_classify_table_index - If this is a classify route, - this is the classify table index - @param mr_create_table_if_needed - If the MPLS or IP tables do not exist, - create them - @param mr_is_add - Is this a route add or delete - @param mr_is_classify - Is this route result a classify - @param mr_is_multipath - Is this route update a multipath - i.e. 
is this - a path addition to an existing route - @param mr_is_resolve_host - Recurse resolution constraint via a host prefix - @param mr_is_resolve_attached - Recurse resolution constraint via attached prefix - @param mr_next_hop_proto_is_ip4 - The next-hop is IPV4 - @param mr_next_hop_weight - The weight, for UCMP - @param mr_next_hop[16] - the nextop address - @param mr_next_hop_sw_if_index - the next-hop SW interface - @param mr_next_hop_table_id - the next-hop table-id (if appropriate) - @param mr_next_hop_n_out_labels - the number of labels in the label stack - @param mr_next_hop_out_label_stack - the next-hop output label stack, outer most first - @param next_hop_via_label - The next-hop is a resolved via a local label -*/ -define mpls_route_add_del -{ - u32 client_index; - u32 context; - u32 mr_label; - u8 mr_eos; - u32 mr_table_id; - u32 mr_classify_table_index; - u8 mr_create_table_if_needed; - u8 mr_is_add; - u8 mr_is_classify; - u8 mr_is_multipath; - u8 mr_is_resolve_host; - u8 mr_is_resolve_attached; - u8 mr_next_hop_proto_is_ip4; - u8 mr_next_hop_weight; - u8 mr_next_hop[16]; - u8 mr_next_hop_n_out_labels; - u32 mr_next_hop_sw_if_index; - u32 mr_next_hop_table_id; - u32 mr_next_hop_via_label; - u32 mr_next_hop_out_label_stack[mr_next_hop_n_out_labels]; -}; - -/** \brief Reply for MPLS route add / del request - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define mpls_route_add_del_reply -{ - u32 context; - i32 retval; -}; - -/** \brief Dump MPLS fib table - @param client_index - opaque cookie to identify the sender -*/ -define mpls_fib_dump -{ - u32 client_index; - u32 context; -}; - -/** \brief FIB path - @param sw_if_index - index of the interface - @param weight - The weight, for UCMP - @param is_local - local if non-zero, else remote - @param is_drop - Drop the packet - @param is_unreach - Drop the packet and rate limit send ICMP unreachable - @param is_prohibit - Drop the packet and rate limit send ICMP prohibited - @param afi - the afi of the next hop, IP46_TYPE_IP4=1, IP46_TYPE_IP6=2 - @param next_hop[16] - the next hop address - - WARNING: this type is replicated, pending cleanup completion - -*/ -typeonly manual_print manual_endian define fib_path2 -{ - u32 sw_if_index; - u32 weight; - u8 is_local; - u8 is_drop; - u8 is_unreach; - u8 is_prohibit; - u8 afi; - u8 next_hop[16]; -}; - -/** \brief mpls FIB table response - @param table_id - MPLS fib table id - @param s_bit - End-of-stack bit - @param label - MPLS label value - @param count - the number of fib_path in path - @param path - array of of fib_path structures -*/ -manual_endian manual_print define mpls_fib_details -{ - u32 context; - u32 table_id; - u8 eos_bit; - u32 label; - u32 count; - vl_api_fib_path2_t path[count]; -}; - -/** \brief Bind/Unbind an MPLS local label to an IP prefix. i.e. create - a per-prefix label entry. - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param mb_mpls_table_id - The MPLS table-id the MPLS entry will be added in - @param mb_label - The MPLS label value to bind - @param mb_ip_table_id - The IP table-id of the IP prefix to bind to. - @param mb_create_table_if_needed - Create either/both tables if required. 
- @param mb_is_bind - Bind or unbind - @param mb_is_ip4 - The prefix to bind to is IPv4 - @param mb_address_length - Length of IP prefix - @param mb_address[16] - IP prefix/ -*/ -define mpls_ip_bind_unbind -{ - u32 client_index; - u32 context; - u32 mb_mpls_table_id; - u32 mb_label; - u32 mb_ip_table_id; - u8 mb_create_table_if_needed; - u8 mb_is_bind; - u8 mb_is_ip4; - u8 mb_address_length; - u8 mb_address[16]; -}; - -/** \brief Reply for MPLS IP bind/unbind request - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define mpls_ip_bind_unbind_reply -{ - u32 context; - i32 retval; -}; - -/** \brief MPLS tunnel Add / del route - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param mt_is_add - Is this a route add or delete - @param mt_sw_if_index - The SW interface index of the tunnel to delete - @param mt_next_hop_proto_is_ip4 - The next-hop is IPV4 - @param mt_next_hop_weight - The weight, for UCMP - @param mt_next_hop[16] - the nextop address - @param mt_next_hop_sw_if_index - the next-hop SW interface - @param mt_next_hop_table_id - the next-hop table-id (if appropriate) - @param mt_next_hop_n_out_labels - the number of next-hop output labels - @param mt_next_hop_out_label_stack - the next-hop output label stack, outer most first -*/ -define mpls_tunnel_add_del -{ - u32 client_index; - u32 context; - u32 mt_sw_if_index; - u8 mt_is_add; - u8 mt_l2_only; - u8 mt_next_hop_proto_is_ip4; - u8 mt_next_hop_weight; - u8 mt_next_hop[16]; - u8 mt_next_hop_n_out_labels; - u32 mt_next_hop_sw_if_index; - u32 mt_next_hop_table_id; - u32 mt_next_hop_out_label_stack[mt_next_hop_n_out_labels]; -}; - -/** \brief Reply for MPLS tunnel add / del request - @param context - returned sender context, to match reply w/ request - @param retval - return code - @param sw_if_index - SW interface index of the tunnel created -*/ -define mpls_tunnel_add_del_reply -{ - u32 context; - i32 retval; - u32 sw_if_index; -}; - -/** \brief Dump mpls eth tunnel table - @param client_index - opaque cookie to identify the sender - @param tunnel_index - eth tunnel identifier or -1 in case of all tunnels -*/ -define mpls_tunnel_dump -{ - u32 client_index; - u32 context; - i32 tunnel_index; -}; - -/** \brief mpls eth tunnel operational state response - @param tunnel_index - eth tunnel identifier - @param intfc_address - interface ipv4 addr - @param mask_width - interface ipv4 addr mask - @param hw_if_index - interface id - @param l2_only - - @param tunnel_dst_mac - - @param tx_sw_if_index - - @param encap_index - reference to mpls label table - @param nlabels - number of resolved labels - @param labels - resolved labels -*/ -define mpls_tunnel_details -{ - u32 context; - u32 tunnel_index; - u8 mt_l2_only; - u8 mt_sw_if_index; - u8 mt_next_hop_proto_is_ip4; - u8 mt_next_hop[16]; - u32 mt_next_hop_sw_if_index; - u32 mt_next_hop_table_id; - u32 mt_next_hop_n_labels; - u32 mt_next_hop_out_labels[mt_next_hop_n_labels]; -}; - /** \brief Proxy ARP add / del request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -797,102 +575,6 @@ define l2_patch_add_del_reply i32 retval; }; -/** \brief IPv6 segment routing tunnel add / del request - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param is_add - add the tunnel if non-zero, else delete it - @param name[] - tunnel name 
(len. 64) - @param src_address[] - - @param dst_address[] - - @param dst_mask_width - - @param inner_vrf_id - - @param outer_vrf_id - - @param flags_net_byte_order - - @param n_segments - - @param n_tags - - @param segs_and_tags[] - - @param policy_name[] - name of policy to associate this tunnel to (len. 64) -*/ -define sr_tunnel_add_del -{ - u32 client_index; - u32 context; - u8 is_add; - u8 name[64]; - u8 src_address[16]; - u8 dst_address[16]; - u8 dst_mask_width; - u32 inner_vrf_id; - u32 outer_vrf_id; - u16 flags_net_byte_order; - u8 n_segments; - u8 n_tags; - u8 policy_name[64]; - u8 segs_and_tags[0]; -}; - -/** \brief IPv6 segment routing tunnel add / del response - @param context - sender context, to match reply w/ request - @param retval - return value for request -*/ -define sr_tunnel_add_del_reply -{ - u32 context; - i32 retval; -}; - -/** \brief IPv6 segment routing policy add / del request - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param is_add - add the tunnel if non-zero, else delete it - @param name[] - policy name (len. 64) - @param tunnel_names[] - -*/ -define sr_policy_add_del -{ - u32 client_index; - u32 context; - u8 is_add; - u8 name[64]; - u8 tunnel_names[0]; -}; - -/** \brief IPv6 segment routing policy add / del response - @param context - sender context, to match reply w/ request - @param retval - return value for request -*/ -define sr_policy_add_del_reply -{ - u32 context; - i32 retval; -}; - -/** \brief IPv6 segment routing multicast map to policy add / del request - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param is_add - add the tunnel if non-zero, else delete it - @param multicast_address[] - IP6 multicast address - @param policy_name[] = policy name (len.64) -*/ -define sr_multicast_map_add_del -{ - u32 client_index; - u32 context; - u8 is_add; - u8 multicast_address[16]; - u8 policy_name[64]; -}; - -/** \brief IPv6 segment routing multicast map to policy add / del response - @param context - sender context, to match reply w/ request - @param retval - return value for request -*/ -define sr_multicast_map_add_del_reply -{ - u32 context; - i32 retval; -}; - /** \brief Interface set vpath request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -973,95 +655,6 @@ define sw_interface_set_l2_bridge_reply i32 retval; }; -/** \brief L2 FIB add entry request - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param mac - the entry's mac address - @param bd_id - the entry's bridge domain id - @param sw_if_index - the interface - @param is_add - If non zero add the entry, else delete it - @param static_mac - - @param filter_mac - -*/ -define l2fib_add_del -{ - u32 client_index; - u32 context; - u64 mac; - u32 bd_id; - u32 sw_if_index; - u8 is_add; - u8 static_mac; - u8 filter_mac; - u8 bvi_mac; -}; - -/** \brief L2 FIB add entry response - @param context - sender context, to match reply w/ request - @param retval - return code for the add l2fib entry request -*/ -define l2fib_add_del_reply -{ - u32 context; - i32 retval; -}; - -/** \brief Set L2 flags request !!! 
TODO - need more info, feature bits in l2_input.h - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param sw_if_index - interface - @param is_set - if non-zero, set the bits, else clear them - @param feature_bitmap - non-zero bits to set or clear -*/ -define l2_flags -{ - u32 client_index; - u32 context; - u32 sw_if_index; - u8 is_set; - u32 feature_bitmap; -}; - -/** \brief Set L2 bits response - @param context - sender context, to match reply w/ request - @param retval - return code for the set l2 bits request -*/ -define l2_flags_reply -{ - u32 context; - i32 retval; - u32 resulting_feature_bitmap; -}; - -/** \brief Set bridge flags (such as L2_LEARN, L2_FWD, L2_FLOOD, - L2_UU_FLOOD, or L2_ARP_TERM) request - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param bd_id - the bridge domain to set the flags for - @param is_set - if non-zero, set the flags, else clear them - @param feature_bitmap - bits that are non-zero to set or clear -*/ -define bridge_flags -{ - u32 client_index; - u32 context; - u32 bd_id; - u8 is_set; - u32 feature_bitmap; -}; - -/** \brief Set bridge flags response - @param context - sender context, to match reply w/ request - @param retval - return code for the set bridge flags request - @param resulting_feature_bitmap - the feature bitmap value after the request is implemented -*/ -define bridge_flags_reply -{ - u32 context; - i32 retval; - u32 resulting_feature_bitmap; -}; - /** \brief Set bridge domain ip to mac entry request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -1335,26 +928,6 @@ define dhcp_proxy_config_2_reply i32 retval; }; -/** \brief L2 fib clear table request, clear all mac entries in the l2 fib - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request -*/ -define l2_fib_clear_table -{ - u32 client_index; - u32 context; -}; - -/** \brief L2 fib clear table response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define l2_fib_clear_table_reply -{ - u32 context; - i32 retval; -}; - /** \brief L2 interface ethernet flow point filtering enable/disable request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -1463,36 +1036,6 @@ define show_version_reply u8 build_directory[256]; }; -/** \brief l2 fib table entry structure - @param bd_id - the l2 fib / bridge domain table id - @param mac - the entry's mac address - @param sw_if_index - index of the interface - @param static_mac - the entry is statically configured. - @param filter_mac - the entry is a mac filter entry. 
- @param bvi_mac - the mac address is a bridge virtual interface -*/ -define l2_fib_table_entry -{ - u32 context; - u32 bd_id; - u64 mac; - u32 sw_if_index; - u8 static_mac; - u8 filter_mac; - u8 bvi_mac; -}; - -/** \brief Dump l2 fib (aka bridge domain) table - @param client_index - opaque cookie to identify the sender - @param bd_id - the l2 fib / bridge domain table identifier -*/ -define l2_fib_table_dump -{ - u32 client_index; - u32 context; - u32 bd_id; -}; - /* Gross kludge, DGMS */ define interface_name_renumber { @@ -1600,91 +1143,6 @@ define ip6_nd_event u8 mac_ip; }; -/** \brief L2 bridge domain add or delete request - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param bd_id - the bridge domain to create - @param flood - enable/disable bcast/mcast flooding in the bd - @param uu_flood - enable/disable uknown unicast flood in the bd - @param forward - enable/disable forwarding on all interfaces in the bd - @param learn - enable/disable learning on all interfaces in the bd - @param arp_term - enable/disable arp termination in the bd - @param mac_age - mac aging time in min, 0 for disabled - @param is_add - add or delete flag -*/ -define bridge_domain_add_del -{ - u32 client_index; - u32 context; - u32 bd_id; - u8 flood; - u8 uu_flood; - u8 forward; - u8 learn; - u8 arp_term; - u8 mac_age; - u8 is_add; -}; - -/** \brief L2 bridge domain add or delete response - @param context - sender context, to match reply w/ request - @param retval - return code for the set bridge flags request -*/ -define bridge_domain_add_del_reply -{ - u32 context; - i32 retval; -}; - -/** \brief L2 bridge domain request operational state details - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param bd_id - the bridge domain id desired or ~0 to request all bds -*/ -define bridge_domain_dump -{ - u32 client_index; - u32 context; - u32 bd_id; -}; - -/** \brief L2 bridge domain operational state response - @param bd_id - the bridge domain id - @param flood - bcast/mcast flooding state on all interfaces in the bd - @param uu_flood - uknown unicast flooding state on all interfaces in the bd - @param forward - forwarding state on all interfaces in the bd - @param learn - learning state on all interfaces in the bd - @param arp_term - arp termination state on all interfaces in the bd - @param mac_age - mac aging time in min, 0 for disabled - @param n_sw_ifs - number of sw_if_index's in the domain -*/ -define bridge_domain_details -{ - u32 context; - u32 bd_id; - u8 flood; - u8 uu_flood; - u8 forward; - u8 learn; - u8 arp_term; - u8 mac_age; - u32 bvi_sw_if_index; - u32 n_sw_ifs; -}; - -/** \brief L2 bridge domain sw interface operational state response - @param bd_id - the bridge domain id - @param sw_if_index - sw_if_index in the domain - @param shg - split horizon group for the interface -*/ -define bridge_domain_sw_if_details -{ - u32 context; - u32 bd_id; - u32 sw_if_index; - u8 shg; -}; - /** \brief DHCP Client config add / del request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request -- cgit 1.2.3-korg From a9a951f8e5ed6e172fbfbdbb6cb690c67fa2f715 Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Mon, 16 Jan 2017 22:06:10 +0100 Subject: Add --without-libssl configure parameter This replaces --without-ipsec and --without-ipv6sr and allows other parts of the code to be disabled if libssl is not 
available. Change-Id: Id97ff3685a7924d7f86622952e0405d94ceb5957 Signed-off-by: Damjan Marion --- build-data/platforms/arm32.mk | 4 ++-- build-data/platforms/dpaa2.mk | 8 ++++---- build-data/platforms/qppc.mk | 4 ++-- build-data/platforms/thunder.mk | 4 ++-- src/configure.ac | 6 ++---- src/vnet.am | 6 +++--- src/vnet/ipsec/ipsec_api.c | 24 ++++++++++++------------ src/vnet/ipsec/ipsec_output.c | 2 +- src/vpp/api/api.c | 2 +- 9 files changed, 29 insertions(+), 31 deletions(-) (limited to 'src/vpp/api/api.c') diff --git a/build-data/platforms/arm32.mk b/build-data/platforms/arm32.mk index 7b80061b..47d4ad5a 100644 --- a/build-data/platforms/arm32.mk +++ b/build-data/platforms/arm32.mk @@ -22,8 +22,8 @@ arm32_root_packages = vpp vlib vlib-api vnet svm vpp-api-test \ jvpp gmod vlib_configure_args_arm32 = --with-pre-data=128 -vnet_configure_args_arm32 = --with-dpdk --without-ipsec --without-ipv6sr -vpp_configure_args_arm32 = --with-dpdk --without-ipsec --without-ipv6sr +vnet_configure_args_arm32 = --with-dpdk --without-libssl +vpp_configure_args_arm32 = --with-dpdk --without-libssl arm32_dpdk_arch = "armv7a" arm32_dpdk_target = "arm-armv7a-linuxapp-gcc" diff --git a/build-data/platforms/dpaa2.mk b/build-data/platforms/dpaa2.mk index 0ec627a4..2d4745ac 100644 --- a/build-data/platforms/dpaa2.mk +++ b/build-data/platforms/dpaa2.mk @@ -42,10 +42,10 @@ dpaa2_dpdk_make_extra_args = "CROSS=$(dpaa2_target)- DPDK_PKTMBUF_HEADROOM=256" endif endif -vpp_configure_args_dpaa2 = --with-dpdk --without-ipsec \ - --without-ipv6sr --with-sysroot=$(SYSROOT) -vnet_configure_args_dpaa2 = --with-dpdk --without-ipsec \ - --without-ipv6sr --with-sysroot=$(SYSROOT) +vpp_configure_args_dpaa2 = --with-dpdk --without-libssl \ + --with-sysroot=$(SYSROOT) +vnet_configure_args_dpaa2 = --with-dpdk --without-libssl \ + --with-sysroot=$(SYSROOT) # Set these parameters carefully. The vlib_buffer_t is 256 bytes, i.e. 
vlib_configure_args_dpaa2 = --with-pre-data=256 diff --git a/build-data/platforms/qppc.mk b/build-data/platforms/qppc.mk index 244747e7..983684fc 100644 --- a/build-data/platforms/qppc.mk +++ b/build-data/platforms/qppc.mk @@ -11,10 +11,10 @@ qppc_root_packages = vppinfra vlib vlib-api vnet svm \ vpp vpp-api-test vnet_configure_args_qppc = \ - --without-ipsec --without-ipv6sr + --without-libssl vpp_configure_args_qppc = \ - --without-ipsec --without-ipv6sr + --without-libssl vlib_configure_args_qppc = --with-pre-data=128 diff --git a/build-data/platforms/thunder.mk b/build-data/platforms/thunder.mk index f891f4a1..31b6a510 100644 --- a/build-data/platforms/thunder.mk +++ b/build-data/platforms/thunder.mk @@ -15,10 +15,10 @@ thunder_root_packages = vppinfra vlib-cavium-dpdk vnet-cavium-dpdk cavium-dpdk \ vpp-cavium-dpdk vpp-api-test-cavium-dpdk vnet-cavium-dpdk_configure_args_thunder = \ - --with-dpdk --without-ipsec --without-ipv6sr + --with-dpdk --without-libssl vpp-cavium-dpdk_configure_args_thunder = \ - --with-dpdk --without-ipsec --without-ipv6sr + --with-dpdk --without-libssl cavium-dpdk_configure_args_thunder = --with-headroom=256 diff --git a/src/configure.ac b/src/configure.ac index eb380d8b..b2234448 100644 --- a/src/configure.ac +++ b/src/configure.ac @@ -101,8 +101,7 @@ WITH_ARG(dpdk_crypto, [Use DPDK cryptodev]) WITH_ARG(dpdk_mlx5_pmd, [Use DPDK with mlx5 PMD]) # --without-X -WITHOUT_ARG(ipsec, [Disable IPSec]) -WITHOUT_ARG(ipv6sr, [Disable IPv6 SR]) +WITHOUT_ARG(libssl, [Disable libssl]) WITHOUT_ARG(apicli, [Disable binary api CLI]) AC_ARG_WITH(unix, @@ -133,8 +132,7 @@ AC_SUBST(APICLI, [-DVPP_API_TEST_BUILTIN=${n_with_apicli}]) AC_DEFINE_UNQUOTED(DPDK, [${n_with_dpdk}]) AC_DEFINE_UNQUOTED(DPDK_SHARED_LIB, [${n_enable_dpdk_shared}]) AC_DEFINE_UNQUOTED(DPDK_CRYPTO, [${n_with_dpdk_crypto}]) -AC_DEFINE_UNQUOTED(IPSEC, [${n_with_ipsec}]) -AC_DEFINE_UNQUOTED(IPV6SR, [${n_with_ipv6sr}]) +AC_DEFINE_UNQUOTED(WITH_LIBSSL, [${n_with_libssl}]) # Silence following noise: diff --git a/src/vnet.am b/src/vnet.am index 3b2a25e8..93dd1e6c 100644 --- a/src/vnet.am +++ b/src/vnet.am @@ -26,7 +26,7 @@ libvnet_la_DEPENDENCIES = \ libvnet_la_LIBADD = $(libvnet_la_DEPENDENCIES) -lm -lpthread -ldl -lrt $(DPDK_LD_ADD) libvnet_la_LDFLAGS = $(DPDK_LD_FLAGS) -if WITH_IPV6SR +if WITH_LIBSSL libvnet_la_LIBADD += -lcrypto endif @@ -372,7 +372,7 @@ API_FILES += vnet/bfd/bfd.api ######################################## # Layer 3 protocol: IPSec ######################################## -if WITH_IPSEC +if WITH_LIBSSL libvnet_la_SOURCES += \ vnet/ipsec/ipsec.c \ vnet/ipsec/ipsec_cli.c \ @@ -673,7 +673,7 @@ nobase_include_HEADERS += \ # ipv6 segment routing ######################################## -if WITH_IPV6SR +if WITH_LIBSSL libvnet_la_SOURCES += \ vnet/sr/sr.c \ vnet/sr/sr_replicate.c \ diff --git a/src/vnet/ipsec/ipsec_api.c b/src/vnet/ipsec/ipsec_api.c index 30cc5bd2..9bcf63b4 100644 --- a/src/vnet/ipsec/ipsec_api.c +++ b/src/vnet/ipsec/ipsec_api.c @@ -26,7 +26,7 @@ #include -#if IPSEC > 0 +#if WITH_LIBSSL > 0 #include #include #endif /* IPSEC */ @@ -63,7 +63,7 @@ _(IKEV2_SET_LOCAL_KEY, ikev2_set_local_key) static void vl_api_ipsec_spd_add_del_t_handler (vl_api_ipsec_spd_add_del_t * mp) { -#if IPSEC == 0 +#if WITH_LIBSSL == 0 clib_warning ("unimplemented"); #else @@ -95,7 +95,7 @@ static void vl_api_ipsec_interface_add_del_spd_t_handler VALIDATE_SW_IF_INDEX (mp); -#if IPSEC > 0 +#if WITH_LIBSSL > 0 rv = ipsec_set_interface_spd (vm, sw_if_index, spd_id, mp->is_add); #else rv = VNET_API_ERROR_UNIMPLEMENTED; 
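Every hunk in this file makes the same substitution: the old IPSEC and IPV6SR guards become the single WITH_LIBSSL conditional that configure.ac now defines through WITHOUT_ARG(libssl, ...). A minimal, self-contained sketch of the resulting guard pattern follows; handle_request and ERR_UNIMPLEMENTED are hypothetical stand-ins for a real message handler and VNET_API_ERROR_UNIMPLEMENTED, and WITH_LIBSSL is defaulted here only so the sketch compiles on its own.

#include <stdio.h>

#ifndef WITH_LIBSSL
#define WITH_LIBSSL 1		/* configure normally supplies this value */
#endif

#define ERR_UNIMPLEMENTED (-1)	/* stand-in for VNET_API_ERROR_UNIMPLEMENTED */

static int
handle_request (void)
{
#if WITH_LIBSSL > 0
  return 0;			/* crypto-dependent path is compiled in */
#else
  return ERR_UNIMPLEMENTED;	/* stubbed out when built --without-libssl */
#endif
}

int
main (void)
{
  printf ("rv = %d\n", handle_request ());
  return 0;
}

vnet.am uses the same WITH_LIBSSL conditional to decide whether the IPSec and SR sources are built at all, while the guards in this file only stub out the corresponding API handlers.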
@@ -113,7 +113,7 @@ static void vl_api_ipsec_spd_add_del_entry_t_handler vl_api_ipsec_spd_add_del_entry_reply_t *rmp; int rv; -#if IPSEC > 0 +#if WITH_LIBSSL > 0 ipsec_policy_t p; memset (&p, 0, sizeof (p)); @@ -176,7 +176,7 @@ static void vl_api_ipsec_sad_add_del_entry_t_handler vlib_main_t *vm __attribute__ ((unused)) = vlib_get_main (); vl_api_ipsec_sad_add_del_entry_reply_t *rmp; int rv; -#if IPSEC > 0 +#if WITH_LIBSSL > 0 ipsec_sa_t sa; memset (&sa, 0, sizeof (sa)); @@ -324,7 +324,7 @@ vl_api_ipsec_spd_dump_t_handler (vl_api_ipsec_spd_dump_t * mp) ipsec_spd_t *spd; uword *p; u32 spd_index; -#if IPSEC > 0 +#if WITH_LIBSSL > 0 q = vl_api_client_index_to_input_queue (mp->client_index); if (q == 0) return; @@ -355,7 +355,7 @@ vl_api_ipsec_sa_set_key_t_handler (vl_api_ipsec_sa_set_key_t * mp) vlib_main_t *vm __attribute__ ((unused)) = vlib_get_main (); vl_api_ipsec_sa_set_key_reply_t *rmp; int rv; -#if IPSEC > 0 +#if WITH_LIBSSL > 0 ipsec_sa_t sa; sa.id = ntohl (mp->sa_id); sa.crypto_key_len = mp->crypto_key_length; @@ -377,7 +377,7 @@ vl_api_ikev2_profile_add_del_t_handler (vl_api_ikev2_profile_add_del_t * mp) vl_api_ikev2_profile_add_del_reply_t *rmp; int rv = 0; -#if IPSEC > 0 +#if WITH_LIBSSL > 0 vlib_main_t *vm = vlib_get_main (); clib_error_t *error; u8 *tmp = format (0, "%s", mp->name); @@ -399,7 +399,7 @@ static void vl_api_ikev2_profile_set_auth_reply_t *rmp; int rv = 0; -#if IPSEC > 0 +#if WITH_LIBSSL > 0 vlib_main_t *vm = vlib_get_main (); clib_error_t *error; u8 *tmp = format (0, "%s", mp->name); @@ -423,7 +423,7 @@ vl_api_ikev2_profile_set_id_t_handler (vl_api_ikev2_profile_set_id_t * mp) vl_api_ikev2_profile_add_del_reply_t *rmp; int rv = 0; -#if IPSEC > 0 +#if WITH_LIBSSL > 0 vlib_main_t *vm = vlib_get_main (); clib_error_t *error; u8 *tmp = format (0, "%s", mp->name); @@ -447,7 +447,7 @@ vl_api_ikev2_profile_set_ts_t_handler (vl_api_ikev2_profile_set_ts_t * mp) vl_api_ikev2_profile_set_ts_reply_t *rmp; int rv = 0; -#if IPSEC > 0 +#if WITH_LIBSSL > 0 vlib_main_t *vm = vlib_get_main (); clib_error_t *error; u8 *tmp = format (0, "%s", mp->name); @@ -470,7 +470,7 @@ vl_api_ikev2_set_local_key_t_handler (vl_api_ikev2_set_local_key_t * mp) vl_api_ikev2_profile_set_ts_reply_t *rmp; int rv = 0; -#if IPSEC > 0 +#if WITH_LIBSSL > 0 vlib_main_t *vm = vlib_get_main (); clib_error_t *error; diff --git a/src/vnet/ipsec/ipsec_output.c b/src/vnet/ipsec/ipsec_output.c index 97977899..df93b5e4 100644 --- a/src/vnet/ipsec/ipsec_output.c +++ b/src/vnet/ipsec/ipsec_output.c @@ -27,7 +27,7 @@ #define ESP_NODE "esp-encrypt" #endif -#if IPSEC > 0 +#if WITH_LIBSSL > 0 #define foreach_ipsec_output_next \ _(DROP, "error-drop") \ diff --git a/src/vpp/api/api.c b/src/vpp/api/api.c index 3d6905dd..3afc3383 100644 --- a/src/vpp/api/api.c +++ b/src/vpp/api/api.c @@ -54,7 +54,7 @@ #include #include #include -#if IPV6SR > 0 +#if WITH_LIBSSL > 0 #include #endif #include -- cgit 1.2.3-korg From d2c97d988b2fbc28f0905d1826b428967d09348a Mon Sep 17 00:00:00 2001 From: Pavel Kotucek Date: Tue, 24 Jan 2017 10:58:12 +0100 Subject: API refactoring : classify Change-Id: Ib75197ef8e5057e7f0d9361a10705c3743d05333 Signed-off-by: Pavel Kotucek --- src/vnet.am | 8 +- src/vnet/classify/classify.api | 356 +++++++++++++++++++++++++ src/vnet/classify/classify_api.c | 555 +++++++++++++++++++++++++++++++++++++++ src/vnet/vnet_all_api_h.h | 1 + src/vpp/api/api.c | 456 -------------------------------- src/vpp/api/vpe.api | 340 +----------------------- 6 files changed, 921 insertions(+), 795 deletions(-) create mode 100644 
src/vnet/classify/classify.api create mode 100644 src/vnet/classify/classify_api.c (limited to 'src/vpp/api/api.c') diff --git a/src/vnet.am b/src/vnet.am index 93dd1e6c..660efcf5 100644 --- a/src/vnet.am +++ b/src/vnet.am @@ -270,13 +270,17 @@ libvnet_la_SOURCES += \ vnet/classify/policer_classify.c \ vnet/classify/flow_classify.c \ vnet/classify/flow_classify_node.c \ - vnet/classify/vnet_classify.h + vnet/classify/vnet_classify.h \ + vnet/classify/classify_api.c nobase_include_HEADERS += \ vnet/classify/vnet_classify.h \ vnet/classify/input_acl.h \ vnet/classify/policer_classify.h \ - vnet/classify/flow_classify.h + vnet/classify/flow_classify.h \ + vnet/classify/classify.api.h + +API_FILES += vnet/classify/classify.api ######################################## # Layer 3 protocols go here diff --git a/src/vnet/classify/classify.api b/src/vnet/classify/classify.api new file mode 100644 index 00000000..51ebd6c8 --- /dev/null +++ b/src/vnet/classify/classify.api @@ -0,0 +1,356 @@ +/* + * Copyright (c) 2015-2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** \brief Add/Delete classification table request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_add- if non-zero add the table, else delete it + @param del_chain - if non-zero delete the whole chain of tables + @param table_index - if add, reuturns index of the created table, else specifies the table to delete + @param nbuckets - number of buckets when adding a table + @param memory_size - memory size when adding a table + @param match_n_vectors - number of match vectors + @param next_table_index - index of next table + @param miss_next_index - index of miss table + @param current_data_flag - option to use current node's packet payload + as the starting point from where packets are classified, + This option is only valid for L2/L3 input ACL for now. + 0: by default, classify data from the buffer's start location + 1: classify packets from VPP node’s current data pointer + @param current_data_offset - a signed value to shift the start location of + the packet to be classified + For example, if input IP ACL node is used, L2 header’s first byte + can be accessible by configuring current_data_offset to -14 + if there is no vlan tag. + This is valid only if current_data_flag is set to 1. 
+ @param mask[] - match mask +*/ +define classify_add_del_table +{ + u32 client_index; + u32 context; + u8 is_add; + u8 del_chain; + u32 table_index; + u32 nbuckets; + u32 memory_size; + u32 skip_n_vectors; + u32 match_n_vectors; + u32 next_table_index; + u32 miss_next_index; + u32 current_data_flag; + i32 current_data_offset; + u8 mask[0]; +}; + +/** \brief Add/Delete classification table response + @param context - sender context, to match reply w/ request + @param retval - return code for the table add/del requst + @param new_table_index - for add, returned index of the new table + @param skip_n_vectors - for add, returned value of skip_n_vectors in table + @param match_n_vectors -for add, returned value of match_n_vectors in table +*/ +define classify_add_del_table_reply +{ + u32 context; + i32 retval; + u32 new_table_index; + u32 skip_n_vectors; + u32 match_n_vectors; +}; + +/** \brief Classify add / del session request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_add - add session if non-zero, else delete + @param table_index - index of the table to add/del the session, required + @param hit_next_index - for add, hit_next_index of new session, required + @param opaque_index - for add, opaque_index of new session + @param advance -for add, advance value for session + @param action - + 0: no action (by default) + metadata is not used. + 1: Classified IP packets will be looked up from the + specified ipv4 fib table (configured by metadata as VRF id). + Only valid for L3 input ACL node + 2: Classified IP packets will be looked up from the + specified ipv6 fib table (configured by metadata as VRF id). + Only valid for L3 input ACL node + @param metadata - valid only if action != 0 + VRF id if action is 1 or 2. + @param match[] - for add, match value for session, required +*/ +define classify_add_del_session +{ + u32 client_index; + u32 context; + u8 is_add; + u32 table_index; + u32 hit_next_index; + u32 opaque_index; + i32 advance; + u8 action; + u32 metadata; + u8 match[0]; +}; + +/** \brief Classify add / del session response + @param context - sender context, to match reply w/ request + @param retval - return code for the add/del session request +*/ +define classify_add_del_session_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Set/unset policer classify interface + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - interface to set/unset policer classify + @param ip4_table_index - ip4 classify table index (~0 for skip) + @param ip6_table_index - ip6 classify table index (~0 for skip) + @param l2_table_index - l2 classify table index (~0 for skip) + @param is_add - Set if non-zero, else unset + Note: User is recommeneded to use just one valid table_index per call. 
+ (ip4_table_index, ip6_table_index, or l2_table_index) +*/ +define policer_classify_set_interface +{ + u32 client_index; + u32 context; + u32 sw_if_index; + u32 ip4_table_index; + u32 ip6_table_index; + u32 l2_table_index; + u8 is_add; +}; + +/** \brief Set/unset policer classify interface response + @param context - sender context, to match reply w/ request + @param retval - return value for request +*/ +define policer_classify_set_interface_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Get list of policer classify interfaces and tables + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param type - classify table type +*/ +define policer_classify_dump +{ + u32 client_index; + u32 context; + u8 type; +}; + +/** \brief Policer iclassify operational state response. + @param context - sender context, to match reply w/ request + @param sw_if_index - software interface index + @param table_index - classify table index +*/ +define policer_classify_details +{ + u32 context; + u32 sw_if_index; + u32 table_index; +}; + +/** \brief Classify get table IDs request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request +*/ +define classify_table_ids +{ + u32 client_index; + u32 context; +}; + +/** \brief Reply for classify get table IDs request + @param context - sender context which was passed in the request + @param count - number of ids returned in response + @param ids - array of classify table ids +*/ +define classify_table_ids_reply +{ + u32 context; + i32 retval; + u32 count; + u32 ids[count]; +}; + +/** \brief Classify table ids by interface index request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - index of the interface +*/ +define classify_table_by_interface +{ + u32 client_index; + u32 context; + u32 sw_if_index; +}; + +/** \brief Reply for classify table id by interface index request + @param context - sender context which was passed in the request + @param count - number of ids returned in response + @param sw_if_index - index of the interface + @param l2_table_id - l2 classify table index + @param ip4_table_id - ip4 classify table index + @param ip6_table_id - ip6 classify table index +*/ +define classify_table_by_interface_reply +{ + u32 context; + i32 retval; + u32 sw_if_index; + u32 l2_table_id; + u32 ip4_table_id; + u32 ip6_table_id; +}; + +/** \brief Classify table info + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param table_id - classify table index +*/ +define classify_table_info +{ + u32 client_index; + u32 context; + u32 table_id; +}; + +/** \brief Reply for classify table info request + @param context - sender context which was passed in the request + @param count - number of ids returned in response + @param table_id - classify table index + @param nbuckets - number of buckets when adding a table + @param match_n_vectors - number of match vectors + @param skip_n_vectors - number of skip_n_vectors + @param active_sessions - number of sessions (active entries) + @param next_table_index - index of next table + @param miss_next_index - index of miss table + @param mask[] - match mask +*/ +define classify_table_info_reply +{ + u32 context; + i32 retval; + u32 table_id; + u32 nbuckets; + u32 match_n_vectors; + u32 skip_n_vectors; + u32 active_sessions; + 
u32 next_table_index; + u32 miss_next_index; + u32 mask_length; + u8 mask[mask_length]; +}; + +/** \brief Classify sessions dump request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param table_id - classify table index +*/ +define classify_session_dump +{ + u32 client_index; + u32 context; + u32 table_id; +}; + +/** \brief Reply for classify table session dump request + @param context - sender context which was passed in the request + @param count - number of ids returned in response + @param table_id - classify table index + @param hit_next_index - hit_next_index of session + @param opaque_index - for add, opaque_index of session + @param advance - advance value of session + @param match[] - match value for session +*/ +define classify_session_details +{ + u32 context; + i32 retval; + u32 table_id; + u32 hit_next_index; + i32 advance; + u32 opaque_index; + u32 match_length; + u8 match[match_length]; +}; + +/** \brief Set/unset flow classify interface + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - interface to set/unset flow classify + @param ip4_table_index - ip4 classify table index (~0 for skip) + @param ip6_table_index - ip6 classify table index (~0 for skip) + @param l2_table_index - l2 classify table index (~0 for skip) + @param is_add - Set if non-zero, else unset + Note: User is recommeneded to use just one valid table_index per call. + (ip4_table_index, ip6_table_index, or l2_table_index) +*/ +define flow_classify_set_interface { + u32 client_index; + u32 context; + u32 sw_if_index; + u32 ip4_table_index; + u32 ip6_table_index; + u8 is_add; +}; + +/** \brief Set/unset flow classify interface response + @param context - sender context, to match reply w/ request + @param retval - return value for request +*/ +define flow_classify_set_interface_reply { + u32 context; + i32 retval; +}; + +/** \brief Get list of flow classify interfaces and tables + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param type - classify table type +*/ +define flow_classify_dump { + u32 client_index; + u32 context; + u8 type; +}; + +/** \brief Flow classify operational state response. + @param context - sender context, to match reply w/ request + @param sw_if_index - software interface index + @param table_index - classify table index +*/ +define flow_classify_details { + u32 context; + u32 sw_if_index; + u32 table_index; +}; + +/* + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ + \ No newline at end of file diff --git a/src/vnet/classify/classify_api.c b/src/vnet/classify/classify_api.c new file mode 100644 index 00000000..77a8b434 --- /dev/null +++ b/src/vnet/classify/classify_api.c @@ -0,0 +1,555 @@ +/* + *------------------------------------------------------------------ + * classify_api.c - classify api + * + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + *------------------------------------------------------------------ + */ + +#include +#include + +#include +#include + +#include +#include +#include +#include + +#include + +#define vl_typedefs /* define message structures */ +#include +#undef vl_typedefs + +#define vl_endianfun /* define message structures */ +#include +#undef vl_endianfun + +/* instantiate all the print functions we know about */ +#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__) +#define vl_printfun +#include +#undef vl_printfun + +#include + +#define foreach_vpe_api_msg \ +_(CLASSIFY_ADD_DEL_TABLE, classify_add_del_table) \ +_(CLASSIFY_ADD_DEL_SESSION, classify_add_del_session) \ +_(CLASSIFY_TABLE_IDS,classify_table_ids) \ +_(CLASSIFY_TABLE_BY_INTERFACE, classify_table_by_interface) \ +_(CLASSIFY_TABLE_INFO,classify_table_info) \ +_(CLASSIFY_SESSION_DUMP,classify_session_dump) \ +_(CLASSIFY_SESSION_DETAILS,classify_session_details) \ +_(POLICER_CLASSIFY_SET_INTERFACE, policer_classify_set_interface) \ +_(POLICER_CLASSIFY_DUMP, policer_classify_dump) \ +_(FLOW_CLASSIFY_SET_INTERFACE, flow_classify_set_interface) \ +_(FLOW_CLASSIFY_DUMP, flow_classify_dump) + +#define foreach_classify_add_del_table_field \ +_(table_index) \ +_(nbuckets) \ +_(memory_size) \ +_(skip_n_vectors) \ +_(match_n_vectors) \ +_(next_table_index) \ +_(miss_next_index) \ +_(current_data_flag) \ +_(current_data_offset) + +static void vl_api_classify_add_del_table_t_handler + (vl_api_classify_add_del_table_t * mp) +{ + vl_api_classify_add_del_table_reply_t *rmp; + vnet_classify_main_t *cm = &vnet_classify_main; + vnet_classify_table_t *t; + int rv; + +#define _(a) u32 a; + foreach_classify_add_del_table_field; +#undef _ + +#define _(a) a = ntohl(mp->a); + foreach_classify_add_del_table_field; +#undef _ + + /* The underlying API fails silently, on purpose, so check here */ + if (mp->is_add == 0) /* delete */ + { + if (pool_is_free_index (cm->tables, table_index)) + { + rv = VNET_API_ERROR_NO_SUCH_TABLE; + goto out; + } + } + else /* add or update */ + { + if (table_index != ~0 && pool_is_free_index (cm->tables, table_index)) + table_index = ~0; + } + + rv = vnet_classify_add_del_table + (cm, mp->mask, nbuckets, memory_size, + skip_n_vectors, match_n_vectors, + next_table_index, miss_next_index, &table_index, + current_data_flag, current_data_offset, mp->is_add, mp->del_chain); + +out: + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_CLASSIFY_ADD_DEL_TABLE_REPLY, + ({ + if (rv == 0 && mp->is_add) + { + t = pool_elt_at_index (cm->tables, table_index); + rmp->skip_n_vectors = ntohl(t->skip_n_vectors); + rmp->match_n_vectors = ntohl(t->match_n_vectors); + rmp->new_table_index = ntohl(table_index); + } + else + { + rmp->skip_n_vectors = ~0; + rmp->match_n_vectors = ~0; + rmp->new_table_index = ~0; + } + })); + /* *INDENT-ON* */ +} + +static void vl_api_classify_add_del_session_t_handler + (vl_api_classify_add_del_session_t * mp) +{ + vnet_classify_main_t *cm = &vnet_classify_main; + vl_api_classify_add_del_session_reply_t *rmp; + int rv; + u32 table_index, hit_next_index, opaque_index, metadata; + i32 advance; + u8 action; + + table_index = ntohl (mp->table_index); + hit_next_index = ntohl (mp->hit_next_index); + opaque_index = ntohl (mp->opaque_index); + advance = ntohl (mp->advance); + action = mp->action; + metadata = ntohl (mp->metadata); + + rv = vnet_classify_add_del_session + (cm, table_index, mp->match, hit_next_index, 
opaque_index, + advance, action, metadata, mp->is_add); + + REPLY_MACRO (VL_API_CLASSIFY_ADD_DEL_SESSION_REPLY); +} + +static void + vl_api_policer_classify_set_interface_t_handler + (vl_api_policer_classify_set_interface_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + vl_api_policer_classify_set_interface_reply_t *rmp; + int rv; + u32 sw_if_index, ip4_table_index, ip6_table_index, l2_table_index; + + ip4_table_index = ntohl (mp->ip4_table_index); + ip6_table_index = ntohl (mp->ip6_table_index); + l2_table_index = ntohl (mp->l2_table_index); + sw_if_index = ntohl (mp->sw_if_index); + + VALIDATE_SW_IF_INDEX (mp); + + rv = vnet_set_policer_classify_intfc (vm, sw_if_index, ip4_table_index, + ip6_table_index, l2_table_index, + mp->is_add); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_POLICER_CLASSIFY_SET_INTERFACE_REPLY); +} + +static void +send_policer_classify_details (u32 sw_if_index, + u32 table_index, + unix_shared_memory_queue_t * q, u32 context) +{ + vl_api_policer_classify_details_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_POLICER_CLASSIFY_DETAILS); + mp->context = context; + mp->sw_if_index = htonl (sw_if_index); + mp->table_index = htonl (table_index); + + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +static void +vl_api_policer_classify_dump_t_handler (vl_api_policer_classify_dump_t * mp) +{ + unix_shared_memory_queue_t *q; + policer_classify_main_t *pcm = &policer_classify_main; + u32 *vec_tbl; + int i; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + return; + + vec_tbl = pcm->classify_table_index_by_sw_if_index[mp->type]; + + if (vec_len (vec_tbl)) + { + for (i = 0; i < vec_len (vec_tbl); i++) + { + if (vec_elt (vec_tbl, i) == ~0) + continue; + + send_policer_classify_details (i, vec_elt (vec_tbl, i), q, + mp->context); + } + } +} + +static void +vl_api_classify_table_ids_t_handler (vl_api_classify_table_ids_t * mp) +{ + unix_shared_memory_queue_t *q; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + return; + + vnet_classify_main_t *cm = &vnet_classify_main; + vnet_classify_table_t *t; + u32 *table_ids = 0; + u32 count; + + /* *INDENT-OFF* */ + pool_foreach (t, cm->tables, + ({ + vec_add1 (table_ids, ntohl(t - cm->tables)); + })); + /* *INDENT-ON* */ + count = vec_len (table_ids); + + vl_api_classify_table_ids_reply_t *rmp; + rmp = vl_msg_api_alloc_as_if_client (sizeof (*rmp) + count * sizeof (u32)); + rmp->_vl_msg_id = ntohs (VL_API_CLASSIFY_TABLE_IDS_REPLY); + rmp->context = mp->context; + rmp->count = ntohl (count); + clib_memcpy (rmp->ids, table_ids, count * sizeof (u32)); + rmp->retval = 0; + + vl_msg_api_send_shmem (q, (u8 *) & rmp); + + vec_free (table_ids); +} + +static void + vl_api_classify_table_by_interface_t_handler + (vl_api_classify_table_by_interface_t * mp) +{ + vl_api_classify_table_by_interface_reply_t *rmp; + int rv = 0; + + u32 sw_if_index = ntohl (mp->sw_if_index); + u32 *acl = 0; + + vec_validate (acl, INPUT_ACL_N_TABLES - 1); + vec_set (acl, ~0); + + VALIDATE_SW_IF_INDEX (mp); + + input_acl_main_t *am = &input_acl_main; + + int if_idx; + u32 type; + + for (type = 0; type < INPUT_ACL_N_TABLES; type++) + { + u32 *vec_tbl = am->classify_table_index_by_sw_if_index[type]; + if (vec_len (vec_tbl)) + { + for (if_idx = 0; if_idx < vec_len (vec_tbl); if_idx++) + { + if (vec_elt (vec_tbl, if_idx) == ~0 || sw_if_index != if_idx) + { + continue; + } + acl[type] = vec_elt (vec_tbl, if_idx); + } + } + } + + BAD_SW_IF_INDEX_LABEL; + 
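+  /* Build the reply: the REPLY_MACRO2 body below copies the per-type classify
+     table indices collected above (l2/ip4/ip6, ~0 where no table is attached
+     to this sw_if_index) into the reply in network byte order. */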
+ /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_CLASSIFY_TABLE_BY_INTERFACE_REPLY, + ({ + rmp->sw_if_index = ntohl(sw_if_index); + rmp->l2_table_id = ntohl(acl[INPUT_ACL_TABLE_L2]); + rmp->ip4_table_id = ntohl(acl[INPUT_ACL_TABLE_IP4]); + rmp->ip6_table_id = ntohl(acl[INPUT_ACL_TABLE_IP6]); + })); + /* *INDENT-ON* */ + vec_free (acl); +} + +static void +vl_api_classify_table_info_t_handler (vl_api_classify_table_info_t * mp) +{ + unix_shared_memory_queue_t *q; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + return; + + vl_api_classify_table_info_reply_t *rmp = 0; + + vnet_classify_main_t *cm = &vnet_classify_main; + u32 table_id = ntohl (mp->table_id); + vnet_classify_table_t *t; + + /* *INDENT-OFF* */ + pool_foreach (t, cm->tables, + ({ + if (table_id == t - cm->tables) + { + rmp = vl_msg_api_alloc_as_if_client + (sizeof (*rmp) + t->match_n_vectors * sizeof (u32x4)); + rmp->_vl_msg_id = ntohs (VL_API_CLASSIFY_TABLE_INFO_REPLY); + rmp->context = mp->context; + rmp->table_id = ntohl(table_id); + rmp->nbuckets = ntohl(t->nbuckets); + rmp->match_n_vectors = ntohl(t->match_n_vectors); + rmp->skip_n_vectors = ntohl(t->skip_n_vectors); + rmp->active_sessions = ntohl(t->active_elements); + rmp->next_table_index = ntohl(t->next_table_index); + rmp->miss_next_index = ntohl(t->miss_next_index); + rmp->mask_length = ntohl(t->match_n_vectors * sizeof (u32x4)); + clib_memcpy(rmp->mask, t->mask, t->match_n_vectors * sizeof(u32x4)); + rmp->retval = 0; + break; + } + })); + /* *INDENT-ON* */ + + if (rmp == 0) + { + rmp = vl_msg_api_alloc (sizeof (*rmp)); + rmp->_vl_msg_id = ntohs ((VL_API_CLASSIFY_TABLE_INFO_REPLY)); + rmp->context = mp->context; + rmp->retval = ntohl (VNET_API_ERROR_CLASSIFY_TABLE_NOT_FOUND); + } + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void +vl_api_classify_session_details_t_handler (vl_api_classify_session_details_t * + mp) +{ + clib_warning ("BUG"); +} + +static void +send_classify_session_details (unix_shared_memory_queue_t * q, + u32 table_id, + u32 match_length, + vnet_classify_entry_t * e, u32 context) +{ + vl_api_classify_session_details_t *rmp; + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_CLASSIFY_SESSION_DETAILS); + rmp->context = context; + rmp->table_id = ntohl (table_id); + rmp->hit_next_index = ntohl (e->next_index); + rmp->advance = ntohl (e->advance); + rmp->opaque_index = ntohl (e->opaque_index); + rmp->match_length = ntohl (match_length); + clib_memcpy (rmp->match, e->key, match_length); + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void +vl_api_classify_session_dump_t_handler (vl_api_classify_session_dump_t * mp) +{ + vnet_classify_main_t *cm = &vnet_classify_main; + unix_shared_memory_queue_t *q; + + u32 table_id = ntohl (mp->table_id); + vnet_classify_table_t *t; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (!q) + return; + + /* *INDENT-OFF* */ + pool_foreach (t, cm->tables, + ({ + if (table_id == t - cm->tables) + { + vnet_classify_bucket_t * b; + vnet_classify_entry_t * v, * save_v; + int i, j, k; + + for (i = 0; i < t->nbuckets; i++) + { + b = &t->buckets [i]; + if (b->offset == 0) + continue; + + save_v = vnet_classify_get_entry (t, b->offset); + for (j = 0; j < (1<log2_pages); j++) + { + for (k = 0; k < t->entries_per_page; k++) + { + v = vnet_classify_entry_at_index + (t, save_v, j*t->entries_per_page + k); + if (vnet_classify_entry_is_free (v)) + continue; + + send_classify_session_details + (q, table_id, 
t->match_n_vectors * sizeof (u32x4), + v, mp->context); + } + } + } + break; + } + })); + /* *INDENT-ON* */ +} + +static void + vl_api_flow_classify_set_interface_t_handler + (vl_api_flow_classify_set_interface_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + vl_api_flow_classify_set_interface_reply_t *rmp; + int rv; + u32 sw_if_index, ip4_table_index, ip6_table_index; + + ip4_table_index = ntohl (mp->ip4_table_index); + ip6_table_index = ntohl (mp->ip6_table_index); + sw_if_index = ntohl (mp->sw_if_index); + + VALIDATE_SW_IF_INDEX (mp); + + rv = vnet_set_flow_classify_intfc (vm, sw_if_index, ip4_table_index, + ip6_table_index, mp->is_add); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_FLOW_CLASSIFY_SET_INTERFACE_REPLY); +} + +static void +send_flow_classify_details (u32 sw_if_index, + u32 table_index, + unix_shared_memory_queue_t * q, u32 context) +{ + vl_api_flow_classify_details_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_FLOW_CLASSIFY_DETAILS); + mp->context = context; + mp->sw_if_index = htonl (sw_if_index); + mp->table_index = htonl (table_index); + + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +static void +vl_api_flow_classify_dump_t_handler (vl_api_flow_classify_dump_t * mp) +{ + unix_shared_memory_queue_t *q; + flow_classify_main_t *pcm = &flow_classify_main; + u32 *vec_tbl; + int i; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + return; + + vec_tbl = pcm->classify_table_index_by_sw_if_index[mp->type]; + + if (vec_len (vec_tbl)) + { + for (i = 0; i < vec_len (vec_tbl); i++) + { + if (vec_elt (vec_tbl, i) == ~0) + continue; + + send_flow_classify_details (i, vec_elt (vec_tbl, i), q, + mp->context); + } + } +} + +/* + * classify_api_hookup + * Add vpe's API message handlers to the table. + * vlib has alread mapped shared memory and + * added the client registration handlers. 
+ * See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process() + */ +#define vl_msg_name_crc_list +#include +#undef vl_msg_name_crc_list + +static void +setup_message_id_table (api_main_t * am) +{ +#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id); + foreach_vl_msg_name_crc_classify; +#undef _ +} + +static clib_error_t * +classify_api_hookup (vlib_main_t * vm) +{ + api_main_t *am = &api_main; + +#define _(N,n) \ + vl_msg_api_set_handlers(VL_API_##N, #n, \ + vl_api_##n##_t_handler, \ + vl_noop_handler, \ + vl_api_##n##_t_endian, \ + vl_api_##n##_t_print, \ + sizeof(vl_api_##n##_t), 1); + foreach_vpe_api_msg; +#undef _ + + /* + * Set up the (msg_name, crc, message-id) table + */ + setup_message_id_table (am); + + return 0; +} + +VLIB_API_INIT_FUNCTION (classify_api_hookup); + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/vnet_all_api_h.h b/src/vnet/vnet_all_api_h.h index 1024f92c..c7789650 100644 --- a/src/vnet/vnet_all_api_h.h +++ b/src/vnet/vnet_all_api_h.h @@ -52,6 +52,7 @@ #include #include #include +#include /* * fd.io coding-style-patch-verification: ON diff --git a/src/vpp/api/api.c b/src/vpp/api/api.c index 3afc3383..6e7e9c10 100644 --- a/src/vpp/api/api.c +++ b/src/vpp/api/api.c @@ -62,10 +62,7 @@ #include #include #include -#include #include -#include -#include #include #include #include @@ -139,8 +136,6 @@ _(CLI_REQUEST, cli_request) \ _(CLI_INBAND, cli_inband) \ _(SET_ARP_NEIGHBOR_LIMIT, set_arp_neighbor_limit) \ _(L2_PATCH_ADD_DEL, l2_patch_add_del) \ -_(CLASSIFY_ADD_DEL_TABLE, classify_add_del_table) \ -_(CLASSIFY_ADD_DEL_SESSION, classify_add_del_session) \ _(CLASSIFY_SET_INTERFACE_IP_TABLE, classify_set_interface_ip_table) \ _(CLASSIFY_SET_INTERFACE_L2_TABLES, classify_set_interface_l2_tables) \ _(GET_NODE_INDEX, get_node_index) \ @@ -165,13 +160,6 @@ _(IOAM_ENABLE, ioam_enable) \ _(IOAM_DISABLE, ioam_disable) \ _(POLICER_ADD_DEL, policer_add_del) \ _(POLICER_DUMP, policer_dump) \ -_(POLICER_CLASSIFY_SET_INTERFACE, policer_classify_set_interface) \ -_(POLICER_CLASSIFY_DUMP, policer_classify_dump) \ -_(CLASSIFY_TABLE_IDS,classify_table_ids) \ -_(CLASSIFY_TABLE_BY_INTERFACE, classify_table_by_interface) \ -_(CLASSIFY_TABLE_INFO,classify_table_info) \ -_(CLASSIFY_SESSION_DUMP,classify_session_dump) \ -_(CLASSIFY_SESSION_DETAILS,classify_session_details) \ _(SET_IPFIX_EXPORTER, set_ipfix_exporter) \ _(IPFIX_EXPORTER_DUMP, ipfix_exporter_dump) \ _(SET_IPFIX_CLASSIFY_STREAM, set_ipfix_classify_stream) \ @@ -189,8 +177,6 @@ _(IP_SOURCE_AND_PORT_RANGE_CHECK_INTERFACE_ADD_DEL, \ _(DELETE_SUBIF, delete_subif) \ _(L2_INTERFACE_PBB_TAG_REWRITE, l2_interface_pbb_tag_rewrite) \ _(PUNT, punt) \ -_(FLOW_CLASSIFY_SET_INTERFACE, flow_classify_set_interface) \ -_(FLOW_CLASSIFY_DUMP, flow_classify_dump) \ _(FEATURE_ENABLE_DISABLE, feature_enable_disable) #define QUOTE_(x) #x @@ -1412,99 +1398,6 @@ vl_api_set_arp_neighbor_limit_t_handler (vl_api_set_arp_neighbor_limit_t * mp) REPLY_MACRO (VL_API_SET_ARP_NEIGHBOR_LIMIT_REPLY); } -#define foreach_classify_add_del_table_field \ -_(table_index) \ -_(nbuckets) \ -_(memory_size) \ -_(skip_n_vectors) \ -_(match_n_vectors) \ -_(next_table_index) \ -_(miss_next_index) \ -_(current_data_flag) \ -_(current_data_offset) - -static void vl_api_classify_add_del_table_t_handler - (vl_api_classify_add_del_table_t * mp) -{ - vl_api_classify_add_del_table_reply_t *rmp; - vnet_classify_main_t *cm = &vnet_classify_main; - vnet_classify_table_t *t; - 
int rv; - -#define _(a) u32 a; - foreach_classify_add_del_table_field; -#undef _ - -#define _(a) a = ntohl(mp->a); - foreach_classify_add_del_table_field; -#undef _ - - /* The underlying API fails silently, on purpose, so check here */ - if (mp->is_add == 0) /* delete */ - { - if (pool_is_free_index (cm->tables, table_index)) - { - rv = VNET_API_ERROR_NO_SUCH_TABLE; - goto out; - } - } - else /* add or update */ - { - if (table_index != ~0 && pool_is_free_index (cm->tables, table_index)) - table_index = ~0; - } - - rv = vnet_classify_add_del_table - (cm, mp->mask, nbuckets, memory_size, - skip_n_vectors, match_n_vectors, - next_table_index, miss_next_index, &table_index, - current_data_flag, current_data_offset, mp->is_add, mp->del_chain); - -out: - /* *INDENT-OFF* */ - REPLY_MACRO2(VL_API_CLASSIFY_ADD_DEL_TABLE_REPLY, - ({ - if (rv == 0 && mp->is_add) - { - t = pool_elt_at_index (cm->tables, table_index); - rmp->skip_n_vectors = ntohl(t->skip_n_vectors); - rmp->match_n_vectors = ntohl(t->match_n_vectors); - rmp->new_table_index = ntohl(table_index); - } - else - { - rmp->skip_n_vectors = ~0; - rmp->match_n_vectors = ~0; - rmp->new_table_index = ~0; - } - })); - /* *INDENT-ON* */ -} - -static void vl_api_classify_add_del_session_t_handler - (vl_api_classify_add_del_session_t * mp) -{ - vnet_classify_main_t *cm = &vnet_classify_main; - vl_api_classify_add_del_session_reply_t *rmp; - int rv; - u32 table_index, hit_next_index, opaque_index, metadata; - i32 advance; - u8 action; - - table_index = ntohl (mp->table_index); - hit_next_index = ntohl (mp->hit_next_index); - opaque_index = ntohl (mp->opaque_index); - advance = ntohl (mp->advance); - action = mp->action; - metadata = ntohl (mp->metadata); - - rv = vnet_classify_add_del_session - (cm, table_index, mp->match, hit_next_index, opaque_index, - advance, action, metadata, mp->is_add); - - REPLY_MACRO (VL_API_CLASSIFY_ADD_DEL_SESSION_REPLY); -} - static void vl_api_classify_set_interface_ip_table_t_handler (vl_api_classify_set_interface_ip_table_t * mp) { @@ -2528,288 +2421,6 @@ vl_api_policer_dump_t_handler (vl_api_policer_dump_t * mp) } } -static void - vl_api_policer_classify_set_interface_t_handler - (vl_api_policer_classify_set_interface_t * mp) -{ - vlib_main_t *vm = vlib_get_main (); - vl_api_policer_classify_set_interface_reply_t *rmp; - int rv; - u32 sw_if_index, ip4_table_index, ip6_table_index, l2_table_index; - - ip4_table_index = ntohl (mp->ip4_table_index); - ip6_table_index = ntohl (mp->ip6_table_index); - l2_table_index = ntohl (mp->l2_table_index); - sw_if_index = ntohl (mp->sw_if_index); - - VALIDATE_SW_IF_INDEX (mp); - - rv = vnet_set_policer_classify_intfc (vm, sw_if_index, ip4_table_index, - ip6_table_index, l2_table_index, - mp->is_add); - - BAD_SW_IF_INDEX_LABEL; - - REPLY_MACRO (VL_API_POLICER_CLASSIFY_SET_INTERFACE_REPLY); -} - -static void -send_policer_classify_details (u32 sw_if_index, - u32 table_index, - unix_shared_memory_queue_t * q, u32 context) -{ - vl_api_policer_classify_details_t *mp; - - mp = vl_msg_api_alloc (sizeof (*mp)); - memset (mp, 0, sizeof (*mp)); - mp->_vl_msg_id = ntohs (VL_API_POLICER_CLASSIFY_DETAILS); - mp->context = context; - mp->sw_if_index = htonl (sw_if_index); - mp->table_index = htonl (table_index); - - vl_msg_api_send_shmem (q, (u8 *) & mp); -} - -static void -vl_api_policer_classify_dump_t_handler (vl_api_policer_classify_dump_t * mp) -{ - unix_shared_memory_queue_t *q; - policer_classify_main_t *pcm = &policer_classify_main; - u32 *vec_tbl; - int i; - - q = 
vl_api_client_index_to_input_queue (mp->client_index); - if (q == 0) - return; - - vec_tbl = pcm->classify_table_index_by_sw_if_index[mp->type]; - - if (vec_len (vec_tbl)) - { - for (i = 0; i < vec_len (vec_tbl); i++) - { - if (vec_elt (vec_tbl, i) == ~0) - continue; - - send_policer_classify_details (i, vec_elt (vec_tbl, i), q, - mp->context); - } - } -} - -static void -vl_api_classify_table_ids_t_handler (vl_api_classify_table_ids_t * mp) -{ - unix_shared_memory_queue_t *q; - - q = vl_api_client_index_to_input_queue (mp->client_index); - if (q == 0) - return; - - vnet_classify_main_t *cm = &vnet_classify_main; - vnet_classify_table_t *t; - u32 *table_ids = 0; - u32 count; - - /* *INDENT-OFF* */ - pool_foreach (t, cm->tables, - ({ - vec_add1 (table_ids, ntohl(t - cm->tables)); - })); - /* *INDENT-ON* */ - count = vec_len (table_ids); - - vl_api_classify_table_ids_reply_t *rmp; - rmp = vl_msg_api_alloc_as_if_client (sizeof (*rmp) + count * sizeof (u32)); - rmp->_vl_msg_id = ntohs (VL_API_CLASSIFY_TABLE_IDS_REPLY); - rmp->context = mp->context; - rmp->count = ntohl (count); - clib_memcpy (rmp->ids, table_ids, count * sizeof (u32)); - rmp->retval = 0; - - vl_msg_api_send_shmem (q, (u8 *) & rmp); - - vec_free (table_ids); -} - -static void - vl_api_classify_table_by_interface_t_handler - (vl_api_classify_table_by_interface_t * mp) -{ - vl_api_classify_table_by_interface_reply_t *rmp; - int rv = 0; - - u32 sw_if_index = ntohl (mp->sw_if_index); - u32 *acl = 0; - - vec_validate (acl, INPUT_ACL_N_TABLES - 1); - vec_set (acl, ~0); - - VALIDATE_SW_IF_INDEX (mp); - - input_acl_main_t *am = &input_acl_main; - - int if_idx; - u32 type; - - for (type = 0; type < INPUT_ACL_N_TABLES; type++) - { - u32 *vec_tbl = am->classify_table_index_by_sw_if_index[type]; - if (vec_len (vec_tbl)) - { - for (if_idx = 0; if_idx < vec_len (vec_tbl); if_idx++) - { - if (vec_elt (vec_tbl, if_idx) == ~0 || sw_if_index != if_idx) - { - continue; - } - acl[type] = vec_elt (vec_tbl, if_idx); - } - } - } - - BAD_SW_IF_INDEX_LABEL; - - /* *INDENT-OFF* */ - REPLY_MACRO2(VL_API_CLASSIFY_TABLE_BY_INTERFACE_REPLY, - ({ - rmp->sw_if_index = ntohl(sw_if_index); - rmp->l2_table_id = ntohl(acl[INPUT_ACL_TABLE_L2]); - rmp->ip4_table_id = ntohl(acl[INPUT_ACL_TABLE_IP4]); - rmp->ip6_table_id = ntohl(acl[INPUT_ACL_TABLE_IP6]); - })); - /* *INDENT-ON* */ - vec_free (acl); -} - -static void -vl_api_classify_table_info_t_handler (vl_api_classify_table_info_t * mp) -{ - unix_shared_memory_queue_t *q; - - q = vl_api_client_index_to_input_queue (mp->client_index); - if (q == 0) - return; - - vl_api_classify_table_info_reply_t *rmp = 0; - - vnet_classify_main_t *cm = &vnet_classify_main; - u32 table_id = ntohl (mp->table_id); - vnet_classify_table_t *t; - - /* *INDENT-OFF* */ - pool_foreach (t, cm->tables, - ({ - if (table_id == t - cm->tables) - { - rmp = vl_msg_api_alloc_as_if_client - (sizeof (*rmp) + t->match_n_vectors * sizeof (u32x4)); - rmp->_vl_msg_id = ntohs (VL_API_CLASSIFY_TABLE_INFO_REPLY); - rmp->context = mp->context; - rmp->table_id = ntohl(table_id); - rmp->nbuckets = ntohl(t->nbuckets); - rmp->match_n_vectors = ntohl(t->match_n_vectors); - rmp->skip_n_vectors = ntohl(t->skip_n_vectors); - rmp->active_sessions = ntohl(t->active_elements); - rmp->next_table_index = ntohl(t->next_table_index); - rmp->miss_next_index = ntohl(t->miss_next_index); - rmp->mask_length = ntohl(t->match_n_vectors * sizeof (u32x4)); - clib_memcpy(rmp->mask, t->mask, t->match_n_vectors * sizeof(u32x4)); - rmp->retval = 0; - break; - } - })); - /* *INDENT-ON* 
*/ - - if (rmp == 0) - { - rmp = vl_msg_api_alloc (sizeof (*rmp)); - rmp->_vl_msg_id = ntohs ((VL_API_CLASSIFY_TABLE_INFO_REPLY)); - rmp->context = mp->context; - rmp->retval = ntohl (VNET_API_ERROR_CLASSIFY_TABLE_NOT_FOUND); - } - - vl_msg_api_send_shmem (q, (u8 *) & rmp); -} - -static void -vl_api_classify_session_details_t_handler (vl_api_classify_session_details_t * - mp) -{ - clib_warning ("BUG"); -} - -static void -send_classify_session_details (unix_shared_memory_queue_t * q, - u32 table_id, - u32 match_length, - vnet_classify_entry_t * e, u32 context) -{ - vl_api_classify_session_details_t *rmp; - - rmp = vl_msg_api_alloc (sizeof (*rmp)); - memset (rmp, 0, sizeof (*rmp)); - rmp->_vl_msg_id = ntohs (VL_API_CLASSIFY_SESSION_DETAILS); - rmp->context = context; - rmp->table_id = ntohl (table_id); - rmp->hit_next_index = ntohl (e->next_index); - rmp->advance = ntohl (e->advance); - rmp->opaque_index = ntohl (e->opaque_index); - rmp->match_length = ntohl (match_length); - clib_memcpy (rmp->match, e->key, match_length); - - vl_msg_api_send_shmem (q, (u8 *) & rmp); -} - -static void -vl_api_classify_session_dump_t_handler (vl_api_classify_session_dump_t * mp) -{ - vnet_classify_main_t *cm = &vnet_classify_main; - unix_shared_memory_queue_t *q; - - u32 table_id = ntohl (mp->table_id); - vnet_classify_table_t *t; - - q = vl_api_client_index_to_input_queue (mp->client_index); - if (!q) - return; - - /* *INDENT-OFF* */ - pool_foreach (t, cm->tables, - ({ - if (table_id == t - cm->tables) - { - vnet_classify_bucket_t * b; - vnet_classify_entry_t * v, * save_v; - int i, j, k; - - for (i = 0; i < t->nbuckets; i++) - { - b = &t->buckets [i]; - if (b->offset == 0) - continue; - - save_v = vnet_classify_get_entry (t, b->offset); - for (j = 0; j < (1<log2_pages); j++) - { - for (k = 0; k < t->entries_per_page; k++) - { - v = vnet_classify_entry_at_index - (t, save_v, j*t->entries_per_page + k); - if (vnet_classify_entry_is_free (v)) - continue; - - send_classify_session_details - (q, table_id, t->match_n_vectors * sizeof (u32x4), - v, mp->context); - } - } - } - break; - } - })); - /* *INDENT-ON* */ -} static void vl_api_set_ipfix_exporter_t_handler (vl_api_set_ipfix_exporter_t * mp) @@ -3399,73 +3010,6 @@ vl_api_punt_t_handler (vl_api_punt_t * mp) REPLY_MACRO (VL_API_PUNT_REPLY); } -static void - vl_api_flow_classify_set_interface_t_handler - (vl_api_flow_classify_set_interface_t * mp) -{ - vlib_main_t *vm = vlib_get_main (); - vl_api_flow_classify_set_interface_reply_t *rmp; - int rv; - u32 sw_if_index, ip4_table_index, ip6_table_index; - - ip4_table_index = ntohl (mp->ip4_table_index); - ip6_table_index = ntohl (mp->ip6_table_index); - sw_if_index = ntohl (mp->sw_if_index); - - VALIDATE_SW_IF_INDEX (mp); - - rv = vnet_set_flow_classify_intfc (vm, sw_if_index, ip4_table_index, - ip6_table_index, mp->is_add); - - BAD_SW_IF_INDEX_LABEL; - - REPLY_MACRO (VL_API_FLOW_CLASSIFY_SET_INTERFACE_REPLY); -} - -static void -send_flow_classify_details (u32 sw_if_index, - u32 table_index, - unix_shared_memory_queue_t * q, u32 context) -{ - vl_api_flow_classify_details_t *mp; - - mp = vl_msg_api_alloc (sizeof (*mp)); - memset (mp, 0, sizeof (*mp)); - mp->_vl_msg_id = ntohs (VL_API_FLOW_CLASSIFY_DETAILS); - mp->context = context; - mp->sw_if_index = htonl (sw_if_index); - mp->table_index = htonl (table_index); - - vl_msg_api_send_shmem (q, (u8 *) & mp); -} - -static void -vl_api_flow_classify_dump_t_handler (vl_api_flow_classify_dump_t * mp) -{ - unix_shared_memory_queue_t *q; - flow_classify_main_t *pcm = 
&flow_classify_main; - u32 *vec_tbl; - int i; - - q = vl_api_client_index_to_input_queue (mp->client_index); - if (q == 0) - return; - - vec_tbl = pcm->classify_table_index_by_sw_if_index[mp->type]; - - if (vec_len (vec_tbl)) - { - for (i = 0; i < vec_len (vec_tbl); i++) - { - if (vec_elt (vec_tbl, i) == ~0) - continue; - - send_flow_classify_details (i, vec_elt (vec_tbl, i), q, - mp->context); - } - } -} - static void vl_api_feature_enable_disable_t_handler (vl_api_feature_enable_disable_t * mp) { diff --git a/src/vpp/api/vpe.api b/src/vpp/api/vpe.api index e784fa01..a9b34d1f 100644 --- a/src/vpp/api/vpe.api +++ b/src/vpp/api/vpe.api @@ -26,7 +26,7 @@ * IP APIs: see .../src/vnet/ip/{ip.api, ip_api.c} * TAP APIs: see .../src/vnet/unix/{tap.api, tap_api.c} * VXLAN APIs: see .../src/vnet/vxlan/{vxlan.api, vxlan_api.c} - * AF-PACKET APIs: ... see /vnet/devices/af_packet/{af_packet.api, af_packet_api.c} + * AF-PACKET APIs: see ... /vnet/devices/af_packet/{af_packet.api, af_packet_api.c} * NETMAP APIs: see ... /src/vnet/devices/netmap/{netmap.api, netmap_api.c} * VHOST-USER APIs: see .../vnet/devices/virtio/{vhost_user.api, vhost_user_api.c} * VXLAN GPE APIs: see .../src/vnet/vxlan-gpe/{vxlan_gpe.api, vxlan_gpe_api.c} @@ -40,7 +40,8 @@ * LISP-GPE APIs: see .../src/vnet/lisp-gpe/{lisp_gpe.api, lisp_gpe_api.c} * MPLS APIs: see .../src/vnet/mpls/{mpls.api, mpls_api.c} * SR APIs: see .../src/vnet/sr/{sr.api, sr_api.c} - * DPDK APIs: ... see /src/vnet/devices/dpdk/{dpdk.api, dpdk_api.c} + * DPDK APIs: see ... /src/vnet/devices/dpdk/{dpdk.api, dpdk_api.c} + * CLASSIFY APIs: see ... /src/vnet/classify/{classify.api, classify_api.c} */ /** \brief Create a new subinterface with the given vlan id @@ -685,109 +686,6 @@ define bd_ip_mac_add_del_reply i32 retval; }; -/** \brief Add/Delete classification table request - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param is_add- if non-zero add the table, else delete it - @param del_chain - if non-zero delete the whole chain of tables - @param table_index - if add, reuturns index of the created table, else specifies the table to delete - @param nbuckets - number of buckets when adding a table - @param memory_size - memory size when adding a table - @param match_n_vectors - number of match vectors - @param next_table_index - index of next table - @param miss_next_index - index of miss table - @param current_data_flag - option to use current node's packet payload - as the starting point from where packets are classified, - This option is only valid for L2/L3 input ACL for now. - 0: by default, classify data from the buffer's start location - 1: classify packets from VPP node’s current data pointer - @param current_data_offset - a signed value to shift the start location of - the packet to be classified - For example, if input IP ACL node is used, L2 header’s first byte - can be accessible by configuring current_data_offset to -14 - if there is no vlan tag. - This is valid only if current_data_flag is set to 1. 
- @param mask[] - match mask -*/ -define classify_add_del_table -{ - u32 client_index; - u32 context; - u8 is_add; - u8 del_chain; - u32 table_index; - u32 nbuckets; - u32 memory_size; - u32 skip_n_vectors; - u32 match_n_vectors; - u32 next_table_index; - u32 miss_next_index; - u32 current_data_flag; - i32 current_data_offset; - u8 mask[0]; -}; - -/** \brief Add/Delete classification table response - @param context - sender context, to match reply w/ request - @param retval - return code for the table add/del requst - @param new_table_index - for add, returned index of the new table - @param skip_n_vectors - for add, returned value of skip_n_vectors in table - @param match_n_vectors -for add, returned value of match_n_vectors in table -*/ -define classify_add_del_table_reply -{ - u32 context; - i32 retval; - u32 new_table_index; - u32 skip_n_vectors; - u32 match_n_vectors; -}; - -/** \brief Classify add / del session request - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param is_add - add session if non-zero, else delete - @param table_index - index of the table to add/del the session, required - @param hit_next_index - for add, hit_next_index of new session, required - @param opaque_index - for add, opaque_index of new session - @param advance -for add, advance value for session - @param action - - 0: no action (by default) - metadata is not used. - 1: Classified IP packets will be looked up from the - specified ipv4 fib table (configured by metadata as VRF id). - Only valid for L3 input ACL node - 2: Classified IP packets will be looked up from the - specified ipv6 fib table (configured by metadata as VRF id). - Only valid for L3 input ACL node - @param metadata - valid only if action != 0 - VRF id if action is 1 or 2. - @param match[] - for add, match value for session, required -*/ -define classify_add_del_session -{ - u32 client_index; - u32 context; - u8 is_add; - u32 table_index; - u32 hit_next_index; - u32 opaque_index; - i32 advance; - u8 action; - u32 metadata; - u8 match[0]; -}; - -/** \brief Classify add / del session response - @param context - sender context, to match reply w/ request - @param retval - return code for the add/del session request -*/ -define classify_add_del_session_reply -{ - u32 context; - i32 retval; -}; - /** \brief Set/unset the classification table for an interface request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -1491,187 +1389,6 @@ define policer_details u64 last_update_time; }; -/** \brief Set/unset policer classify interface - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param sw_if_index - interface to set/unset policer classify - @param ip4_table_index - ip4 classify table index (~0 for skip) - @param ip6_table_index - ip6 classify table index (~0 for skip) - @param l2_table_index - l2 classify table index (~0 for skip) - @param is_add - Set if non-zero, else unset - Note: User is recommeneded to use just one valid table_index per call. 
- (ip4_table_index, ip6_table_index, or l2_table_index) -*/ -define policer_classify_set_interface -{ - u32 client_index; - u32 context; - u32 sw_if_index; - u32 ip4_table_index; - u32 ip6_table_index; - u32 l2_table_index; - u8 is_add; -}; - -/** \brief Set/unset policer classify interface response - @param context - sender context, to match reply w/ request - @param retval - return value for request -*/ -define policer_classify_set_interface_reply -{ - u32 context; - i32 retval; -}; - -/** \brief Get list of policer classify interfaces and tables - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param type - classify table type -*/ -define policer_classify_dump -{ - u32 client_index; - u32 context; - u8 type; -}; - -/** \brief Policer iclassify operational state response. - @param context - sender context, to match reply w/ request - @param sw_if_index - software interface index - @param table_index - classify table index -*/ -define policer_classify_details -{ - u32 context; - u32 sw_if_index; - u32 table_index; -}; - -/** \brief Classify get table IDs request - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request -*/ -define classify_table_ids -{ - u32 client_index; - u32 context; -}; - -/** \brief Reply for classify get table IDs request - @param context - sender context which was passed in the request - @param count - number of ids returned in response - @param ids - array of classify table ids -*/ -define classify_table_ids_reply -{ - u32 context; - i32 retval; - u32 count; - u32 ids[count]; -}; - -/** \brief Classify table ids by interface index request - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param sw_if_index - index of the interface -*/ -define classify_table_by_interface -{ - u32 client_index; - u32 context; - u32 sw_if_index; -}; - -/** \brief Reply for classify table id by interface index request - @param context - sender context which was passed in the request - @param count - number of ids returned in response - @param sw_if_index - index of the interface - @param l2_table_id - l2 classify table index - @param ip4_table_id - ip4 classify table index - @param ip6_table_id - ip6 classify table index -*/ -define classify_table_by_interface_reply -{ - u32 context; - i32 retval; - u32 sw_if_index; - u32 l2_table_id; - u32 ip4_table_id; - u32 ip6_table_id; -}; - -/** \brief Classify table info - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param table_id - classify table index -*/ -define classify_table_info -{ - u32 client_index; - u32 context; - u32 table_id; -}; - -/** \brief Reply for classify table info request - @param context - sender context which was passed in the request - @param count - number of ids returned in response - @param table_id - classify table index - @param nbuckets - number of buckets when adding a table - @param match_n_vectors - number of match vectors - @param skip_n_vectors - number of skip_n_vectors - @param active_sessions - number of sessions (active entries) - @param next_table_index - index of next table - @param miss_next_index - index of miss table - @param mask[] - match mask -*/ -define classify_table_info_reply -{ - u32 context; - i32 retval; - u32 table_id; - u32 nbuckets; - u32 match_n_vectors; - u32 skip_n_vectors; - u32 active_sessions; - 
u32 next_table_index; - u32 miss_next_index; - u32 mask_length; - u8 mask[mask_length]; -}; - -/** \brief Classify sessions dump request - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param table_id - classify table index -*/ -define classify_session_dump -{ - u32 client_index; - u32 context; - u32 table_id; -}; - -/** \brief Reply for classify table session dump request - @param context - sender context which was passed in the request - @param count - number of ids returned in response - @param table_id - classify table index - @param hit_next_index - hit_next_index of session - @param opaque_index - for add, opaque_index of session - @param advance - advance value of session - @param match[] - match value for session -*/ -define classify_session_details -{ - u32 context; - i32 retval; - u32 table_id; - u32 hit_next_index; - i32 advance; - u32 opaque_index; - u32 match_length; - u8 match[match_length]; -}; - /** \brief Configure IPFIX exporter process request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -1825,57 +1542,6 @@ define ipfix_classify_table_details { u8 transport_protocol; }; -/** \brief Set/unset flow classify interface - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param sw_if_index - interface to set/unset flow classify - @param ip4_table_index - ip4 classify table index (~0 for skip) - @param ip6_table_index - ip6 classify table index (~0 for skip) - @param l2_table_index - l2 classify table index (~0 for skip) - @param is_add - Set if non-zero, else unset - Note: User is recommeneded to use just one valid table_index per call. - (ip4_table_index, ip6_table_index, or l2_table_index) -*/ -define flow_classify_set_interface { - u32 client_index; - u32 context; - u32 sw_if_index; - u32 ip4_table_index; - u32 ip6_table_index; - u8 is_add; -}; - -/** \brief Set/unset flow classify interface response - @param context - sender context, to match reply w/ request - @param retval - return value for request -*/ -define flow_classify_set_interface_reply { - u32 context; - i32 retval; -}; - -/** \brief Get list of flow classify interfaces and tables - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param type - classify table type -*/ -define flow_classify_dump { - u32 client_index; - u32 context; - u8 type; -}; - -/** \brief Flow classify operational state response. 
- @param context - sender context, to match reply w/ request - @param sw_if_index - software interface index - @param table_index - classify table index -*/ -define flow_classify_details { - u32 context; - u32 sw_if_index; - u32 table_index; -}; - /** \brief Query relative index via node names @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request -- cgit 1.2.3-korg From eb9e666a3c7045d36e2b2ae40122f40a0b5f9c65 Mon Sep 17 00:00:00 2001 From: Pavel Kotucek Date: Tue, 24 Jan 2017 13:40:26 +0100 Subject: API refactoring : flow Change-Id: I99e913b954f8b02f347bfeff093856a1c5e96781 Signed-off-by: Pavel Kotucek --- src/vnet.am | 8 +- src/vnet/flow/flow.api | 173 ++++++++++++++++++++ src/vnet/flow/flow_api.c | 397 ++++++++++++++++++++++++++++++++++++++++++++++ src/vnet/vnet_all_api_h.h | 1 + src/vpp/api/api.c | 299 ---------------------------------- src/vpp/api/vpe.api | 154 +----------------- 6 files changed, 578 insertions(+), 454 deletions(-) create mode 100644 src/vnet/flow/flow.api create mode 100644 src/vnet/flow/flow_api.c (limited to 'src/vpp/api/api.c') diff --git a/src/vnet.am b/src/vnet.am index 660efcf5..c751c100 100644 --- a/src/vnet.am +++ b/src/vnet.am @@ -707,12 +707,16 @@ nobase_include_HEADERS += \ # IPFIX / netflow v10 ######################################## libvnet_la_SOURCES += \ - vnet/flow/flow_report.c + vnet/flow/flow_report.c \ + vnet/flow/flow_api.c nobase_include_HEADERS += \ vnet/flow/flow_report.h \ vnet/flow/ipfix_info_elements.h \ - vnet/flow/ipfix_packet.h + vnet/flow/ipfix_packet.h \ + vnet/flow/flow.api.h + +API_FILES += vnet/flow/flow.api ######################################## # IPFIX classify code diff --git a/src/vnet/flow/flow.api b/src/vnet/flow/flow.api new file mode 100644 index 00000000..0e0f99bf --- /dev/null +++ b/src/vnet/flow/flow.api @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2015-2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** \brief Configure IPFIX exporter process request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param collector_address - address of IPFIX collector + @param collector_port - port of IPFIX collector + @param src_address - address of IPFIX exporter + @param vrf_id - VRF / fib table ID + @param path_mtu - Path MTU between exporter and collector + @param template_interval - number of seconds after which to resend template + @param udp_checksum - UDP checksum calculation enable flag +*/ +define set_ipfix_exporter +{ + u32 client_index; + u32 context; + u8 collector_address[16]; + u16 collector_port; + u8 src_address[16]; + u32 vrf_id; + u32 path_mtu; + u32 template_interval; + u8 udp_checksum; +}; + +/** \brief Reply to IPFIX exporter configure request + @param context - sender context which was passed in the request +*/ +define set_ipfix_exporter_reply +{ + u32 context; + i32 retval; +}; + +/** \brief IPFIX exporter dump request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request +*/ +define ipfix_exporter_dump +{ + u32 client_index; + u32 context; +}; + +/** \brief Reply to IPFIX exporter dump request + @param context - sender context which was passed in the request + @param collector_address - address of IPFIX collector + @param collector_port - port of IPFIX collector + @param src_address - address of IPFIX exporter + @param fib_index - fib table index + @param path_mtu - Path MTU between exporter and collector + @param template_interval - number of seconds after which to resend template + @param udp_checksum - UDP checksum calculation enable flag +*/ +define ipfix_exporter_details +{ + u32 context; + u8 collector_address[16]; + u16 collector_port; + u8 src_address[16]; + u32 vrf_id; + u32 path_mtu; + u32 template_interval; + u8 udp_checksum; +}; + +/** \brief IPFIX classify stream configure request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param domain_id - domain ID reported in IPFIX messages for classify stream + @param src_port - source port of UDP session for classify stream +*/ +define set_ipfix_classify_stream { + u32 client_index; + u32 context; + u32 domain_id; + u16 src_port; +}; + +/** \brief IPFIX classify stream configure response + @param context - sender context, to match reply w/ request + @param retval - return value for request +*/ +define set_ipfix_classify_stream_reply { + u32 context; + i32 retval; +}; + +/** \brief IPFIX classify stream dump request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request +*/ +define ipfix_classify_stream_dump { + u32 client_index; + u32 context; +}; + +/** \brief Reply to IPFIX classify stream dump request + @param context - sender context, to match reply w/ request + @param domain_id - domain ID reported in IPFIX messages for classify stream + @param src_port - source port of UDP session for classify stream +*/ +define ipfix_classify_stream_details { + u32 context; + u32 domain_id; + u16 src_port; +}; + +/** \brief IPFIX add or delete classifier table request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param table_id - classifier table ID + @param ip_version - version of IP used in the classifier table + @param transport_protocol - transport protocol 
used in the classifier table or 255 for unspecified +*/ +define ipfix_classify_table_add_del { + u32 client_index; + u32 context; + u32 table_id; + u8 ip_version; + u8 transport_protocol; + u8 is_add; +}; + +/** \brief IPFIX add classifier table response + @param context - sender context which was passed in the request +*/ +define ipfix_classify_table_add_del_reply { + u32 context; + i32 retval; +}; + +/** \brief IPFIX classify tables dump request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request +*/ +define ipfix_classify_table_dump { + u32 client_index; + u32 context; +}; + +/** \brief Reply to IPFIX classify tables dump request + @param context - sender context, to match reply w/ request + @param table_id - classifier table ID + @param ip_version - version of IP used in the classifier table + @param transport_protocol - transport protocol used in the classifier table or 255 for unspecified +*/ +define ipfix_classify_table_details { + u32 context; + u32 table_id; + u8 ip_version; + u8 transport_protocol; +}; + +/* + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/flow/flow_api.c b/src/vnet/flow/flow_api.c new file mode 100644 index 00000000..b975dda1 --- /dev/null +++ b/src/vnet/flow/flow_api.c @@ -0,0 +1,397 @@ +/* + *------------------------------------------------------------------ + * flow_api.c - flow api + * + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + *------------------------------------------------------------------ + */ + +#include +#include + +#include +#include + +#include +#include +#include + +#include + +#define vl_typedefs /* define message structures */ +#include +#undef vl_typedefs + +#define vl_endianfun /* define message structures */ +#include +#undef vl_endianfun + +/* instantiate all the print functions we know about */ +#define vl_print(handle, ...) 
vlib_cli_output (handle, __VA_ARGS__) +#define vl_printfun +#include +#undef vl_printfun + +#include + +#define foreach_vpe_api_msg \ +_(SET_IPFIX_EXPORTER, set_ipfix_exporter) \ +_(IPFIX_EXPORTER_DUMP, ipfix_exporter_dump) \ +_(SET_IPFIX_CLASSIFY_STREAM, set_ipfix_classify_stream) \ +_(IPFIX_CLASSIFY_STREAM_DUMP, ipfix_classify_stream_dump) \ +_(IPFIX_CLASSIFY_TABLE_ADD_DEL, ipfix_classify_table_add_del) \ +_(IPFIX_CLASSIFY_TABLE_DUMP, ipfix_classify_table_dump) + +static void +vl_api_set_ipfix_exporter_t_handler (vl_api_set_ipfix_exporter_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + flow_report_main_t *frm = &flow_report_main; + vl_api_set_ipfix_exporter_reply_t *rmp; + ip4_address_t collector, src; + u16 collector_port = UDP_DST_PORT_ipfix; + u32 path_mtu; + u32 template_interval; + u8 udp_checksum; + u32 fib_id; + u32 fib_index = ~0; + int rv = 0; + + memcpy (collector.data, mp->collector_address, sizeof (collector.data)); + collector_port = ntohs (mp->collector_port); + if (collector_port == (u16) ~ 0) + collector_port = UDP_DST_PORT_ipfix; + memcpy (src.data, mp->src_address, sizeof (src.data)); + fib_id = ntohl (mp->vrf_id); + + ip4_main_t *im = &ip4_main; + if (fib_id == ~0) + { + fib_index = ~0; + } + else + { + uword *p = hash_get (im->fib_index_by_table_id, fib_id); + if (!p) + { + rv = VNET_API_ERROR_NO_SUCH_FIB; + goto out; + } + fib_index = p[0]; + } + + path_mtu = ntohl (mp->path_mtu); + if (path_mtu == ~0) + path_mtu = 512; // RFC 7011 section 10.3.3. + template_interval = ntohl (mp->template_interval); + if (template_interval == ~0) + template_interval = 20; + udp_checksum = mp->udp_checksum; + + if (collector.as_u32 == 0) + { + rv = VNET_API_ERROR_INVALID_VALUE; + goto out; + } + + if (src.as_u32 == 0) + { + rv = VNET_API_ERROR_INVALID_VALUE; + goto out; + } + + if (path_mtu > 1450 /* vpp does not support fragmentation */ ) + { + rv = VNET_API_ERROR_INVALID_VALUE; + goto out; + } + + if (path_mtu < 68) + { + rv = VNET_API_ERROR_INVALID_VALUE; + goto out; + } + + /* Reset report streams if we are reconfiguring IP addresses */ + if (frm->ipfix_collector.as_u32 != collector.as_u32 || + frm->src_address.as_u32 != src.as_u32 || + frm->collector_port != collector_port) + vnet_flow_reports_reset (frm); + + frm->ipfix_collector.as_u32 = collector.as_u32; + frm->collector_port = collector_port; + frm->src_address.as_u32 = src.as_u32; + frm->fib_index = fib_index; + frm->path_mtu = path_mtu; + frm->template_interval = template_interval; + frm->udp_checksum = udp_checksum; + + /* Turn on the flow reporting process */ + vlib_process_signal_event (vm, flow_report_process_node.index, 1, 0); + +out: + REPLY_MACRO (VL_API_SET_IPFIX_EXPORTER_REPLY); +} + +static void +vl_api_ipfix_exporter_dump_t_handler (vl_api_ipfix_exporter_dump_t * mp) +{ + flow_report_main_t *frm = &flow_report_main; + unix_shared_memory_queue_t *q; + vl_api_ipfix_exporter_details_t *rmp; + ip4_main_t *im = &ip4_main; + u32 vrf_id; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (!q) + return; + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_IPFIX_EXPORTER_DETAILS); + rmp->context = mp->context; + memcpy (rmp->collector_address, frm->ipfix_collector.data, + sizeof (frm->ipfix_collector.data)); + rmp->collector_port = htons (frm->collector_port); + memcpy (rmp->src_address, frm->src_address.data, + sizeof (frm->src_address.data)); + if (frm->fib_index == ~0) + vrf_id = ~0; + else + vrf_id = im->fibs[frm->fib_index].ft_table_id; + 
rmp->vrf_id = htonl (vrf_id); + rmp->path_mtu = htonl (frm->path_mtu); + rmp->template_interval = htonl (frm->template_interval); + rmp->udp_checksum = (frm->udp_checksum != 0); + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void + vl_api_set_ipfix_classify_stream_t_handler + (vl_api_set_ipfix_classify_stream_t * mp) +{ + vl_api_set_ipfix_classify_stream_reply_t *rmp; + flow_report_classify_main_t *fcm = &flow_report_classify_main; + flow_report_main_t *frm = &flow_report_main; + u32 domain_id = 0; + u32 src_port = UDP_DST_PORT_ipfix; + int rv = 0; + + domain_id = ntohl (mp->domain_id); + src_port = ntohs (mp->src_port); + + if (fcm->src_port != 0 && + (fcm->domain_id != domain_id || fcm->src_port != (u16) src_port)) + { + int rv = vnet_stream_change (frm, fcm->domain_id, fcm->src_port, + domain_id, (u16) src_port); + ASSERT (rv == 0); + } + + fcm->domain_id = domain_id; + fcm->src_port = (u16) src_port; + + REPLY_MACRO (VL_API_SET_IPFIX_CLASSIFY_STREAM_REPLY); +} + +static void + vl_api_ipfix_classify_stream_dump_t_handler + (vl_api_ipfix_classify_stream_dump_t * mp) +{ + flow_report_classify_main_t *fcm = &flow_report_classify_main; + unix_shared_memory_queue_t *q; + vl_api_ipfix_classify_stream_details_t *rmp; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (!q) + return; + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_IPFIX_CLASSIFY_STREAM_DETAILS); + rmp->context = mp->context; + rmp->domain_id = htonl (fcm->domain_id); + rmp->src_port = htons (fcm->src_port); + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void + vl_api_ipfix_classify_table_add_del_t_handler + (vl_api_ipfix_classify_table_add_del_t * mp) +{ + vl_api_ipfix_classify_table_add_del_reply_t *rmp; + flow_report_classify_main_t *fcm = &flow_report_classify_main; + flow_report_main_t *frm = &flow_report_main; + vnet_flow_report_add_del_args_t args; + ipfix_classify_table_t *table; + int is_add; + u32 classify_table_index; + u8 ip_version; + u8 transport_protocol; + int rv = 0; + + classify_table_index = ntohl (mp->table_id); + ip_version = mp->ip_version; + transport_protocol = mp->transport_protocol; + is_add = mp->is_add; + + if (fcm->src_port == 0) + { + /* call set_ipfix_classify_stream first */ + rv = VNET_API_ERROR_UNSPECIFIED; + goto out; + } + + memset (&args, 0, sizeof (args)); + + table = 0; + int i; + for (i = 0; i < vec_len (fcm->tables); i++) + if (ipfix_classify_table_index_valid (i)) + if (fcm->tables[i].classify_table_index == classify_table_index) + { + table = &fcm->tables[i]; + break; + } + + if (is_add) + { + if (table) + { + rv = VNET_API_ERROR_VALUE_EXIST; + goto out; + } + table = ipfix_classify_add_table (); + table->classify_table_index = classify_table_index; + } + else + { + if (!table) + { + rv = VNET_API_ERROR_NO_SUCH_ENTRY; + goto out; + } + } + + table->ip_version = ip_version; + table->transport_protocol = transport_protocol; + + args.opaque.as_uword = table - fcm->tables; + args.rewrite_callback = ipfix_classify_template_rewrite; + args.flow_data_callback = ipfix_classify_send_flows; + args.is_add = is_add; + args.domain_id = fcm->domain_id; + args.src_port = fcm->src_port; + + rv = vnet_flow_report_add_del (frm, &args); + + /* If deleting, or add failed */ + if (is_add == 0 || (rv && is_add)) + ipfix_classify_delete_table (table - fcm->tables); + +out: + REPLY_MACRO (VL_API_SET_IPFIX_CLASSIFY_STREAM_REPLY); +} + +static void +send_ipfix_classify_table_details (u32 table_index, + 
unix_shared_memory_queue_t * q, + u32 context) +{ + flow_report_classify_main_t *fcm = &flow_report_classify_main; + vl_api_ipfix_classify_table_details_t *mp; + + ipfix_classify_table_t *table = &fcm->tables[table_index]; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_IPFIX_CLASSIFY_TABLE_DETAILS); + mp->context = context; + mp->table_id = htonl (table->classify_table_index); + mp->ip_version = table->ip_version; + mp->transport_protocol = table->transport_protocol; + + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +static void + vl_api_ipfix_classify_table_dump_t_handler + (vl_api_ipfix_classify_table_dump_t * mp) +{ + flow_report_classify_main_t *fcm = &flow_report_classify_main; + unix_shared_memory_queue_t *q; + u32 i; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (!q) + return; + + for (i = 0; i < vec_len (fcm->tables); i++) + if (ipfix_classify_table_index_valid (i)) + send_ipfix_classify_table_details (i, q, mp->context); +} + +/* + * flow_api_hookup + * Add vpe's API message handlers to the table. + * vlib has alread mapped shared memory and + * added the client registration handlers. + * See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process() + */ +#define vl_msg_name_crc_list +#include +#undef vl_msg_name_crc_list + +static void +setup_message_id_table (api_main_t * am) +{ +#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id); + foreach_vl_msg_name_crc_flow; +#undef _ +} + +static clib_error_t * +flow_api_hookup (vlib_main_t * vm) +{ + api_main_t *am = &api_main; + +#define _(N,n) \ + vl_msg_api_set_handlers(VL_API_##N, #n, \ + vl_api_##n##_t_handler, \ + vl_noop_handler, \ + vl_api_##n##_t_endian, \ + vl_api_##n##_t_print, \ + sizeof(vl_api_##n##_t), 1); + foreach_vpe_api_msg; +#undef _ + + /* + * Set up the (msg_name, crc, message-id) table + */ + setup_message_id_table (am); + + return 0; +} + +VLIB_API_INIT_FUNCTION (flow_api_hookup); + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/vnet_all_api_h.h b/src/vnet/vnet_all_api_h.h index c7789650..d76eee5a 100644 --- a/src/vnet/vnet_all_api_h.h +++ b/src/vnet/vnet_all_api_h.h @@ -53,6 +53,7 @@ #include #include #include +#include /* * fd.io coding-style-patch-verification: ON diff --git a/src/vpp/api/api.c b/src/vpp/api/api.c index 6e7e9c10..9f6f260b 100644 --- a/src/vpp/api/api.c +++ b/src/vpp/api/api.c @@ -72,8 +72,6 @@ #include #include #include -#include -#include #include #include @@ -160,12 +158,6 @@ _(IOAM_ENABLE, ioam_enable) \ _(IOAM_DISABLE, ioam_disable) \ _(POLICER_ADD_DEL, policer_add_del) \ _(POLICER_DUMP, policer_dump) \ -_(SET_IPFIX_EXPORTER, set_ipfix_exporter) \ -_(IPFIX_EXPORTER_DUMP, ipfix_exporter_dump) \ -_(SET_IPFIX_CLASSIFY_STREAM, set_ipfix_classify_stream) \ -_(IPFIX_CLASSIFY_STREAM_DUMP, ipfix_classify_stream_dump) \ -_(IPFIX_CLASSIFY_TABLE_ADD_DEL, ipfix_classify_table_add_del) \ -_(IPFIX_CLASSIFY_TABLE_DUMP, ipfix_classify_table_dump) \ _(GET_NEXT_INDEX, get_next_index) \ _(PG_CREATE_INTERFACE, pg_create_interface) \ _(PG_CAPTURE, pg_capture) \ @@ -2422,297 +2414,6 @@ vl_api_policer_dump_t_handler (vl_api_policer_dump_t * mp) } -static void -vl_api_set_ipfix_exporter_t_handler (vl_api_set_ipfix_exporter_t * mp) -{ - vlib_main_t *vm = vlib_get_main (); - flow_report_main_t *frm = &flow_report_main; - vl_api_set_ipfix_exporter_reply_t *rmp; - ip4_address_t collector, src; - u16 collector_port = 
UDP_DST_PORT_ipfix; - u32 path_mtu; - u32 template_interval; - u8 udp_checksum; - u32 fib_id; - u32 fib_index = ~0; - int rv = 0; - - memcpy (collector.data, mp->collector_address, sizeof (collector.data)); - collector_port = ntohs (mp->collector_port); - if (collector_port == (u16) ~ 0) - collector_port = UDP_DST_PORT_ipfix; - memcpy (src.data, mp->src_address, sizeof (src.data)); - fib_id = ntohl (mp->vrf_id); - - ip4_main_t *im = &ip4_main; - if (fib_id == ~0) - { - fib_index = ~0; - } - else - { - uword *p = hash_get (im->fib_index_by_table_id, fib_id); - if (!p) - { - rv = VNET_API_ERROR_NO_SUCH_FIB; - goto out; - } - fib_index = p[0]; - } - - path_mtu = ntohl (mp->path_mtu); - if (path_mtu == ~0) - path_mtu = 512; // RFC 7011 section 10.3.3. - template_interval = ntohl (mp->template_interval); - if (template_interval == ~0) - template_interval = 20; - udp_checksum = mp->udp_checksum; - - if (collector.as_u32 == 0) - { - rv = VNET_API_ERROR_INVALID_VALUE; - goto out; - } - - if (src.as_u32 == 0) - { - rv = VNET_API_ERROR_INVALID_VALUE; - goto out; - } - - if (path_mtu > 1450 /* vpp does not support fragmentation */ ) - { - rv = VNET_API_ERROR_INVALID_VALUE; - goto out; - } - - if (path_mtu < 68) - { - rv = VNET_API_ERROR_INVALID_VALUE; - goto out; - } - - /* Reset report streams if we are reconfiguring IP addresses */ - if (frm->ipfix_collector.as_u32 != collector.as_u32 || - frm->src_address.as_u32 != src.as_u32 || - frm->collector_port != collector_port) - vnet_flow_reports_reset (frm); - - frm->ipfix_collector.as_u32 = collector.as_u32; - frm->collector_port = collector_port; - frm->src_address.as_u32 = src.as_u32; - frm->fib_index = fib_index; - frm->path_mtu = path_mtu; - frm->template_interval = template_interval; - frm->udp_checksum = udp_checksum; - - /* Turn on the flow reporting process */ - vlib_process_signal_event (vm, flow_report_process_node.index, 1, 0); - -out: - REPLY_MACRO (VL_API_SET_IPFIX_EXPORTER_REPLY); -} - -static void -vl_api_ipfix_exporter_dump_t_handler (vl_api_ipfix_exporter_dump_t * mp) -{ - flow_report_main_t *frm = &flow_report_main; - unix_shared_memory_queue_t *q; - vl_api_ipfix_exporter_details_t *rmp; - ip4_main_t *im = &ip4_main; - u32 vrf_id; - - q = vl_api_client_index_to_input_queue (mp->client_index); - if (!q) - return; - - rmp = vl_msg_api_alloc (sizeof (*rmp)); - memset (rmp, 0, sizeof (*rmp)); - rmp->_vl_msg_id = ntohs (VL_API_IPFIX_EXPORTER_DETAILS); - rmp->context = mp->context; - memcpy (rmp->collector_address, frm->ipfix_collector.data, - sizeof (frm->ipfix_collector.data)); - rmp->collector_port = htons (frm->collector_port); - memcpy (rmp->src_address, frm->src_address.data, - sizeof (frm->src_address.data)); - if (frm->fib_index == ~0) - vrf_id = ~0; - else - vrf_id = im->fibs[frm->fib_index].ft_table_id; - rmp->vrf_id = htonl (vrf_id); - rmp->path_mtu = htonl (frm->path_mtu); - rmp->template_interval = htonl (frm->template_interval); - rmp->udp_checksum = (frm->udp_checksum != 0); - - vl_msg_api_send_shmem (q, (u8 *) & rmp); -} - -static void - vl_api_set_ipfix_classify_stream_t_handler - (vl_api_set_ipfix_classify_stream_t * mp) -{ - vl_api_set_ipfix_classify_stream_reply_t *rmp; - flow_report_classify_main_t *fcm = &flow_report_classify_main; - flow_report_main_t *frm = &flow_report_main; - u32 domain_id = 0; - u32 src_port = UDP_DST_PORT_ipfix; - int rv = 0; - - domain_id = ntohl (mp->domain_id); - src_port = ntohs (mp->src_port); - - if (fcm->src_port != 0 && - (fcm->domain_id != domain_id || fcm->src_port != (u16) src_port)) - 
{ - int rv = vnet_stream_change (frm, fcm->domain_id, fcm->src_port, - domain_id, (u16) src_port); - ASSERT (rv == 0); - } - - fcm->domain_id = domain_id; - fcm->src_port = (u16) src_port; - - REPLY_MACRO (VL_API_SET_IPFIX_CLASSIFY_STREAM_REPLY); -} - -static void - vl_api_ipfix_classify_stream_dump_t_handler - (vl_api_ipfix_classify_stream_dump_t * mp) -{ - flow_report_classify_main_t *fcm = &flow_report_classify_main; - unix_shared_memory_queue_t *q; - vl_api_ipfix_classify_stream_details_t *rmp; - - q = vl_api_client_index_to_input_queue (mp->client_index); - if (!q) - return; - - rmp = vl_msg_api_alloc (sizeof (*rmp)); - memset (rmp, 0, sizeof (*rmp)); - rmp->_vl_msg_id = ntohs (VL_API_IPFIX_CLASSIFY_STREAM_DETAILS); - rmp->context = mp->context; - rmp->domain_id = htonl (fcm->domain_id); - rmp->src_port = htons (fcm->src_port); - - vl_msg_api_send_shmem (q, (u8 *) & rmp); -} - -static void - vl_api_ipfix_classify_table_add_del_t_handler - (vl_api_ipfix_classify_table_add_del_t * mp) -{ - vl_api_ipfix_classify_table_add_del_reply_t *rmp; - flow_report_classify_main_t *fcm = &flow_report_classify_main; - flow_report_main_t *frm = &flow_report_main; - vnet_flow_report_add_del_args_t args; - ipfix_classify_table_t *table; - int is_add; - u32 classify_table_index; - u8 ip_version; - u8 transport_protocol; - int rv = 0; - - classify_table_index = ntohl (mp->table_id); - ip_version = mp->ip_version; - transport_protocol = mp->transport_protocol; - is_add = mp->is_add; - - if (fcm->src_port == 0) - { - /* call set_ipfix_classify_stream first */ - rv = VNET_API_ERROR_UNSPECIFIED; - goto out; - } - - memset (&args, 0, sizeof (args)); - - table = 0; - int i; - for (i = 0; i < vec_len (fcm->tables); i++) - if (ipfix_classify_table_index_valid (i)) - if (fcm->tables[i].classify_table_index == classify_table_index) - { - table = &fcm->tables[i]; - break; - } - - if (is_add) - { - if (table) - { - rv = VNET_API_ERROR_VALUE_EXIST; - goto out; - } - table = ipfix_classify_add_table (); - table->classify_table_index = classify_table_index; - } - else - { - if (!table) - { - rv = VNET_API_ERROR_NO_SUCH_ENTRY; - goto out; - } - } - - table->ip_version = ip_version; - table->transport_protocol = transport_protocol; - - args.opaque.as_uword = table - fcm->tables; - args.rewrite_callback = ipfix_classify_template_rewrite; - args.flow_data_callback = ipfix_classify_send_flows; - args.is_add = is_add; - args.domain_id = fcm->domain_id; - args.src_port = fcm->src_port; - - rv = vnet_flow_report_add_del (frm, &args); - - /* If deleting, or add failed */ - if (is_add == 0 || (rv && is_add)) - ipfix_classify_delete_table (table - fcm->tables); - -out: - REPLY_MACRO (VL_API_SET_IPFIX_CLASSIFY_STREAM_REPLY); -} - -static void -send_ipfix_classify_table_details (u32 table_index, - unix_shared_memory_queue_t * q, - u32 context) -{ - flow_report_classify_main_t *fcm = &flow_report_classify_main; - vl_api_ipfix_classify_table_details_t *mp; - - ipfix_classify_table_t *table = &fcm->tables[table_index]; - - mp = vl_msg_api_alloc (sizeof (*mp)); - memset (mp, 0, sizeof (*mp)); - mp->_vl_msg_id = ntohs (VL_API_IPFIX_CLASSIFY_TABLE_DETAILS); - mp->context = context; - mp->table_id = htonl (table->classify_table_index); - mp->ip_version = table->ip_version; - mp->transport_protocol = table->transport_protocol; - - vl_msg_api_send_shmem (q, (u8 *) & mp); -} - -static void - vl_api_ipfix_classify_table_dump_t_handler - (vl_api_ipfix_classify_table_dump_t * mp) -{ - flow_report_classify_main_t *fcm = 
&flow_report_classify_main; - unix_shared_memory_queue_t *q; - u32 i; - - q = vl_api_client_index_to_input_queue (mp->client_index); - if (!q) - return; - - for (i = 0; i < vec_len (fcm->tables); i++) - if (ipfix_classify_table_index_valid (i)) - send_ipfix_classify_table_details (i, q, mp->context); -} - static void vl_api_pg_create_interface_t_handler (vl_api_pg_create_interface_t * mp) { diff --git a/src/vpp/api/vpe.api b/src/vpp/api/vpe.api index a9b34d1f..f32ba670 100644 --- a/src/vpp/api/vpe.api +++ b/src/vpp/api/vpe.api @@ -42,6 +42,7 @@ * SR APIs: see .../src/vnet/sr/{sr.api, sr_api.c} * DPDK APIs: see ... /src/vnet/devices/dpdk/{dpdk.api, dpdk_api.c} * CLASSIFY APIs: see ... /src/vnet/classify/{classify.api, classify_api.c} + * FLOW APIs: see ... /src/vnet/flow/{flow.api, flow_api.c} */ /** \brief Create a new subinterface with the given vlan id @@ -1389,159 +1390,6 @@ define policer_details u64 last_update_time; }; -/** \brief Configure IPFIX exporter process request - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param collector_address - address of IPFIX collector - @param collector_port - port of IPFIX collector - @param src_address - address of IPFIX exporter - @param vrf_id - VRF / fib table ID - @param path_mtu - Path MTU between exporter and collector - @param template_interval - number of seconds after which to resend template - @param udp_checksum - UDP checksum calculation enable flag -*/ -define set_ipfix_exporter -{ - u32 client_index; - u32 context; - u8 collector_address[16]; - u16 collector_port; - u8 src_address[16]; - u32 vrf_id; - u32 path_mtu; - u32 template_interval; - u8 udp_checksum; -}; - -/** \brief Reply to IPFIX exporter configure request - @param context - sender context which was passed in the request -*/ -define set_ipfix_exporter_reply -{ - u32 context; - i32 retval; -}; - -/** \brief IPFIX exporter dump request - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request -*/ -define ipfix_exporter_dump -{ - u32 client_index; - u32 context; -}; - -/** \brief Reply to IPFIX exporter dump request - @param context - sender context which was passed in the request - @param collector_address - address of IPFIX collector - @param collector_port - port of IPFIX collector - @param src_address - address of IPFIX exporter - @param fib_index - fib table index - @param path_mtu - Path MTU between exporter and collector - @param template_interval - number of seconds after which to resend template - @param udp_checksum - UDP checksum calculation enable flag -*/ -define ipfix_exporter_details -{ - u32 context; - u8 collector_address[16]; - u16 collector_port; - u8 src_address[16]; - u32 vrf_id; - u32 path_mtu; - u32 template_interval; - u8 udp_checksum; -}; - -/** \brief IPFIX classify stream configure request - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param domain_id - domain ID reported in IPFIX messages for classify stream - @param src_port - source port of UDP session for classify stream -*/ -define set_ipfix_classify_stream { - u32 client_index; - u32 context; - u32 domain_id; - u16 src_port; -}; - -/** \brief IPFIX classify stream configure response - @param context - sender context, to match reply w/ request - @param retval - return value for request -*/ -define set_ipfix_classify_stream_reply { - u32 context; - i32 retval; -}; - -/** \brief 
IPFIX classify stream dump request - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request -*/ -define ipfix_classify_stream_dump { - u32 client_index; - u32 context; -}; - -/** \brief Reply to IPFIX classify stream dump request - @param context - sender context, to match reply w/ request - @param domain_id - domain ID reported in IPFIX messages for classify stream - @param src_port - source port of UDP session for classify stream -*/ -define ipfix_classify_stream_details { - u32 context; - u32 domain_id; - u16 src_port; -}; - -/** \brief IPFIX add or delete classifier table request - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param table_id - classifier table ID - @param ip_version - version of IP used in the classifier table - @param transport_protocol - transport protocol used in the classifier table or 255 for unspecified -*/ -define ipfix_classify_table_add_del { - u32 client_index; - u32 context; - u32 table_id; - u8 ip_version; - u8 transport_protocol; - u8 is_add; -}; - -/** \brief IPFIX add classifier table response - @param context - sender context which was passed in the request -*/ -define ipfix_classify_table_add_del_reply { - u32 context; - i32 retval; -}; - -/** \brief IPFIX classify tables dump request - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request -*/ -define ipfix_classify_table_dump { - u32 client_index; - u32 context; -}; - -/** \brief Reply to IPFIX classify tables dump request - @param context - sender context, to match reply w/ request - @param table_id - classifier table ID - @param ip_version - version of IP used in the classifier table - @param transport_protocol - transport protocol used in the classifier table or 255 for unspecified -*/ -define ipfix_classify_table_details { - u32 context; - u32 table_id; - u8 ip_version; - u8 transport_protocol; -}; - /** \brief Query relative index via node names @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request -- cgit 1.2.3-korg From c8d8770a3e09c300eeff461a11ef3723b8e029cb Mon Sep 17 00:00:00 2001 From: Pavel Kotucek Date: Wed, 25 Jan 2017 07:25:32 +0100 Subject: API refactoring : dhcp Change-Id: I3829835ed2126e51e96690c907deac623dc77151 Signed-off-by: Pavel Kotucek --- src/vnet.am | 8 +- src/vnet/dhcp/dhcp.api | 166 ++++++++++++++++++++++++++++++ src/vnet/dhcp/dhcp_api.c | 253 ++++++++++++++++++++++++++++++++++++++++++++++ src/vnet/vnet_all_api_h.h | 1 + src/vpp/api/api.c | 158 ----------------------------- src/vpp/api/vpe.api | 147 +-------------------------- 6 files changed, 427 insertions(+), 306 deletions(-) create mode 100644 src/vnet/dhcp/dhcp.api create mode 100644 src/vnet/dhcp/dhcp_api.c (limited to 'src/vpp/api/api.c') diff --git a/src/vnet.am b/src/vnet.am index 669ea1ff..c6922493 100644 --- a/src/vnet.am +++ b/src/vnet.am @@ -657,10 +657,14 @@ endif ######################################## libvnet_la_SOURCES += \ vnet/dhcp/client.c \ - vnet/dhcp/client.h + vnet/dhcp/client.h \ + vnet/dhcp/dhcp_api.c nobase_include_HEADERS += \ - vnet/dhcp/client.h + vnet/dhcp/client.h \ + vnet/dhcp/dhcp.api.h + +API_FILES += vnet/dhcp/dhcp.api ######################################## # DHCP proxy diff --git a/src/vnet/dhcp/dhcp.api b/src/vnet/dhcp/dhcp.api new file mode 100644 index 00000000..c228cd04 --- /dev/null +++ b/src/vnet/dhcp/dhcp.api 
@@ -0,0 +1,166 @@ +/* + * Copyright (c) 2015-2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** \brief DHCP Proxy config add / del request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param vrf_id - vrf id + @param if_ipv6 - ipv6 if non-zero, else ipv4 + @param is_add - add the config if non-zero, else delete + @param insert_circuit_id - option82 suboption 1 fib number + @param dhcp_server[] - server address + @param dhcp_src_address[] - +*/ +define dhcp_proxy_config +{ + u32 client_index; + u32 context; + u32 vrf_id; + u8 is_ipv6; + u8 is_add; + u8 insert_circuit_id; + u8 dhcp_server[16]; + u8 dhcp_src_address[16]; +}; + +/** \brief DHCP Proxy config response + @param context - sender context, to match reply w/ request + @param retval - return code for the request +*/ +define dhcp_proxy_config_reply +{ + u32 context; + i32 retval; +}; + +/** \brief DHCP Proxy config 2 add / del request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param rx_vrf_id - receive vrf id + @param server_vrf_id - server vrf id + @param if_ipv6 - ipv6 if non-zero, else ipv4 + @param is_add - add the config if non-zero, else delete + @param insert_circuit_id - option82 suboption 1 fib number + @param dhcp_server[] - server address + @param dhcp_src_address[] - +*/ +define dhcp_proxy_config_2 +{ + u32 client_index; + u32 context; + u32 rx_vrf_id; + u32 server_vrf_id; + u8 is_ipv6; + u8 is_add; + u8 insert_circuit_id; + u8 dhcp_server[16]; + u8 dhcp_src_address[16]; +}; + +/** \brief DHCP Proxy config 2 add / del response + @param context - sender context, to match reply w/ request + @param retval - return code for request +*/ +define dhcp_proxy_config_2_reply +{ + u32 context; + i32 retval; +}; + +/** \brief DHCP Proxy set / unset vss request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param tbl_id - table id + @param oui - first part of vpn id + @param fib_id - second part of vpn id + @param is_ipv6 - ip6 if non-zero, else ip4 + @param is_add - set vss if non-zero, else delete +*/ +define dhcp_proxy_set_vss +{ + u32 client_index; + u32 context; + u32 tbl_id; + u32 oui; + u32 fib_id; + u8 is_ipv6; + u8 is_add; +}; + +/** \brief DHCP proxy set / unset vss response + @param context - sender context, to match reply w/ request + @param retval - return code for the request +*/ +define dhcp_proxy_set_vss_reply +{ + u32 context; + i32 retval; +}; + +/** \brief DHCP Client config add / del request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - index of the interface for DHCP client + @param hostname - hostname + @param is_add - add the config if non-zero, else delete + @param want_dhcp_event - DHCP event sent to the sender + via dhcp_compl_event 
API message if non-zero + @param pid - sender's pid +*/ +define dhcp_client_config +{ + u32 client_index; + u32 context; + u32 sw_if_index; + u8 hostname[64]; + u8 is_add; + u8 want_dhcp_event; + u32 pid; +}; + +/** \brief DHCP Client config response + @param context - sender context, to match reply w/ request + @param retval - return code for the request +*/ +define dhcp_client_config_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Tell client about a DHCP completion event + @param client_index - opaque cookie to identify the sender + @param pid - client pid registered to receive notification + @param is_ipv6 - if non-zero the address is ipv6, else ipv4 + @param host_address - Host IP address + @param router_address - Router IP address + @param host_mac - Host MAC address +*/ +define dhcp_compl_event +{ + u32 client_index; + u32 pid; + u8 hostname[64]; + u8 is_ipv6; + u8 host_address[16]; + u8 router_address[16]; + u8 host_mac[6]; +}; + +/* + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ \ No newline at end of file diff --git a/src/vnet/dhcp/dhcp_api.c b/src/vnet/dhcp/dhcp_api.c new file mode 100644 index 00000000..88b32b24 --- /dev/null +++ b/src/vnet/dhcp/dhcp_api.c @@ -0,0 +1,253 @@ +/* + *------------------------------------------------------------------ + * dhcp_api.c - dhcp api + * + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + *------------------------------------------------------------------ + */ + +#include +#include + +#include +#include +#include +#include +#include + +#include + +#define vl_typedefs /* define message structures */ +#include +#undef vl_typedefs + +#define vl_endianfun /* define message structures */ +#include +#undef vl_endianfun + +/* instantiate all the print functions we know about */ +#define vl_print(handle, ...) 
vlib_cli_output (handle, __VA_ARGS__) +#define vl_printfun +#include +#undef vl_printfun + +#include + +#define foreach_vpe_api_msg \ +_(DHCP_PROXY_CONFIG,dhcp_proxy_config) \ +_(DHCP_PROXY_CONFIG_2,dhcp_proxy_config_2) \ +_(DHCP_PROXY_SET_VSS,dhcp_proxy_set_vss) \ +_(DHCP_CLIENT_CONFIG, dhcp_client_config) + +static void +dhcpv4_proxy_config (vl_api_dhcp_proxy_config_t * mp) +{ + vl_api_dhcp_proxy_config_reply_t *rmp; + int rv; + + rv = dhcp_proxy_set_server ((ip4_address_t *) (&mp->dhcp_server), + (ip4_address_t *) (&mp->dhcp_src_address), + (u32) ntohl (mp->vrf_id), + (int) mp->insert_circuit_id, + (int) (mp->is_add == 0)); + + REPLY_MACRO (VL_API_DHCP_PROXY_CONFIG_REPLY); +} + + +static void +dhcpv6_proxy_config (vl_api_dhcp_proxy_config_t * mp) +{ + vl_api_dhcp_proxy_config_reply_t *rmp; + int rv = -1; + + rv = dhcpv6_proxy_set_server ((ip6_address_t *) (&mp->dhcp_server), + (ip6_address_t *) (&mp->dhcp_src_address), + (u32) ntohl (mp->vrf_id), + (int) mp->insert_circuit_id, + (int) (mp->is_add == 0)); + + REPLY_MACRO (VL_API_DHCP_PROXY_CONFIG_REPLY); +} + +static void +dhcpv4_proxy_config_2 (vl_api_dhcp_proxy_config_2_t * mp) +{ + vl_api_dhcp_proxy_config_reply_t *rmp; + int rv; + + rv = dhcp_proxy_set_server_2 ((ip4_address_t *) (&mp->dhcp_server), + (ip4_address_t *) (&mp->dhcp_src_address), + (u32) ntohl (mp->rx_vrf_id), + (u32) ntohl (mp->server_vrf_id), + (int) mp->insert_circuit_id, + (int) (mp->is_add == 0)); + + REPLY_MACRO (VL_API_DHCP_PROXY_CONFIG_2_REPLY); +} + + +static void +dhcpv6_proxy_config_2 (vl_api_dhcp_proxy_config_2_t * mp) +{ + vl_api_dhcp_proxy_config_reply_t *rmp; + int rv = -1; + + rv = dhcpv6_proxy_set_server_2 ((ip6_address_t *) (&mp->dhcp_server), + (ip6_address_t *) (&mp->dhcp_src_address), + (u32) ntohl (mp->rx_vrf_id), + (u32) ntohl (mp->server_vrf_id), + (int) mp->insert_circuit_id, + (int) (mp->is_add == 0)); + + REPLY_MACRO (VL_API_DHCP_PROXY_CONFIG_2_REPLY); +} + + +static void +vl_api_dhcp_proxy_set_vss_t_handler (vl_api_dhcp_proxy_set_vss_t * mp) +{ + vl_api_dhcp_proxy_set_vss_reply_t *rmp; + int rv; + if (!mp->is_ipv6) + rv = dhcp_proxy_set_option82_vss (ntohl (mp->tbl_id), + ntohl (mp->oui), + ntohl (mp->fib_id), + (int) mp->is_add == 0); + else + rv = dhcpv6_proxy_set_vss (ntohl (mp->tbl_id), + ntohl (mp->oui), + ntohl (mp->fib_id), (int) mp->is_add == 0); + + REPLY_MACRO (VL_API_DHCP_PROXY_SET_VSS_REPLY); +} + + +static void vl_api_dhcp_proxy_config_t_handler + (vl_api_dhcp_proxy_config_t * mp) +{ + if (mp->is_ipv6 == 0) + dhcpv4_proxy_config (mp); + else + dhcpv6_proxy_config (mp); +} + +void +dhcp_compl_event_callback (u32 client_index, u32 pid, u8 * hostname, + u8 is_ipv6, u8 * host_address, u8 * router_address, + u8 * host_mac) +{ + unix_shared_memory_queue_t *q; + vl_api_dhcp_compl_event_t *mp; + + q = vl_api_client_index_to_input_queue (client_index); + if (!q) + return; + + mp = vl_msg_api_alloc (sizeof (*mp)); + mp->client_index = client_index; + mp->pid = pid; + mp->is_ipv6 = is_ipv6; + clib_memcpy (&mp->hostname, hostname, vec_len (hostname)); + mp->hostname[vec_len (hostname) + 1] = '\n'; + clib_memcpy (&mp->host_address[0], host_address, 16); + clib_memcpy (&mp->router_address[0], router_address, 16); + + if (NULL != host_mac) + clib_memcpy (&mp->host_mac[0], host_mac, 6); + + mp->_vl_msg_id = ntohs (VL_API_DHCP_COMPL_EVENT); + + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +static void vl_api_dhcp_proxy_config_2_t_handler + (vl_api_dhcp_proxy_config_2_t * mp) +{ + if (mp->is_ipv6 == 0) + dhcpv4_proxy_config_2 (mp); + else + 
dhcpv6_proxy_config_2 (mp); +} + +static void vl_api_dhcp_client_config_t_handler + (vl_api_dhcp_client_config_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + vl_api_dhcp_client_config_reply_t *rmp; + int rv = 0; + + VALIDATE_SW_IF_INDEX (mp); + + rv = dhcp_client_config (vm, ntohl (mp->sw_if_index), + mp->hostname, mp->is_add, mp->client_index, + mp->want_dhcp_event ? dhcp_compl_event_callback : + NULL, mp->pid); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_DHCP_CLIENT_CONFIG_REPLY); +} + +/* + * dhcp_api_hookup + * Add vpe's API message handlers to the table. + * vlib has alread mapped shared memory and + * added the client registration handlers. + * See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process() + */ +#define vl_msg_name_crc_list +#include +#undef vl_msg_name_crc_list + +static void +setup_message_id_table (api_main_t * am) +{ +#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id); + foreach_vl_msg_name_crc_dhcp; +#undef _ +} + +static clib_error_t * +dhcp_api_hookup (vlib_main_t * vm) +{ + api_main_t *am = &api_main; + +#define _(N,n) \ + vl_msg_api_set_handlers(VL_API_##N, #n, \ + vl_api_##n##_t_handler, \ + vl_noop_handler, \ + vl_api_##n##_t_endian, \ + vl_api_##n##_t_print, \ + sizeof(vl_api_##n##_t), 1); + foreach_vpe_api_msg; +#undef _ + + /* + * Set up the (msg_name, crc, message-id) table + */ + setup_message_id_table (am); + + return 0; +} + +VLIB_API_INIT_FUNCTION (dhcp_api_hookup); + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/vnet_all_api_h.h b/src/vnet/vnet_all_api_h.h index d76eee5a..4ba3a0e3 100644 --- a/src/vnet/vnet_all_api_h.h +++ b/src/vnet/vnet_all_api_h.h @@ -54,6 +54,7 @@ #include #include #include +#include /* * fd.io coding-style-patch-verification: ON diff --git a/src/vpp/api/api.c b/src/vpp/api/api.c index 9f6f260b..6317f557 100644 --- a/src/vpp/api/api.c +++ b/src/vpp/api/api.c @@ -52,12 +52,9 @@ #include #include #include -#include -#include #if WITH_LIBSSL > 0 #include #endif -#include #include #include #include @@ -124,10 +121,6 @@ _(PROXY_ARP_ADD_DEL, proxy_arp_add_del) \ _(PROXY_ARP_INTFC_ENABLE_DISABLE, proxy_arp_intfc_enable_disable) \ _(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats) \ _(RESET_FIB, reset_fib) \ -_(DHCP_PROXY_CONFIG,dhcp_proxy_config) \ -_(DHCP_PROXY_CONFIG_2,dhcp_proxy_config_2) \ -_(DHCP_PROXY_SET_VSS,dhcp_proxy_set_vss) \ -_(DHCP_CLIENT_CONFIG, dhcp_client_config) \ _(CREATE_LOOPBACK, create_loopback) \ _(CONTROL_PING, control_ping) \ _(CLI_REQUEST, cli_request) \ @@ -1064,157 +1057,6 @@ vl_api_reset_fib_t_handler (vl_api_reset_fib_t * mp) REPLY_MACRO (VL_API_RESET_FIB_REPLY); } - -static void -dhcpv4_proxy_config (vl_api_dhcp_proxy_config_t * mp) -{ - vl_api_dhcp_proxy_config_reply_t *rmp; - int rv; - - rv = dhcp_proxy_set_server ((ip4_address_t *) (&mp->dhcp_server), - (ip4_address_t *) (&mp->dhcp_src_address), - (u32) ntohl (mp->vrf_id), - (int) mp->insert_circuit_id, - (int) (mp->is_add == 0)); - - REPLY_MACRO (VL_API_DHCP_PROXY_CONFIG_REPLY); -} - - -static void -dhcpv6_proxy_config (vl_api_dhcp_proxy_config_t * mp) -{ - vl_api_dhcp_proxy_config_reply_t *rmp; - int rv = -1; - - rv = dhcpv6_proxy_set_server ((ip6_address_t *) (&mp->dhcp_server), - (ip6_address_t *) (&mp->dhcp_src_address), - (u32) ntohl (mp->vrf_id), - (int) mp->insert_circuit_id, - (int) (mp->is_add == 0)); - - REPLY_MACRO (VL_API_DHCP_PROXY_CONFIG_REPLY); -} - -static void -dhcpv4_proxy_config_2 (vl_api_dhcp_proxy_config_2_t * 
mp) -{ - vl_api_dhcp_proxy_config_reply_t *rmp; - int rv; - - rv = dhcp_proxy_set_server_2 ((ip4_address_t *) (&mp->dhcp_server), - (ip4_address_t *) (&mp->dhcp_src_address), - (u32) ntohl (mp->rx_vrf_id), - (u32) ntohl (mp->server_vrf_id), - (int) mp->insert_circuit_id, - (int) (mp->is_add == 0)); - - REPLY_MACRO (VL_API_DHCP_PROXY_CONFIG_2_REPLY); -} - - -static void -dhcpv6_proxy_config_2 (vl_api_dhcp_proxy_config_2_t * mp) -{ - vl_api_dhcp_proxy_config_reply_t *rmp; - int rv = -1; - - rv = dhcpv6_proxy_set_server_2 ((ip6_address_t *) (&mp->dhcp_server), - (ip6_address_t *) (&mp->dhcp_src_address), - (u32) ntohl (mp->rx_vrf_id), - (u32) ntohl (mp->server_vrf_id), - (int) mp->insert_circuit_id, - (int) (mp->is_add == 0)); - - REPLY_MACRO (VL_API_DHCP_PROXY_CONFIG_2_REPLY); -} - - -static void -vl_api_dhcp_proxy_set_vss_t_handler (vl_api_dhcp_proxy_set_vss_t * mp) -{ - vl_api_dhcp_proxy_set_vss_reply_t *rmp; - int rv; - if (!mp->is_ipv6) - rv = dhcp_proxy_set_option82_vss (ntohl (mp->tbl_id), - ntohl (mp->oui), - ntohl (mp->fib_id), - (int) mp->is_add == 0); - else - rv = dhcpv6_proxy_set_vss (ntohl (mp->tbl_id), - ntohl (mp->oui), - ntohl (mp->fib_id), (int) mp->is_add == 0); - - REPLY_MACRO (VL_API_DHCP_PROXY_SET_VSS_REPLY); -} - - -static void vl_api_dhcp_proxy_config_t_handler - (vl_api_dhcp_proxy_config_t * mp) -{ - if (mp->is_ipv6 == 0) - dhcpv4_proxy_config (mp); - else - dhcpv6_proxy_config (mp); -} - -static void vl_api_dhcp_proxy_config_2_t_handler - (vl_api_dhcp_proxy_config_2_t * mp) -{ - if (mp->is_ipv6 == 0) - dhcpv4_proxy_config_2 (mp); - else - dhcpv6_proxy_config_2 (mp); -} - -void -dhcp_compl_event_callback (u32 client_index, u32 pid, u8 * hostname, - u8 is_ipv6, u8 * host_address, u8 * router_address, - u8 * host_mac) -{ - unix_shared_memory_queue_t *q; - vl_api_dhcp_compl_event_t *mp; - - q = vl_api_client_index_to_input_queue (client_index); - if (!q) - return; - - mp = vl_msg_api_alloc (sizeof (*mp)); - mp->client_index = client_index; - mp->pid = pid; - mp->is_ipv6 = is_ipv6; - clib_memcpy (&mp->hostname, hostname, vec_len (hostname)); - mp->hostname[vec_len (hostname) + 1] = '\n'; - clib_memcpy (&mp->host_address[0], host_address, 16); - clib_memcpy (&mp->router_address[0], router_address, 16); - - if (NULL != host_mac) - clib_memcpy (&mp->host_mac[0], host_mac, 6); - - mp->_vl_msg_id = ntohs (VL_API_DHCP_COMPL_EVENT); - - vl_msg_api_send_shmem (q, (u8 *) & mp); -} - -static void vl_api_dhcp_client_config_t_handler - (vl_api_dhcp_client_config_t * mp) -{ - vlib_main_t *vm = vlib_get_main (); - vl_api_dhcp_client_config_reply_t *rmp; - int rv = 0; - - VALIDATE_SW_IF_INDEX (mp); - - rv = dhcp_client_config (vm, ntohl (mp->sw_if_index), - mp->hostname, mp->is_add, mp->client_index, - mp->want_dhcp_event ? dhcp_compl_event_callback : - NULL, mp->pid); - - BAD_SW_IF_INDEX_LABEL; - - REPLY_MACRO (VL_API_DHCP_CLIENT_CONFIG_REPLY); -} - static void vl_api_create_loopback_t_handler (vl_api_create_loopback_t * mp) { diff --git a/src/vpp/api/vpe.api b/src/vpp/api/vpe.api index a00033c5..3a35a54a 100644 --- a/src/vpp/api/vpe.api +++ b/src/vpp/api/vpe.api @@ -43,6 +43,7 @@ * DPDK APIs: see ... /src/vnet/devices/dpdk/{dpdk.api, dpdk_api.c} * CLASSIFY APIs: see ... /src/vnet/classify/{classify.api, classify_api.c} * FLOW APIs: see ... /src/vnet/flow/{flow.api, flow_api.c} + * DHCP APIs: see ... 
/src/vnet/dhcp/{dhcpk.api, dhcp_api.c} */ /** \brief Create a new subinterface with the given vlan id @@ -398,68 +399,6 @@ define reset_fib_reply i32 retval; }; -/** \brief DHCP Proxy config add / del request - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param vrf_id - vrf id - @param if_ipv6 - ipv6 if non-zero, else ipv4 - @param is_add - add the config if non-zero, else delete - @param insert_circuit_id - option82 suboption 1 fib number - @param dhcp_server[] - server address - @param dhcp_src_address[] - -*/ -define dhcp_proxy_config -{ - u32 client_index; - u32 context; - u32 vrf_id; - u8 is_ipv6; - u8 is_add; - u8 insert_circuit_id; - u8 dhcp_server[16]; - u8 dhcp_src_address[16]; -}; - -/** \brief DHCP Proxy config response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define dhcp_proxy_config_reply -{ - u32 context; - i32 retval; -}; - -/** \brief DHCP Proxy set / unset vss request - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param tbl_id - table id - @param oui - first part of vpn id - @param fib_id - second part of vpn id - @param is_ipv6 - ip6 if non-zero, else ip4 - @param is_add - set vss if non-zero, else delete -*/ -define dhcp_proxy_set_vss -{ - u32 client_index; - u32 context; - u32 tbl_id; - u32 oui; - u32 fib_id; - u8 is_ipv6; - u8 is_add; -}; - -/** \brief DHCP proxy set / unset vss response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define dhcp_proxy_set_vss_reply -{ - u32 context; - i32 retval; -}; - /** \brief Create loopback interface request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -834,40 +773,6 @@ define add_node_next_reply u32 next_index; }; -/** \brief DHCP Proxy config 2 add / del request - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param rx_vrf_id - receive vrf id - @param server_vrf_id - server vrf id - @param if_ipv6 - ipv6 if non-zero, else ipv4 - @param is_add - add the config if non-zero, else delete - @param insert_circuit_id - option82 suboption 1 fib number - @param dhcp_server[] - server address - @param dhcp_src_address[] - -*/ -define dhcp_proxy_config_2 -{ - u32 client_index; - u32 context; - u32 rx_vrf_id; - u32 server_vrf_id; - u8 is_ipv6; - u8 is_add; - u8 insert_circuit_id; - u8 dhcp_server[16]; - u8 dhcp_src_address[16]; -}; - -/** \brief DHCP Proxy config 2 add / del response - @param context - sender context, to match reply w/ request - @param retval - return code for request -*/ -define dhcp_proxy_config_2_reply -{ - u32 context; - i32 retval; -}; - /** \brief L2 interface ethernet flow point filtering enable/disable request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -1083,37 +988,6 @@ define ip6_nd_event u8 mac_ip; }; -/** \brief DHCP Client config add / del request - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param sw_if_index - index of the interface for DHCP client - @param hostname - hostname - @param is_add - add the config if non-zero, else delete - @param want_dhcp_event - DHCP event sent to the sender - via dhcp_compl_event API 
message if non-zero - @param pid - sender's pid -*/ -define dhcp_client_config -{ - u32 client_index; - u32 context; - u32 sw_if_index; - u8 hostname[64]; - u8 is_add; - u8 want_dhcp_event; - u32 pid; -}; - -/** \brief DHCP Client config response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define dhcp_client_config_reply -{ - u32 context; - i32 retval; -}; - /** \brief Set/unset input ACL interface @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -1146,25 +1020,6 @@ define input_acl_set_interface_reply i32 retval; }; -/** \brief Tell client about a DHCP completion event - @param client_index - opaque cookie to identify the sender - @param pid - client pid registered to receive notification - @param is_ipv6 - if non-zero the address is ipv6, else ipv4 - @param host_address - Host IP address - @param router_address - Router IP address - @param host_mac - Host MAC address -*/ -define dhcp_compl_event -{ - u32 client_index; - u32 pid; - u8 hostname[64]; - u8 is_ipv6; - u8 host_address[16]; - u8 router_address[16]; - u8 host_mac[6]; -}; - /** \brief cop: enable/disable junk filtration features on an interface @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request -- cgit 1.2.3-korg From adec5878d3ac8399a3202728b0962a350939e7d9 Mon Sep 17 00:00:00 2001 From: Pavel Kotucek Date: Wed, 25 Jan 2017 08:50:53 +0100 Subject: API refactoring : l2 (add) Change-Id: I693a73ba9a5e3b0cb5d2a6c5d363f671e19c1f24 Signed-off-by: Pavel Kotucek --- src/vnet/l2/l2.api | 65 ++++++++++++++++++++++++++++++++++++++++ src/vnet/l2/l2_api.c | 83 +++++++++++++++++++++++++++++++++++++++++++++++++++- src/vpp/api/api.c | 82 --------------------------------------------------- src/vpp/api/vpe.api | 65 ---------------------------------------- 4 files changed, 147 insertions(+), 148 deletions(-) (limited to 'src/vpp/api/api.c') diff --git a/src/vnet/l2/l2.api b/src/vnet/l2/l2.api index 5b24f259..061990c0 100644 --- a/src/vnet/l2/l2.api +++ b/src/vnet/l2/l2.api @@ -260,6 +260,71 @@ define bridge_flags_reply u32 resulting_feature_bitmap; }; +/** \brief L2 interface vlan tag rewrite configure request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - interface the operation is applied to + @param vtr_op - Choose from l2_vtr_op_t enum values + @param push_dot1q - first pushed flag dot1q id set, else dot1ad + @param tag1 - Needed for any push or translate vtr op + @param tag2 - Needed for any push 2 or translate x-2 vtr ops +*/ +define l2_interface_vlan_tag_rewrite +{ + u32 client_index; + u32 context; + u32 sw_if_index; + u32 vtr_op; + u32 push_dot1q; // ethertype of first pushed tag is dot1q/dot1ad + u32 tag1; // first pushed tag + u32 tag2; // second pushed tag +}; + +/** \brief L2 interface vlan tag rewrite response + @param context - sender context, to match reply w/ request + @param retval - return code for the request +*/ +define l2_interface_vlan_tag_rewrite_reply +{ + u32 context; + i32 retval; +}; + +/** \brief L2 interface pbb tag rewrite configure request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - interface the operation is applied to + @param vtr_op - Choose from l2_vtr_op_t enum values + @param inner_tag - needed for 
translate_qinq vtr op only + @param outer_tag - needed for translate_qinq vtr op only + @param b_dmac - B-tag remote mac address, needed for any push or translate_qinq vtr op + @param b_smac - B-tag local mac address, needed for any push or translate qinq vtr op + @param b_vlanid - B-tag vlanid, needed for any push or translate qinq vtr op + @param i_sid - I-tag service id, needed for any push or translate qinq vtr op +*/ +define l2_interface_pbb_tag_rewrite +{ + u32 client_index; + u32 context; + u32 sw_if_index; + u32 vtr_op; + u16 outer_tag; + u8 b_dmac[6]; + u8 b_smac[6]; + u16 b_vlanid; + u32 i_sid; +}; + +/** \brief L2 interface pbb tag rewrite response + @param context - sender context, to match reply w/ request + @param retval - return code for the request +*/ +define l2_interface_pbb_tag_rewrite_reply +{ + u32 context; + i32 retval; +}; + /* * Local Variables: * eval: (c-set-style "gnu") diff --git a/src/vnet/l2/l2_api.c b/src/vnet/l2/l2_api.c index ef33509c..a3cc49bf 100644 --- a/src/vnet/l2/l2_api.c +++ b/src/vnet/l2/l2_api.c @@ -24,6 +24,7 @@ #include #include #include +#include #include @@ -54,7 +55,9 @@ _(BRIDGE_DOMAIN_ADD_DEL, bridge_domain_add_del) \ _(BRIDGE_DOMAIN_DUMP, bridge_domain_dump) \ _(BRIDGE_DOMAIN_DETAILS, bridge_domain_details) \ _(BRIDGE_DOMAIN_SW_IF_DETAILS, bridge_domain_sw_if_details) \ -_(BRIDGE_FLAGS, bridge_flags) +_(BRIDGE_FLAGS, bridge_flags) \ +_(L2_INTERFACE_VLAN_TAG_REWRITE, l2_interface_vlan_tag_rewrite) \ +_(L2_INTERFACE_PBB_TAG_REWRITE, l2_interface_pbb_tag_rewrite) static void send_l2_xconnect_details (unix_shared_memory_queue_t * q, u32 context, @@ -454,6 +457,84 @@ out: /* *INDENT-ON* */ } +static void + vl_api_l2_interface_vlan_tag_rewrite_t_handler + (vl_api_l2_interface_vlan_tag_rewrite_t * mp) +{ + int rv = 0; + vl_api_l2_interface_vlan_tag_rewrite_reply_t *rmp; + vnet_main_t *vnm = vnet_get_main (); + vlib_main_t *vm = vlib_get_main (); + u32 vtr_op; + + VALIDATE_SW_IF_INDEX (mp); + + vtr_op = ntohl (mp->vtr_op); + + /* The L2 code is unsuspicious */ + switch (vtr_op) + { + case L2_VTR_DISABLED: + case L2_VTR_PUSH_1: + case L2_VTR_PUSH_2: + case L2_VTR_POP_1: + case L2_VTR_POP_2: + case L2_VTR_TRANSLATE_1_1: + case L2_VTR_TRANSLATE_1_2: + case L2_VTR_TRANSLATE_2_1: + case L2_VTR_TRANSLATE_2_2: + break; + + default: + rv = VNET_API_ERROR_INVALID_VALUE; + goto bad_sw_if_index; + } + + rv = l2vtr_configure (vm, vnm, ntohl (mp->sw_if_index), vtr_op, + ntohl (mp->push_dot1q), ntohl (mp->tag1), + ntohl (mp->tag2)); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_L2_INTERFACE_VLAN_TAG_REWRITE_REPLY); +} + +static void + vl_api_l2_interface_pbb_tag_rewrite_t_handler + (vl_api_l2_interface_pbb_tag_rewrite_t * mp) +{ + vl_api_l2_interface_pbb_tag_rewrite_reply_t *rmp; + vnet_main_t *vnm = vnet_get_main (); + vlib_main_t *vm = vlib_get_main (); + u32 vtr_op; + int rv = 0; + + VALIDATE_SW_IF_INDEX (mp); + + vtr_op = ntohl (mp->vtr_op); + + switch (vtr_op) + { + case L2_VTR_DISABLED: + case L2_VTR_PUSH_2: + case L2_VTR_POP_2: + case L2_VTR_TRANSLATE_2_1: + break; + + default: + rv = VNET_API_ERROR_INVALID_VALUE; + goto bad_sw_if_index; + } + + rv = l2pbb_configure (vm, vnm, ntohl (mp->sw_if_index), vtr_op, + mp->b_dmac, mp->b_smac, ntohs (mp->b_vlanid), + ntohl (mp->i_sid), ntohs (mp->outer_tag)); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_L2_INTERFACE_PBB_TAG_REWRITE_REPLY); +} + /* * l2_api_hookup * Add vpe's API message handlers to the table. 
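
The two handlers above convert every u32 field with ntohl () before calling l2vtr_configure () / l2pbb_configure (), so a client has to send the vlan tag rewrite request in network byte order. A minimal sketch of filling such a request follows; the struct is hand-mirrored from the l2.api definition above purely for illustration (a real client would use the vppapigen-generated vl_api_l2_interface_vlan_tag_rewrite_t and the shared-memory API queue), and vtr_op_push_1 is a placeholder for the actual L2_VTR_PUSH_1 value accepted by the handler's switch statement, not something taken from this patch.

/* Illustration only: hand-written mirror of the l2_interface_vlan_tag_rewrite
 * message defined in l2.api above. */
#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

typedef struct
{
  uint32_t client_index;
  uint32_t context;
  uint32_t sw_if_index;
  uint32_t vtr_op;		/* one of the l2_vtr_op_t values checked by the handler */
  uint32_t push_dot1q;		/* 1 => pushed tag is dot1q, 0 => dot1ad */
  uint32_t tag1;
  uint32_t tag2;
} vlan_tag_rewrite_req_t;

/* Build a "push one dot1q tag" request; vtr_op_push_1 stands in for the
 * real L2_VTR_PUSH_1 enum value (an assumption). */
static void
fill_push1_request (vlan_tag_rewrite_req_t * mp, uint32_t sw_if_index,
		    uint32_t vlan_id, uint32_t vtr_op_push_1)
{
  memset (mp, 0, sizeof (*mp));
  /* the handler applies ntohl() to every u32, so convert on the way in */
  mp->sw_if_index = htonl (sw_if_index);
  mp->vtr_op = htonl (vtr_op_push_1);
  mp->push_dot1q = htonl (1);
  mp->tag1 = htonl (vlan_id);
  mp->tag2 = htonl (0);
}
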
diff --git a/src/vpp/api/api.c b/src/vpp/api/api.c index 6317f557..e6227a68 100644 --- a/src/vpp/api/api.c +++ b/src/vpp/api/api.c @@ -62,7 +62,6 @@ #include #include #include -#include #include #include #include @@ -134,7 +133,6 @@ _(ADD_NODE_NEXT, add_node_next) \ _(VXLAN_ADD_DEL_TUNNEL, vxlan_add_del_tunnel) \ _(VXLAN_TUNNEL_DUMP, vxlan_tunnel_dump) \ _(L2_INTERFACE_EFP_FILTER, l2_interface_efp_filter) \ -_(L2_INTERFACE_VLAN_TAG_REWRITE, l2_interface_vlan_tag_rewrite) \ _(SHOW_VERSION, show_version) \ _(VXLAN_GPE_ADD_DEL_TUNNEL, vxlan_gpe_add_del_tunnel) \ _(VXLAN_GPE_TUNNEL_DUMP, vxlan_gpe_tunnel_dump) \ @@ -160,7 +158,6 @@ _(IP_SOURCE_AND_PORT_RANGE_CHECK_ADD_DEL, \ _(IP_SOURCE_AND_PORT_RANGE_CHECK_INTERFACE_ADD_DEL, \ ip_source_and_port_range_check_interface_add_del) \ _(DELETE_SUBIF, delete_subif) \ -_(L2_INTERFACE_PBB_TAG_REWRITE, l2_interface_pbb_tag_rewrite) \ _(PUNT, punt) \ _(FEATURE_ENABLE_DISABLE, feature_enable_disable) @@ -1316,48 +1313,6 @@ vl_api_l2_interface_efp_filter_t_handler (vl_api_l2_interface_efp_filter_t * REPLY_MACRO (VL_API_L2_INTERFACE_EFP_FILTER_REPLY); } -static void - vl_api_l2_interface_vlan_tag_rewrite_t_handler - (vl_api_l2_interface_vlan_tag_rewrite_t * mp) -{ - int rv = 0; - vl_api_l2_interface_vlan_tag_rewrite_reply_t *rmp; - vnet_main_t *vnm = vnet_get_main (); - vlib_main_t *vm = vlib_get_main (); - u32 vtr_op; - - VALIDATE_SW_IF_INDEX (mp); - - vtr_op = ntohl (mp->vtr_op); - - /* The L2 code is unsuspicious */ - switch (vtr_op) - { - case L2_VTR_DISABLED: - case L2_VTR_PUSH_1: - case L2_VTR_PUSH_2: - case L2_VTR_POP_1: - case L2_VTR_POP_2: - case L2_VTR_TRANSLATE_1_1: - case L2_VTR_TRANSLATE_1_2: - case L2_VTR_TRANSLATE_2_1: - case L2_VTR_TRANSLATE_2_2: - break; - - default: - rv = VNET_API_ERROR_INVALID_VALUE; - goto bad_sw_if_index; - } - - rv = l2vtr_configure (vm, vnm, ntohl (mp->sw_if_index), vtr_op, - ntohl (mp->push_dot1q), ntohl (mp->tag1), - ntohl (mp->tag2)); - - BAD_SW_IF_INDEX_LABEL; - - REPLY_MACRO (VL_API_L2_INTERFACE_VLAN_TAG_REWRITE_REPLY); -} - static void vl_api_show_version_t_handler (vl_api_show_version_t * mp) { @@ -2497,43 +2452,6 @@ vl_api_delete_subif_t_handler (vl_api_delete_subif_t * mp) REPLY_MACRO (VL_API_DELETE_SUBIF_REPLY); } -static void - vl_api_l2_interface_pbb_tag_rewrite_t_handler - (vl_api_l2_interface_pbb_tag_rewrite_t * mp) -{ - vl_api_l2_interface_pbb_tag_rewrite_reply_t *rmp; - vnet_main_t *vnm = vnet_get_main (); - vlib_main_t *vm = vlib_get_main (); - u32 vtr_op; - int rv = 0; - - VALIDATE_SW_IF_INDEX (mp); - - vtr_op = ntohl (mp->vtr_op); - - switch (vtr_op) - { - case L2_VTR_DISABLED: - case L2_VTR_PUSH_2: - case L2_VTR_POP_2: - case L2_VTR_TRANSLATE_2_1: - break; - - default: - rv = VNET_API_ERROR_INVALID_VALUE; - goto bad_sw_if_index; - } - - rv = l2pbb_configure (vm, vnm, ntohl (mp->sw_if_index), vtr_op, - mp->b_dmac, mp->b_smac, ntohs (mp->b_vlanid), - ntohl (mp->i_sid), ntohs (mp->outer_tag)); - - BAD_SW_IF_INDEX_LABEL; - - REPLY_MACRO (VL_API_L2_INTERFACE_PBB_TAG_REWRITE_REPLY); - -} - static void vl_api_punt_t_handler (vl_api_punt_t * mp) { diff --git a/src/vpp/api/vpe.api b/src/vpp/api/vpe.api index 3a35a54a..981ae25e 100644 --- a/src/vpp/api/vpe.api +++ b/src/vpp/api/vpe.api @@ -797,36 +797,6 @@ define l2_interface_efp_filter_reply i32 retval; }; -/** \brief L2 interface vlan tag rewrite configure request - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param sw_if_index - interface the operation is applied to - @param 
vtr_op - Choose from l2_vtr_op_t enum values - @param push_dot1q - first pushed flag dot1q id set, else dot1ad - @param tag1 - Needed for any push or translate vtr op - @param tag2 - Needed for any push 2 or translate x-2 vtr ops -*/ -define l2_interface_vlan_tag_rewrite -{ - u32 client_index; - u32 context; - u32 sw_if_index; - u32 vtr_op; - u32 push_dot1q; // ethertype of first pushed tag is dot1q/dot1ad - u32 tag1; // first pushed tag - u32 tag2; // second pushed tag -}; - -/** \brief L2 interface vlan tag rewrite response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define l2_interface_vlan_tag_rewrite_reply -{ - u32 context; - i32 retval; -}; - define create_subif { u32 client_index; @@ -1475,41 +1445,6 @@ define delete_subif_reply { i32 retval; }; -/** \brief L2 interface pbb tag rewrite configure request - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param sw_if_index - interface the operation is applied to - @param vtr_op - Choose from l2_vtr_op_t enum values - @param inner_tag - needed for translate_qinq vtr op only - @param outer_tag - needed for translate_qinq vtr op only - @param b_dmac - B-tag remote mac address, needed for any push or translate_qinq vtr op - @param b_smac - B-tag local mac address, needed for any push or translate qinq vtr op - @param b_vlanid - B-tag vlanid, needed for any push or translate qinq vtr op - @param i_sid - I-tag service id, needed for any push or translate qinq vtr op -*/ -define l2_interface_pbb_tag_rewrite -{ - u32 client_index; - u32 context; - u32 sw_if_index; - u32 vtr_op; - u16 outer_tag; - u8 b_dmac[6]; - u8 b_smac[6]; - u16 b_vlanid; - u32 i_sid; -}; - -/** \brief L2 interface pbb tag rewrite response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define l2_interface_pbb_tag_rewrite_reply -{ - u32 context; - i32 retval; -}; - /** \brief Punt traffic to the host @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request -- cgit 1.2.3-korg From 32e1c010b0c34fd0984f7fc45fae648a182025c5 Mon Sep 17 00:00:00 2001 From: Neale Ranns Date: Tue, 22 Nov 2016 17:07:28 +0000 Subject: IP Multicast FIB (mfib) - IPv[46] mfib tables with support for (*,G/m), (*,G) and (S,G) exact and longest prefix match - Replication represented via a new replicate DPO. - RPF configuration and data-plane checking - data-plane signals sent to listening control planes. The functions of multicast forwarding entries differ from their unicast conterparts, so we introduce a new mfib_table_t and mfib_entry_t objects. However, we re-use the fib_path_list to resolve and build the entry's output list. the fib_path_list provides the service to construct a replicate DPO for multicast. 'make tests' is added to with two new suites; TEST=mfib, this is invocation of the CLI command 'test mfib' which deals with many path add/remove, flag set/unset scenarios, TEST=ip-mcast, data-plane forwarding tests. Updated applications to use the new MIFB functions; - IPv6 NS/RA. - DHCPv6 unit tests for these are undated accordingly. 
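
To make the new mroute API concrete: in the VAT helper api_ip_mroute_add_del added later in this patch, an IPv4 (S,G) entry is expressed by sending both a source and a group address with grp_address_length 64 (32 for a (*,G) entry; 256/128 for the IPv6 equivalents), with multi-byte fields in network byte order. The sketch below is illustrative only, written under those assumptions: the struct is a hand-mirrored subset rather than the generated vl_api_ip_mroute_add_del_t from ip.api, and itf_flags is a placeholder for the real MFIB_ITF_FLAG_* bitmask carrying the Accept/Forward semantics shown in the example script.

/* Illustration only: hand-mirrored subset of the ip_mroute_add_del request
 * used by api_ip_mroute_add_del below. */
#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

typedef struct
{
  uint32_t table_id;
  uint32_t next_hop_sw_if_index;
  uint32_t entry_flags;		/* MFIB entry flag bitmask */
  uint32_t itf_flags;		/* MFIB interface flag bitmask */
  uint16_t grp_address_length;	/* 64 = IPv4 (S,G), 32 = IPv4 (*,G) */
  uint8_t is_add;
  uint8_t is_ipv6;
  uint8_t is_local;
  uint8_t grp_address[16];
  uint8_t src_address[16];
} mroute_req_t;

/* Add an IPv4 (S,G) entry on one interface. */
static void
fill_ip4_sg_add (mroute_req_t * mp, uint32_t sw_if_index,
		 const uint8_t src[4], const uint8_t grp[4],
		 uint32_t itf_flags)
{
  memset (mp, 0, sizeof (*mp));
  mp->is_add = 1;
  mp->next_hop_sw_if_index = htonl (sw_if_index);
  mp->grp_address_length = htons (64);	/* source + group => (S,G) */
  mp->itf_flags = htonl (itf_flags);
  memcpy (mp->grp_address, grp, 4);
  memcpy (mp->src_address, src, 4);
}
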
Change-Id: I49ec37b01f1b170335a5697541c8fd30e6d3a961 Signed-off-by: Neale Ranns --- src/scripts/vnet/mcast/ip4 | 22 + src/vat/api_format.c | 173 +++++ src/vnet.am | 35 +- src/vnet/adj/adj.c | 11 +- src/vnet/adj/adj.h | 1 - src/vnet/adj/adj_internal.h | 2 + src/vnet/adj/adj_mcast.c | 346 ++++++++++ src/vnet/adj/adj_mcast.h | 78 +++ src/vnet/adj/adj_nbr.c | 2 +- src/vnet/adj/adj_rewrite.c | 53 -- src/vnet/adj/adj_rewrite.h | 49 -- src/vnet/dhcpv6/proxy_node.c | 46 +- src/vnet/dpo/dpo.c | 2 + src/vnet/dpo/dpo.h | 8 +- src/vnet/dpo/load_balance.c | 13 +- src/vnet/dpo/load_balance.h | 8 + src/vnet/dpo/replicate_dpo.c | 759 ++++++++++++++++++++++ src/vnet/dpo/replicate_dpo.h | 143 +++++ src/vnet/ethernet/arp.c | 84 ++- src/vnet/ethernet/ethernet.h | 2 + src/vnet/ethernet/interface.c | 20 + src/vnet/fib/fib_attached_export.c | 4 +- src/vnet/fib/fib_entry.h | 2 +- src/vnet/fib/fib_entry_delegate.c | 3 + src/vnet/fib/fib_entry_src.c | 4 + src/vnet/fib/fib_node.h | 2 + src/vnet/fib/fib_path.c | 91 ++- src/vnet/fib/fib_path_list.c | 9 +- src/vnet/fib/fib_path_list.h | 6 + src/vnet/fib/fib_table.c | 69 +- src/vnet/fib/fib_table.h | 16 + src/vnet/fib/fib_test.c | 207 +++--- src/vnet/fib/fib_types.c | 4 + src/vnet/fib/fib_types.h | 14 + src/vnet/fib/fib_urpf_list.c | 20 +- src/vnet/fib/ip4_fib.c | 50 +- src/vnet/fib/ip4_fib.h | 9 + src/vnet/fib/ip6_fib.c | 117 ++-- src/vnet/fib/ip6_fib.h | 11 +- src/vnet/fib/mpls_fib.c | 17 +- src/vnet/fib/mpls_fib.h | 9 + src/vnet/ip/ip.api | 53 ++ src/vnet/ip/ip4.h | 24 + src/vnet/ip/ip4_forward.c | 498 +++++---------- src/vnet/ip/ip4_input.c | 4 +- src/vnet/ip/ip6.h | 27 + src/vnet/ip/ip6_forward.c | 83 ++- src/vnet/ip/ip6_input.c | 43 +- src/vnet/ip/ip6_neighbor.c | 134 ++-- src/vnet/ip/ip_api.c | 210 +++++++ src/vnet/ip/lookup.c | 171 +++++ src/vnet/ip/lookup.h | 82 +-- src/vnet/mcast/mcast.c | 565 ----------------- src/vnet/mcast/mcast.h | 50 -- src/vnet/mcast/mcast_test.c | 149 ----- src/vnet/mfib/ip4_mfib.c | 465 ++++++++++++++ src/vnet/mfib/ip4_mfib.h | 95 +++ src/vnet/mfib/ip6_mfib.c | 663 +++++++++++++++++++ src/vnet/mfib/ip6_mfib.h | 109 ++++ src/vnet/mfib/mfib_entry.c | 1096 ++++++++++++++++++++++++++++++++ src/vnet/mfib/mfib_entry.h | 172 +++++ src/vnet/mfib/mfib_forward.c | 512 +++++++++++++++ src/vnet/mfib/mfib_itf.c | 119 ++++ src/vnet/mfib/mfib_itf.h | 63 ++ src/vnet/mfib/mfib_signal.c | 201 ++++++ src/vnet/mfib/mfib_signal.h | 59 ++ src/vnet/mfib/mfib_table.c | 489 ++++++++++++++ src/vnet/mfib/mfib_table.h | 331 ++++++++++ src/vnet/mfib/mfib_test.c | 1225 ++++++++++++++++++++++++++++++++++++ src/vnet/mfib/mfib_types.c | 213 +++++++ src/vnet/mfib/mfib_types.h | 185 ++++++ src/vnet/misc.c | 3 + src/vnet/rewrite.h | 31 + src/vnet/sr/sr.c | 4 +- src/vnet/util/radix.c | 1104 ++++++++++++++++++++++++++++++++ src/vnet/util/radix.h | 147 +++++ src/vnet/vxlan/vxlan.c | 112 +++- src/vpp/api/api.c | 14 +- src/vppinfra.am | 2 +- src/vppinfra/dlist.h | 2 +- src/vppinfra/format.c | 8 +- src/vppinfra/format.h | 4 +- src/vppinfra/unformat.c | 16 +- test/test_dhcp.py | 16 - test/test_ip6.py | 131 ++-- test/test_ip_mcast.py | 612 ++++++++++++++++++ test/test_mfib.py | 23 + test/vpp_interface.py | 3 +- test/vpp_ip_route.py | 101 ++- test/vpp_papi_provider.py | 34 +- 90 files changed, 11211 insertions(+), 1767 deletions(-) create mode 100644 src/scripts/vnet/mcast/ip4 create mode 100644 src/vnet/adj/adj_mcast.c create mode 100644 src/vnet/adj/adj_mcast.h delete mode 100644 src/vnet/adj/adj_rewrite.c delete mode 100644 src/vnet/adj/adj_rewrite.h create mode 100644 
src/vnet/dpo/replicate_dpo.c create mode 100644 src/vnet/dpo/replicate_dpo.h delete mode 100644 src/vnet/mcast/mcast.c delete mode 100644 src/vnet/mcast/mcast.h delete mode 100644 src/vnet/mcast/mcast_test.c create mode 100644 src/vnet/mfib/ip4_mfib.c create mode 100644 src/vnet/mfib/ip4_mfib.h create mode 100644 src/vnet/mfib/ip6_mfib.c create mode 100644 src/vnet/mfib/ip6_mfib.h create mode 100644 src/vnet/mfib/mfib_entry.c create mode 100644 src/vnet/mfib/mfib_entry.h create mode 100644 src/vnet/mfib/mfib_forward.c create mode 100644 src/vnet/mfib/mfib_itf.c create mode 100644 src/vnet/mfib/mfib_itf.h create mode 100644 src/vnet/mfib/mfib_signal.c create mode 100644 src/vnet/mfib/mfib_signal.h create mode 100644 src/vnet/mfib/mfib_table.c create mode 100644 src/vnet/mfib/mfib_table.h create mode 100644 src/vnet/mfib/mfib_test.c create mode 100644 src/vnet/mfib/mfib_types.c create mode 100644 src/vnet/mfib/mfib_types.h create mode 100644 src/vnet/util/radix.c create mode 100644 src/vnet/util/radix.h create mode 100644 test/test_ip_mcast.py create mode 100644 test/test_mfib.py (limited to 'src/vpp/api/api.c') diff --git a/src/scripts/vnet/mcast/ip4 b/src/scripts/vnet/mcast/ip4 new file mode 100644 index 00000000..69f1ee00 --- /dev/null +++ b/src/scripts/vnet/mcast/ip4 @@ -0,0 +1,22 @@ +packet-generator new { + name x + limit 1 + node ip4-input + size 64-64 + no-recycle + data { + ICMP: 1.0.0.2 -> 232.1.1.1 + ICMP echo_request + incrementing 100 + } +} + +trace add pg-input 100 +loop create +loop create +set int state loop0 up +set int state loop1 up + +ip mroute add 232.1.1.1 via pg0 Accept +ip mroute add 232.1.1.1 via loop0 Forward +ip mroute add 232.1.1.1 via loop1 Forward diff --git a/src/vat/api_format.c b/src/vat/api_format.c index b83313de..4cfe4a58 100644 --- a/src/vat/api_format.c +++ b/src/vat/api_format.c @@ -48,6 +48,7 @@ #include #include #include +#include #include "vat/json_format.h" @@ -505,6 +506,53 @@ unformat_flow_classify_table_type (unformat_input_t * input, va_list * va) return 1; } +static const char *mfib_flag_names[] = MFIB_ENTRY_NAMES_SHORT; +static const char *mfib_flag_long_names[] = MFIB_ENTRY_NAMES_LONG; +static const char *mfib_itf_flag_long_names[] = MFIB_ITF_NAMES_LONG; +static const char *mfib_itf_flag_names[] = MFIB_ITF_NAMES_SHORT; + +uword +unformat_mfib_itf_flags (unformat_input_t * input, va_list * args) +{ + mfib_itf_flags_t old, *iflags = va_arg (*args, mfib_itf_flags_t *); + mfib_itf_attribute_t attr; + + old = *iflags; + FOR_EACH_MFIB_ITF_ATTRIBUTE (attr) + { + if (unformat (input, mfib_itf_flag_long_names[attr])) + *iflags |= (1 << attr); + } + FOR_EACH_MFIB_ITF_ATTRIBUTE (attr) + { + if (unformat (input, mfib_itf_flag_names[attr])) + *iflags |= (1 << attr); + } + + return (old == *iflags ? 0 : 1); +} + +uword +unformat_mfib_entry_flags (unformat_input_t * input, va_list * args) +{ + mfib_entry_flags_t old, *eflags = va_arg (*args, mfib_entry_flags_t *); + mfib_entry_attribute_t attr; + + old = *eflags; + FOR_EACH_MFIB_ATTRIBUTE (attr) + { + if (unformat (input, mfib_flag_long_names[attr])) + *eflags |= (1 << attr); + } + FOR_EACH_MFIB_ATTRIBUTE (attr) + { + if (unformat (input, mfib_flag_names[attr])) + *eflags |= (1 << attr); + } + + return (old == *eflags ? 
0 : 1); +} + #if (VPP_API_TEST_BUILTIN==0) u8 * format_ip4_address (u8 * s, va_list * args) @@ -3592,6 +3640,7 @@ _(bridge_domain_add_del_reply) \ _(sw_interface_set_l2_xconnect_reply) \ _(l2fib_add_del_reply) \ _(ip_add_del_route_reply) \ +_(ip_mroute_add_del_reply) \ _(mpls_route_add_del_reply) \ _(mpls_ip_bind_unbind_reply) \ _(proxy_arp_add_del_reply) \ @@ -3792,6 +3841,7 @@ _(TAP_MODIFY_REPLY, tap_modify_reply) \ _(TAP_DELETE_REPLY, tap_delete_reply) \ _(SW_INTERFACE_TAP_DETAILS, sw_interface_tap_details) \ _(IP_ADD_DEL_ROUTE_REPLY, ip_add_del_route_reply) \ +_(IP_MROUTE_ADD_DEL_REPLY, ip_mroute_add_del_reply) \ _(MPLS_ROUTE_ADD_DEL_REPLY, mpls_route_add_del_reply) \ _(MPLS_IP_BIND_UNBIND_REPLY, mpls_ip_bind_unbind_reply) \ _(PROXY_ARP_ADD_DEL_REPLY, proxy_arp_add_del_reply) \ @@ -6383,6 +6433,126 @@ api_ip_add_del_route (vat_main_t * vam) return (vam->retval); } +static int +api_ip_mroute_add_del (vat_main_t * vam) +{ + unformat_input_t *i = vam->input; + vl_api_ip_mroute_add_del_t *mp; + f64 timeout; + u32 sw_if_index = ~0, vrf_id = 0; + u8 is_ipv6 = 0; + u8 is_local = 0; + u8 create_vrf_if_needed = 0; + u8 is_add = 1; + u8 address_set = 0; + u32 grp_address_length = 0; + ip4_address_t v4_grp_address, v4_src_address; + ip6_address_t v6_grp_address, v6_src_address; + mfib_itf_flags_t iflags = 0; + mfib_entry_flags_t eflags = 0; + + /* Parse args required to build the message */ + while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT) + { + if (unformat (i, "sw_if_index %d", &sw_if_index)) + ; + else if (unformat (i, "%U %U", + unformat_ip4_address, &v4_src_address, + unformat_ip4_address, &v4_grp_address)) + { + grp_address_length = 64; + address_set = 1; + is_ipv6 = 0; + } + else if (unformat (i, "%U %U", + unformat_ip6_address, &v6_src_address, + unformat_ip6_address, &v6_grp_address)) + { + grp_address_length = 256; + address_set = 1; + is_ipv6 = 1; + } + else if (unformat (i, "%U", unformat_ip4_address, &v4_grp_address)) + { + memset (&v4_src_address, 0, sizeof (v4_src_address)); + grp_address_length = 32; + address_set = 1; + is_ipv6 = 0; + } + else if (unformat (i, "%U", unformat_ip6_address, &v6_grp_address)) + { + memset (&v6_src_address, 0, sizeof (v6_src_address)); + grp_address_length = 128; + address_set = 1; + is_ipv6 = 1; + } + else if (unformat (i, "/%d", &grp_address_length)) + ; + else if (unformat (i, "local")) + { + is_local = 1; + } + else if (unformat (i, "del")) + is_add = 0; + else if (unformat (i, "add")) + is_add = 1; + else if (unformat (i, "vrf %d", &vrf_id)) + ; + else if (unformat (i, "create-vrf")) + create_vrf_if_needed = 1; + else if (unformat (i, "%U", unformat_mfib_itf_flags, &iflags)) + ; + else if (unformat (i, "%U", unformat_mfib_entry_flags, &eflags)) + ; + else + { + clib_warning ("parse error '%U'", format_unformat_error, i); + return -99; + } + } + + if (address_set == 0) + { + errmsg ("missing addresses\n"); + return -99; + } + + /* Construct the API message */ + M (IP_MROUTE_ADD_DEL, ip_mroute_add_del); + + mp->next_hop_sw_if_index = ntohl (sw_if_index); + mp->table_id = ntohl (vrf_id); + mp->create_vrf_if_needed = create_vrf_if_needed; + + mp->is_add = is_add; + mp->is_ipv6 = is_ipv6; + mp->is_local = is_local; + mp->itf_flags = ntohl (iflags); + mp->entry_flags = ntohl (eflags); + mp->grp_address_length = grp_address_length; + mp->grp_address_length = ntohs (mp->grp_address_length); + + if (is_ipv6) + { + clib_memcpy (mp->grp_address, &v6_grp_address, sizeof (v6_grp_address)); + clib_memcpy (mp->src_address, &v6_src_address, sizeof 
(v6_src_address)); + } + else + { + clib_memcpy (mp->grp_address, &v4_grp_address, sizeof (v4_grp_address)); + clib_memcpy (mp->src_address, &v4_src_address, sizeof (v4_src_address)); + + } + + /* send it... */ + S; + /* Wait for a reply... */ + W; + + /* Return the good/bad news */ + return (vam->retval); +} + static int api_mpls_route_add_del (vat_main_t * vam) { @@ -17512,6 +17682,9 @@ _(ip_add_del_route, \ "[ | sw_if_index ] [resolve-attempts ]\n" \ "[weight ] [drop] [local] [classify ] [del]\n" \ "[multipath] [count ]") \ +_(ip_mroute_add_del, \ + " / [table-id ]\n" \ + "[ | sw_if_index ] [local] [del]") \ _(mpls_route_add_del, \ "