From a23197980e40d4d9414bcfaf59005a1dc2a89251 Mon Sep 17 00:00:00 2001 From: sreejith Date: Wed, 29 Mar 2017 01:15:02 -0400 Subject: Added vpp intial source code from master branch 17.01.1 Change-Id: I81bdace6f330825a1746a853766779dfb24765fd Signed-off-by: sreejith --- vpp/vpp/vpp-api/api.c | 7485 ++++++++++++++++++++++++++++++++ vpp/vpp/vpp-api/custom_dump.c | 3139 ++++++++++++++ vpp/vpp/vpp-api/gmon.c | 319 ++ vpp/vpp/vpp-api/summary_stats_client.c | 302 ++ vpp/vpp/vpp-api/test_client.c | 1531 +++++++ vpp/vpp/vpp-api/test_ha.c | 249 ++ vpp/vpp/vpp-api/vpe.api | 4848 +++++++++++++++++++++ vpp/vpp/vpp-api/vpe_all_api_h.h | 37 + vpp/vpp/vpp-api/vpe_msg_enum.h | 37 + vpp/vpp/vpp-api/vpp_get_metrics.c | 253 ++ 10 files changed, 18200 insertions(+) create mode 100644 vpp/vpp/vpp-api/api.c create mode 100644 vpp/vpp/vpp-api/custom_dump.c create mode 100644 vpp/vpp/vpp-api/gmon.c create mode 100644 vpp/vpp/vpp-api/summary_stats_client.c create mode 100644 vpp/vpp/vpp-api/test_client.c create mode 100644 vpp/vpp/vpp-api/test_ha.c create mode 100644 vpp/vpp/vpp-api/vpe.api create mode 100644 vpp/vpp/vpp-api/vpe_all_api_h.h create mode 100644 vpp/vpp/vpp-api/vpe_msg_enum.h create mode 100644 vpp/vpp/vpp-api/vpp_get_metrics.c (limited to 'vpp/vpp/vpp-api') diff --git a/vpp/vpp/vpp-api/api.c b/vpp/vpp/vpp-api/api.c new file mode 100644 index 00000000..70798f73 --- /dev/null +++ b/vpp/vpp/vpp-api/api.c @@ -0,0 +1,7485 @@ +/* + *------------------------------------------------------------------ + * api.c - message handler registration + * + * Copyright (c) 2010-2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + *------------------------------------------------------------------ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if IPV6SR > 0 +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef BIHASH_TYPE +#undef __included_bihash_template_h__ +#include + +#if DPDK > 0 +#include +#endif + +#if IPSEC > 0 +#include +#include +#endif /* IPSEC */ +#include + +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define vl_typedefs /* define message structures */ +#include +#undef vl_typedefs + +#define vl_endianfun /* define message structures */ +#include +#undef vl_endianfun + +/* instantiate all the print functions we know about */ +#define vl_print(handle, ...) 
vlib_cli_output (handle, __VA_ARGS__) +#define vl_printfun +#include +#undef vl_printfun + +#include + +#define foreach_vpe_api_msg \ +_(WANT_OAM_EVENTS, want_oam_events) \ +_(OAM_ADD_DEL, oam_add_del) \ +_(MPLS_ROUTE_ADD_DEL, mpls_route_add_del) \ +_(MPLS_IP_BIND_UNBIND, mpls_ip_bind_unbind) \ +_(IS_ADDRESS_REACHABLE, is_address_reachable) \ +_(SW_INTERFACE_SET_MPLS_ENABLE, sw_interface_set_mpls_enable) \ +_(SW_INTERFACE_SET_VPATH, sw_interface_set_vpath) \ +_(SW_INTERFACE_SET_VXLAN_BYPASS, sw_interface_set_vxlan_bypass) \ +_(SW_INTERFACE_SET_L2_XCONNECT, sw_interface_set_l2_xconnect) \ +_(SW_INTERFACE_SET_L2_BRIDGE, sw_interface_set_l2_bridge) \ +_(SW_INTERFACE_SET_DPDK_HQOS_PIPE, sw_interface_set_dpdk_hqos_pipe) \ +_(SW_INTERFACE_SET_DPDK_HQOS_SUBPORT, sw_interface_set_dpdk_hqos_subport) \ +_(SW_INTERFACE_SET_DPDK_HQOS_TCTBL, sw_interface_set_dpdk_hqos_tctbl) \ +_(BRIDGE_DOMAIN_ADD_DEL, bridge_domain_add_del) \ +_(BRIDGE_DOMAIN_DUMP, bridge_domain_dump) \ +_(BRIDGE_DOMAIN_DETAILS, bridge_domain_details) \ +_(BRIDGE_DOMAIN_SW_IF_DETAILS, bridge_domain_sw_if_details) \ +_(L2FIB_ADD_DEL, l2fib_add_del) \ +_(L2_FLAGS, l2_flags) \ +_(BRIDGE_FLAGS, bridge_flags) \ +_(CREATE_VLAN_SUBIF, create_vlan_subif) \ +_(CREATE_SUBIF, create_subif) \ +_(MPLS_TUNNEL_ADD_DEL, mpls_tunnel_add_del) \ +_(PROXY_ARP_ADD_DEL, proxy_arp_add_del) \ +_(PROXY_ARP_INTFC_ENABLE_DISABLE, proxy_arp_intfc_enable_disable) \ +_(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats) \ +_(RESET_FIB, reset_fib) \ +_(DHCP_PROXY_CONFIG,dhcp_proxy_config) \ +_(DHCP_PROXY_CONFIG_2,dhcp_proxy_config_2) \ +_(DHCP_PROXY_SET_VSS,dhcp_proxy_set_vss) \ +_(DHCP_CLIENT_CONFIG, dhcp_client_config) \ +_(CREATE_LOOPBACK, create_loopback) \ +_(CONTROL_PING, control_ping) \ +_(CLI_REQUEST, cli_request) \ +_(CLI_INBAND, cli_inband) \ +_(SET_ARP_NEIGHBOR_LIMIT, set_arp_neighbor_limit) \ +_(L2_PATCH_ADD_DEL, l2_patch_add_del) \ +_(CLASSIFY_ADD_DEL_TABLE, classify_add_del_table) \ +_(CLASSIFY_ADD_DEL_SESSION, 
classify_add_del_session) \ +_(CLASSIFY_SET_INTERFACE_IP_TABLE, classify_set_interface_ip_table) \ +_(CLASSIFY_SET_INTERFACE_L2_TABLES, classify_set_interface_l2_tables) \ +_(GET_NODE_INDEX, get_node_index) \ +_(ADD_NODE_NEXT, add_node_next) \ +_(L2TPV3_CREATE_TUNNEL, l2tpv3_create_tunnel) \ +_(L2TPV3_SET_TUNNEL_COOKIES, l2tpv3_set_tunnel_cookies) \ +_(L2TPV3_INTERFACE_ENABLE_DISABLE, l2tpv3_interface_enable_disable) \ +_(L2TPV3_SET_LOOKUP_KEY, l2tpv3_set_lookup_key) \ +_(SW_IF_L2TPV3_TUNNEL_DUMP, sw_if_l2tpv3_tunnel_dump) \ +_(VXLAN_ADD_DEL_TUNNEL, vxlan_add_del_tunnel) \ +_(VXLAN_TUNNEL_DUMP, vxlan_tunnel_dump) \ +_(GRE_ADD_DEL_TUNNEL, gre_add_del_tunnel) \ +_(GRE_TUNNEL_DUMP, gre_tunnel_dump) \ +_(L2_FIB_CLEAR_TABLE, l2_fib_clear_table) \ +_(L2_INTERFACE_EFP_FILTER, l2_interface_efp_filter) \ +_(L2_INTERFACE_VLAN_TAG_REWRITE, l2_interface_vlan_tag_rewrite) \ +_(CREATE_VHOST_USER_IF, create_vhost_user_if) \ +_(MODIFY_VHOST_USER_IF, modify_vhost_user_if) \ +_(DELETE_VHOST_USER_IF, delete_vhost_user_if) \ +_(SW_INTERFACE_VHOST_USER_DUMP, sw_interface_vhost_user_dump) \ +_(SW_INTERFACE_VHOST_USER_DETAILS, sw_interface_vhost_user_details) \ +_(SHOW_VERSION, show_version) \ +_(L2_FIB_TABLE_DUMP, l2_fib_table_dump) \ +_(L2_FIB_TABLE_ENTRY, l2_fib_table_entry) \ +_(VXLAN_GPE_ADD_DEL_TUNNEL, vxlan_gpe_add_del_tunnel) \ +_(VXLAN_GPE_TUNNEL_DUMP, vxlan_gpe_tunnel_dump) \ +_(INTERFACE_NAME_RENUMBER, interface_name_renumber) \ +_(WANT_IP4_ARP_EVENTS, want_ip4_arp_events) \ +_(WANT_IP6_ND_EVENTS, want_ip6_nd_events) \ +_(INPUT_ACL_SET_INTERFACE, input_acl_set_interface) \ +_(IPSEC_SPD_ADD_DEL, ipsec_spd_add_del) \ +_(IPSEC_INTERFACE_ADD_DEL_SPD, ipsec_interface_add_del_spd) \ +_(IPSEC_SPD_ADD_DEL_ENTRY, ipsec_spd_add_del_entry) \ +_(IPSEC_SAD_ADD_DEL_ENTRY, ipsec_sad_add_del_entry) \ +_(IPSEC_SA_SET_KEY, ipsec_sa_set_key) \ +_(IKEV2_PROFILE_ADD_DEL, ikev2_profile_add_del) \ +_(IKEV2_PROFILE_SET_AUTH, ikev2_profile_set_auth) \ +_(IKEV2_PROFILE_SET_ID, ikev2_profile_set_id) \ 
+_(IKEV2_PROFILE_SET_TS, ikev2_profile_set_ts) \ +_(IKEV2_SET_LOCAL_KEY, ikev2_set_local_key) \ +_(DELETE_LOOPBACK, delete_loopback) \ +_(BD_IP_MAC_ADD_DEL, bd_ip_mac_add_del) \ +_(COP_INTERFACE_ENABLE_DISABLE, cop_interface_enable_disable) \ +_(COP_WHITELIST_ENABLE_DISABLE, cop_whitelist_enable_disable) \ +_(GET_NODE_GRAPH, get_node_graph) \ +_(IOAM_ENABLE, ioam_enable) \ +_(IOAM_DISABLE, ioam_disable) \ +_(LISP_ADD_DEL_LOCATOR_SET, lisp_add_del_locator_set) \ +_(LISP_ADD_DEL_LOCATOR, lisp_add_del_locator) \ +_(LISP_ADD_DEL_LOCAL_EID, lisp_add_del_local_eid) \ +_(LISP_GPE_ADD_DEL_FWD_ENTRY, lisp_gpe_add_del_fwd_entry) \ +_(LISP_ADD_DEL_MAP_RESOLVER, lisp_add_del_map_resolver) \ +_(LISP_ADD_DEL_MAP_SERVER, lisp_add_del_map_server) \ +_(LISP_GPE_ENABLE_DISABLE, lisp_gpe_enable_disable) \ +_(LISP_ENABLE_DISABLE, lisp_enable_disable) \ +_(LISP_RLOC_PROBE_ENABLE_DISABLE, lisp_rloc_probe_enable_disable) \ +_(LISP_MAP_REGISTER_ENABLE_DISABLE, lisp_map_register_enable_disable) \ +_(LISP_GPE_ADD_DEL_IFACE, lisp_gpe_add_del_iface) \ +_(LISP_ADD_DEL_REMOTE_MAPPING, lisp_add_del_remote_mapping) \ +_(LISP_ADD_DEL_ADJACENCY, lisp_add_del_adjacency) \ +_(LISP_PITR_SET_LOCATOR_SET, lisp_pitr_set_locator_set) \ +_(LISP_MAP_REQUEST_MODE, lisp_map_request_mode) \ +_(LISP_EID_TABLE_ADD_DEL_MAP, lisp_eid_table_add_del_map) \ +_(LISP_LOCATOR_SET_DUMP, lisp_locator_set_dump) \ +_(LISP_LOCATOR_DUMP, lisp_locator_dump) \ +_(LISP_EID_TABLE_DUMP, lisp_eid_table_dump) \ +_(LISP_GPE_TUNNEL_DUMP, lisp_gpe_tunnel_dump) \ +_(LISP_MAP_RESOLVER_DUMP, lisp_map_resolver_dump) \ +_(LISP_MAP_SERVER_DUMP, lisp_map_server_dump) \ +_(LISP_EID_TABLE_MAP_DUMP, lisp_eid_table_map_dump) \ +_(LISP_EID_TABLE_VNI_DUMP, lisp_eid_table_vni_dump) \ +_(LISP_ADJACENCIES_GET, lisp_adjacencies_get) \ +_(SHOW_LISP_RLOC_PROBE_STATE, show_lisp_rloc_probe_state) \ +_(SHOW_LISP_MAP_REGISTER_STATE, show_lisp_map_register_state) \ +_(SHOW_LISP_STATUS, show_lisp_status) \ +_(LISP_ADD_DEL_MAP_REQUEST_ITR_RLOCS, \ + 
lisp_add_del_map_request_itr_rlocs) \ +_(LISP_GET_MAP_REQUEST_ITR_RLOCS, lisp_get_map_request_itr_rlocs) \ +_(SHOW_LISP_PITR, show_lisp_pitr) \ +_(SHOW_LISP_MAP_REQUEST_MODE, show_lisp_map_request_mode) \ +_(SR_MULTICAST_MAP_ADD_DEL, sr_multicast_map_add_del) \ +_(AF_PACKET_CREATE, af_packet_create) \ +_(AF_PACKET_DELETE, af_packet_delete) \ +_(POLICER_ADD_DEL, policer_add_del) \ +_(POLICER_DUMP, policer_dump) \ +_(POLICER_CLASSIFY_SET_INTERFACE, policer_classify_set_interface) \ +_(POLICER_CLASSIFY_DUMP, policer_classify_dump) \ +_(NETMAP_CREATE, netmap_create) \ +_(NETMAP_DELETE, netmap_delete) \ +_(MPLS_TUNNEL_DUMP, mpls_tunnel_dump) \ +_(MPLS_TUNNEL_DETAILS, mpls_tunnel_details) \ +_(MPLS_FIB_DUMP, mpls_fib_dump) \ +_(MPLS_FIB_DETAILS, mpls_fib_details) \ +_(CLASSIFY_TABLE_IDS,classify_table_ids) \ +_(CLASSIFY_TABLE_BY_INTERFACE, classify_table_by_interface) \ +_(CLASSIFY_TABLE_INFO,classify_table_info) \ +_(CLASSIFY_SESSION_DUMP,classify_session_dump) \ +_(CLASSIFY_SESSION_DETAILS,classify_session_details) \ +_(SET_IPFIX_EXPORTER, set_ipfix_exporter) \ +_(IPFIX_EXPORTER_DUMP, ipfix_exporter_dump) \ +_(SET_IPFIX_CLASSIFY_STREAM, set_ipfix_classify_stream) \ +_(IPFIX_CLASSIFY_STREAM_DUMP, ipfix_classify_stream_dump) \ +_(IPFIX_CLASSIFY_TABLE_ADD_DEL, ipfix_classify_table_add_del) \ +_(IPFIX_CLASSIFY_TABLE_DUMP, ipfix_classify_table_dump) \ +_(GET_NEXT_INDEX, get_next_index) \ +_(PG_CREATE_INTERFACE, pg_create_interface) \ +_(PG_CAPTURE, pg_capture) \ +_(PG_ENABLE_DISABLE, pg_enable_disable) \ +_(IP_SOURCE_AND_PORT_RANGE_CHECK_ADD_DEL, \ + ip_source_and_port_range_check_add_del) \ +_(IP_SOURCE_AND_PORT_RANGE_CHECK_INTERFACE_ADD_DEL, \ + ip_source_and_port_range_check_interface_add_del) \ +_(IPSEC_GRE_ADD_DEL_TUNNEL, ipsec_gre_add_del_tunnel) \ +_(IPSEC_GRE_TUNNEL_DUMP, ipsec_gre_tunnel_dump) \ +_(DELETE_SUBIF, delete_subif) \ +_(L2_INTERFACE_PBB_TAG_REWRITE, l2_interface_pbb_tag_rewrite) \ +_(PUNT, punt) \ +_(FLOW_CLASSIFY_SET_INTERFACE, 
flow_classify_set_interface) \ +_(FLOW_CLASSIFY_DUMP, flow_classify_dump) \ +_(IPSEC_SPD_DUMP, ipsec_spd_dump) \ +_(FEATURE_ENABLE_DISABLE, feature_enable_disable) \ +_(BFD_UDP_ADD, bfd_udp_add) \ +_(BFD_UDP_DEL, bfd_udp_del) \ +_(BFD_UDP_SESSION_DUMP, bfd_udp_session_dump) \ +_(BFD_SESSION_SET_FLAGS, bfd_session_set_flags) \ +_(WANT_BFD_EVENTS, want_bfd_events) + +#define QUOTE_(x) #x +#define QUOTE(x) QUOTE_(x) + +typedef enum +{ + RESOLVE_IP4_ADD_DEL_ROUTE = 1, + RESOLVE_IP6_ADD_DEL_ROUTE, +} resolve_t; + +static vlib_node_registration_t vpe_resolver_process_node; +vpe_api_main_t vpe_api_main; + +static int arp_change_delete_callback (u32 pool_index, u8 * notused); +static int nd_change_delete_callback (u32 pool_index, u8 * notused); + +/* Clean up all registrations belonging to the indicated client */ +int +vl_api_memclnt_delete_callback (u32 client_index) +{ + vpe_api_main_t *vam = &vpe_api_main; + vpe_client_registration_t *rp; + uword *p; + int stats_memclnt_delete_callback (u32 client_index); + + stats_memclnt_delete_callback (client_index); + +#define _(a) \ + p = hash_get (vam->a##_registration_hash, client_index); \ + if (p) { \ + rp = pool_elt_at_index (vam->a##_registrations, p[0]); \ + pool_put (vam->a##_registrations, rp); \ + hash_unset (vam->a##_registration_hash, client_index); \ + } + foreach_registration_hash; +#undef _ + return 0; +} + +pub_sub_handler (oam_events, OAM_EVENTS); +pub_sub_handler (bfd_events, BFD_EVENTS); + +#define RESOLUTION_EVENT 1 +#define RESOLUTION_PENDING_EVENT 2 +#define IP4_ARP_EVENT 3 +#define IP6_ND_EVENT 4 + +int ip4_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp); + +int ip6_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp); + +void +handle_ip4_arp_event (u32 pool_index) +{ + vpe_api_main_t *vam = &vpe_api_main; + vnet_main_t *vnm = vam->vnet_main; + vlib_main_t *vm = vam->vlib_main; + vl_api_ip4_arp_event_t *event; + vl_api_ip4_arp_event_t *mp; + unix_shared_memory_queue_t *q; + + /* Client can 
cancel, die, etc. */ + if (pool_is_free_index (vam->arp_events, pool_index)) + return; + + event = pool_elt_at_index (vam->arp_events, pool_index); + + q = vl_api_client_index_to_input_queue (event->client_index); + if (!q) + { + (void) vnet_add_del_ip4_arp_change_event + (vnm, arp_change_delete_callback, + event->pid, &event->address, + vpe_resolver_process_node.index, IP4_ARP_EVENT, + ~0 /* pool index, notused */ , 0 /* is_add */ ); + return; + } + + if (q->cursize < q->maxsize) + { + mp = vl_msg_api_alloc (sizeof (*mp)); + clib_memcpy (mp, event, sizeof (*mp)); + vl_msg_api_send_shmem (q, (u8 *) & mp); + } + else + { + static f64 last_time; + /* + * Throttle syslog msgs. + * It's pretty tempting to just revoke the registration... + */ + if (vlib_time_now (vm) > last_time + 10.0) + { + clib_warning ("arp event for %U to pid %d: queue stuffed!", + format_ip4_address, &event->address, event->pid); + last_time = vlib_time_now (vm); + } + } +} + +void +handle_ip6_nd_event (u32 pool_index) +{ + vpe_api_main_t *vam = &vpe_api_main; + vnet_main_t *vnm = vam->vnet_main; + vlib_main_t *vm = vam->vlib_main; + vl_api_ip6_nd_event_t *event; + vl_api_ip6_nd_event_t *mp; + unix_shared_memory_queue_t *q; + + /* Client can cancel, die, etc. */ + if (pool_is_free_index (vam->nd_events, pool_index)) + return; + + event = pool_elt_at_index (vam->nd_events, pool_index); + + q = vl_api_client_index_to_input_queue (event->client_index); + if (!q) + { + (void) vnet_add_del_ip6_nd_change_event + (vnm, nd_change_delete_callback, + event->pid, &event->address, + vpe_resolver_process_node.index, IP6_ND_EVENT, + ~0 /* pool index, notused */ , 0 /* is_add */ ); + return; + } + + if (q->cursize < q->maxsize) + { + mp = vl_msg_api_alloc (sizeof (*mp)); + clib_memcpy (mp, event, sizeof (*mp)); + vl_msg_api_send_shmem (q, (u8 *) & mp); + } + else + { + static f64 last_time; + /* + * Throttle syslog msgs. + * It's pretty tempting to just revoke the registration... 
+ */ + if (vlib_time_now (vm) > last_time + 10.0) + { + clib_warning ("ip6 nd event for %U to pid %d: queue stuffed!", + format_ip6_address, &event->address, event->pid); + last_time = vlib_time_now (vm); + } + } +} + +static uword +resolver_process (vlib_main_t * vm, + vlib_node_runtime_t * rt, vlib_frame_t * f) +{ + uword event_type; + uword *event_data = 0; + f64 timeout = 100.0; + int i; + + while (1) + { + vlib_process_wait_for_event_or_clock (vm, timeout); + + event_type = vlib_process_get_events (vm, &event_data); + + switch (event_type) + { + case RESOLUTION_PENDING_EVENT: + timeout = 1.0; + break; + + case RESOLUTION_EVENT: + clib_warning ("resolver: BOGUS TYPE"); + break; + + case IP4_ARP_EVENT: + for (i = 0; i < vec_len (event_data); i++) + handle_ip4_arp_event (event_data[i]); + break; + + case IP6_ND_EVENT: + for (i = 0; i < vec_len (event_data); i++) + handle_ip6_nd_event (event_data[i]); + break; + + case ~0: /* timeout */ + break; + } + + vec_reset_length (event_data); + } + return 0; /* or not */ +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (vpe_resolver_process_node,static) = { + .function = resolver_process, + .type = VLIB_NODE_TYPE_PROCESS, + .name = "vpe-route-resolver-process", +}; +/* *INDENT-ON* */ + +static int +mpls_route_add_del_t_handler (vnet_main_t * vnm, + vl_api_mpls_route_add_del_t * mp) +{ + u32 fib_index, next_hop_fib_index; + mpls_label_t *label_stack = NULL; + int rv, ii, n_labels;; + + fib_prefix_t pfx = { + .fp_len = 21, + .fp_proto = FIB_PROTOCOL_MPLS, + .fp_eos = mp->mr_eos, + .fp_label = ntohl (mp->mr_label), + }; + if (pfx.fp_eos) + { + if (mp->mr_next_hop_proto_is_ip4) + { + pfx.fp_payload_proto = DPO_PROTO_IP4; + } + else + { + pfx.fp_payload_proto = DPO_PROTO_IP6; + } + } + else + { + pfx.fp_payload_proto = DPO_PROTO_MPLS; + } + + rv = add_del_route_check (FIB_PROTOCOL_MPLS, + mp->mr_table_id, + mp->mr_next_hop_sw_if_index, + dpo_proto_to_fib (pfx.fp_payload_proto), + mp->mr_next_hop_table_id, + 
mp->mr_create_table_if_needed, + &fib_index, &next_hop_fib_index); + + if (0 != rv) + return (rv); + + ip46_address_t nh; + memset (&nh, 0, sizeof (nh)); + + if (mp->mr_next_hop_proto_is_ip4) + memcpy (&nh.ip4, mp->mr_next_hop, sizeof (nh.ip4)); + else + memcpy (&nh.ip6, mp->mr_next_hop, sizeof (nh.ip6)); + + n_labels = mp->mr_next_hop_n_out_labels; + if (n_labels == 0) + ; + else if (1 == n_labels) + vec_add1 (label_stack, ntohl (mp->mr_next_hop_out_label_stack[0])); + else + { + vec_validate (label_stack, n_labels - 1); + for (ii = 0; ii < n_labels; ii++) + label_stack[ii] = ntohl (mp->mr_next_hop_out_label_stack[ii]); + } + + return (add_del_route_t_handler (mp->mr_is_multipath, mp->mr_is_add, 0, // mp->is_drop, + 0, // mp->is_unreach, + 0, // mp->is_prohibit, + 0, // mp->is_local, + mp->mr_is_classify, + mp->mr_classify_table_index, + mp->mr_is_resolve_host, + mp->mr_is_resolve_attached, + fib_index, &pfx, + mp->mr_next_hop_proto_is_ip4, + &nh, ntohl (mp->mr_next_hop_sw_if_index), + next_hop_fib_index, + mp->mr_next_hop_weight, + ntohl (mp->mr_next_hop_via_label), + label_stack)); +} + +void +vl_api_mpls_route_add_del_t_handler (vl_api_mpls_route_add_del_t * mp) +{ + vl_api_mpls_route_add_del_reply_t *rmp; + vnet_main_t *vnm; + int rv; + + vnm = vnet_get_main (); + vnm->api_errno = 0; + + rv = mpls_route_add_del_t_handler (vnm, mp); + + rv = (rv == 0) ? vnm->api_errno : rv; + + REPLY_MACRO (VL_API_MPLS_ROUTE_ADD_DEL_REPLY); +} + +static int +mpls_ip_bind_unbind_handler (vnet_main_t * vnm, + vl_api_mpls_ip_bind_unbind_t * mp) +{ + u32 mpls_fib_index, ip_fib_index; + + mpls_fib_index = + fib_table_find (FIB_PROTOCOL_MPLS, ntohl (mp->mb_mpls_table_id)); + + if (~0 == mpls_fib_index) + { + if (mp->mb_create_table_if_needed) + { + mpls_fib_index = + fib_table_find_or_create_and_lock (FIB_PROTOCOL_MPLS, + ntohl (mp->mb_mpls_table_id)); + } + else + return VNET_API_ERROR_NO_SUCH_FIB; + } + + ip_fib_index = fib_table_find ((mp->mb_is_ip4 ? 
+ FIB_PROTOCOL_IP4 : + FIB_PROTOCOL_IP6), + ntohl (mp->mb_ip_table_id)); + if (~0 == ip_fib_index) + return VNET_API_ERROR_NO_SUCH_FIB; + + fib_prefix_t pfx = { + .fp_len = mp->mb_address_length, + }; + + if (mp->mb_is_ip4) + { + pfx.fp_proto = FIB_PROTOCOL_IP4; + clib_memcpy (&pfx.fp_addr.ip4, mp->mb_address, + sizeof (pfx.fp_addr.ip4)); + } + else + { + pfx.fp_proto = FIB_PROTOCOL_IP6; + clib_memcpy (&pfx.fp_addr.ip6, mp->mb_address, + sizeof (pfx.fp_addr.ip6)); + } + + if (mp->mb_is_bind) + fib_table_entry_local_label_add (ip_fib_index, &pfx, + ntohl (mp->mb_label)); + else + fib_table_entry_local_label_remove (ip_fib_index, &pfx, + ntohl (mp->mb_label)); + + return (0); +} + +void +vl_api_mpls_ip_bind_unbind_t_handler (vl_api_mpls_ip_bind_unbind_t * mp) +{ + vl_api_mpls_route_add_del_reply_t *rmp; + vnet_main_t *vnm; + int rv; + + vnm = vnet_get_main (); + vnm->api_errno = 0; + + rv = mpls_ip_bind_unbind_handler (vnm, mp); + + rv = (rv == 0) ? vnm->api_errno : rv; + + REPLY_MACRO (VL_API_MPLS_ROUTE_ADD_DEL_REPLY); +} + +static void +vl_api_sw_interface_set_vpath_t_handler (vl_api_sw_interface_set_vpath_t * mp) +{ + vl_api_sw_interface_set_vpath_reply_t *rmp; + int rv = 0; + u32 sw_if_index = ntohl (mp->sw_if_index); + + VALIDATE_SW_IF_INDEX (mp); + + l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_VPATH, mp->enable); + vnet_feature_enable_disable ("ip4-unicast", "vpath-input-ip4", + sw_if_index, mp->enable, 0, 0); + vnet_feature_enable_disable ("ip4-multicast", "vpath-input-ip4", + sw_if_index, mp->enable, 0, 0); + vnet_feature_enable_disable ("ip6-unicast", "vpath-input-ip6", + sw_if_index, mp->enable, 0, 0); + vnet_feature_enable_disable ("ip6-multicast", "vpath-input-ip6", + sw_if_index, mp->enable, 0, 0); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_SW_INTERFACE_SET_VPATH_REPLY); +} + +static void + vl_api_sw_interface_set_vxlan_bypass_t_handler + (vl_api_sw_interface_set_vxlan_bypass_t * mp) +{ + vl_api_sw_interface_set_vxlan_bypass_reply_t 
*rmp; + int rv = 0; + u32 sw_if_index = ntohl (mp->sw_if_index); + + VALIDATE_SW_IF_INDEX (mp); + + if (mp->is_ipv6) + { + /* not yet implemented */ + } + else + vnet_feature_enable_disable ("ip4-unicast", "ip4-vxlan-bypass", + sw_if_index, mp->enable, 0, 0); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_SW_INTERFACE_SET_VXLAN_BYPASS_REPLY); +} + +static void + vl_api_sw_interface_set_l2_xconnect_t_handler + (vl_api_sw_interface_set_l2_xconnect_t * mp) +{ + vl_api_sw_interface_set_l2_xconnect_reply_t *rmp; + int rv = 0; + u32 rx_sw_if_index = ntohl (mp->rx_sw_if_index); + u32 tx_sw_if_index = ntohl (mp->tx_sw_if_index); + vlib_main_t *vm = vlib_get_main (); + vnet_main_t *vnm = vnet_get_main (); + + VALIDATE_RX_SW_IF_INDEX (mp); + + if (mp->enable) + { + VALIDATE_TX_SW_IF_INDEX (mp); + rv = set_int_l2_mode (vm, vnm, MODE_L2_XC, + rx_sw_if_index, 0, 0, 0, tx_sw_if_index); + } + else + { + rv = set_int_l2_mode (vm, vnm, MODE_L3, rx_sw_if_index, 0, 0, 0, 0); + } + + BAD_RX_SW_IF_INDEX_LABEL; + BAD_TX_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_SW_INTERFACE_SET_L2_XCONNECT_REPLY); +} + +static void + vl_api_sw_interface_set_l2_bridge_t_handler + (vl_api_sw_interface_set_l2_bridge_t * mp) +{ + bd_main_t *bdm = &bd_main; + vl_api_sw_interface_set_l2_bridge_reply_t *rmp; + int rv = 0; + u32 rx_sw_if_index = ntohl (mp->rx_sw_if_index); + u32 bd_id = ntohl (mp->bd_id); + u32 bd_index; + u32 bvi = mp->bvi; + u8 shg = mp->shg; + vlib_main_t *vm = vlib_get_main (); + vnet_main_t *vnm = vnet_get_main (); + + VALIDATE_RX_SW_IF_INDEX (mp); + + bd_index = bd_find_or_add_bd_index (bdm, bd_id); + + if (mp->enable) + { + //VALIDATE_TX_SW_IF_INDEX(mp); + rv = set_int_l2_mode (vm, vnm, MODE_L2_BRIDGE, + rx_sw_if_index, bd_index, bvi, shg, 0); + } + else + { + rv = set_int_l2_mode (vm, vnm, MODE_L3, rx_sw_if_index, 0, 0, 0, 0); + } + + BAD_RX_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_SW_INTERFACE_SET_L2_BRIDGE_REPLY); +} + +static void + 
vl_api_sw_interface_set_dpdk_hqos_pipe_t_handler + (vl_api_sw_interface_set_dpdk_hqos_pipe_t * mp) +{ + vl_api_sw_interface_set_dpdk_hqos_pipe_reply_t *rmp; + int rv = 0; + +#if DPDK > 0 + dpdk_main_t *dm = &dpdk_main; + dpdk_device_t *xd; + + u32 sw_if_index = ntohl (mp->sw_if_index); + u32 subport = ntohl (mp->subport); + u32 pipe = ntohl (mp->pipe); + u32 profile = ntohl (mp->profile); + vnet_hw_interface_t *hw; + + VALIDATE_SW_IF_INDEX (mp); + + /* hw_if & dpdk device */ + hw = vnet_get_sup_hw_interface (dm->vnet_main, sw_if_index); + + xd = vec_elt_at_index (dm->devices, hw->dev_instance); + + rv = rte_sched_pipe_config (xd->hqos_ht->hqos, subport, pipe, profile); + + BAD_SW_IF_INDEX_LABEL; +#else + clib_warning ("setting HQoS pipe parameters without DPDK not implemented"); + rv = VNET_API_ERROR_UNIMPLEMENTED; +#endif /* DPDK */ + + REPLY_MACRO (VL_API_SW_INTERFACE_SET_DPDK_HQOS_PIPE_REPLY); +} + +static void + vl_api_sw_interface_set_dpdk_hqos_subport_t_handler + (vl_api_sw_interface_set_dpdk_hqos_subport_t * mp) +{ + vl_api_sw_interface_set_dpdk_hqos_subport_reply_t *rmp; + int rv = 0; + +#if DPDK > 0 + dpdk_main_t *dm = &dpdk_main; + dpdk_device_t *xd; + struct rte_sched_subport_params p; + + u32 sw_if_index = ntohl (mp->sw_if_index); + u32 subport = ntohl (mp->subport); + p.tb_rate = ntohl (mp->tb_rate); + p.tb_size = ntohl (mp->tb_size); + p.tc_rate[0] = ntohl (mp->tc_rate[0]); + p.tc_rate[1] = ntohl (mp->tc_rate[1]); + p.tc_rate[2] = ntohl (mp->tc_rate[2]); + p.tc_rate[3] = ntohl (mp->tc_rate[3]); + p.tc_period = ntohl (mp->tc_period); + + vnet_hw_interface_t *hw; + + VALIDATE_SW_IF_INDEX (mp); + + /* hw_if & dpdk device */ + hw = vnet_get_sup_hw_interface (dm->vnet_main, sw_if_index); + + xd = vec_elt_at_index (dm->devices, hw->dev_instance); + + rv = rte_sched_subport_config (xd->hqos_ht->hqos, subport, &p); + + BAD_SW_IF_INDEX_LABEL; +#else + clib_warning + ("setting HQoS subport parameters without DPDK not implemented"); + rv = 
VNET_API_ERROR_UNIMPLEMENTED; +#endif /* DPDK */ + + REPLY_MACRO (VL_API_SW_INTERFACE_SET_DPDK_HQOS_SUBPORT_REPLY); +} + +static void + vl_api_sw_interface_set_dpdk_hqos_tctbl_t_handler + (vl_api_sw_interface_set_dpdk_hqos_tctbl_t * mp) +{ + vl_api_sw_interface_set_dpdk_hqos_tctbl_reply_t *rmp; + int rv = 0; + +#if DPDK > 0 + dpdk_main_t *dm = &dpdk_main; + vlib_thread_main_t *tm = vlib_get_thread_main (); + dpdk_device_t *xd; + + u32 sw_if_index = ntohl (mp->sw_if_index); + u32 entry = ntohl (mp->entry); + u32 tc = ntohl (mp->tc); + u32 queue = ntohl (mp->queue); + u32 val, i; + + vnet_hw_interface_t *hw; + + VALIDATE_SW_IF_INDEX (mp); + + /* hw_if & dpdk device */ + hw = vnet_get_sup_hw_interface (dm->vnet_main, sw_if_index); + + xd = vec_elt_at_index (dm->devices, hw->dev_instance); + + if (tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE) + { + clib_warning ("invalid traffic class !!"); + rv = VNET_API_ERROR_INVALID_VALUE; + goto done; + } + if (queue >= RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS) + { + clib_warning ("invalid queue !!"); + rv = VNET_API_ERROR_INVALID_VALUE; + goto done; + } + + /* Detect the set of worker threads */ + uword *p = hash_get_mem (tm->thread_registrations_by_name, "workers"); + + if (p == 0) + { + clib_warning ("worker thread registration AWOL !!"); + rv = VNET_API_ERROR_INVALID_VALUE_2; + goto done; + } + + vlib_thread_registration_t *tr = (vlib_thread_registration_t *) p[0]; + int worker_thread_first = tr->first_index; + int worker_thread_count = tr->count; + + val = tc * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue; + for (i = 0; i < worker_thread_count; i++) + xd->hqos_wt[worker_thread_first + i].hqos_tc_table[entry] = val; + + BAD_SW_IF_INDEX_LABEL; +done: +#else + clib_warning ("setting HQoS DSCP table entry without DPDK not implemented"); + rv = VNET_API_ERROR_UNIMPLEMENTED; +#endif /* DPDK */ + + REPLY_MACRO (VL_API_SW_INTERFACE_SET_DPDK_HQOS_TCTBL_REPLY); +} + +static void +vl_api_bridge_domain_add_del_t_handler 
(vl_api_bridge_domain_add_del_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + bd_main_t *bdm = &bd_main; + vl_api_bridge_domain_add_del_reply_t *rmp; + int rv = 0; + u32 enable_flags = 0, disable_flags = 0; + u32 bd_id = ntohl (mp->bd_id); + u32 bd_index; + + if (mp->is_add) + { + bd_index = bd_find_or_add_bd_index (bdm, bd_id); + + if (mp->flood) + enable_flags |= L2_FLOOD; + else + disable_flags |= L2_FLOOD; + + if (mp->uu_flood) + enable_flags |= L2_UU_FLOOD; + else + disable_flags |= L2_UU_FLOOD; + + if (mp->forward) + enable_flags |= L2_FWD; + else + disable_flags |= L2_FWD; + + if (mp->arp_term) + enable_flags |= L2_ARP_TERM; + else + disable_flags |= L2_ARP_TERM; + + if (mp->learn) + enable_flags |= L2_LEARN; + else + disable_flags |= L2_LEARN; + + if (enable_flags) + bd_set_flags (vm, bd_index, enable_flags, 1 /* enable */ ); + + if (disable_flags) + bd_set_flags (vm, bd_index, disable_flags, 0 /* disable */ ); + + bd_set_mac_age (vm, bd_index, mp->mac_age); + } + else + rv = bd_delete_bd_index (bdm, bd_id); + + REPLY_MACRO (VL_API_BRIDGE_DOMAIN_ADD_DEL_REPLY); +} + +static void +vl_api_bridge_domain_details_t_handler (vl_api_bridge_domain_details_t * mp) +{ + clib_warning ("BUG"); +} + +static void + vl_api_bridge_domain_sw_if_details_t_handler + (vl_api_bridge_domain_sw_if_details_t * mp) +{ + clib_warning ("BUG"); +} + +static void +send_bridge_domain_details (unix_shared_memory_queue_t * q, + l2_bridge_domain_t * bd_config, + u32 n_sw_ifs, u32 context) +{ + vl_api_bridge_domain_details_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_BRIDGE_DOMAIN_DETAILS); + mp->bd_id = ntohl (bd_config->bd_id); + mp->flood = bd_feature_flood (bd_config); + mp->uu_flood = bd_feature_uu_flood (bd_config); + mp->forward = bd_feature_forward (bd_config); + mp->learn = bd_feature_learn (bd_config); + mp->arp_term = bd_feature_arp_term (bd_config); + mp->bvi_sw_if_index = ntohl (bd_config->bvi_sw_if_index); 
+ mp->mac_age = bd_config->mac_age; + mp->n_sw_ifs = ntohl (n_sw_ifs); + mp->context = context; + + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +static void +send_bd_sw_if_details (l2input_main_t * l2im, + unix_shared_memory_queue_t * q, + l2_flood_member_t * member, u32 bd_id, u32 context) +{ + vl_api_bridge_domain_sw_if_details_t *mp; + l2_input_config_t *input_cfg; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_BRIDGE_DOMAIN_SW_IF_DETAILS); + mp->bd_id = ntohl (bd_id); + mp->sw_if_index = ntohl (member->sw_if_index); + input_cfg = vec_elt_at_index (l2im->configs, member->sw_if_index); + mp->shg = input_cfg->shg; + mp->context = context; + + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +static void +vl_api_bridge_domain_dump_t_handler (vl_api_bridge_domain_dump_t * mp) +{ + bd_main_t *bdm = &bd_main; + l2input_main_t *l2im = &l2input_main; + unix_shared_memory_queue_t *q; + l2_bridge_domain_t *bd_config; + u32 bd_id, bd_index; + u32 end; + + q = vl_api_client_index_to_input_queue (mp->client_index); + + if (q == 0) + return; + + bd_id = ntohl (mp->bd_id); + + bd_index = (bd_id == ~0) ? 0 : bd_find_or_add_bd_index (bdm, bd_id); + end = (bd_id == ~0) ? 
vec_len (l2im->bd_configs) : bd_index + 1; + for (; bd_index < end; bd_index++) + { + bd_config = l2input_bd_config_from_index (l2im, bd_index); + /* skip dummy bd_id 0 */ + if (bd_config && (bd_config->bd_id > 0)) + { + u32 n_sw_ifs; + l2_flood_member_t *m; + + n_sw_ifs = vec_len (bd_config->members); + send_bridge_domain_details (q, bd_config, n_sw_ifs, mp->context); + + vec_foreach (m, bd_config->members) + { + send_bd_sw_if_details (l2im, q, m, bd_config->bd_id, mp->context); + } + } + } +} + +static void +vl_api_l2fib_add_del_t_handler (vl_api_l2fib_add_del_t * mp) +{ + bd_main_t *bdm = &bd_main; + l2input_main_t *l2im = &l2input_main; + vl_api_l2fib_add_del_reply_t *rmp; + int rv = 0; + u64 mac = 0; + u32 sw_if_index = ntohl (mp->sw_if_index); + u32 bd_id = ntohl (mp->bd_id); + u32 bd_index; + u32 static_mac; + u32 filter_mac; + u32 bvi_mac; + uword *p; + + mac = mp->mac; + + p = hash_get (bdm->bd_index_by_bd_id, bd_id); + if (!p) + { + rv = VNET_API_ERROR_NO_SUCH_ENTRY; + goto bad_sw_if_index; + } + bd_index = p[0]; + + if (mp->is_add) + { + filter_mac = mp->filter_mac ? 1 : 0; + if (filter_mac == 0) + { + VALIDATE_SW_IF_INDEX (mp); + if (vec_len (l2im->configs) <= sw_if_index) + { + rv = VNET_API_ERROR_INVALID_SW_IF_INDEX; + goto bad_sw_if_index; + } + else + { + l2_input_config_t *config; + config = vec_elt_at_index (l2im->configs, sw_if_index); + if (config->bridge == 0) + { + rv = VNET_API_ERROR_INVALID_SW_IF_INDEX; + goto bad_sw_if_index; + } + } + } + static_mac = mp->static_mac ? 1 : 0; + bvi_mac = mp->bvi_mac ? 
/* ...continuation of vl_api_l2fib_add_del_t_handler. */
1 : 0;
      l2fib_add_entry (mac, bd_index, sw_if_index, static_mac, filter_mac,
		       bvi_mac);
    }
  else
    {
      l2fib_del_entry (mac, bd_index);
    }

  BAD_SW_IF_INDEX_LABEL;

  REPLY_MACRO (VL_API_L2FIB_ADD_DEL_REPLY);
}

/* Set/clear L2 input feature bits on an interface; replies with the
 * resulting feature bitmap after the last toggled feature. */
static void
vl_api_l2_flags_t_handler (vl_api_l2_flags_t * mp)
{
  vl_api_l2_flags_reply_t *rmp;
  int rv = 0;
  u32 sw_if_index = ntohl (mp->sw_if_index);
  u32 flags = ntohl (mp->feature_bitmap);
  u32 rbm = 0;

  VALIDATE_SW_IF_INDEX (mp);

  /* expand once per feature in foreach_l2input_feat; rbm keeps the
   * bitmap returned by the last enable/disable call */
#define _(a,b) \
    if (flags & L2INPUT_FEAT_ ## a) \
      rbm = l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_ ## a, mp->is_set);
  foreach_l2input_feat;
#undef _

  BAD_SW_IF_INDEX_LABEL;

  /* *INDENT-OFF* */
  REPLY_MACRO2(VL_API_L2_FLAGS_REPLY,
  ({
    rmp->resulting_feature_bitmap = ntohl(rbm);
  }));
  /* *INDENT-ON* */
}

/* Set/clear bridge-domain flags (learn/forward/flood/uu-flood/arp-term). */
static void
vl_api_bridge_flags_t_handler (vl_api_bridge_flags_t * mp)
{
  vlib_main_t *vm = vlib_get_main ();
  bd_main_t *bdm = &bd_main;
  vl_api_bridge_flags_reply_t *rmp;
  int rv = 0;
  u32 bd_id = ntohl (mp->bd_id);
  u32 bd_index;
  u32 flags = ntohl (mp->feature_bitmap);
  uword *p;

  p = hash_get (bdm->bd_index_by_bd_id, bd_id);
  if (p == 0)
    {
      rv = VNET_API_ERROR_NO_SUCH_ENTRY;
      goto out;
    }

  bd_index = p[0];

  /* NOTE(review): bd_set_flags returns the resulting bitmap, but the reply
   * below echoes the *requested* flags instead — confirm intended. */
  bd_set_flags (vm, bd_index, flags, mp->is_set);

out:
  /* *INDENT-OFF* */
  REPLY_MACRO2(VL_API_BRIDGE_FLAGS_REPLY,
  ({
    rmp->resulting_feature_bitmap = ntohl(flags);
  }));
  /* *INDENT-ON* */
}

/* Add/delete a static IP-to-MAC (ARP/ND termination) entry in a bridge
 * domain.  (REPLY_MACRO argument continues in the next chunk.) */
static void
vl_api_bd_ip_mac_add_del_t_handler (vl_api_bd_ip_mac_add_del_t * mp)
{
  bd_main_t *bdm = &bd_main;
  vl_api_bd_ip_mac_add_del_reply_t *rmp;
  int rv = 0;
  u32 bd_id = ntohl (mp->bd_id);
  u32 bd_index;
  uword *p;

  p = hash_get (bdm->bd_index_by_bd_id, bd_id);
  if (p == 0)
    {
      rv = VNET_API_ERROR_NO_SUCH_ENTRY;
      goto out;
    }

  bd_index = p[0];
  if (bd_add_del_ip_mac (bd_index, mp->ip_address,
			 mp->mac_address, mp->is_ipv6, mp->is_add))
    rv = VNET_API_ERROR_UNSPECIFIED;

out:
  REPLY_MACRO
(VL_API_BD_IP_MAC_ADD_DEL_REPLY); +} + +static void +vl_api_create_vlan_subif_t_handler (vl_api_create_vlan_subif_t * mp) +{ + vl_api_create_vlan_subif_reply_t *rmp; + vnet_main_t *vnm = vnet_get_main (); + u32 hw_if_index, sw_if_index = (u32) ~ 0; + vnet_hw_interface_t *hi; + int rv = 0; + u32 id; + vnet_sw_interface_t template; + uword *p; + vnet_interface_main_t *im = &vnm->interface_main; + u64 sup_and_sub_key; + u64 *kp; + unix_shared_memory_queue_t *q; + clib_error_t *error; + + VALIDATE_SW_IF_INDEX (mp); + + hw_if_index = ntohl (mp->sw_if_index); + hi = vnet_get_hw_interface (vnm, hw_if_index); + + id = ntohl (mp->vlan_id); + if (id == 0 || id > 4095) + { + rv = VNET_API_ERROR_INVALID_VLAN; + goto out; + } + + sup_and_sub_key = ((u64) (hi->sw_if_index) << 32) | (u64) id; + + p = hash_get_mem (im->sw_if_index_by_sup_and_sub, &sup_and_sub_key); + if (p) + { + rv = VNET_API_ERROR_VLAN_ALREADY_EXISTS; + goto out; + } + + kp = clib_mem_alloc (sizeof (*kp)); + *kp = sup_and_sub_key; + + memset (&template, 0, sizeof (template)); + template.type = VNET_SW_INTERFACE_TYPE_SUB; + template.sup_sw_if_index = hi->sw_if_index; + template.sub.id = id; + template.sub.eth.raw_flags = 0; + template.sub.eth.flags.one_tag = 1; + template.sub.eth.outer_vlan_id = id; + template.sub.eth.flags.exact_match = 1; + + error = vnet_create_sw_interface (vnm, &template, &sw_if_index); + if (error) + { + clib_error_report (error); + rv = VNET_API_ERROR_INVALID_REGISTRATION; + goto out; + } + hash_set (hi->sub_interface_sw_if_index_by_id, id, sw_if_index); + hash_set_mem (im->sw_if_index_by_sup_and_sub, kp, sw_if_index); + + BAD_SW_IF_INDEX_LABEL; + +out: + q = vl_api_client_index_to_input_queue (mp->client_index); + if (!q) + return; + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_CREATE_VLAN_SUBIF_REPLY); + rmp->context = mp->context; + rmp->retval = ntohl (rv); + rmp->sw_if_index = ntohl (sw_if_index); + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static 
void
/* Create a generic sub-interface (any tag combination) on the parent of
 * mp->sw_if_index.  Rejected on bond-slave interfaces.
 * (Continues past the end of this chunk.) */
vl_api_create_subif_t_handler (vl_api_create_subif_t * mp)
{
  vl_api_create_subif_reply_t *rmp;
  vnet_main_t *vnm = vnet_get_main ();
  u32 sw_if_index = ~0;
  int rv = 0;
  u32 sub_id;
  vnet_sw_interface_t *si;
  vnet_hw_interface_t *hi;
  vnet_sw_interface_t template;
  uword *p;
  vnet_interface_main_t *im = &vnm->interface_main;
  u64 sup_and_sub_key;
  u64 *kp;
  clib_error_t *error;

  VALIDATE_SW_IF_INDEX (mp);

  si = vnet_get_sup_sw_interface (vnm, ntohl (mp->sw_if_index));
  hi = vnet_get_sup_hw_interface (vnm, ntohl (mp->sw_if_index));

  if (hi->bond_info == VNET_HW_INTERFACE_BOND_INFO_SLAVE)
    {
      rv = VNET_API_ERROR_BOND_SLAVE_NOT_ALLOWED;
      goto out;
    }

  sw_if_index = si->sw_if_index;
  sub_id = ntohl (mp->sub_id);

  sup_and_sub_key = ((u64) (sw_if_index) << 32) | (u64) sub_id;

  p = hash_get_mem (im->sw_if_index_by_sup_and_sub, &sup_and_sub_key);
  if (p)
    {
      if (CLIB_DEBUG > 0)
	clib_warning ("sup sw_if_index %d, sub id %d already exists\n",
		      sw_if_index, sub_id);
      rv = VNET_API_ERROR_SUBIF_ALREADY_EXISTS;
      goto out;
    }

  /* NOTE(review): kp is allocated here but the creation-failure path (next
   * chunk) goes to out without freeing it — leak on error. */
  kp = clib_mem_alloc (sizeof (*kp));
  *kp = sup_and_sub_key;

  memset (&template, 0, sizeof (template));
  template.type = VNET_SW_INTERFACE_TYPE_SUB;
  template.sup_sw_if_index = sw_if_index;
  template.sub.id = sub_id;
  template.sub.eth.flags.no_tags = mp->no_tags;
  template.sub.eth.flags.one_tag = mp->one_tag;
  template.sub.eth.flags.two_tags = mp->two_tags;
  template.sub.eth.flags.dot1ad = mp->dot1ad;
  template.sub.eth.flags.exact_match = mp->exact_match;
  template.sub.eth.flags.default_sub = mp->default_sub;
  template.sub.eth.flags.outer_vlan_id_any = mp->outer_vlan_id_any;
  template.sub.eth.flags.inner_vlan_id_any = mp->inner_vlan_id_any;
  template.sub.eth.outer_vlan_id = ntohs (mp->outer_vlan_id);
  template.sub.eth.inner_vlan_id = ntohs (mp->inner_vlan_id);

  error = vnet_create_sw_interface (vnm, &template, &sw_if_index);
  if (error)
    {
      clib_error_report (error);
      rv =
/* ...continuation of vl_api_create_subif_t_handler. */
VNET_API_ERROR_SUBIF_CREATE_FAILED;
      goto out;
    }

  hash_set (hi->sub_interface_sw_if_index_by_id, sub_id, sw_if_index);
  hash_set_mem (im->sw_if_index_by_sup_and_sub, kp, sw_if_index);

  BAD_SW_IF_INDEX_LABEL;

out:

  /* *INDENT-OFF* */
  REPLY_MACRO2(VL_API_CREATE_SUBIF_REPLY,
  ({
    rmp->sw_if_index = ntohl(sw_if_index);
  }));
  /* *INDENT-ON* */
}

/* Add or delete an MPLS tunnel.  Add builds a single fib_route_path_t
 * (v4 or v6 next hop) plus an out-label stack; delete removes by
 * sw_if_index.  Serialized against the stats thread via dslock. */
static void
vl_api_mpls_tunnel_add_del_t_handler (vl_api_mpls_tunnel_add_del_t * mp)
{
  vl_api_mpls_tunnel_add_del_reply_t *rmp;
  int rv = 0;
  stats_main_t *sm = &stats_main;
  u32 tunnel_sw_if_index;
  int ii;

  dslock (sm, 1 /* release hint */ , 5 /* tag */ );

  if (mp->mt_is_add)
    {
      fib_route_path_t rpath, *rpaths = NULL;
      mpls_label_t *label_stack = NULL;

      memset (&rpath, 0, sizeof (rpath));

      if (mp->mt_next_hop_proto_is_ip4)
	{
	  rpath.frp_proto = FIB_PROTOCOL_IP4;
	  clib_memcpy (&rpath.frp_addr.ip4,
		       mp->mt_next_hop, sizeof (rpath.frp_addr.ip4));
	}
      else
	{
	  rpath.frp_proto = FIB_PROTOCOL_IP6;
	  clib_memcpy (&rpath.frp_addr.ip6,
		       mp->mt_next_hop, sizeof (rpath.frp_addr.ip6));
	}
      rpath.frp_sw_if_index = ntohl (mp->mt_next_hop_sw_if_index);

      for (ii = 0; ii < mp->mt_next_hop_n_out_labels; ii++)
	vec_add1 (label_stack, ntohl (mp->mt_next_hop_out_label_stack[ii]));

      vec_add1 (rpaths, rpath);

      vnet_mpls_tunnel_add (rpaths, label_stack,
			    mp->mt_l2_only, &tunnel_sw_if_index);
      vec_free (rpaths);
      vec_free (label_stack);
    }
  else
    {
      tunnel_sw_if_index = ntohl (mp->mt_sw_if_index);
      vnet_mpls_tunnel_del (tunnel_sw_if_index);
    }

  dsunlock (sm);

  /* *INDENT-OFF* */
  REPLY_MACRO2(VL_API_MPLS_TUNNEL_ADD_DEL_REPLY,
  ({
    rmp->sw_if_index = ntohl(tunnel_sw_if_index);
  }));
  /* *INDENT-ON* */
}

/* Add/delete a proxy-ARP address range in a FIB table.
 * (Continues past the end of this chunk.) */
static void
vl_api_proxy_arp_add_del_t_handler (vl_api_proxy_arp_add_del_t * mp)
{
  vl_api_proxy_arp_add_del_reply_t *rmp;
  u32 fib_index;
  int rv;
  ip4_main_t *im = &ip4_main;
  stats_main_t *sm = &stats_main;
  /* local extern prototype for the proxy-arp worker */
  int vnet_proxy_arp_add_del
/* ...continuation of vl_api_proxy_arp_add_del_t_handler (prototype args). */
(ip4_address_t * lo_addr,
			      ip4_address_t * hi_addr,
			      u32 fib_index, int is_del);
  uword *p;

  dslock (sm, 1 /* release hint */ , 6 /* tag */ );

  p = hash_get (im->fib_index_by_table_id, ntohl (mp->vrf_id));

  if (!p)
    {
      rv = VNET_API_ERROR_NO_SUCH_FIB;
      goto out;
    }

  fib_index = p[0];

  rv = vnet_proxy_arp_add_del ((ip4_address_t *) mp->low_address,
			       (ip4_address_t *) mp->hi_address,
			       fib_index, mp->is_add == 0);

out:
  dsunlock (sm);
  REPLY_MACRO (VL_API_PROXY_ARP_ADD_DEL_REPLY);
}

/* Enable/disable proxy ARP on one software interface by toggling the
 * PROXY_ARP flag bit on the sw interface. */
static void
  vl_api_proxy_arp_intfc_enable_disable_t_handler
  (vl_api_proxy_arp_intfc_enable_disable_t * mp)
{
  int rv = 0;
  vnet_main_t *vnm = vnet_get_main ();
  vl_api_proxy_arp_intfc_enable_disable_reply_t *rmp;
  vnet_sw_interface_t *si;
  u32 sw_if_index;

  VALIDATE_SW_IF_INDEX (mp);

  sw_if_index = ntohl (mp->sw_if_index);

  /* belt-and-braces: VALIDATE_SW_IF_INDEX already checked the pool */
  if (pool_is_free_index (vnm->interface_main.sw_interfaces, sw_if_index))
    {
      rv = VNET_API_ERROR_INVALID_SW_IF_INDEX;
      goto out;
    }

  si = vnet_get_sw_interface (vnm, sw_if_index);

  ASSERT (si);

  if (mp->enable_disable)
    si->flags |= VNET_SW_INTERFACE_FLAG_PROXY_ARP;
  else
    si->flags &= ~VNET_SW_INTERFACE_FLAG_PROXY_ARP;

  BAD_SW_IF_INDEX_LABEL;

out:
  REPLY_MACRO (VL_API_PROXY_ARP_INTFC_ENABLE_DISABLE_REPLY);
}

/* Dead code: reachability probe handler, disabled wholesale with #if 0
 * (pre-FIB2.0 adjacency API).  (Continues past the end of this chunk.) */
static void
vl_api_is_address_reachable_t_handler (vl_api_is_address_reachable_t * mp)
{
#if 0
  vpe_main_t *rm = &vpe_main;
  ip4_main_t *im4 = &ip4_main;
  ip6_main_t *im6 = &ip6_main;
  ip_lookup_main_t *lm;
  union
  {
    ip4_address_t ip4;
    ip6_address_t ip6;
  } addr;
  u32 adj_index, sw_if_index;
  vl_api_is_address_reachable_t *rmp;
  ip_adjacency_t *adj;
  unix_shared_memory_queue_t *q;

  q = vl_api_client_index_to_input_queue (mp->client_index);
  if (!q)
    {
      increment_missing_api_client_counter (rm->vlib_main);
      return;
    }

  rmp = vl_msg_api_alloc (sizeof (*rmp));
  clib_memcpy (rmp, mp, sizeof (*rmp));

  sw_if_index = mp->next_hop_sw_if_index;
  clib_memcpy
/* ...continuation of the disabled (#if 0) reachability handler body. */
(&addr, mp->address, sizeof (addr));
  if (mp->is_ipv6)
    {
      lm = &im6->lookup_main;
      adj_index = ip6_fib_lookup (im6, sw_if_index, &addr.ip6);
    }
  else
    {
      lm = &im4->lookup_main;
      // FIXME NOT an ADJ
      adj_index = ip4_fib_lookup (im4, sw_if_index, &addr.ip4);
    }
  if (adj_index == ~0)
    {
      rmp->is_error = 1;
      goto send;
    }
  adj = ip_get_adjacency (lm, adj_index);

  if (adj->lookup_next_index == IP_LOOKUP_NEXT_REWRITE
      && adj->rewrite_header.sw_if_index == sw_if_index)
    {
      rmp->is_known = 1;
    }
  else
    {
      if (adj->lookup_next_index == IP_LOOKUP_NEXT_ARP
	  && adj->rewrite_header.sw_if_index == sw_if_index)
	{
	  if (mp->is_ipv6)
	    ip6_probe_neighbor (rm->vlib_main, &addr.ip6, sw_if_index);
	  else
	    ip4_probe_neighbor (rm->vlib_main, &addr.ip4, sw_if_index);
	}
      else if (adj->lookup_next_index == IP_LOOKUP_NEXT_DROP)
	{
	  rmp->is_known = 1;
	  goto send;
	}
      rmp->is_known = 0;
    }

send:
  vl_msg_api_send_shmem (q, (u8 *) & rmp);
#endif
}

/* Enable/disable MPLS processing on a software interface. */
static void
  vl_api_sw_interface_set_mpls_enable_t_handler
  (vl_api_sw_interface_set_mpls_enable_t * mp)
{
  vl_api_sw_interface_set_mpls_enable_reply_t *rmp;
  int rv = 0;

  VALIDATE_SW_IF_INDEX (mp);

  mpls_sw_interface_enable_disable (&mpls_main,
				    ntohl (mp->sw_if_index), mp->enable);

  BAD_SW_IF_INDEX_LABEL;
  REPLY_MACRO (VL_API_SW_INTERFACE_SET_MPLS_ENABLE_REPLY);
}

/*
 * WARNING: replicated pending api refactor completion
 */
/* Notify one client that an interface was deleted (flags message with
 * deleted = 1). */
static void
send_sw_interface_flags_deleted (vpe_api_main_t * am,
				 unix_shared_memory_queue_t * q,
				 u32 sw_if_index)
{
  vl_api_sw_interface_set_flags_t *mp;

  mp = vl_msg_api_alloc (sizeof (*mp));
  memset (mp, 0, sizeof (*mp));
  mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_SET_FLAGS);
  mp->sw_if_index = ntohl (sw_if_index);

  mp->admin_up_down = 0;
  mp->link_up_down = 0;
  mp->deleted = 1;
  vl_msg_api_send_shmem (q, (u8 *) & mp);
}

/* Broadcast an OAM state-change event to every registered client.
 * (Continues past the end of this chunk.) */
void
send_oam_event (oam_target_t * t)
{
  vpe_api_main_t *vam = &vpe_api_main;
/* ...continuation of send_oam_event. */
unix_shared_memory_queue_t *q;
  vpe_client_registration_t *reg;
  vl_api_oam_event_t *mp;

  /* *INDENT-OFF* */
  pool_foreach(reg, vam->oam_events_registrations,
  ({
    q = vl_api_client_index_to_input_queue (reg->client_index);
    if (q)
      {
        mp = vl_msg_api_alloc (sizeof (*mp));
        mp->_vl_msg_id = ntohs (VL_API_OAM_EVENT);
        clib_memcpy (mp->dst_address, &t->dst_address,
                     sizeof (mp->dst_address));
        mp->state = t->state;
        vl_msg_api_send_shmem (q, (u8 *)&mp);
      }
  }));
  /* *INDENT-ON* */
}

/* Add or delete an OAM (ping-style liveness) target. */
static void
vl_api_oam_add_del_t_handler (vl_api_oam_add_del_t * mp)
{
  vl_api_oam_add_del_reply_t *rmp;
  int rv;

  rv = vpe_oam_add_del_target ((ip4_address_t *) mp->src_address,
			       (ip4_address_t *) mp->dst_address,
			       ntohl (mp->vrf_id), (int) (mp->is_add));

  REPLY_MACRO (VL_API_OAM_ADD_DEL_REPLY);
}

/* Sum RX/TX packet and byte counters over every interface and reply with
 * totals plus the current vector rate.  Replies out-of-band.
 * (Continues past the end of this chunk.) */
static void
vl_api_vnet_get_summary_stats_t_handler (vl_api_vnet_get_summary_stats_t * mp)
{
  stats_main_t *sm = &stats_main;
  vnet_interface_main_t *im = sm->interface_main;
  vl_api_vnet_summary_stats_reply_t *rmp;
  vlib_combined_counter_main_t *cm;
  vlib_counter_t v;
  int i, which;
  u64 total_pkts[VLIB_N_RX_TX];
  u64 total_bytes[VLIB_N_RX_TX];

  unix_shared_memory_queue_t *q =
    vl_api_client_index_to_input_queue (mp->client_index);

  if (!q)
    return;

  rmp = vl_msg_api_alloc (sizeof (*rmp));
  rmp->_vl_msg_id = ntohs (VL_API_VNET_SUMMARY_STATS_REPLY);
  rmp->context = mp->context;
  rmp->retval = 0;

  memset (total_pkts, 0, sizeof (total_pkts));
  memset (total_bytes, 0, sizeof (total_bytes));

  vnet_interface_counter_lock (im);

  vec_foreach (cm, im->combined_sw_if_counters)
  {
    which = cm - im->combined_sw_if_counters;

    for (i = 0; i < vec_len (cm->maxi); i++)
      {
	vlib_get_combined_counter (cm, i, &v);
	total_pkts[which] += v.packets;
	total_bytes[which] += v.bytes;
      }
  }
  vnet_interface_counter_unlock (im);

  rmp->total_pkts[VLIB_RX] = clib_host_to_net_u64 (total_pkts[VLIB_RX]);
  rmp->total_bytes[VLIB_RX] =
/* ...continuation of vl_api_vnet_get_summary_stats_t_handler. */
clib_host_to_net_u64 (total_bytes[VLIB_RX]);
  rmp->total_pkts[VLIB_TX] = clib_host_to_net_u64 (total_pkts[VLIB_TX]);
  rmp->total_bytes[VLIB_TX] = clib_host_to_net_u64 (total_bytes[VLIB_TX]);
  rmp->vector_rate =
    clib_host_to_net_u64 (vlib_last_vector_length_per_node (sm->vlib_main));

  vl_msg_api_send_shmem (q, (u8 *) & rmp);
}

/* Packed route record: 6-bit prefix length + 26-bit index. */
/* *INDENT-OFF* */
typedef CLIB_PACKED (struct {
  ip4_address_t address;
  u32 address_length: 6;
  u32 index:26;
}) ip4_route_t;
/* *INDENT-ON* */

/* Reset one IPv4 FIB: strip MPLS labels and proxy ARP, restore default
 * flow hash, admin-down every interface in the FIB, then flush API- and
 * interface-sourced routes.  (Continues past the end of this chunk.) */
static int
ip4_reset_fib_t_handler (vl_api_reset_fib_t * mp)
{
  vnet_main_t *vnm = vnet_get_main ();
  vnet_interface_main_t *im = &vnm->interface_main;
  ip4_main_t *im4 = &ip4_main;
  static u32 *sw_if_indices_to_shut;
  stats_main_t *sm = &stats_main;
  fib_table_t *fib_table;
  ip4_fib_t *fib;
  u32 sw_if_index;
  int i;
  int rv = VNET_API_ERROR_NO_SUCH_FIB;
  u32 target_fib_id = ntohl (mp->vrf_id);

  dslock (sm, 1 /* release hint */ , 8 /* tag */ );

  /* *INDENT-OFF* */
  pool_foreach (fib_table, im4->fibs,
  ({
    fib = &fib_table->v4;
    vnet_sw_interface_t * si;

    if (fib->table_id != target_fib_id)
      continue;

    /* remove any mpls encap/decap labels */
    mpls_fib_reset_labels (fib->table_id);

    /* remove any proxy arps in this fib */
    vnet_proxy_arp_fib_reset (fib->table_id);

    /* Set the flow hash for this fib to the default */
    vnet_set_ip4_flow_hash (fib->table_id, IP_FLOW_HASH_DEFAULT);

    vec_reset_length (sw_if_indices_to_shut);

    /* Shut down interfaces in this FIB / clean out intfc routes */
    pool_foreach (si, im->sw_interfaces,
    ({
      u32 sw_if_index = si->sw_if_index;

      if (sw_if_index < vec_len (im4->fib_index_by_sw_if_index)
          && (im4->fib_index_by_sw_if_index[si->sw_if_index] ==
              fib->index))
        vec_add1 (sw_if_indices_to_shut, si->sw_if_index);
    }));

    for (i = 0; i < vec_len (sw_if_indices_to_shut); i++) {
      sw_if_index = sw_if_indices_to_shut[i];
      // vec_foreach (sw_if_index, sw_if_indices_to_shut) {

      u32 flags =
/* ...continuation of ip4_reset_fib_t_handler. */
vnet_sw_interface_get_flags (vnm, sw_if_index);
      flags &= ~(VNET_SW_INTERFACE_FLAG_ADMIN_UP);
      vnet_sw_interface_set_flags (vnm, sw_if_index, flags);
    }

    fib_table_flush(fib->index, FIB_PROTOCOL_IP4, FIB_SOURCE_API);
    fib_table_flush(fib->index, FIB_PROTOCOL_IP4, FIB_SOURCE_INTERFACE);

    rv = 0;
    break;
  })); /* pool_foreach (fib) */
  /* *INDENT-ON* */

  dsunlock (sm);
  return rv;
}

/* IPv6 analogue of ip4_reset_fib_t_handler: admin-down the FIB's
 * interfaces and flush API- and interface-sourced routes. */
static int
ip6_reset_fib_t_handler (vl_api_reset_fib_t * mp)
{
  vnet_main_t *vnm = vnet_get_main ();
  vnet_interface_main_t *im = &vnm->interface_main;
  ip6_main_t *im6 = &ip6_main;
  stats_main_t *sm = &stats_main;
  static u32 *sw_if_indices_to_shut;
  fib_table_t *fib_table;
  ip6_fib_t *fib;
  u32 sw_if_index;
  int i;
  int rv = VNET_API_ERROR_NO_SUCH_FIB;
  u32 target_fib_id = ntohl (mp->vrf_id);

  dslock (sm, 1 /* release hint */ , 9 /* tag */ );

  /* *INDENT-OFF* */
  pool_foreach (fib_table, im6->fibs,
  ({
    vnet_sw_interface_t * si;
    fib = &(fib_table->v6);

    if (fib->table_id != target_fib_id)
      continue;

    vec_reset_length (sw_if_indices_to_shut);

    /* Shut down interfaces in this FIB / clean out intfc routes */
    /* NOTE(review): unlike the v4 path, no bounds check against
     * vec_len(im6->fib_index_by_sw_if_index) before indexing. */
    pool_foreach (si, im->sw_interfaces,
    ({
      if (im6->fib_index_by_sw_if_index[si->sw_if_index] ==
          fib->index)
        vec_add1 (sw_if_indices_to_shut, si->sw_if_index);
    }));

    for (i = 0; i < vec_len (sw_if_indices_to_shut); i++) {
      sw_if_index = sw_if_indices_to_shut[i];
      // vec_foreach (sw_if_index, sw_if_indices_to_shut) {

      u32 flags = vnet_sw_interface_get_flags (vnm, sw_if_index);
      flags &= ~(VNET_SW_INTERFACE_FLAG_ADMIN_UP);
      vnet_sw_interface_set_flags (vnm, sw_if_index, flags);
    }

    fib_table_flush(fib->index, FIB_PROTOCOL_IP6, FIB_SOURCE_API);
    fib_table_flush(fib->index, FIB_PROTOCOL_IP6, FIB_SOURCE_INTERFACE);

    rv = 0;
    break;
  })); /* pool_foreach (fib) */
  /* *INDENT-ON* */

  dsunlock (sm);
  return rv;
}

/* Dispatch reset-fib to the v4 or v6 worker and reply.
 * (Continues past the end of this chunk.) */
static void
vl_api_reset_fib_t_handler (vl_api_reset_fib_t * mp)
{
  int rv;
/* ...continuation of vl_api_reset_fib_t_handler. */
vl_api_reset_fib_reply_t *rmp;

  if (mp->is_ipv6)
    rv = ip6_reset_fib_t_handler (mp);
  else
    rv = ip4_reset_fib_t_handler (mp);

  REPLY_MACRO (VL_API_RESET_FIB_REPLY);
}


/* IPv4 leg of the dhcp-proxy-config message. */
static void
dhcpv4_proxy_config (vl_api_dhcp_proxy_config_t * mp)
{
  vl_api_dhcp_proxy_config_reply_t *rmp;
  int rv;

  rv = dhcp_proxy_set_server ((ip4_address_t *) (&mp->dhcp_server),
			      (ip4_address_t *) (&mp->dhcp_src_address),
			      (u32) ntohl (mp->vrf_id),
			      (int) mp->insert_circuit_id,
			      (int) (mp->is_add == 0));

  REPLY_MACRO (VL_API_DHCP_PROXY_CONFIG_REPLY);
}


/* IPv6 leg of the dhcp-proxy-config message. */
static void
dhcpv6_proxy_config (vl_api_dhcp_proxy_config_t * mp)
{
  vl_api_dhcp_proxy_config_reply_t *rmp;
  int rv = -1;

  rv = dhcpv6_proxy_set_server ((ip6_address_t *) (&mp->dhcp_server),
				(ip6_address_t *) (&mp->dhcp_src_address),
				(u32) ntohl (mp->vrf_id),
				(int) mp->insert_circuit_id,
				(int) (mp->is_add == 0));

  REPLY_MACRO (VL_API_DHCP_PROXY_CONFIG_REPLY);
}

/* IPv4 leg of dhcp-proxy-config-2 (separate rx/server VRFs). */
static void
dhcpv4_proxy_config_2 (vl_api_dhcp_proxy_config_2_t * mp)
{
  vl_api_dhcp_proxy_config_reply_t *rmp;
  int rv;

  rv = dhcp_proxy_set_server_2 ((ip4_address_t *) (&mp->dhcp_server),
				(ip4_address_t *) (&mp->dhcp_src_address),
				(u32) ntohl (mp->rx_vrf_id),
				(u32) ntohl (mp->server_vrf_id),
				(int) mp->insert_circuit_id,
				(int) (mp->is_add == 0));

  REPLY_MACRO (VL_API_DHCP_PROXY_CONFIG_2_REPLY);
}


/* IPv6 leg of dhcp-proxy-config-2 (separate rx/server VRFs). */
static void
dhcpv6_proxy_config_2 (vl_api_dhcp_proxy_config_2_t * mp)
{
  vl_api_dhcp_proxy_config_reply_t *rmp;
  int rv = -1;

  rv = dhcpv6_proxy_set_server_2 ((ip6_address_t *) (&mp->dhcp_server),
				  (ip6_address_t *) (&mp->dhcp_src_address),
				  (u32) ntohl (mp->rx_vrf_id),
				  (u32) ntohl (mp->server_vrf_id),
				  (int) mp->insert_circuit_id,
				  (int) (mp->is_add == 0));

  REPLY_MACRO (VL_API_DHCP_PROXY_CONFIG_2_REPLY);
}


/* Set DHCP option-82 VSS parameters for a table (v4 or v6 variant).
 * (Continues past the end of this chunk.) */
static void
vl_api_dhcp_proxy_set_vss_t_handler (vl_api_dhcp_proxy_set_vss_t * mp)
{
  vl_api_dhcp_proxy_set_vss_reply_t *rmp;
  int rv;
  if (!mp->is_ipv6)
    rv =
dhcp_proxy_set_option82_vss (ntohl (mp->tbl_id), + ntohl (mp->oui), + ntohl (mp->fib_id), + (int) mp->is_add == 0); + else + rv = dhcpv6_proxy_set_vss (ntohl (mp->tbl_id), + ntohl (mp->oui), + ntohl (mp->fib_id), (int) mp->is_add == 0); + + REPLY_MACRO (VL_API_DHCP_PROXY_SET_VSS_REPLY); +} + + +static void vl_api_dhcp_proxy_config_t_handler + (vl_api_dhcp_proxy_config_t * mp) +{ + if (mp->is_ipv6 == 0) + dhcpv4_proxy_config (mp); + else + dhcpv6_proxy_config (mp); +} + +static void vl_api_dhcp_proxy_config_2_t_handler + (vl_api_dhcp_proxy_config_2_t * mp) +{ + if (mp->is_ipv6 == 0) + dhcpv4_proxy_config_2 (mp); + else + dhcpv6_proxy_config_2 (mp); +} + +void +dhcp_compl_event_callback (u32 client_index, u32 pid, u8 * hostname, + u8 is_ipv6, u8 * host_address, u8 * router_address, + u8 * host_mac) +{ + unix_shared_memory_queue_t *q; + vl_api_dhcp_compl_event_t *mp; + + q = vl_api_client_index_to_input_queue (client_index); + if (!q) + return; + + mp = vl_msg_api_alloc (sizeof (*mp)); + mp->client_index = client_index; + mp->pid = pid; + mp->is_ipv6 = is_ipv6; + clib_memcpy (&mp->hostname, hostname, vec_len (hostname)); + mp->hostname[vec_len (hostname) + 1] = '\n'; + clib_memcpy (&mp->host_address[0], host_address, 16); + clib_memcpy (&mp->router_address[0], router_address, 16); + + if (NULL != host_mac) + clib_memcpy (&mp->host_mac[0], host_mac, 6); + + mp->_vl_msg_id = ntohs (VL_API_DHCP_COMPL_EVENT); + + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +static void vl_api_dhcp_client_config_t_handler + (vl_api_dhcp_client_config_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + vl_api_dhcp_client_config_reply_t *rmp; + int rv = 0; + + VALIDATE_SW_IF_INDEX (mp); + + rv = dhcp_client_config (vm, ntohl (mp->sw_if_index), + mp->hostname, mp->is_add, mp->client_index, + mp->want_dhcp_event ? 
/* ...continuation of vl_api_dhcp_client_config_t_handler. */
dhcp_compl_event_callback :
			   NULL, mp->pid);

  BAD_SW_IF_INDEX_LABEL;

  REPLY_MACRO (VL_API_DHCP_CLIENT_CONFIG_REPLY);
}

/* Create a loopback interface with the supplied MAC; reply carries the
 * new sw_if_index. */
static void
vl_api_create_loopback_t_handler (vl_api_create_loopback_t * mp)
{
  vl_api_create_loopback_reply_t *rmp;
  u32 sw_if_index;
  int rv;

  rv = vnet_create_loopback_interface (&sw_if_index, mp->mac_address);

  /* *INDENT-OFF* */
  REPLY_MACRO2(VL_API_CREATE_LOOPBACK_REPLY,
  ({
    rmp->sw_if_index = ntohl (sw_if_index);
  }));
  /* *INDENT-ON* */
}

/* Delete a loopback interface by sw_if_index. */
static void
vl_api_delete_loopback_t_handler (vl_api_delete_loopback_t * mp)
{
  vl_api_delete_loopback_reply_t *rmp;
  u32 sw_if_index;
  int rv;

  sw_if_index = ntohl (mp->sw_if_index);
  rv = vnet_delete_loopback_interface (sw_if_index);

  REPLY_MACRO (VL_API_DELETE_LOOPBACK_REPLY);
}

/* Liveness ping: reply with the vpp process id. */
static void
vl_api_control_ping_t_handler (vl_api_control_ping_t * mp)
{
  vl_api_control_ping_reply_t *rmp;
  int rv = 0;

  /* *INDENT-OFF* */
  REPLY_MACRO2(VL_API_CONTROL_PING_REPLY,
  ({
    rmp->vpe_pid = ntohl (getpid());
  }));
  /* *INDENT-ON* */
}

/* CLI output collector: append buffer_bytes to a vector that lives in the
 * shared-memory API heap (grown under the API-region mutex). */
static void
shmem_cli_output (uword arg, u8 * buffer, uword buffer_bytes)
{
  u8 **shmem_vecp = (u8 **) arg;
  u8 *shmem_vec;
  void *oldheap;
  api_main_t *am = &api_main;
  u32 offset;

  shmem_vec = *shmem_vecp;

  offset = vec_len (shmem_vec);

  pthread_mutex_lock (&am->vlib_rp->mutex);
  oldheap = svm_push_data_heap (am->vlib_rp);

  vec_validate (shmem_vec, offset + buffer_bytes - 1);

  clib_memcpy (shmem_vec + offset, buffer, buffer_bytes);

  svm_pop_heap (oldheap);
  pthread_mutex_unlock (&am->vlib_rp->mutex);

  *shmem_vecp = shmem_vec;
}


/* Execute a CLI command passed via shared memory; reply points at a
 * shared-memory vector with the output.  (Continues past this chunk.) */
static void
vl_api_cli_request_t_handler (vl_api_cli_request_t * mp)
{
  vl_api_cli_reply_t *rp;
  unix_shared_memory_queue_t *q;
  vlib_main_t *vm = vlib_get_main ();
  api_main_t *am = &api_main;
  unformat_input_t input;
  u8 *shmem_vec = 0;
  void *oldheap;

  q = vl_api_client_index_to_input_queue (mp->client_index);
  if (!q)
    return;

  rp =
/* ...continuation of vl_api_cli_request_t_handler. */
vl_msg_api_alloc (sizeof (*rp));
  rp->_vl_msg_id = ntohs (VL_API_CLI_REPLY);
  rp->context = mp->context;

  unformat_init_vector (&input, (u8 *) (uword) mp->cmd_in_shmem);

  vlib_cli_input (vm, &input, shmem_cli_output, (uword) & shmem_vec);

  pthread_mutex_lock (&am->vlib_rp->mutex);
  oldheap = svm_push_data_heap (am->vlib_rp);

  /* NUL-terminate the shared-memory output vector */
  vec_add1 (shmem_vec, 0);

  svm_pop_heap (oldheap);
  pthread_mutex_unlock (&am->vlib_rp->mutex);

  rp->reply_in_shmem = (uword) shmem_vec;

  vl_msg_api_send_shmem (q, (u8 *) & rp);
}

/* CLI output collector for the in-band variant: append to an ordinary
 * process-heap vector. */
static void
inband_cli_output (uword arg, u8 * buffer, uword buffer_bytes)
{
  u8 **mem_vecp = (u8 **) arg;
  u8 *mem_vec = *mem_vecp;
  u32 offset = vec_len (mem_vec);

  vec_validate (mem_vec, offset + buffer_bytes - 1);
  clib_memcpy (mem_vec + offset, buffer, buffer_bytes);
  *mem_vecp = mem_vec;
}

/* Execute a CLI command carried in the message body and return the
 * output in-band in the (variable-length) reply. */
static void
vl_api_cli_inband_t_handler (vl_api_cli_inband_t * mp)
{
  vl_api_cli_inband_reply_t *rmp;
  int rv = 0;
  unix_shared_memory_queue_t *q;
  vlib_main_t *vm = vlib_get_main ();
  unformat_input_t input;
  u8 *out_vec = 0;

  q = vl_api_client_index_to_input_queue (mp->client_index);
  if (!q)
    return;

  unformat_init_string (&input, (char *) mp->cmd, ntohl (mp->length));
  vlib_cli_input (vm, &input, inband_cli_output, (uword) & out_vec);

  u32 len = vec_len (out_vec);
  /* *INDENT-OFF* */
  REPLY_MACRO3(VL_API_CLI_INBAND_REPLY, len,
  ({
    rmp->length = htonl (len);
    clib_memcpy (rmp->reply, out_vec, len);
  }));
  /* *INDENT-ON* */
  vec_free (out_vec);
}

/* Set the v4 ARP or v6 neighbor table size limit.
 * (Continues past the end of this chunk.) */
static void
vl_api_set_arp_neighbor_limit_t_handler (vl_api_set_arp_neighbor_limit_t * mp)
{
  int rv;
  vl_api_set_arp_neighbor_limit_reply_t *rmp;
  vnet_main_t *vnm = vnet_get_main ();
  clib_error_t *error;

  vnm->api_errno = 0;

  if (mp->is_ipv6)
    error = ip6_set_neighbor_limit (ntohl (mp->arp_neighbor_limit));
  else
    error = ip4_set_arp_limit (ntohl (mp->arp_neighbor_limit));

  if (error)
    {
      clib_error_report (error);
      rv =
/* ...continuation of vl_api_set_arp_neighbor_limit_t_handler. */
VNET_API_ERROR_UNSPECIFIED;
    }
  else
    {
      rv = vnm->api_errno;
    }

  REPLY_MACRO (VL_API_SET_ARP_NEIGHBOR_LIMIT_REPLY);
}

/* Add/delete an IPv6 segment-routing tunnel.  Segments then tags are
 * deserialized back-to-back from mp->segs_and_tags. */
static void vl_api_sr_tunnel_add_del_t_handler
  (vl_api_sr_tunnel_add_del_t * mp)
{
#if IP6SR == 0
  clib_warning ("unimplemented");
#else
  ip6_sr_add_del_tunnel_args_t _a, *a = &_a;
  int rv = 0;
  vl_api_sr_tunnel_add_del_reply_t *rmp;
  ip6_address_t *segments = 0, *seg;
  ip6_address_t *tags = 0, *tag;
  ip6_address_t *this_address;
  int i;

  if (mp->n_segments == 0)
    {
      /* NOTE(review): raw -11 rather than a VNET_API_ERROR_* code */
      rv = -11;
      goto out;
    }

  memset (a, 0, sizeof (*a));
  a->src_address = (ip6_address_t *) & mp->src_address;
  a->dst_address = (ip6_address_t *) & mp->dst_address;
  a->dst_mask_width = mp->dst_mask_width;
  a->flags_net_byte_order = mp->flags_net_byte_order;
  a->is_del = (mp->is_add == 0);
  a->rx_table_id = ntohl (mp->outer_vrf_id);
  a->tx_table_id = ntohl (mp->inner_vrf_id);

  a->name = format (0, "%s", mp->name);
  if (!(vec_len (a->name)))
    a->name = 0;

  a->policy_name = format (0, "%s", mp->policy_name);
  if (!(vec_len (a->policy_name)))
    a->policy_name = 0;

  /* Yank segments and tags out of the API message */
  this_address = (ip6_address_t *) mp->segs_and_tags;
  for (i = 0; i < mp->n_segments; i++)
    {
      vec_add2 (segments, seg, 1);
      clib_memcpy (seg->as_u8, this_address->as_u8, sizeof (*this_address));
      this_address++;
    }
  for (i = 0; i < mp->n_tags; i++)
    {
      vec_add2 (tags, tag, 1);
      clib_memcpy (tag->as_u8, this_address->as_u8, sizeof (*this_address));
      this_address++;
    }

  a->segments = segments;
  a->tags = tags;

  rv = ip6_sr_add_del_tunnel (a);

out:

  REPLY_MACRO (VL_API_SR_TUNNEL_ADD_DEL_REPLY);
#endif
}

/* Add/delete an SR policy; tunnel names are length-prefixed and packed
 * in mp->tunnel_names.  (Continues past the end of this chunk.) */
static void vl_api_sr_policy_add_del_t_handler
  (vl_api_sr_policy_add_del_t * mp)
{
#if IP6SR == 0
  clib_warning ("unimplemented");
#else
  ip6_sr_add_del_policy_args_t _a, *a = &_a;
  int rv = 0;
  vl_api_sr_policy_add_del_reply_t *rmp;
  int i;

  memset (a, 0, sizeof (*a));
  a->is_del =
/* ...continuation of vl_api_sr_policy_add_del_t_handler. */
(mp->is_add == 0);

  a->name = format (0, "%s", mp->name);
  if (!(vec_len (a->name)))
    {
      rv = VNET_API_ERROR_NO_SUCH_NODE2;
      goto out;
    }

  if (!(mp->tunnel_names[0]))
    {
      rv = VNET_API_ERROR_NO_SUCH_NODE2;
      goto out;
    }

  // start deserializing tunnel_names
  int num_tunnels = mp->tunnel_names[0];	//number of tunnels
  u8 *deser_tun_names = mp->tunnel_names;
  deser_tun_names += 1;		//moving along

  u8 *tun_name = 0;
  int tun_name_len = 0;

  for (i = 0; i < num_tunnels; i++)
    {
      tun_name_len = *deser_tun_names;
      deser_tun_names += 1;
      vec_resize (tun_name, tun_name_len);
      memcpy (tun_name, deser_tun_names, tun_name_len);
      vec_add1 (a->tunnel_names, tun_name);
      deser_tun_names += tun_name_len;
      tun_name = 0;
    }

  rv = ip6_sr_add_del_policy (a);

out:

  REPLY_MACRO (VL_API_SR_POLICY_ADD_DEL_REPLY);
#endif
}

/* Map a multicast address onto an SR policy (needs DPDK for replication). */
static void vl_api_sr_multicast_map_add_del_t_handler
  (vl_api_sr_multicast_map_add_del_t * mp)
{
#if IP6SR == 0
  clib_warning ("unimplemented");
#else
  ip6_sr_add_del_multicastmap_args_t _a, *a = &_a;
  int rv = 0;
  vl_api_sr_multicast_map_add_del_reply_t *rmp;

  memset (a, 0, sizeof (*a));
  a->is_del = (mp->is_add == 0);

  a->multicast_address = (ip6_address_t *) & mp->multicast_address;
  a->policy_name = format (0, "%s", mp->policy_name);

  /* NOTE(review): this tests the *pointer*, which is the address of a
   * message field and can never be NULL — dead check; an all-zeros
   * address is not rejected. */
  if (a->multicast_address == 0)
    {
      rv = -1;
      goto out;
    }

  if (!(a->policy_name))
    {
      rv = -2;
      goto out;
    }

#if DPDK > 0			/* Cannot call replicate without DPDK */
  rv = ip6_sr_add_del_multicastmap (a);
#else
  clib_warning ("multicast replication without DPDK not implemented");
  rv = VNET_API_ERROR_UNIMPLEMENTED;
#endif /* DPDK */

out:

  REPLY_MACRO (VL_API_SR_MULTICAST_MAP_ADD_DEL_REPLY);
#endif
}

/* u32 fields shared by the classify add/del table request; expanded as
 * local declarations and ntohl conversions in the handler below. */
#define foreach_classify_add_del_table_field    \
_(table_index)                                  \
_(nbuckets)                                     \
_(memory_size)                                  \
_(skip_n_vectors)                               \
_(match_n_vectors)                              \
_(next_table_index)                             \
_(miss_next_index)                              \
_(current_data_flag)                            \
_(current_data_offset)

static void
/* Add, update, or delete a classify table.  The underlying API fails
 * silently, so table existence is pre-checked here. */
vl_api_classify_add_del_table_t_handler
  (vl_api_classify_add_del_table_t * mp)
{
  vl_api_classify_add_del_table_reply_t *rmp;
  vnet_classify_main_t *cm = &vnet_classify_main;
  vnet_classify_table_t *t;
  int rv;

/* declare one u32 local per field, then byte-swap each from the message */
#define _(a) u32 a;
  foreach_classify_add_del_table_field;
#undef _

#define _(a) a = ntohl(mp->a);
  foreach_classify_add_del_table_field;
#undef _

  /* The underlying API fails silently, on purpose, so check here */
  if (mp->is_add == 0)		/* delete */
    {
      if (pool_is_free_index (cm->tables, table_index))
	{
	  rv = VNET_API_ERROR_NO_SUCH_TABLE;
	  goto out;
	}
    }
  else				/* add or update */
    {
      if (table_index != ~0 && pool_is_free_index (cm->tables, table_index))
	table_index = ~0;
    }

  rv = vnet_classify_add_del_table
    (cm, mp->mask, nbuckets, memory_size,
     skip_n_vectors, match_n_vectors,
     next_table_index, miss_next_index, &table_index,
     current_data_flag, current_data_offset, mp->is_add, mp->del_chain);

out:
  /* *INDENT-OFF* */
  REPLY_MACRO2(VL_API_CLASSIFY_ADD_DEL_TABLE_REPLY,
  ({
    if (rv == 0 && mp->is_add)
      {
        t = pool_elt_at_index (cm->tables, table_index);
        rmp->skip_n_vectors = ntohl(t->skip_n_vectors);
        rmp->match_n_vectors = ntohl(t->match_n_vectors);
        rmp->new_table_index = ntohl(table_index);
      }
    else
      {
        rmp->skip_n_vectors = ~0;
        rmp->match_n_vectors = ~0;
        rmp->new_table_index = ~0;
      }
  }));
  /* *INDENT-ON* */
}

/* Add/delete a classify session (match entry) in an existing table.
 * (Continues past the end of this chunk.) */
static void vl_api_classify_add_del_session_t_handler
  (vl_api_classify_add_del_session_t * mp)
{
  vnet_classify_main_t *cm = &vnet_classify_main;
  vl_api_classify_add_del_session_reply_t *rmp;
  int rv;
  u32 table_index, hit_next_index, opaque_index, metadata;
  i32 advance;
  u8 action;

  table_index = ntohl (mp->table_index);
  hit_next_index = ntohl (mp->hit_next_index);
  opaque_index = ntohl (mp->opaque_index);
  advance = ntohl (mp->advance);
  action = mp->action;
  metadata = ntohl (mp->metadata);

  rv = vnet_classify_add_del_session
    (cm, table_index,
/* ...continuation of vl_api_classify_add_del_session_t_handler. */
mp->match, hit_next_index, opaque_index,
     advance, action, metadata, mp->is_add);

  REPLY_MACRO (VL_API_CLASSIFY_ADD_DEL_SESSION_REPLY);
}

/* Bind a classify table to an interface for IPv4 or IPv6 input. */
static void vl_api_classify_set_interface_ip_table_t_handler
  (vl_api_classify_set_interface_ip_table_t * mp)
{
  vlib_main_t *vm = vlib_get_main ();
  vl_api_classify_set_interface_ip_table_reply_t *rmp;
  int rv;
  u32 table_index, sw_if_index;

  table_index = ntohl (mp->table_index);
  sw_if_index = ntohl (mp->sw_if_index);

  VALIDATE_SW_IF_INDEX (mp);

  if (mp->is_ipv6)
    rv = vnet_set_ip6_classify_intfc (vm, sw_if_index, table_index);
  else
    rv = vnet_set_ip4_classify_intfc (vm, sw_if_index, table_index);

  BAD_SW_IF_INDEX_LABEL;

  REPLY_MACRO (VL_API_CLASSIFY_SET_INTERFACE_IP_TABLE_REPLY);
}

/* Bind ip4/ip6/other classify tables to an interface's L2 input or
 * output path, enabling the feature iff any table is set. */
static void vl_api_classify_set_interface_l2_tables_t_handler
  (vl_api_classify_set_interface_l2_tables_t * mp)
{
  vl_api_classify_set_interface_l2_tables_reply_t *rmp;
  int rv;
  u32 sw_if_index, ip4_table_index, ip6_table_index, other_table_index;
  int enable;

  ip4_table_index = ntohl (mp->ip4_table_index);
  ip6_table_index = ntohl (mp->ip6_table_index);
  other_table_index = ntohl (mp->other_table_index);
  sw_if_index = ntohl (mp->sw_if_index);

  VALIDATE_SW_IF_INDEX (mp);

  if (mp->is_input)
    rv = vnet_l2_input_classify_set_tables (sw_if_index, ip4_table_index,
					    ip6_table_index,
					    other_table_index);
  else
    rv = vnet_l2_output_classify_set_tables (sw_if_index, ip4_table_index,
					     ip6_table_index,
					     other_table_index);

  if (rv == 0)
    {
      if (ip4_table_index != ~0 || ip6_table_index != ~0
	  || other_table_index != ~0)
	enable = 1;
      else
	enable = 0;

      if (mp->is_input)
	vnet_l2_input_classify_enable_disable (sw_if_index, enable);
      else
	vnet_l2_output_classify_enable_disable (sw_if_index, enable);
    }

  BAD_SW_IF_INDEX_LABEL;

  REPLY_MACRO (VL_API_CLASSIFY_SET_INTERFACE_L2_TABLES_REPLY);
}

/* Clear the L2 FIB (MAC table).  (Body continues in the next chunk.) */
static void
vl_api_l2_fib_clear_table_t_handler (vl_api_l2_fib_clear_table_t * mp)
+{ + int rv = 0; + vl_api_l2_fib_clear_table_reply_t *rmp; + + /* DAW-FIXME: This API should only clear non-static l2fib entries, but + * that is not currently implemented. When that TODO is fixed + * this call should be changed to pass 1 instead of 0. + */ + l2fib_clear_table (0); + + REPLY_MACRO (VL_API_L2_FIB_CLEAR_TABLE_REPLY); +} + +extern void l2_efp_filter_configure (vnet_main_t * vnet_main, + u32 sw_if_index, u32 enable); + +static void +vl_api_l2_interface_efp_filter_t_handler (vl_api_l2_interface_efp_filter_t * + mp) +{ + int rv; + vl_api_l2_interface_efp_filter_reply_t *rmp; + vnet_main_t *vnm = vnet_get_main (); + + // enable/disable the feature + l2_efp_filter_configure (vnm, mp->sw_if_index, mp->enable_disable); + rv = vnm->api_errno; + + REPLY_MACRO (VL_API_L2_INTERFACE_EFP_FILTER_REPLY); +} + +static void + vl_api_l2_interface_vlan_tag_rewrite_t_handler + (vl_api_l2_interface_vlan_tag_rewrite_t * mp) +{ + int rv = 0; + vl_api_l2_interface_vlan_tag_rewrite_reply_t *rmp; + vnet_main_t *vnm = vnet_get_main (); + vlib_main_t *vm = vlib_get_main (); + u32 vtr_op; + + VALIDATE_SW_IF_INDEX (mp); + + vtr_op = ntohl (mp->vtr_op); + + /* The L2 code is unsuspicious */ + switch (vtr_op) + { + case L2_VTR_DISABLED: + case L2_VTR_PUSH_1: + case L2_VTR_PUSH_2: + case L2_VTR_POP_1: + case L2_VTR_POP_2: + case L2_VTR_TRANSLATE_1_1: + case L2_VTR_TRANSLATE_1_2: + case L2_VTR_TRANSLATE_2_1: + case L2_VTR_TRANSLATE_2_2: + break; + + default: + rv = VNET_API_ERROR_INVALID_VALUE; + goto bad_sw_if_index; + } + + rv = l2vtr_configure (vm, vnm, ntohl (mp->sw_if_index), vtr_op, + ntohl (mp->push_dot1q), ntohl (mp->tag1), + ntohl (mp->tag2)); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_L2_INTERFACE_VLAN_TAG_REWRITE_REPLY); +} + +static void +vl_api_create_vhost_user_if_t_handler (vl_api_create_vhost_user_if_t * mp) +{ + int rv = 0; + vl_api_create_vhost_user_if_reply_t *rmp; + u32 sw_if_index = (u32) ~ 0; + vnet_main_t *vnm = vnet_get_main (); + vlib_main_t *vm = 
vlib_get_main (); + + rv = vhost_user_create_if (vnm, vm, (char *) mp->sock_filename, + mp->is_server, &sw_if_index, (u64) ~ 0, + mp->renumber, ntohl (mp->custom_dev_instance), + (mp->use_custom_mac) ? mp->mac_address : NULL); + + /* Remember an interface tag for the new interface */ + if (rv == 0) + { + /* If a tag was supplied... */ + if (mp->tag[0]) + { + /* Make sure it's a proper C-string */ + mp->tag[ARRAY_LEN (mp->tag) - 1] = 0; + u8 *tag = format (0, "%s%c", mp->tag, 0); + vnet_set_sw_interface_tag (vnm, tag, sw_if_index); + } + } + + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_CREATE_VHOST_USER_IF_REPLY, + ({ + rmp->sw_if_index = ntohl (sw_if_index); + })); + /* *INDENT-ON* */ +} + +static void +vl_api_modify_vhost_user_if_t_handler (vl_api_modify_vhost_user_if_t * mp) +{ + int rv = 0; + vl_api_modify_vhost_user_if_reply_t *rmp; + u32 sw_if_index = ntohl (mp->sw_if_index); + + vnet_main_t *vnm = vnet_get_main (); + vlib_main_t *vm = vlib_get_main (); + + rv = vhost_user_modify_if (vnm, vm, (char *) mp->sock_filename, + mp->is_server, sw_if_index, (u64) ~ 0, + mp->renumber, ntohl (mp->custom_dev_instance)); + + REPLY_MACRO (VL_API_MODIFY_VHOST_USER_IF_REPLY); +} + +static void +vl_api_delete_vhost_user_if_t_handler (vl_api_delete_vhost_user_if_t * mp) +{ + int rv = 0; + vl_api_delete_vhost_user_if_reply_t *rmp; + vpe_api_main_t *vam = &vpe_api_main; + u32 sw_if_index = ntohl (mp->sw_if_index); + + vnet_main_t *vnm = vnet_get_main (); + vlib_main_t *vm = vlib_get_main (); + + rv = vhost_user_delete_if (vnm, vm, sw_if_index); + + REPLY_MACRO (VL_API_DELETE_VHOST_USER_IF_REPLY); + if (!rv) + { + unix_shared_memory_queue_t *q = + vl_api_client_index_to_input_queue (mp->client_index); + if (!q) + return; + + vnet_clear_sw_interface_tag (vnm, sw_if_index); + send_sw_interface_flags_deleted (vam, q, sw_if_index); + } +} + +static void + vl_api_sw_interface_vhost_user_details_t_handler + (vl_api_sw_interface_vhost_user_details_t * mp) +{ + clib_warning ("BUG"); +} + 
+static void +send_sw_interface_vhost_user_details (vpe_api_main_t * am, + unix_shared_memory_queue_t * q, + vhost_user_intf_details_t * vui, + u32 context) +{ + vl_api_sw_interface_vhost_user_details_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_VHOST_USER_DETAILS); + mp->sw_if_index = ntohl (vui->sw_if_index); + mp->virtio_net_hdr_sz = ntohl (vui->virtio_net_hdr_sz); + mp->features = clib_net_to_host_u64 (vui->features); + mp->is_server = vui->is_server; + mp->num_regions = ntohl (vui->num_regions); + mp->sock_errno = ntohl (vui->sock_errno); + mp->context = context; + + strncpy ((char *) mp->sock_filename, + (char *) vui->sock_filename, ARRAY_LEN (mp->sock_filename) - 1); + strncpy ((char *) mp->interface_name, + (char *) vui->if_name, ARRAY_LEN (mp->interface_name) - 1); + + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +static void + vl_api_sw_interface_vhost_user_dump_t_handler + (vl_api_sw_interface_vhost_user_dump_t * mp) +{ + int rv = 0; + vpe_api_main_t *am = &vpe_api_main; + vnet_main_t *vnm = vnet_get_main (); + vlib_main_t *vm = vlib_get_main (); + vhost_user_intf_details_t *ifaces = NULL; + vhost_user_intf_details_t *vuid = NULL; + unix_shared_memory_queue_t *q; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + return; + + rv = vhost_user_dump_ifs (vnm, vm, &ifaces); + if (rv) + return; + + vec_foreach (vuid, ifaces) + { + send_sw_interface_vhost_user_details (am, q, vuid, mp->context); + } + vec_free (ifaces); +} + +static void +send_sw_if_l2tpv3_tunnel_details (vpe_api_main_t * am, + unix_shared_memory_queue_t * q, + l2t_session_t * s, + l2t_main_t * lm, u32 context) +{ + vl_api_sw_if_l2tpv3_tunnel_details_t *mp; + u8 *if_name = NULL; + vnet_sw_interface_t *si = NULL; + + si = vnet_get_hw_sw_interface (lm->vnet_main, s->hw_if_index); + + if_name = format (if_name, "%U", + format_vnet_sw_interface_name, lm->vnet_main, si); + + mp = 
vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_SW_IF_L2TPV3_TUNNEL_DETAILS); + strncpy ((char *) mp->interface_name, + (char *) if_name, ARRAY_LEN (mp->interface_name) - 1); + mp->sw_if_index = ntohl (si->sw_if_index); + mp->local_session_id = s->local_session_id; + mp->remote_session_id = s->remote_session_id; + mp->local_cookie[0] = s->local_cookie[0]; + mp->local_cookie[1] = s->local_cookie[1]; + mp->remote_cookie = s->remote_cookie; + clib_memcpy (mp->client_address, &s->client_address, + sizeof (s->client_address)); + clib_memcpy (mp->our_address, &s->our_address, sizeof (s->our_address)); + mp->l2_sublayer_present = s->l2_sublayer_present; + mp->context = context; + + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + + +static void +vl_api_sw_if_l2tpv3_tunnel_dump_t_handler (vl_api_sw_if_l2tpv3_tunnel_dump_t * + mp) +{ + vpe_api_main_t *am = &vpe_api_main; + l2t_main_t *lm = &l2t_main; + unix_shared_memory_queue_t *q; + l2t_session_t *session; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + return; + + /* *INDENT-OFF* */ + pool_foreach (session, lm->sessions, + ({ + send_sw_if_l2tpv3_tunnel_details (am, q, session, lm, mp->context); + })); + /* *INDENT-ON* */ +} + +static void +vl_api_l2_fib_table_entry_t_handler (vl_api_l2_fib_table_entry_t * mp) +{ + clib_warning ("BUG"); +} + +static void +send_l2fib_table_entry (vpe_api_main_t * am, + unix_shared_memory_queue_t * q, + l2fib_entry_key_t * l2fe_key, + l2fib_entry_result_t * l2fe_res, u32 context) +{ + vl_api_l2_fib_table_entry_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_L2_FIB_TABLE_ENTRY); + + mp->bd_id = + ntohl (l2input_main.bd_configs[l2fe_key->fields.bd_index].bd_id); + + mp->mac = l2fib_make_key (l2fe_key->fields.mac, 0); + mp->sw_if_index = ntohl (l2fe_res->fields.sw_if_index); + mp->static_mac = l2fe_res->fields.static_mac; + mp->filter_mac = 
l2fe_res->fields.filter; + mp->bvi_mac = l2fe_res->fields.bvi; + mp->context = context; + + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +static void +vl_api_l2_fib_table_dump_t_handler (vl_api_l2_fib_table_dump_t * mp) +{ + vpe_api_main_t *am = &vpe_api_main; + bd_main_t *bdm = &bd_main; + l2fib_entry_key_t *l2fe_key = NULL; + l2fib_entry_result_t *l2fe_res = NULL; + u32 ni, bd_id = ntohl (mp->bd_id); + u32 bd_index; + unix_shared_memory_queue_t *q; + uword *p; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + return; + + /* see l2fib_table_dump: ~0 means "any" */ + if (bd_id == ~0) + bd_index = ~0; + else + { + p = hash_get (bdm->bd_index_by_bd_id, bd_id); + if (p == 0) + return; + + bd_index = p[0]; + } + + l2fib_table_dump (bd_index, &l2fe_key, &l2fe_res); + + vec_foreach_index (ni, l2fe_key) + { + send_l2fib_table_entry (am, q, vec_elt_at_index (l2fe_key, ni), + vec_elt_at_index (l2fe_res, ni), mp->context); + } + vec_free (l2fe_key); + vec_free (l2fe_res); +} + +static void +vl_api_show_version_t_handler (vl_api_show_version_t * mp) +{ + vl_api_show_version_reply_t *rmp; + int rv = 0; + char *vpe_api_get_build_directory (void); + char *vpe_api_get_version (void); + char *vpe_api_get_build_date (void); + + unix_shared_memory_queue_t *q = + vl_api_client_index_to_input_queue (mp->client_index); + + if (!q) + return; + + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_SHOW_VERSION_REPLY, + ({ + strncpy ((char *) rmp->program, "vpe", ARRAY_LEN(rmp->program)-1); + strncpy ((char *) rmp->build_directory, vpe_api_get_build_directory(), + ARRAY_LEN(rmp->build_directory)-1); + strncpy ((char *) rmp->version, vpe_api_get_version(), + ARRAY_LEN(rmp->version)-1); + strncpy ((char *) rmp->build_date, vpe_api_get_build_date(), + ARRAY_LEN(rmp->build_date)-1); + })); + /* *INDENT-ON* */ +} + +static void +vl_api_get_node_index_t_handler (vl_api_get_node_index_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + vl_api_get_node_index_reply_t *rmp; + 
vlib_node_t *n; + int rv = 0; + u32 node_index = ~0; + + n = vlib_get_node_by_name (vm, mp->node_name); + + if (n == 0) + rv = VNET_API_ERROR_NO_SUCH_NODE; + else + node_index = n->index; + + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_GET_NODE_INDEX_REPLY, + ({ + rmp->node_index = ntohl(node_index); + })); + /* *INDENT-ON* */ +} + +static void +vl_api_get_next_index_t_handler (vl_api_get_next_index_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + vl_api_get_next_index_reply_t *rmp; + vlib_node_t *node, *next_node; + int rv = 0; + u32 next_node_index = ~0, next_index = ~0; + uword *p; + + node = vlib_get_node_by_name (vm, mp->node_name); + + if (node == 0) + { + rv = VNET_API_ERROR_NO_SUCH_NODE; + goto out; + } + + next_node = vlib_get_node_by_name (vm, mp->next_name); + + if (next_node == 0) + { + rv = VNET_API_ERROR_NO_SUCH_NODE2; + goto out; + } + else + next_node_index = next_node->index; + + p = hash_get (node->next_slot_by_node, next_node_index); + + if (p == 0) + { + rv = VNET_API_ERROR_NO_SUCH_ENTRY; + goto out; + } + else + next_index = p[0]; + +out: + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_GET_NEXT_INDEX_REPLY, + ({ + rmp->next_index = ntohl(next_index); + })); + /* *INDENT-ON* */ +} + +static void +vl_api_add_node_next_t_handler (vl_api_add_node_next_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + vl_api_add_node_next_reply_t *rmp; + vlib_node_t *n, *next; + int rv = 0; + u32 next_index = ~0; + + n = vlib_get_node_by_name (vm, mp->node_name); + + if (n == 0) + { + rv = VNET_API_ERROR_NO_SUCH_NODE; + goto out; + } + + next = vlib_get_node_by_name (vm, mp->next_name); + + if (next == 0) + rv = VNET_API_ERROR_NO_SUCH_NODE2; + else + next_index = vlib_node_add_next (vm, n->index, next->index); + +out: + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_GET_NODE_INDEX_REPLY, + ({ + rmp->next_index = ntohl(next_index); + })); + /* *INDENT-ON* */ +} + +static void vl_api_l2tpv3_create_tunnel_t_handler + (vl_api_l2tpv3_create_tunnel_t * mp) +{ + 
vl_api_l2tpv3_create_tunnel_reply_t *rmp; + l2t_main_t *lm = &l2t_main; + u32 sw_if_index = (u32) ~ 0; + int rv; + + if (mp->is_ipv6 != 1) + { + rv = VNET_API_ERROR_UNIMPLEMENTED; + goto out; + } + + u32 encap_fib_index; + + if (mp->encap_vrf_id != ~0) + { + uword *p; + ip6_main_t *im = &ip6_main; + if (! + (p = + hash_get (im->fib_index_by_table_id, ntohl (mp->encap_vrf_id)))) + { + rv = VNET_API_ERROR_NO_SUCH_FIB; + goto out; + } + encap_fib_index = p[0]; + } + else + { + encap_fib_index = ~0; + } + + rv = create_l2tpv3_ipv6_tunnel (lm, + (ip6_address_t *) mp->client_address, + (ip6_address_t *) mp->our_address, + ntohl (mp->local_session_id), + ntohl (mp->remote_session_id), + clib_net_to_host_u64 (mp->local_cookie), + clib_net_to_host_u64 (mp->remote_cookie), + mp->l2_sublayer_present, + encap_fib_index, &sw_if_index); + +out: + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_L2TPV3_CREATE_TUNNEL_REPLY, + ({ + rmp->sw_if_index = ntohl (sw_if_index); + })); + /* *INDENT-ON* */ +} + +static void vl_api_l2tpv3_set_tunnel_cookies_t_handler + (vl_api_l2tpv3_set_tunnel_cookies_t * mp) +{ + vl_api_l2tpv3_set_tunnel_cookies_reply_t *rmp; + l2t_main_t *lm = &l2t_main; + int rv; + + VALIDATE_SW_IF_INDEX (mp); + + rv = l2tpv3_set_tunnel_cookies (lm, ntohl (mp->sw_if_index), + clib_net_to_host_u64 (mp->new_local_cookie), + clib_net_to_host_u64 + (mp->new_remote_cookie)); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_L2TPV3_SET_TUNNEL_COOKIES_REPLY); +} + +static void vl_api_l2tpv3_interface_enable_disable_t_handler + (vl_api_l2tpv3_interface_enable_disable_t * mp) +{ + int rv; + vnet_main_t *vnm = vnet_get_main (); + vl_api_l2tpv3_interface_enable_disable_reply_t *rmp; + + VALIDATE_SW_IF_INDEX (mp); + + rv = l2tpv3_interface_enable_disable + (vnm, ntohl (mp->sw_if_index), mp->enable_disable); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_L2TPV3_INTERFACE_ENABLE_DISABLE_REPLY); +} + +static void vl_api_l2tpv3_set_lookup_key_t_handler + (vl_api_l2tpv3_set_lookup_key_t * 
mp) +{ + int rv = 0; + l2t_main_t *lm = &l2t_main; + vl_api_l2tpv3_set_lookup_key_reply_t *rmp; + + if (mp->key > L2T_LOOKUP_SESSION_ID) + { + rv = VNET_API_ERROR_INVALID_VALUE; + goto out; + } + + lm->lookup_type = mp->key; + +out: + REPLY_MACRO (VL_API_L2TPV3_SET_LOOKUP_KEY_REPLY); +} + +static void vl_api_vxlan_add_del_tunnel_t_handler + (vl_api_vxlan_add_del_tunnel_t * mp) +{ + vl_api_vxlan_add_del_tunnel_reply_t *rmp; + int rv = 0; + vnet_vxlan_add_del_tunnel_args_t _a, *a = &_a; + u32 encap_fib_index; + uword *p; + ip4_main_t *im = &ip4_main; + vnet_main_t *vnm = vnet_get_main (); + u32 sw_if_index = ~0; + + p = hash_get (im->fib_index_by_table_id, ntohl (mp->encap_vrf_id)); + if (!p) + { + rv = VNET_API_ERROR_NO_SUCH_FIB; + goto out; + } + encap_fib_index = p[0]; + memset (a, 0, sizeof (*a)); + + a->is_add = mp->is_add; + a->is_ip6 = mp->is_ipv6; + + /* ip addresses sent in network byte order */ + ip46_from_addr_buf (mp->is_ipv6, mp->dst_address, &a->dst); + ip46_from_addr_buf (mp->is_ipv6, mp->src_address, &a->src); + + /* Check src & dst are different */ + if (ip46_address_cmp (&a->dst, &a->src) == 0) + { + rv = VNET_API_ERROR_SAME_SRC_DST; + goto out; + } + a->mcast_sw_if_index = ntohl (mp->mcast_sw_if_index); + if (ip46_address_is_multicast (&a->dst) && + pool_is_free_index (vnm->interface_main.sw_interfaces, + a->mcast_sw_if_index)) + { + rv = VNET_API_ERROR_INVALID_SW_IF_INDEX; + goto out; + } + a->encap_fib_index = encap_fib_index; + a->decap_next_index = ntohl (mp->decap_next_index); + a->vni = ntohl (mp->vni); + rv = vnet_vxlan_add_del_tunnel (a, &sw_if_index); + +out: + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_VXLAN_ADD_DEL_TUNNEL_REPLY, + ({ + rmp->sw_if_index = ntohl (sw_if_index); + })); + /* *INDENT-ON* */ +} + +static void send_vxlan_tunnel_details + (vxlan_tunnel_t * t, unix_shared_memory_queue_t * q, u32 context) +{ + vl_api_vxlan_tunnel_details_t *rmp; + ip4_main_t *im4 = &ip4_main; + ip6_main_t *im6 = &ip6_main; + u8 is_ipv6 = 
!ip46_address_is_ip4 (&t->dst); + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_VXLAN_TUNNEL_DETAILS); + if (is_ipv6) + { + memcpy (rmp->src_address, t->src.ip6.as_u8, 16); + memcpy (rmp->dst_address, t->dst.ip6.as_u8, 16); + rmp->encap_vrf_id = htonl (im6->fibs[t->encap_fib_index].ft_table_id); + } + else + { + memcpy (rmp->src_address, t->src.ip4.as_u8, 4); + memcpy (rmp->dst_address, t->dst.ip4.as_u8, 4); + rmp->encap_vrf_id = htonl (im4->fibs[t->encap_fib_index].ft_table_id); + } + rmp->mcast_sw_if_index = htonl (t->mcast_sw_if_index); + rmp->vni = htonl (t->vni); + rmp->decap_next_index = htonl (t->decap_next_index); + rmp->sw_if_index = htonl (t->sw_if_index); + rmp->is_ipv6 = is_ipv6; + rmp->context = context; + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void vl_api_vxlan_tunnel_dump_t_handler + (vl_api_vxlan_tunnel_dump_t * mp) +{ + unix_shared_memory_queue_t *q; + vxlan_main_t *vxm = &vxlan_main; + vxlan_tunnel_t *t; + u32 sw_if_index; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + { + return; + } + + sw_if_index = ntohl (mp->sw_if_index); + + if (~0 == sw_if_index) + { + /* *INDENT-OFF* */ + pool_foreach (t, vxm->tunnels, + ({ + send_vxlan_tunnel_details(t, q, mp->context); + })); + /* *INDENT-ON* */ + } + else + { + if ((sw_if_index >= vec_len (vxm->tunnel_index_by_sw_if_index)) || + (~0 == vxm->tunnel_index_by_sw_if_index[sw_if_index])) + { + return; + } + t = &vxm->tunnels[vxm->tunnel_index_by_sw_if_index[sw_if_index]]; + send_vxlan_tunnel_details (t, q, mp->context); + } +} + +static void vl_api_gre_add_del_tunnel_t_handler + (vl_api_gre_add_del_tunnel_t * mp) +{ + vl_api_gre_add_del_tunnel_reply_t *rmp; + int rv = 0; + vnet_gre_add_del_tunnel_args_t _a, *a = &_a; + u32 outer_fib_id; + uword *p; + ip4_main_t *im = &ip4_main; + u32 sw_if_index = ~0; + + p = hash_get (im->fib_index_by_table_id, ntohl (mp->outer_fib_id)); + if (!p) + { + rv = 
VNET_API_ERROR_NO_SUCH_FIB; + goto out; + } + outer_fib_id = p[0]; + + /* Check src & dst are different */ + if ((mp->is_ipv6 && memcmp (mp->src_address, mp->dst_address, 16) == 0) || + (!mp->is_ipv6 && memcmp (mp->src_address, mp->dst_address, 4) == 0)) + { + rv = VNET_API_ERROR_SAME_SRC_DST; + goto out; + } + memset (a, 0, sizeof (*a)); + + a->is_add = mp->is_add; + a->teb = mp->teb; + + /* ip addresses sent in network byte order */ + clib_memcpy (&(a->src), mp->src_address, 4); + clib_memcpy (&(a->dst), mp->dst_address, 4); + + a->outer_fib_id = outer_fib_id; + rv = vnet_gre_add_del_tunnel (a, &sw_if_index); + +out: + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_GRE_ADD_DEL_TUNNEL_REPLY, + ({ + rmp->sw_if_index = ntohl (sw_if_index); + })); + /* *INDENT-ON* */ +} + +static void send_gre_tunnel_details + (gre_tunnel_t * t, unix_shared_memory_queue_t * q, u32 context) +{ + vl_api_gre_tunnel_details_t *rmp; + ip4_main_t *im = &ip4_main; + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_GRE_TUNNEL_DETAILS); + clib_memcpy (rmp->src_address, &(t->tunnel_src), 4); + clib_memcpy (rmp->dst_address, &(t->tunnel_dst), 4); + rmp->outer_fib_id = htonl (im->fibs[t->outer_fib_index].ft_table_id); + rmp->teb = (GRE_TUNNEL_TYPE_TEB == t->type); + rmp->sw_if_index = htonl (t->sw_if_index); + rmp->context = context; + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void +vl_api_gre_tunnel_dump_t_handler (vl_api_gre_tunnel_dump_t * mp) +{ + unix_shared_memory_queue_t *q; + gre_main_t *gm = &gre_main; + gre_tunnel_t *t; + u32 sw_if_index; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + { + return; + } + + sw_if_index = ntohl (mp->sw_if_index); + + if (~0 == sw_if_index) + { + /* *INDENT-OFF* */ + pool_foreach (t, gm->tunnels, + ({ + send_gre_tunnel_details(t, q, mp->context); + })); + /* *INDENT-ON* */ + } + else + { + if ((sw_if_index >= vec_len (gm->tunnel_index_by_sw_if_index)) || + 
(~0 == gm->tunnel_index_by_sw_if_index[sw_if_index])) + { + return; + } + t = &gm->tunnels[gm->tunnel_index_by_sw_if_index[sw_if_index]]; + send_gre_tunnel_details (t, q, mp->context); + } +} + +static void +vl_api_l2_patch_add_del_t_handler (vl_api_l2_patch_add_del_t * mp) +{ + extern int vnet_l2_patch_add_del (u32 rx_sw_if_index, u32 tx_sw_if_index, + int is_add); + vl_api_l2_patch_add_del_reply_t *rmp; + int vnet_l2_patch_add_del (u32 rx_sw_if_index, u32 tx_sw_if_index, + int is_add); + int rv = 0; + + VALIDATE_RX_SW_IF_INDEX (mp); + VALIDATE_TX_SW_IF_INDEX (mp); + + rv = vnet_l2_patch_add_del (ntohl (mp->rx_sw_if_index), + ntohl (mp->tx_sw_if_index), + (int) (mp->is_add != 0)); + + BAD_RX_SW_IF_INDEX_LABEL; + BAD_TX_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_L2_PATCH_ADD_DEL_REPLY); +} + +static void + vl_api_vxlan_gpe_add_del_tunnel_t_handler + (vl_api_vxlan_gpe_add_del_tunnel_t * mp) +{ + vl_api_vxlan_gpe_add_del_tunnel_reply_t *rmp; + int rv = 0; + vnet_vxlan_gpe_add_del_tunnel_args_t _a, *a = &_a; + u32 encap_fib_index, decap_fib_index; + u8 protocol; + uword *p; + ip4_main_t *im = &ip4_main; + u32 sw_if_index = ~0; + + + p = hash_get (im->fib_index_by_table_id, ntohl (mp->encap_vrf_id)); + if (!p) + { + rv = VNET_API_ERROR_NO_SUCH_FIB; + goto out; + } + encap_fib_index = p[0]; + + protocol = mp->protocol; + + /* Interpret decap_vrf_id as an opaque if sending to other-than-ip4-input */ + if (protocol == VXLAN_GPE_INPUT_NEXT_IP4_INPUT) + { + p = hash_get (im->fib_index_by_table_id, ntohl (mp->decap_vrf_id)); + if (!p) + { + rv = VNET_API_ERROR_NO_SUCH_INNER_FIB; + goto out; + } + decap_fib_index = p[0]; + } + else + { + decap_fib_index = ntohl (mp->decap_vrf_id); + } + + /* Check src & dst are different */ + if ((mp->is_ipv6 && memcmp (mp->local, mp->remote, 16) == 0) || + (!mp->is_ipv6 && memcmp (mp->local, mp->remote, 4) == 0)) + { + rv = VNET_API_ERROR_SAME_SRC_DST; + goto out; + } + memset (a, 0, sizeof (*a)); + + a->is_add = mp->is_add; + a->is_ip6 = 
mp->is_ipv6; + /* ip addresses sent in network byte order */ + if (a->is_ip6) + { + clib_memcpy (&(a->local.ip6), mp->local, 16); + clib_memcpy (&(a->remote.ip6), mp->remote, 16); + } + else + { + clib_memcpy (&(a->local.ip4), mp->local, 4); + clib_memcpy (&(a->remote.ip4), mp->remote, 4); + } + a->encap_fib_index = encap_fib_index; + a->decap_fib_index = decap_fib_index; + a->protocol = protocol; + a->vni = ntohl (mp->vni); + rv = vnet_vxlan_gpe_add_del_tunnel (a, &sw_if_index); + +out: + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_VXLAN_GPE_ADD_DEL_TUNNEL_REPLY, + ({ + rmp->sw_if_index = ntohl (sw_if_index); + })); + /* *INDENT-ON* */ +} + +static void send_vxlan_gpe_tunnel_details + (vxlan_gpe_tunnel_t * t, unix_shared_memory_queue_t * q, u32 context) +{ + vl_api_vxlan_gpe_tunnel_details_t *rmp; + ip4_main_t *im4 = &ip4_main; + ip6_main_t *im6 = &ip6_main; + u8 is_ipv6 = !(t->flags & VXLAN_GPE_TUNNEL_IS_IPV4); + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_VXLAN_GPE_TUNNEL_DETAILS); + if (is_ipv6) + { + memcpy (rmp->local, &(t->local.ip6), 16); + memcpy (rmp->remote, &(t->remote.ip6), 16); + rmp->encap_vrf_id = htonl (im6->fibs[t->encap_fib_index].ft_table_id); + rmp->decap_vrf_id = htonl (im6->fibs[t->decap_fib_index].ft_table_id); + } + else + { + memcpy (rmp->local, &(t->local.ip4), 4); + memcpy (rmp->remote, &(t->remote.ip4), 4); + rmp->encap_vrf_id = htonl (im4->fibs[t->encap_fib_index].ft_table_id); + rmp->decap_vrf_id = htonl (im4->fibs[t->decap_fib_index].ft_table_id); + } + rmp->vni = htonl (t->vni); + rmp->protocol = t->protocol; + rmp->sw_if_index = htonl (t->sw_if_index); + rmp->is_ipv6 = is_ipv6; + rmp->context = context; + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void vl_api_vxlan_gpe_tunnel_dump_t_handler + (vl_api_vxlan_gpe_tunnel_dump_t * mp) +{ + unix_shared_memory_queue_t *q; + vxlan_gpe_main_t *vgm = &vxlan_gpe_main; + vxlan_gpe_tunnel_t *t; + u32 sw_if_index; + + q 
= vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + { + return; + } + + sw_if_index = ntohl (mp->sw_if_index); + + if (~0 == sw_if_index) + { + /* *INDENT-OFF* */ + pool_foreach (t, vgm->tunnels, + ({ + send_vxlan_gpe_tunnel_details(t, q, mp->context); + })); + /* *INDENT-ON* */ + } + else + { + if ((sw_if_index >= vec_len (vgm->tunnel_index_by_sw_if_index)) || + (~0 == vgm->tunnel_index_by_sw_if_index[sw_if_index])) + { + return; + } + t = &vgm->tunnels[vgm->tunnel_index_by_sw_if_index[sw_if_index]]; + send_vxlan_gpe_tunnel_details (t, q, mp->context); + } +} + +/** Used for transferring locators via VPP API */ +/* *INDENT-OFF* */ +typedef CLIB_PACKED (struct { + u32 sw_if_index; /**< locator sw_if_index */ + u8 priority; /**< locator priority */ + u8 weight; /**< locator weight */ +}) ls_locator_t; +/* *INDENT-ON* */ + +static void +vl_api_lisp_add_del_locator_set_t_handler (vl_api_lisp_add_del_locator_set_t * + mp) +{ + vl_api_lisp_add_del_locator_set_reply_t *rmp; + int rv = 0; + vnet_lisp_add_del_locator_set_args_t _a, *a = &_a; + locator_t locator; + ls_locator_t *ls_loc; + u32 ls_index = ~0, locator_num; + u8 *locator_name = NULL; + int i; + + memset (a, 0, sizeof (a[0])); + + locator_name = format (0, "%s", mp->locator_set_name); + + a->name = locator_name; + a->is_add = mp->is_add; + a->local = 1; + locator_num = clib_net_to_host_u32 (mp->locator_num); + + memset (&locator, 0, sizeof (locator)); + for (i = 0; i < locator_num; i++) + { + ls_loc = &((ls_locator_t *) mp->locators)[i]; + VALIDATE_SW_IF_INDEX (ls_loc); + + locator.sw_if_index = htonl (ls_loc->sw_if_index); + locator.priority = ls_loc->priority; + locator.weight = ls_loc->weight; + locator.local = 1; + vec_add1 (a->locators, locator); + } + + rv = vnet_lisp_add_del_locator_set (a, &ls_index); + + BAD_SW_IF_INDEX_LABEL; + + vec_free (locator_name); + vec_free (a->locators); + + /* *INDENT-OFF* */ + REPLY_MACRO2 (VL_API_LISP_ADD_DEL_LOCATOR_SET_REPLY, + ({ + rmp->ls_index = 
clib_host_to_net_u32 (ls_index); + })); + /* *INDENT-ON* */ +} + +static void +vl_api_lisp_add_del_locator_t_handler (vl_api_lisp_add_del_locator_t * mp) +{ + vl_api_lisp_add_del_locator_reply_t *rmp; + int rv = 0; + locator_t locator, *locators = NULL; + vnet_lisp_add_del_locator_set_args_t _a, *a = &_a; + u32 ls_index = ~0; + u8 *locator_name = NULL; + + memset (&locator, 0, sizeof (locator)); + memset (a, 0, sizeof (a[0])); + + locator.sw_if_index = ntohl (mp->sw_if_index); + locator.priority = mp->priority; + locator.weight = mp->weight; + locator.local = 1; + vec_add1 (locators, locator); + + locator_name = format (0, "%s", mp->locator_set_name); + + a->name = locator_name; + a->locators = locators; + a->is_add = mp->is_add; + a->local = 1; + + rv = vnet_lisp_add_del_locator (a, NULL, &ls_index); + + vec_free (locators); + vec_free (locator_name); + + REPLY_MACRO (VL_API_LISP_ADD_DEL_LOCATOR_REPLY); +} + +static int +unformat_lisp_eid_api (gid_address_t * dst, u32 vni, u8 type, void *src, + u8 len) +{ + switch (type) + { + case 0: /* ipv4 */ + gid_address_type (dst) = GID_ADDR_IP_PREFIX; + gid_address_ip_set (dst, src, IP4); + gid_address_ippref_len (dst) = len; + ip_prefix_normalize (&gid_address_ippref (dst)); + break; + case 1: /* ipv6 */ + gid_address_type (dst) = GID_ADDR_IP_PREFIX; + gid_address_ip_set (dst, src, IP6); + gid_address_ippref_len (dst) = len; + ip_prefix_normalize (&gid_address_ippref (dst)); + break; + case 2: /* l2 mac */ + gid_address_type (dst) = GID_ADDR_MAC; + clib_memcpy (&gid_address_mac (dst), src, 6); + break; + default: + /* unknown type */ + return VNET_API_ERROR_INVALID_VALUE; + } + + gid_address_vni (dst) = vni; + + return 0; +} + +static void +vl_api_lisp_add_del_local_eid_t_handler (vl_api_lisp_add_del_local_eid_t * mp) +{ + vl_api_lisp_add_del_local_eid_reply_t *rmp; + lisp_cp_main_t *lcm = vnet_lisp_cp_get_main (); + int rv = 0; + gid_address_t _eid, *eid = &_eid; + uword *p = NULL; + u32 locator_set_index = ~0, map_index 
= ~0; + vnet_lisp_add_del_mapping_args_t _a, *a = &_a; + u8 *name = NULL, *key = NULL; + memset (a, 0, sizeof (a[0])); + memset (eid, 0, sizeof (eid[0])); + + rv = unformat_lisp_eid_api (eid, clib_net_to_host_u32 (mp->vni), + mp->eid_type, mp->eid, mp->prefix_len); + if (rv) + goto out; + + name = format (0, "%s", mp->locator_set_name); + p = hash_get_mem (lcm->locator_set_index_by_name, name); + if (!p) + { + rv = VNET_API_ERROR_INVALID_VALUE; + goto out; + } + locator_set_index = p[0]; + + if (*mp->key) + key = format (0, "%s", mp->key); + + /* XXX treat batch configuration */ + a->is_add = mp->is_add; + gid_address_copy (&a->eid, eid); + a->locator_set_index = locator_set_index; + a->local = 1; + a->key = key; + a->key_id = clib_net_to_host_u16 (mp->key_id); + + rv = vnet_lisp_add_del_local_mapping (a, &map_index); + +out: + vec_free (name); + vec_free (key); + gid_address_free (&a->eid); + + REPLY_MACRO (VL_API_LISP_ADD_DEL_LOCAL_EID_REPLY); +} + +static void + vl_api_lisp_eid_table_add_del_map_t_handler + (vl_api_lisp_eid_table_add_del_map_t * mp) +{ + vl_api_lisp_eid_table_add_del_map_reply_t *rmp; + int rv = 0; + rv = vnet_lisp_eid_table_map (clib_net_to_host_u32 (mp->vni), + clib_net_to_host_u32 (mp->dp_table), + mp->is_l2, mp->is_add); +REPLY_MACRO (VL_API_LISP_EID_TABLE_ADD_DEL_MAP_REPLY)} + +/** Used for transferring locators via VPP API */ +/* *INDENT-OFF* */ +typedef CLIB_PACKED (struct { + u8 is_ip4; /**< is locator an IPv4 address */ + u8 priority; /**< locator priority */ + u8 weight; /**< locator weight */ + u8 addr[16]; /**< IPv4/IPv6 address */ +}) rloc_t; +/* *INDENT-ON* */ + +static locator_pair_t * +unformat_lisp_loc_pairs (void *lcl_locs, void *rmt_locs, u32 rloc_num) +{ + u32 i; + locator_pair_t *pairs = 0, pair; + rloc_t *r; + + for (i = 0; i < rloc_num; i++) + { + /* local locator */ + r = &((rloc_t *) lcl_locs)[i]; + memset (&pair.lcl_loc, 0, sizeof (pair.lcl_loc)); + ip_address_set (&pair.lcl_loc, &r->addr, r->is_ip4 ? 
IP4 : IP6); + + /* remote locators */ + r = &((rloc_t *) rmt_locs)[i]; + memset (&pair.rmt_loc, 0, sizeof (pair.rmt_loc)); + ip_address_set (&pair.rmt_loc, &r->addr, r->is_ip4 ? IP4 : IP6); + + pair.priority = r->priority; + pair.weight = r->weight; + + vec_add1 (pairs, pair); + } + return pairs; +} + +static locator_t * +unformat_lisp_locs (void *rmt_locs, u32 rloc_num) +{ + u32 i; + locator_t *locs = 0, loc; + rloc_t *r; + + for (i = 0; i < rloc_num; i++) + { + /* remote locators */ + r = &((rloc_t *) rmt_locs)[i]; + memset (&loc, 0, sizeof (loc)); + gid_address_ip_set (&loc.address, &r->addr, r->is_ip4 ? IP4 : IP6); + + loc.priority = r->priority; + loc.weight = r->weight; + + vec_add1 (locs, loc); + } + return locs; +} + +static void + vl_api_lisp_gpe_add_del_fwd_entry_t_handler + (vl_api_lisp_gpe_add_del_fwd_entry_t * mp) +{ + vl_api_lisp_gpe_add_del_fwd_entry_reply_t *rmp; + vnet_lisp_gpe_add_del_fwd_entry_args_t _a, *a = &_a; + locator_pair_t *pairs = 0; + int rv = 0; + + memset (a, 0, sizeof (a[0])); + + rv = unformat_lisp_eid_api (&a->rmt_eid, mp->vni, mp->eid_type, + mp->rmt_eid, mp->rmt_len); + rv |= unformat_lisp_eid_api (&a->lcl_eid, mp->vni, mp->eid_type, + mp->lcl_eid, mp->lcl_len); + + pairs = unformat_lisp_loc_pairs (mp->lcl_locs, mp->rmt_locs, mp->loc_num); + + if (rv || 0 == pairs) + goto send_reply; + + a->is_add = mp->is_add; + a->locator_pairs = pairs; + a->dp_table = mp->dp_table; + a->vni = mp->vni; + a->action = mp->action; + + rv = vnet_lisp_gpe_add_del_fwd_entry (a, 0); + vec_free (pairs); +send_reply: + REPLY_MACRO (VL_API_LISP_GPE_ADD_DEL_FWD_ENTRY_REPLY); +} + +static void +vl_api_lisp_add_del_map_server_t_handler (vl_api_lisp_add_del_map_server_t + * mp) +{ + vl_api_lisp_add_del_map_server_reply_t *rmp; + int rv = 0; + ip_address_t addr; + + memset (&addr, 0, sizeof (addr)); + + ip_address_set (&addr, mp->ip_address, mp->is_ipv6 ? 
IP6 : IP4); + rv = vnet_lisp_add_del_map_server (&addr, mp->is_add); + + REPLY_MACRO (VL_API_LISP_ADD_DEL_MAP_SERVER_REPLY); +} + +static void +vl_api_lisp_add_del_map_resolver_t_handler (vl_api_lisp_add_del_map_resolver_t + * mp) +{ + vl_api_lisp_add_del_map_resolver_reply_t *rmp; + int rv = 0; + vnet_lisp_add_del_map_resolver_args_t _a, *a = &_a; + + memset (a, 0, sizeof (a[0])); + + a->is_add = mp->is_add; + ip_address_set (&a->address, mp->ip_address, mp->is_ipv6 ? IP6 : IP4); + + rv = vnet_lisp_add_del_map_resolver (a); + + REPLY_MACRO (VL_API_LISP_ADD_DEL_MAP_RESOLVER_REPLY); +} + +static void +vl_api_lisp_gpe_enable_disable_t_handler (vl_api_lisp_gpe_enable_disable_t * + mp) +{ + vl_api_lisp_gpe_enable_disable_reply_t *rmp; + int rv = 0; + vnet_lisp_gpe_enable_disable_args_t _a, *a = &_a; + + a->is_en = mp->is_en; + vnet_lisp_gpe_enable_disable (a); + + REPLY_MACRO (VL_API_LISP_GPE_ENABLE_DISABLE_REPLY); +} + +static void + vl_api_lisp_map_register_enable_disable_t_handler + (vl_api_lisp_map_register_enable_disable_t * mp) +{ + vl_api_lisp_map_register_enable_disable_reply_t *rmp; + int rv = 0; + + vnet_lisp_map_register_enable_disable (mp->is_enabled); + REPLY_MACRO (VL_API_LISP_ENABLE_DISABLE_REPLY); +} + +static void + vl_api_lisp_rloc_probe_enable_disable_t_handler + (vl_api_lisp_rloc_probe_enable_disable_t * mp) +{ + vl_api_lisp_rloc_probe_enable_disable_reply_t *rmp; + int rv = 0; + + vnet_lisp_rloc_probe_enable_disable (mp->is_enabled); + REPLY_MACRO (VL_API_LISP_ENABLE_DISABLE_REPLY); +} + +static void +vl_api_lisp_enable_disable_t_handler (vl_api_lisp_enable_disable_t * mp) +{ + vl_api_lisp_enable_disable_reply_t *rmp; + int rv = 0; + + vnet_lisp_enable_disable (mp->is_en); + REPLY_MACRO (VL_API_LISP_ENABLE_DISABLE_REPLY); +} + +static void +vl_api_lisp_gpe_add_del_iface_t_handler (vl_api_lisp_gpe_add_del_iface_t * mp) +{ + vl_api_lisp_gpe_add_del_iface_reply_t *rmp; + int rv = 0; + + if (mp->is_l2) + { + if (mp->is_add) + { + if (~0 == + 
lisp_gpe_tenant_l2_iface_add_or_lock (mp->vni, mp->dp_table)) + rv = 1; + } + else + lisp_gpe_tenant_l2_iface_unlock (mp->vni); + } + else + { + if (mp->is_add) + { + if (~0 == + lisp_gpe_tenant_l3_iface_add_or_lock (mp->vni, mp->dp_table)) + rv = 1; + } + else + lisp_gpe_tenant_l3_iface_unlock (mp->vni); + } + + REPLY_MACRO (VL_API_LISP_GPE_ADD_DEL_IFACE_REPLY); +} + +static void + vl_api_show_lisp_map_request_mode_t_handler + (vl_api_show_lisp_map_request_mode_t * mp) +{ + int rv = 0; + vl_api_show_lisp_map_request_mode_reply_t *rmp; + + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_SHOW_LISP_MAP_REQUEST_MODE_REPLY, + ({ + rmp->mode = vnet_lisp_get_map_request_mode (); + })); + /* *INDENT-ON* */ +} + +static void +vl_api_lisp_map_request_mode_t_handler (vl_api_lisp_map_request_mode_t * mp) +{ + vl_api_lisp_map_request_mode_reply_t *rmp; + int rv = 0; + + rv = vnet_lisp_set_map_request_mode (mp->mode); + + REPLY_MACRO (VL_API_LISP_MAP_REQUEST_MODE_REPLY); +} + +static void +vl_api_lisp_pitr_set_locator_set_t_handler (vl_api_lisp_pitr_set_locator_set_t + * mp) +{ + vl_api_lisp_pitr_set_locator_set_reply_t *rmp; + int rv = 0; + u8 *ls_name = 0; + + ls_name = format (0, "%s", mp->ls_name); + rv = vnet_lisp_pitr_set_locator_set (ls_name, mp->is_add); + vec_free (ls_name); + + REPLY_MACRO (VL_API_LISP_PITR_SET_LOCATOR_SET_REPLY); +} + +static void + vl_api_lisp_add_del_map_request_itr_rlocs_t_handler + (vl_api_lisp_add_del_map_request_itr_rlocs_t * mp) +{ + vl_api_lisp_add_del_map_request_itr_rlocs_reply_t *rmp; + int rv = 0; + u8 *locator_set_name = NULL; + vnet_lisp_add_del_mreq_itr_rloc_args_t _a, *a = &_a; + + locator_set_name = format (0, "%s", mp->locator_set_name); + + a->is_add = mp->is_add; + a->locator_set_name = locator_set_name; + + rv = vnet_lisp_add_del_mreq_itr_rlocs (a); + + vec_free (locator_set_name); + + REPLY_MACRO (VL_API_LISP_ADD_DEL_MAP_REQUEST_ITR_RLOCS_REPLY); +} + +static void + vl_api_lisp_add_del_remote_mapping_t_handler + 
(vl_api_lisp_add_del_remote_mapping_t * mp) +{ + locator_t *rlocs = 0; + vl_api_lisp_add_del_remote_mapping_reply_t *rmp; + int rv = 0; + gid_address_t _eid, *eid = &_eid; + u32 rloc_num = clib_net_to_host_u32 (mp->rloc_num); + + memset (eid, 0, sizeof (eid[0])); + + rv = unformat_lisp_eid_api (eid, clib_net_to_host_u32 (mp->vni), + mp->eid_type, mp->eid, mp->eid_len); + if (rv) + goto send_reply; + + rlocs = unformat_lisp_locs (mp->rlocs, rloc_num); + + if (!mp->is_add) + { + vnet_lisp_add_del_adjacency_args_t _a, *a = &_a; + gid_address_copy (&a->reid, eid); + a->is_add = 0; + rv = vnet_lisp_add_del_adjacency (a); + if (rv) + { + goto out; + } + } + + /* NOTE: for now this works as a static remote mapping, i.e., + * not authoritative and ttl infinite. */ + rv = vnet_lisp_add_del_mapping (eid, rlocs, mp->action, 0, ~0, + mp->is_add, 1 /* is_static */ , 0); + + if (mp->del_all) + vnet_lisp_clear_all_remote_adjacencies (); + +out: + vec_free (rlocs); +send_reply: + REPLY_MACRO (VL_API_LISP_ADD_DEL_REMOTE_MAPPING_REPLY); +} + +static void +vl_api_lisp_add_del_adjacency_t_handler (vl_api_lisp_add_del_adjacency_t * mp) +{ + vl_api_lisp_add_del_adjacency_reply_t *rmp; + vnet_lisp_add_del_adjacency_args_t _a, *a = &_a; + + int rv = 0; + memset (a, 0, sizeof (a[0])); + + rv = unformat_lisp_eid_api (&a->leid, clib_net_to_host_u32 (mp->vni), + mp->eid_type, mp->leid, mp->leid_len); + rv |= unformat_lisp_eid_api (&a->reid, clib_net_to_host_u32 (mp->vni), + mp->eid_type, mp->reid, mp->reid_len); + + if (rv) + goto send_reply; + + a->is_add = mp->is_add; + rv = vnet_lisp_add_del_adjacency (a); + +send_reply: + REPLY_MACRO (VL_API_LISP_ADD_DEL_ADJACENCY_REPLY); +} + +static void +send_lisp_locator_details (lisp_cp_main_t * lcm, + locator_t * loc, + unix_shared_memory_queue_t * q, u32 context) +{ + vl_api_lisp_locator_details_t *rmp; + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_LISP_LOCATOR_DETAILS); + 
rmp->context = context; + + rmp->local = loc->local; + if (loc->local) + { + rmp->sw_if_index = ntohl (loc->sw_if_index); + } + else + { + rmp->is_ipv6 = gid_address_ip_version (&loc->address); + ip_address_copy_addr (rmp->ip_address, &gid_address_ip (&loc->address)); + } + rmp->priority = loc->priority; + rmp->weight = loc->weight; + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void +vl_api_lisp_locator_dump_t_handler (vl_api_lisp_locator_dump_t * mp) +{ + u8 *ls_name = 0; + unix_shared_memory_queue_t *q = 0; + lisp_cp_main_t *lcm = vnet_lisp_cp_get_main (); + locator_set_t *lsit = 0; + locator_t *loc = 0; + u32 ls_index = ~0, *locit = 0; + uword *p = 0; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + { + return; + } + + if (mp->is_index_set) + ls_index = htonl (mp->ls_index); + else + { + /* make sure we get a proper C-string */ + mp->ls_name[sizeof (mp->ls_name) - 1] = 0; + ls_name = format (0, "%s", mp->ls_name); + p = hash_get_mem (lcm->locator_set_index_by_name, ls_name); + if (!p) + goto out; + ls_index = p[0]; + } + + if (pool_is_free_index (lcm->locator_set_pool, ls_index)) + return; + + lsit = pool_elt_at_index (lcm->locator_set_pool, ls_index); + + vec_foreach (locit, lsit->locator_indices) + { + loc = pool_elt_at_index (lcm->locator_pool, locit[0]); + send_lisp_locator_details (lcm, loc, q, mp->context); + }; +out: + vec_free (ls_name); +} + +static void +send_lisp_locator_set_details (lisp_cp_main_t * lcm, + locator_set_t * lsit, + unix_shared_memory_queue_t * q, + u32 context, u32 ls_index) +{ + vl_api_lisp_locator_set_details_t *rmp; + u8 *str = 0; + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_LISP_LOCATOR_SET_DETAILS); + rmp->context = context; + + rmp->ls_index = htonl (ls_index); + if (lsit->local) + { + ASSERT (lsit->name != NULL); + strncpy ((char *) rmp->ls_name, (char *) lsit->name, + vec_len (lsit->name)); + } + else + { + str = format 
(0, "", ls_index); + strncpy ((char *) rmp->ls_name, (char *) str, vec_len (str)); + vec_free (str); + } + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void +vl_api_lisp_locator_set_dump_t_handler (vl_api_lisp_locator_set_dump_t * mp) +{ + unix_shared_memory_queue_t *q = NULL; + lisp_cp_main_t *lcm = vnet_lisp_cp_get_main (); + locator_set_t *lsit = NULL; + u8 filter; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + { + return; + } + + filter = mp->filter; + /* *INDENT-OFF* */ + pool_foreach (lsit, lcm->locator_set_pool, + ({ + if (filter && !((1 == filter && lsit->local) || + (2 == filter && !lsit->local))) + { + continue; + } + send_lisp_locator_set_details (lcm, lsit, q, mp->context, + lsit - lcm->locator_set_pool); + })); + /* *INDENT-ON* */ +} + +static void +lisp_fid_put_api (u8 * dst, fid_address_t * src, u8 * prefix_length) +{ + ASSERT (prefix_length); + ip_prefix_t *ippref = &fid_addr_ippref (src); + + switch (fid_addr_type (src)) + { + case FID_ADDR_IP_PREF: + if (ip_prefix_version (ippref) == IP4) + clib_memcpy (dst, &ip_prefix_v4 (ippref), 4); + else + clib_memcpy (dst, &ip_prefix_v6 (ippref), 16); + prefix_length[0] = ip_prefix_len (ippref); + break; + + case FID_ADDR_MAC: + prefix_length[0] = 0; + clib_memcpy (dst, fid_addr_mac (src), 6); + break; + + default: + clib_warning ("Unknown FID type %d!", fid_addr_type (src)); + break; + } +} + +static u8 +fid_type_to_api_type (fid_address_t * fid) +{ + ip_prefix_t *ippref; + + switch (fid_addr_type (fid)) + { + case FID_ADDR_IP_PREF: + ippref = &fid_addr_ippref (fid); + if (ip_prefix_version (ippref) == IP4) + return 0; + else if (ip_prefix_version (ippref) == IP6) + return 1; + else + return ~0; + + case FID_ADDR_MAC: + return 2; + } + + return ~0; +} + +static void +send_lisp_eid_table_details (mapping_t * mapit, + unix_shared_memory_queue_t * q, + u32 context, u8 filter) +{ + fid_address_t *fid; + lisp_cp_main_t *lcm = vnet_lisp_cp_get_main (); + locator_set_t 
*ls = 0; + vl_api_lisp_eid_table_details_t *rmp = NULL; + gid_address_t *gid = NULL; + u8 *mac = 0; + ip_prefix_t *ip_prefix = NULL; + + switch (filter) + { + case 0: /* all mappings */ + break; + + case 1: /* local only */ + if (!mapit->local) + return; + break; + case 2: /* remote only */ + if (mapit->local) + return; + break; + default: + clib_warning ("Filter error, unknown filter: %d", filter); + return; + } + + gid = &mapit->eid; + ip_prefix = &gid_address_ippref (gid); + mac = gid_address_mac (gid); + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_LISP_EID_TABLE_DETAILS); + + ls = pool_elt_at_index (lcm->locator_set_pool, mapit->locator_set_index); + if (vec_len (ls->locator_indices) == 0) + rmp->locator_set_index = ~0; + else + rmp->locator_set_index = clib_host_to_net_u32 (mapit->locator_set_index); + + rmp->is_local = mapit->local; + rmp->ttl = clib_host_to_net_u32 (mapit->ttl); + rmp->action = mapit->action; + rmp->authoritative = mapit->authoritative; + + switch (gid_address_type (gid)) + { + case GID_ADDR_SRC_DST: + rmp->is_src_dst = 1; + fid = &gid_address_sd_src (gid); + rmp->eid_type = fid_type_to_api_type (fid); + lisp_fid_put_api (rmp->seid, &gid_address_sd_src (gid), + &rmp->seid_prefix_len); + lisp_fid_put_api (rmp->eid, &gid_address_sd_dst (gid), + &rmp->eid_prefix_len); + break; + case GID_ADDR_IP_PREFIX: + rmp->eid_prefix_len = ip_prefix_len (ip_prefix); + if (ip_prefix_version (ip_prefix) == IP4) + { + rmp->eid_type = 0; /* ipv4 type */ + clib_memcpy (rmp->eid, &ip_prefix_v4 (ip_prefix), + sizeof (ip_prefix_v4 (ip_prefix))); + } + else + { + rmp->eid_type = 1; /* ipv6 type */ + clib_memcpy (rmp->eid, &ip_prefix_v6 (ip_prefix), + sizeof (ip_prefix_v6 (ip_prefix))); + } + break; + case GID_ADDR_MAC: + rmp->eid_type = 2; /* l2 mac type */ + clib_memcpy (rmp->eid, mac, 6); + break; + default: + ASSERT (0); + } + rmp->context = context; + rmp->vni = clib_host_to_net_u32 
(gid_address_vni (gid)); + rmp->key_id = clib_host_to_net_u16 (mapit->key_id); + memcpy (rmp->key, mapit->key, vec_len (mapit->key)); + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void +vl_api_lisp_eid_table_dump_t_handler (vl_api_lisp_eid_table_dump_t * mp) +{ + u32 mi; + unix_shared_memory_queue_t *q = NULL; + lisp_cp_main_t *lcm = vnet_lisp_cp_get_main (); + mapping_t *mapit = NULL; + gid_address_t _eid, *eid = &_eid; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + { + return; + } + + if (mp->eid_set) + { + memset (eid, 0, sizeof (*eid)); + + unformat_lisp_eid_api (eid, clib_net_to_host_u32 (mp->vni), + mp->eid_type, mp->eid, mp->prefix_length); + + mi = gid_dictionary_lookup (&lcm->mapping_index_by_gid, eid); + if ((u32) ~ 0 == mi) + return; + + mapit = pool_elt_at_index (lcm->mapping_pool, mi); + send_lisp_eid_table_details (mapit, q, mp->context, + 0 /* ignore filter */ ); + } + else + { + /* *INDENT-OFF* */ + pool_foreach (mapit, lcm->mapping_pool, + ({ + send_lisp_eid_table_details(mapit, q, mp->context, + mp->filter); + })); + /* *INDENT-ON* */ + } +} + +static void +send_lisp_gpe_fwd_entry_details (lisp_gpe_fwd_entry_t * lfe, + unix_shared_memory_queue_t * q, u32 context) +{ + vl_api_lisp_gpe_tunnel_details_t *rmp; + lisp_gpe_main_t *lgm = &lisp_gpe_main; + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_LISP_GPE_TUNNEL_DETAILS); + + rmp->tunnels = lfe - lgm->lisp_fwd_entry_pool; + + rmp->is_ipv6 = ip_prefix_version (&(lfe->key->rmt.ippref)) == IP6 ? 
1 : 0; + ip_address_copy_addr (rmp->source_ip, + &ip_prefix_addr (&(lfe->key->rmt.ippref))); + ip_address_copy_addr (rmp->destination_ip, + &ip_prefix_addr (&(lfe->key->rmt.ippref))); + + rmp->encap_fib_id = htonl (0); + rmp->decap_fib_id = htonl (lfe->eid_fib_index); + rmp->iid = htonl (lfe->key->vni); + rmp->context = context; + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void +vl_api_lisp_gpe_tunnel_dump_t_handler (vl_api_lisp_gpe_tunnel_dump_t * mp) +{ + unix_shared_memory_queue_t *q = NULL; + lisp_gpe_main_t *lgm = &lisp_gpe_main; + lisp_gpe_fwd_entry_t *lfe = NULL; + + if (pool_elts (lgm->lisp_fwd_entry_pool) == 0) + { + return; + } + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + { + return; + } + + /* *INDENT-OFF* */ + pool_foreach(lfe, lgm->lisp_fwd_entry_pool, + ({ + send_lisp_gpe_fwd_entry_details(lfe, q, mp->context); + })); + /* *INDENT-ON* */ +} + +static void +send_lisp_map_server_details (ip_address_t * ip, + unix_shared_memory_queue_t * q, u32 context) +{ + vl_api_lisp_map_server_details_t *rmp = NULL; + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_LISP_MAP_SERVER_DETAILS); + + switch (ip_addr_version (ip)) + { + case IP4: + rmp->is_ipv6 = 0; + clib_memcpy (rmp->ip_address, &ip_addr_v4 (ip), + sizeof (ip_addr_v4 (ip))); + break; + + case IP6: + rmp->is_ipv6 = 1; + clib_memcpy (rmp->ip_address, &ip_addr_v6 (ip), + sizeof (ip_addr_v6 (ip))); + break; + + default: + ASSERT (0); + } + rmp->context = context; + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void +vl_api_lisp_map_server_dump_t_handler (vl_api_lisp_map_server_dump_t * mp) +{ + unix_shared_memory_queue_t *q = NULL; + lisp_cp_main_t *lcm = vnet_lisp_cp_get_main (); + lisp_msmr_t *mr; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + { + return; + } + + vec_foreach (mr, lcm->map_servers) + { + send_lisp_map_server_details (&mr->address, q, 
mp->context); + } +} + +static void +send_lisp_map_resolver_details (ip_address_t * ip, + unix_shared_memory_queue_t * q, u32 context) +{ + vl_api_lisp_map_resolver_details_t *rmp = NULL; + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_LISP_MAP_RESOLVER_DETAILS); + + switch (ip_addr_version (ip)) + { + case IP4: + rmp->is_ipv6 = 0; + clib_memcpy (rmp->ip_address, &ip_addr_v4 (ip), + sizeof (ip_addr_v4 (ip))); + break; + + case IP6: + rmp->is_ipv6 = 1; + clib_memcpy (rmp->ip_address, &ip_addr_v6 (ip), + sizeof (ip_addr_v6 (ip))); + break; + + default: + ASSERT (0); + } + rmp->context = context; + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void +vl_api_lisp_map_resolver_dump_t_handler (vl_api_lisp_map_resolver_dump_t * mp) +{ + unix_shared_memory_queue_t *q = NULL; + lisp_cp_main_t *lcm = vnet_lisp_cp_get_main (); + lisp_msmr_t *mr; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + { + return; + } + + vec_foreach (mr, lcm->map_resolvers) + { + send_lisp_map_resolver_details (&mr->address, q, mp->context); + } +} + +static void +send_eid_table_map_pair (hash_pair_t * p, + unix_shared_memory_queue_t * q, u32 context) +{ + vl_api_lisp_eid_table_map_details_t *rmp = NULL; + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_LISP_EID_TABLE_MAP_DETAILS); + + rmp->vni = clib_host_to_net_u32 (p->key); + rmp->dp_table = clib_host_to_net_u32 (p->value[0]); + rmp->context = context; + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void +vl_api_lisp_eid_table_map_dump_t_handler (vl_api_lisp_eid_table_map_dump_t * + mp) +{ + unix_shared_memory_queue_t *q = NULL; + lisp_cp_main_t *lcm = vnet_lisp_cp_get_main (); + hash_pair_t *p; + uword *vni_table = 0; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + { + return; + } + + if (mp->is_l2) + { + vni_table = lcm->bd_id_by_vni; + } + else 
+ { + vni_table = lcm->table_id_by_vni; + } + + /* *INDENT-OFF* */ + hash_foreach_pair (p, vni_table, + ({ + send_eid_table_map_pair (p, q, mp->context); + })); + /* *INDENT-ON* */ +} + +static void +send_eid_table_vni (u32 vni, unix_shared_memory_queue_t * q, u32 context) +{ + vl_api_lisp_eid_table_vni_details_t *rmp = 0; + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_LISP_EID_TABLE_VNI_DETAILS); + rmp->context = context; + rmp->vni = clib_host_to_net_u32 (vni); + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void +lisp_adjacency_copy (vl_api_lisp_adjacency_t * dst, lisp_adjacency_t * adjs) +{ + lisp_adjacency_t *adj; + vl_api_lisp_adjacency_t a; + u32 i, n = vec_len (adjs); + + for (i = 0; i < n; i++) + { + adj = vec_elt_at_index (adjs, i); + memset (&a, 0, sizeof (a)); + + switch (gid_address_type (&adj->reid)) + { + case GID_ADDR_IP_PREFIX: + a.reid_prefix_len = gid_address_ippref_len (&adj->reid); + a.leid_prefix_len = gid_address_ippref_len (&adj->leid); + if (gid_address_ip_version (&adj->reid) == IP4) + { + a.eid_type = 0; /* ipv4 type */ + clib_memcpy (a.reid, &gid_address_ip (&adj->reid), 4); + clib_memcpy (a.leid, &gid_address_ip (&adj->leid), 4); + } + else + { + a.eid_type = 1; /* ipv6 type */ + clib_memcpy (a.reid, &gid_address_ip (&adj->reid), 16); + clib_memcpy (a.leid, &gid_address_ip (&adj->leid), 16); + } + break; + case GID_ADDR_MAC: + a.eid_type = 2; /* l2 mac type */ + mac_copy (a.reid, gid_address_mac (&adj->reid)); + mac_copy (a.leid, gid_address_mac (&adj->leid)); + break; + default: + ASSERT (0); + } + dst[i] = a; + } +} + +static void + vl_api_show_lisp_rloc_probe_state_t_handler + (vl_api_show_lisp_rloc_probe_state_t * mp) +{ + vl_api_show_lisp_rloc_probe_state_reply_t *rmp = 0; + int rv = 0; + + /* *INDENT-OFF* */ + REPLY_MACRO2 (VL_API_SHOW_LISP_RLOC_PROBE_STATE_REPLY, + { + rmp->is_enabled = vnet_lisp_rloc_probe_state_get (); + }); + /* *INDENT-ON* */ +} + 
+static void + vl_api_show_lisp_map_register_state_t_handler + (vl_api_show_lisp_map_register_state_t * mp) +{ + vl_api_show_lisp_map_register_state_reply_t *rmp = 0; + int rv = 0; + + /* *INDENT-OFF* */ + REPLY_MACRO2 (VL_API_SHOW_LISP_MAP_REGISTER_STATE_REPLY, + { + rmp->is_enabled = vnet_lisp_map_register_state_get (); + }); + /* *INDENT-ON* */ +} + +static void +vl_api_lisp_adjacencies_get_t_handler (vl_api_lisp_adjacencies_get_t * mp) +{ + vl_api_lisp_adjacencies_get_reply_t *rmp = 0; + lisp_adjacency_t *adjs = 0; + int rv = 0; + vl_api_lisp_adjacency_t a; + u32 size = ~0; + u32 vni = clib_net_to_host_u32 (mp->vni); + + adjs = vnet_lisp_adjacencies_get_by_vni (vni); + size = vec_len (adjs) * sizeof (a); + + /* *INDENT-OFF* */ + REPLY_MACRO4 (VL_API_LISP_ADJACENCIES_GET_REPLY, size, + { + rmp->count = clib_host_to_net_u32 (vec_len (adjs)); + lisp_adjacency_copy (rmp->adjacencies, adjs); + }); + /* *INDENT-ON* */ + + vec_free (adjs); +} + +static void +vl_api_lisp_eid_table_vni_dump_t_handler (vl_api_lisp_eid_table_vni_dump_t * + mp) +{ + hash_pair_t *p; + u32 *vnis = 0; + unix_shared_memory_queue_t *q = 0; + lisp_cp_main_t *lcm = vnet_lisp_cp_get_main (); + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + { + return; + } + + /* *INDENT-OFF* */ + hash_foreach_pair (p, lcm->table_id_by_vni, + ({ + hash_set (vnis, p->key, 0); + })); + + hash_foreach_pair (p, lcm->bd_id_by_vni, + ({ + hash_set (vnis, p->key, 0); + })); + + hash_foreach_pair (p, vnis, + ({ + send_eid_table_vni (p->key, q, mp->context); + })); + /* *INDENT-ON* */ + + hash_free (vnis); +} + +static void +vl_api_show_lisp_status_t_handler (vl_api_show_lisp_status_t * mp) +{ + unix_shared_memory_queue_t *q = NULL; + vl_api_show_lisp_status_reply_t *rmp = NULL; + int rv = 0; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + { + return; + } + + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_SHOW_LISP_STATUS_REPLY, + ({ + rmp->gpe_status = 
vnet_lisp_gpe_enable_disable_status (); + rmp->feature_status = vnet_lisp_enable_disable_status (); + })); + /* *INDENT-ON* */ +} + +static void + vl_api_lisp_get_map_request_itr_rlocs_t_handler + (vl_api_lisp_get_map_request_itr_rlocs_t * mp) +{ + unix_shared_memory_queue_t *q = NULL; + vl_api_lisp_get_map_request_itr_rlocs_reply_t *rmp = NULL; + lisp_cp_main_t *lcm = vnet_lisp_cp_get_main (); + locator_set_t *loc_set = 0; + u8 *tmp_str = 0; + int rv = 0; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + { + return; + } + + if (~0 == lcm->mreq_itr_rlocs) + { + tmp_str = format (0, " "); + } + else + { + loc_set = + pool_elt_at_index (lcm->locator_set_pool, lcm->mreq_itr_rlocs); + tmp_str = format (0, "%s", loc_set->name); + } + + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_LISP_GET_MAP_REQUEST_ITR_RLOCS_REPLY, + ({ + strncpy((char *) rmp->locator_set_name, (char *) tmp_str, + ARRAY_LEN(rmp->locator_set_name) - 1); + })); + /* *INDENT-ON* */ + + vec_free (tmp_str); +} + +static void +vl_api_show_lisp_pitr_t_handler (vl_api_show_lisp_pitr_t * mp) +{ + unix_shared_memory_queue_t *q = NULL; + vl_api_show_lisp_pitr_reply_t *rmp = NULL; + lisp_cp_main_t *lcm = vnet_lisp_cp_get_main (); + mapping_t *m; + locator_set_t *ls = 0; + u8 *tmp_str = 0; + int rv = 0; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + { + return; + } + + if (!lcm->lisp_pitr) + { + tmp_str = format (0, "N/A"); + } + else + { + m = pool_elt_at_index (lcm->mapping_pool, lcm->pitr_map_index); + if (~0 != m->locator_set_index) + { + ls = + pool_elt_at_index (lcm->locator_set_pool, m->locator_set_index); + tmp_str = format (0, "%s", ls->name); + } + else + { + tmp_str = format (0, "N/A"); + } + } + vec_add1 (tmp_str, 0); + + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_SHOW_LISP_PITR_REPLY, + ({ + rmp->status = lcm->lisp_pitr; + strncpy((char *) rmp->locator_set_name, (char *) tmp_str, + ARRAY_LEN(rmp->locator_set_name) - 1); + })); + /* *INDENT-ON* */ 
+} + +static void +vl_api_interface_name_renumber_t_handler (vl_api_interface_name_renumber_t * + mp) +{ + vl_api_interface_name_renumber_reply_t *rmp; + int rv = 0; + + VALIDATE_SW_IF_INDEX (mp); + + rv = vnet_interface_name_renumber + (ntohl (mp->sw_if_index), ntohl (mp->new_show_dev_instance)); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_INTERFACE_NAME_RENUMBER_REPLY); +} + +static int +arp_change_data_callback (u32 pool_index, u8 * new_mac, + u32 sw_if_index, u32 address) +{ + vpe_api_main_t *am = &vpe_api_main; + vlib_main_t *vm = am->vlib_main; + vl_api_ip4_arp_event_t *event; + static f64 arp_event_last_time; + f64 now = vlib_time_now (vm); + + if (pool_is_free_index (am->arp_events, pool_index)) + return 1; + + event = pool_elt_at_index (am->arp_events, pool_index); + /* *INDENT-OFF* */ + if (memcmp (&event->new_mac, new_mac, sizeof (event->new_mac))) + { + clib_memcpy (event->new_mac, new_mac, sizeof (event->new_mac)); + } + else + { /* same mac */ + if (sw_if_index == event->sw_if_index && + (!event->mac_ip || + /* for BD case, also check IP address with 10 sec timeout */ + (address == event->address && + (now - arp_event_last_time) < 10.0))) + return 1; + } + /* *INDENT-ON* */ + + arp_event_last_time = now; + event->sw_if_index = sw_if_index; + if (event->mac_ip) + event->address = address; + return 0; +} + +static int +nd_change_data_callback (u32 pool_index, u8 * new_mac, + u32 sw_if_index, ip6_address_t * address) +{ + vpe_api_main_t *am = &vpe_api_main; + vlib_main_t *vm = am->vlib_main; + vl_api_ip6_nd_event_t *event; + static f64 nd_event_last_time; + f64 now = vlib_time_now (vm); + + if (pool_is_free_index (am->nd_events, pool_index)) + return 1; + + event = pool_elt_at_index (am->nd_events, pool_index); + + /* *INDENT-OFF* */ + if (memcmp (&event->new_mac, new_mac, sizeof (event->new_mac))) + { + clib_memcpy (event->new_mac, new_mac, sizeof (event->new_mac)); + } + else + { /* same mac */ + if (sw_if_index == event->sw_if_index && + 
(!event->mac_ip || + /* for BD case, also check IP address with 10 sec timeout */ + (ip6_address_is_equal (address, + (ip6_address_t *) event->address) && + (now - nd_event_last_time) < 10.0))) + return 1; + } + /* *INDENT-ON* */ + + nd_event_last_time = now; + event->sw_if_index = sw_if_index; + if (event->mac_ip) + clib_memcpy (event->address, address, sizeof (event->address)); + return 0; +} + +static int +arp_change_delete_callback (u32 pool_index, u8 * notused) +{ + vpe_api_main_t *am = &vpe_api_main; + + if (pool_is_free_index (am->arp_events, pool_index)) + return 1; + + pool_put_index (am->arp_events, pool_index); + return 0; +} + +static int +nd_change_delete_callback (u32 pool_index, u8 * notused) +{ + vpe_api_main_t *am = &vpe_api_main; + + if (pool_is_free_index (am->nd_events, pool_index)) + return 1; + + pool_put_index (am->nd_events, pool_index); + return 0; +} + +static void +vl_api_want_ip4_arp_events_t_handler (vl_api_want_ip4_arp_events_t * mp) +{ + vpe_api_main_t *am = &vpe_api_main; + vnet_main_t *vnm = vnet_get_main (); + vl_api_want_ip4_arp_events_reply_t *rmp; + vl_api_ip4_arp_event_t *event; + int rv; + + if (mp->enable_disable) + { + pool_get (am->arp_events, event); + memset (event, 0, sizeof (*event)); + + event->_vl_msg_id = ntohs (VL_API_IP4_ARP_EVENT); + event->client_index = mp->client_index; + event->context = mp->context; + event->address = mp->address; + event->pid = mp->pid; + if (mp->address == 0) + event->mac_ip = 1; + + rv = vnet_add_del_ip4_arp_change_event + (vnm, arp_change_data_callback, + mp->pid, &mp->address /* addr, in net byte order */ , + vpe_resolver_process_node.index, + IP4_ARP_EVENT, event - am->arp_events, 1 /* is_add */ ); + } + else + { + rv = vnet_add_del_ip4_arp_change_event + (vnm, arp_change_delete_callback, + mp->pid, &mp->address /* addr, in net byte order */ , + vpe_resolver_process_node.index, + IP4_ARP_EVENT, ~0 /* pool index */ , 0 /* is_add */ ); + } + REPLY_MACRO (VL_API_WANT_IP4_ARP_EVENTS_REPLY); 
+} + +static void +vl_api_want_ip6_nd_events_t_handler (vl_api_want_ip6_nd_events_t * mp) +{ + vpe_api_main_t *am = &vpe_api_main; + vnet_main_t *vnm = vnet_get_main (); + vl_api_want_ip6_nd_events_reply_t *rmp; + vl_api_ip6_nd_event_t *event; + int rv; + + if (mp->enable_disable) + { + pool_get (am->nd_events, event); + memset (event, 0, sizeof (*event)); + + event->_vl_msg_id = ntohs (VL_API_IP6_ND_EVENT); + event->client_index = mp->client_index; + event->context = mp->context; + clib_memcpy (event->address, mp->address, 16); + event->pid = mp->pid; + if (ip6_address_is_zero ((ip6_address_t *) mp->address)) + event->mac_ip = 1; + + rv = vnet_add_del_ip6_nd_change_event + (vnm, nd_change_data_callback, + mp->pid, mp->address /* addr, in net byte order */ , + vpe_resolver_process_node.index, + IP6_ND_EVENT, event - am->nd_events, 1 /* is_add */ ); + } + else + { + rv = vnet_add_del_ip6_nd_change_event + (vnm, nd_change_delete_callback, + mp->pid, mp->address /* addr, in net byte order */ , + vpe_resolver_process_node.index, + IP6_ND_EVENT, ~0 /* pool index */ , 0 /* is_add */ ); + } + REPLY_MACRO (VL_API_WANT_IP6_ND_EVENTS_REPLY); +} + +static void vl_api_input_acl_set_interface_t_handler + (vl_api_input_acl_set_interface_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + vl_api_input_acl_set_interface_reply_t *rmp; + int rv; + u32 sw_if_index, ip4_table_index, ip6_table_index, l2_table_index; + + ip4_table_index = ntohl (mp->ip4_table_index); + ip6_table_index = ntohl (mp->ip6_table_index); + l2_table_index = ntohl (mp->l2_table_index); + sw_if_index = ntohl (mp->sw_if_index); + + VALIDATE_SW_IF_INDEX (mp); + + rv = vnet_set_input_acl_intfc (vm, sw_if_index, ip4_table_index, + ip6_table_index, l2_table_index, mp->is_add); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_INPUT_ACL_SET_INTERFACE_REPLY); +} + +static void vl_api_ipsec_spd_add_del_t_handler + (vl_api_ipsec_spd_add_del_t * mp) +{ +#if IPSEC == 0 + clib_warning ("unimplemented"); +#else + + 
vlib_main_t *vm __attribute__ ((unused)) = vlib_get_main (); + vl_api_ipsec_spd_add_del_reply_t *rmp; + int rv; + +#if DPDK > 0 + rv = ipsec_add_del_spd (vm, ntohl (mp->spd_id), mp->is_add); +#else + rv = VNET_API_ERROR_UNIMPLEMENTED; +#endif + + REPLY_MACRO (VL_API_IPSEC_SPD_ADD_DEL_REPLY); +#endif +} + +static void vl_api_ipsec_interface_add_del_spd_t_handler + (vl_api_ipsec_interface_add_del_spd_t * mp) +{ + vlib_main_t *vm __attribute__ ((unused)) = vlib_get_main (); + vl_api_ipsec_interface_add_del_spd_reply_t *rmp; + int rv; + u32 sw_if_index __attribute__ ((unused)); + u32 spd_id __attribute__ ((unused)); + + sw_if_index = ntohl (mp->sw_if_index); + spd_id = ntohl (mp->spd_id); + + VALIDATE_SW_IF_INDEX (mp); + +#if IPSEC > 0 + rv = ipsec_set_interface_spd (vm, sw_if_index, spd_id, mp->is_add); +#else + rv = VNET_API_ERROR_UNIMPLEMENTED; +#endif + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_IPSEC_INTERFACE_ADD_DEL_SPD_REPLY); +} + +static void vl_api_ipsec_spd_add_del_entry_t_handler + (vl_api_ipsec_spd_add_del_entry_t * mp) +{ + vlib_main_t *vm __attribute__ ((unused)) = vlib_get_main (); + vl_api_ipsec_spd_add_del_entry_reply_t *rmp; + int rv; + +#if IPSEC > 0 + ipsec_policy_t p; + + memset (&p, 0, sizeof (p)); + + p.id = ntohl (mp->spd_id); + p.priority = ntohl (mp->priority); + p.is_outbound = mp->is_outbound; + p.is_ipv6 = mp->is_ipv6; + + if (mp->is_ipv6 || mp->is_ip_any) + { + clib_memcpy (&p.raddr.start, mp->remote_address_start, 16); + clib_memcpy (&p.raddr.stop, mp->remote_address_stop, 16); + clib_memcpy (&p.laddr.start, mp->local_address_start, 16); + clib_memcpy (&p.laddr.stop, mp->local_address_stop, 16); + } + else + { + clib_memcpy (&p.raddr.start.ip4.data, mp->remote_address_start, 4); + clib_memcpy (&p.raddr.stop.ip4.data, mp->remote_address_stop, 4); + clib_memcpy (&p.laddr.start.ip4.data, mp->local_address_start, 4); + clib_memcpy (&p.laddr.stop.ip4.data, mp->local_address_stop, 4); + } + p.protocol = mp->protocol; + p.rport.start = 
ntohs (mp->remote_port_start); + p.rport.stop = ntohs (mp->remote_port_stop); + p.lport.start = ntohs (mp->local_port_start); + p.lport.stop = ntohs (mp->local_port_stop); + /* policy action resolve unsupported */ + if (mp->policy == IPSEC_POLICY_ACTION_RESOLVE) + { + clib_warning ("unsupported action: 'resolve'"); + rv = VNET_API_ERROR_UNIMPLEMENTED; + goto out; + } + p.policy = mp->policy; + p.sa_id = ntohl (mp->sa_id); + + rv = ipsec_add_del_policy (vm, &p, mp->is_add); + if (rv) + goto out; + + if (mp->is_ip_any) + { + p.is_ipv6 = 1; + rv = ipsec_add_del_policy (vm, &p, mp->is_add); + } +#else + rv = VNET_API_ERROR_UNIMPLEMENTED; + goto out; +#endif + +out: + REPLY_MACRO (VL_API_IPSEC_SPD_ADD_DEL_ENTRY_REPLY); +} + +static void vl_api_ipsec_sad_add_del_entry_t_handler + (vl_api_ipsec_sad_add_del_entry_t * mp) +{ + vlib_main_t *vm __attribute__ ((unused)) = vlib_get_main (); + vl_api_ipsec_sad_add_del_entry_reply_t *rmp; + int rv; +#if IPSEC > 0 + ipsec_sa_t sa; + + memset (&sa, 0, sizeof (sa)); + + sa.id = ntohl (mp->sad_id); + sa.spi = ntohl (mp->spi); + /* security protocol AH unsupported */ + if (mp->protocol == IPSEC_PROTOCOL_AH) + { + clib_warning ("unsupported security protocol 'AH'"); + rv = VNET_API_ERROR_UNIMPLEMENTED; + goto out; + } + sa.protocol = mp->protocol; + /* check for unsupported crypto-alg */ + if (mp->crypto_algorithm < IPSEC_CRYPTO_ALG_AES_CBC_128 || + mp->crypto_algorithm >= IPSEC_CRYPTO_N_ALG) + { + clib_warning ("unsupported crypto-alg: '%U'", format_ipsec_crypto_alg, + mp->crypto_algorithm); + rv = VNET_API_ERROR_UNIMPLEMENTED; + goto out; + } + sa.crypto_alg = mp->crypto_algorithm; + sa.crypto_key_len = mp->crypto_key_length; + clib_memcpy (&sa.crypto_key, mp->crypto_key, sizeof (sa.crypto_key)); + /* check for unsupported integ-alg */ +#if DPDK_CRYPTO==1 + if (mp->integrity_algorithm < IPSEC_INTEG_ALG_NONE || +#else + if (mp->integrity_algorithm < IPSEC_INTEG_ALG_SHA1_96 || +#endif + mp->integrity_algorithm >= IPSEC_INTEG_N_ALG) + { 
+ clib_warning ("unsupported integ-alg: '%U'", format_ipsec_integ_alg, + mp->integrity_algorithm); + rv = VNET_API_ERROR_UNIMPLEMENTED; + goto out; + } + +#if DPDK_CRYPTO==1 + /*Special cases, aes-gcm-128 encryption */ + if (mp->crypto_algorithm == IPSEC_CRYPTO_ALG_AES_GCM_128) + { + if (mp->integrity_algorithm != IPSEC_INTEG_ALG_NONE + && mp->integrity_algorithm != IPSEC_INTEG_ALG_AES_GCM_128) + { + clib_warning + ("unsupported: aes-gcm-128 crypto-alg needs none as integ-alg"); + rv = VNET_API_ERROR_UNIMPLEMENTED; + goto out; + } + else /*set integ-alg internally to aes-gcm-128 */ + mp->integrity_algorithm = IPSEC_INTEG_ALG_AES_GCM_128; + } + else if (mp->integrity_algorithm == IPSEC_INTEG_ALG_AES_GCM_128) + { + clib_warning ("unsupported integ-alg: aes-gcm-128"); + rv = VNET_API_ERROR_UNIMPLEMENTED; + goto out; + } + else if (mp->integrity_algorithm == IPSEC_INTEG_ALG_NONE) + { + clib_warning ("unsupported integ-alg: none"); + rv = VNET_API_ERROR_UNIMPLEMENTED; + goto out; + } +#endif + + sa.integ_alg = mp->integrity_algorithm; + sa.integ_key_len = mp->integrity_key_length; + clib_memcpy (&sa.integ_key, mp->integrity_key, sizeof (sa.integ_key)); + sa.use_esn = mp->use_extended_sequence_number; + sa.is_tunnel = mp->is_tunnel; + sa.is_tunnel_ip6 = mp->is_tunnel_ipv6; + if (sa.is_tunnel_ip6) + { + clib_memcpy (&sa.tunnel_src_addr, mp->tunnel_src_address, 16); + clib_memcpy (&sa.tunnel_dst_addr, mp->tunnel_dst_address, 16); + } + else + { + clib_memcpy (&sa.tunnel_src_addr.ip4.data, mp->tunnel_src_address, 4); + clib_memcpy (&sa.tunnel_dst_addr.ip4.data, mp->tunnel_dst_address, 4); + } + + rv = ipsec_add_del_sa (vm, &sa, mp->is_add); +#else + rv = VNET_API_ERROR_UNIMPLEMENTED; + goto out; +#endif + +out: + REPLY_MACRO (VL_API_IPSEC_SAD_ADD_DEL_ENTRY_REPLY); +} + +static void +vl_api_ikev2_profile_add_del_t_handler (vl_api_ikev2_profile_add_del_t * mp) +{ + vl_api_ikev2_profile_add_del_reply_t *rmp; + int rv = 0; + +#if IPSEC > 0 + vlib_main_t *vm = vlib_get_main (); 
+ clib_error_t *error; + u8 *tmp = format (0, "%s", mp->name); + error = ikev2_add_del_profile (vm, tmp, mp->is_add); + vec_free (tmp); + if (error) + rv = VNET_API_ERROR_UNSPECIFIED; +#else + rv = VNET_API_ERROR_UNIMPLEMENTED; +#endif + + REPLY_MACRO (VL_API_IKEV2_PROFILE_ADD_DEL_REPLY); +} + +static void + vl_api_ikev2_profile_set_auth_t_handler + (vl_api_ikev2_profile_set_auth_t * mp) +{ + vl_api_ikev2_profile_set_auth_reply_t *rmp; + int rv = 0; + +#if IPSEC > 0 + vlib_main_t *vm = vlib_get_main (); + clib_error_t *error; + u8 *tmp = format (0, "%s", mp->name); + u8 *data = vec_new (u8, mp->data_len); + clib_memcpy (data, mp->data, mp->data_len); + error = ikev2_set_profile_auth (vm, tmp, mp->auth_method, data, mp->is_hex); + vec_free (tmp); + vec_free (data); + if (error) + rv = VNET_API_ERROR_UNSPECIFIED; +#else + rv = VNET_API_ERROR_UNIMPLEMENTED; +#endif + + REPLY_MACRO (VL_API_IKEV2_PROFILE_SET_AUTH_REPLY); +} + +static void +vl_api_ikev2_profile_set_id_t_handler (vl_api_ikev2_profile_set_id_t * mp) +{ + vl_api_ikev2_profile_add_del_reply_t *rmp; + int rv = 0; + +#if IPSEC > 0 + vlib_main_t *vm = vlib_get_main (); + clib_error_t *error; + u8 *tmp = format (0, "%s", mp->name); + u8 *data = vec_new (u8, mp->data_len); + clib_memcpy (data, mp->data, mp->data_len); + error = ikev2_set_profile_id (vm, tmp, mp->id_type, data, mp->is_local); + vec_free (tmp); + vec_free (data); + if (error) + rv = VNET_API_ERROR_UNSPECIFIED; +#else + rv = VNET_API_ERROR_UNIMPLEMENTED; +#endif + + REPLY_MACRO (VL_API_IKEV2_PROFILE_SET_ID_REPLY); +} + +static void +vl_api_ikev2_profile_set_ts_t_handler (vl_api_ikev2_profile_set_ts_t * mp) +{ + vl_api_ikev2_profile_set_ts_reply_t *rmp; + int rv = 0; + +#if IPSEC > 0 + vlib_main_t *vm = vlib_get_main (); + clib_error_t *error; + u8 *tmp = format (0, "%s", mp->name); + error = ikev2_set_profile_ts (vm, tmp, mp->proto, mp->start_port, + mp->end_port, (ip4_address_t) mp->start_addr, + (ip4_address_t) mp->end_addr, mp->is_local); + 
vec_free (tmp); + if (error) + rv = VNET_API_ERROR_UNSPECIFIED; +#else + rv = VNET_API_ERROR_UNIMPLEMENTED; +#endif + + REPLY_MACRO (VL_API_IKEV2_PROFILE_SET_TS_REPLY); +} + +static void +vl_api_ikev2_set_local_key_t_handler (vl_api_ikev2_set_local_key_t * mp) +{ + vl_api_ikev2_profile_set_ts_reply_t *rmp; + int rv = 0; + +#if IPSEC > 0 + vlib_main_t *vm = vlib_get_main (); + clib_error_t *error; + + error = ikev2_set_local_key (vm, mp->key_file); + if (error) + rv = VNET_API_ERROR_UNSPECIFIED; +#else + rv = VNET_API_ERROR_UNIMPLEMENTED; +#endif + + REPLY_MACRO (VL_API_IKEV2_SET_LOCAL_KEY_REPLY); +} + +static void +vl_api_ipsec_sa_set_key_t_handler (vl_api_ipsec_sa_set_key_t * mp) +{ + vlib_main_t *vm __attribute__ ((unused)) = vlib_get_main (); + vl_api_ipsec_sa_set_key_reply_t *rmp; + int rv; +#if IPSEC > 0 + ipsec_sa_t sa; + sa.id = ntohl (mp->sa_id); + sa.crypto_key_len = mp->crypto_key_length; + clib_memcpy (&sa.crypto_key, mp->crypto_key, sizeof (sa.crypto_key)); + sa.integ_key_len = mp->integrity_key_length; + clib_memcpy (&sa.integ_key, mp->integrity_key, sizeof (sa.integ_key)); + + rv = ipsec_set_sa_key (vm, &sa); +#else + rv = VNET_API_ERROR_UNIMPLEMENTED; +#endif + + REPLY_MACRO (VL_API_IPSEC_SA_SET_KEY_REPLY); +} + +static void vl_api_cop_interface_enable_disable_t_handler + (vl_api_cop_interface_enable_disable_t * mp) +{ + vl_api_cop_interface_enable_disable_reply_t *rmp; + int rv; + u32 sw_if_index = ntohl (mp->sw_if_index); + int enable_disable; + + VALIDATE_SW_IF_INDEX (mp); + + enable_disable = (int) mp->enable_disable; + + rv = cop_interface_enable_disable (sw_if_index, enable_disable); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_COP_INTERFACE_ENABLE_DISABLE_REPLY); +} + +static void vl_api_cop_whitelist_enable_disable_t_handler + (vl_api_cop_whitelist_enable_disable_t * mp) +{ + vl_api_cop_whitelist_enable_disable_reply_t *rmp; + cop_whitelist_enable_disable_args_t _a, *a = &_a; + u32 sw_if_index = ntohl (mp->sw_if_index); + int rv; + + 
VALIDATE_SW_IF_INDEX (mp); + + a->sw_if_index = sw_if_index; + a->ip4 = mp->ip4; + a->ip6 = mp->ip6; + a->default_cop = mp->default_cop; + a->fib_id = ntohl (mp->fib_id); + + rv = cop_whitelist_enable_disable (a); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_COP_WHITELIST_ENABLE_DISABLE_REPLY); +} + +static void +vl_api_get_node_graph_t_handler (vl_api_get_node_graph_t * mp) +{ + int rv = 0; + u8 *vector = 0; + api_main_t *am = &api_main; + vlib_main_t *vm = vlib_get_main (); + void *oldheap; + vl_api_get_node_graph_reply_t *rmp; + + pthread_mutex_lock (&am->vlib_rp->mutex); + oldheap = svm_push_data_heap (am->vlib_rp); + + /* + * Keep the number of memcpy ops to a minimum (e.g. 1). + */ + vec_validate (vector, 16384); + vec_reset_length (vector); + + /* $$$$ FIXME */ + vector = vlib_node_serialize (&vm->node_main, vector, + (u32) ~ 0 /* all threads */ , + 1 /* include nexts */ , + 1 /* include stats */ ); + + svm_pop_heap (oldheap); + pthread_mutex_unlock (&am->vlib_rp->mutex); + + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_GET_NODE_GRAPH_REPLY, + ({ + rmp->reply_in_shmem = (uword) vector; + })); + /* *INDENT-ON* */ +} + +static void +vl_api_ioam_enable_t_handler (vl_api_ioam_enable_t * mp) +{ + int rv = 0; + vl_api_ioam_enable_reply_t *rmp; + clib_error_t *error; + + /* Ignoring the profile id as currently a single profile + * is supported */ + error = ip6_ioam_enable (mp->trace_enable, mp->pot_enable, + mp->seqno, mp->analyse); + if (error) + { + clib_error_report (error); + rv = clib_error_get_code (error); + } + + REPLY_MACRO (VL_API_IOAM_ENABLE_REPLY); +} + +static void +vl_api_ioam_disable_t_handler (vl_api_ioam_disable_t * mp) +{ + int rv = 0; + vl_api_ioam_disable_reply_t *rmp; + clib_error_t *error; + + error = clear_ioam_rewrite_fn (); + if (error) + { + clib_error_report (error); + rv = clib_error_get_code (error); + } + + REPLY_MACRO (VL_API_IOAM_DISABLE_REPLY); +} + +static void +vl_api_af_packet_create_t_handler (vl_api_af_packet_create_t * mp) 
+{ + vlib_main_t *vm = vlib_get_main (); + vl_api_af_packet_create_reply_t *rmp; + int rv = 0; + u8 *host_if_name = NULL; + u32 sw_if_index; + + host_if_name = format (0, "%s", mp->host_if_name); + vec_add1 (host_if_name, 0); + + rv = af_packet_create_if (vm, host_if_name, + mp->use_random_hw_addr ? 0 : mp->hw_addr, + &sw_if_index); + + vec_free (host_if_name); + + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_AF_PACKET_CREATE_REPLY, + ({ + rmp->sw_if_index = clib_host_to_net_u32(sw_if_index); + })); + /* *INDENT-ON* */ +} + +static void +vl_api_af_packet_delete_t_handler (vl_api_af_packet_delete_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + vl_api_af_packet_delete_reply_t *rmp; + int rv = 0; + u8 *host_if_name = NULL; + + host_if_name = format (0, "%s", mp->host_if_name); + vec_add1 (host_if_name, 0); + + rv = af_packet_delete_if (vm, host_if_name); + + vec_free (host_if_name); + + REPLY_MACRO (VL_API_AF_PACKET_DELETE_REPLY); +} + +static void +vl_api_policer_add_del_t_handler (vl_api_policer_add_del_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + vl_api_policer_add_del_reply_t *rmp; + int rv = 0; + u8 *name = NULL; + sse2_qos_pol_cfg_params_st cfg; + clib_error_t *error; + u32 policer_index; + + name = format (0, "%s", mp->name); + + memset (&cfg, 0, sizeof (cfg)); + cfg.rfc = mp->type; + cfg.rnd_type = mp->round_type; + cfg.rate_type = mp->rate_type; + cfg.rb.kbps.cir_kbps = mp->cir; + cfg.rb.kbps.eir_kbps = mp->eir; + cfg.rb.kbps.cb_bytes = mp->cb; + cfg.rb.kbps.eb_bytes = mp->eb; + cfg.conform_action.action_type = mp->conform_action_type; + cfg.conform_action.dscp = mp->conform_dscp; + cfg.exceed_action.action_type = mp->exceed_action_type; + cfg.exceed_action.dscp = mp->exceed_dscp; + cfg.violate_action.action_type = mp->violate_action_type; + cfg.violate_action.dscp = mp->violate_dscp; + cfg.color_aware = mp->color_aware; + + error = policer_add_del (vm, name, &cfg, &policer_index, mp->is_add); + + if (error) + rv = VNET_API_ERROR_UNSPECIFIED; + + /* 
*INDENT-OFF* */ + REPLY_MACRO2(VL_API_POLICER_ADD_DEL_REPLY, + ({ + if (rv == 0 && mp->is_add) + rmp->policer_index = ntohl(policer_index); + else + rmp->policer_index = ~0; + })); + /* *INDENT-ON* */ +} + +static void +send_policer_details (u8 * name, + sse2_qos_pol_cfg_params_st * config, + policer_read_response_type_st * templ, + unix_shared_memory_queue_t * q, u32 context) +{ + vl_api_policer_details_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_POLICER_DETAILS); + mp->context = context; + mp->cir = htonl (config->rb.kbps.cir_kbps); + mp->eir = htonl (config->rb.kbps.eir_kbps); + mp->cb = htonl (config->rb.kbps.cb_bytes); + mp->eb = htonl (config->rb.kbps.eb_bytes); + mp->rate_type = config->rate_type; + mp->round_type = config->rnd_type; + mp->type = config->rfc; + mp->conform_action_type = config->conform_action.action_type; + mp->conform_dscp = config->conform_action.dscp; + mp->exceed_action_type = config->exceed_action.action_type; + mp->exceed_dscp = config->exceed_action.dscp; + mp->violate_action_type = config->violate_action.action_type; + mp->violate_dscp = config->violate_action.dscp; + mp->single_rate = templ->single_rate ? 1 : 0; + mp->color_aware = templ->color_aware ? 
1 : 0; + mp->scale = htonl (templ->scale); + mp->cir_tokens_per_period = htonl (templ->cir_tokens_per_period); + mp->pir_tokens_per_period = htonl (templ->pir_tokens_per_period); + mp->current_limit = htonl (templ->current_limit); + mp->current_bucket = htonl (templ->current_bucket); + mp->extended_limit = htonl (templ->extended_limit); + mp->extended_bucket = htonl (templ->extended_bucket); + mp->last_update_time = clib_host_to_net_u64 (templ->last_update_time); + + strncpy ((char *) mp->name, (char *) name, ARRAY_LEN (mp->name) - 1); + + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +static void +vl_api_policer_dump_t_handler (vl_api_policer_dump_t * mp) +{ + unix_shared_memory_queue_t *q; + vnet_policer_main_t *pm = &vnet_policer_main; + hash_pair_t *hp; + uword *p; + u32 pool_index; + u8 *match_name = 0; + u8 *name; + sse2_qos_pol_cfg_params_st *config; + policer_read_response_type_st *templ; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + return; + + if (mp->match_name_valid) + { + match_name = format (0, "%s%c", mp->match_name, 0); + } + + if (mp->match_name_valid) + { + p = hash_get_mem (pm->policer_config_by_name, match_name); + if (p) + { + pool_index = p[0]; + config = pool_elt_at_index (pm->configs, pool_index); + templ = pool_elt_at_index (pm->policer_templates, pool_index); + send_policer_details (match_name, config, templ, q, mp->context); + } + } + else + { + /* *INDENT-OFF* */ + hash_foreach_pair (hp, pm->policer_config_by_name, + ({ + name = (u8 *) hp->key; + pool_index = hp->value[0]; + config = pool_elt_at_index (pm->configs, pool_index); + templ = pool_elt_at_index (pm->policer_templates, pool_index); + send_policer_details(name, config, templ, q, mp->context); + })); + /* *INDENT-ON* */ + } +} + +static void + vl_api_policer_classify_set_interface_t_handler + (vl_api_policer_classify_set_interface_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + vl_api_policer_classify_set_interface_reply_t *rmp; + int rv; + u32 
sw_if_index, ip4_table_index, ip6_table_index, l2_table_index; + + ip4_table_index = ntohl (mp->ip4_table_index); + ip6_table_index = ntohl (mp->ip6_table_index); + l2_table_index = ntohl (mp->l2_table_index); + sw_if_index = ntohl (mp->sw_if_index); + + VALIDATE_SW_IF_INDEX (mp); + + rv = vnet_set_policer_classify_intfc (vm, sw_if_index, ip4_table_index, + ip6_table_index, l2_table_index, + mp->is_add); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_POLICER_CLASSIFY_SET_INTERFACE_REPLY); +} + +static void +send_policer_classify_details (u32 sw_if_index, + u32 table_index, + unix_shared_memory_queue_t * q, u32 context) +{ + vl_api_policer_classify_details_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_POLICER_CLASSIFY_DETAILS); + mp->context = context; + mp->sw_if_index = htonl (sw_if_index); + mp->table_index = htonl (table_index); + + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +static void +vl_api_policer_classify_dump_t_handler (vl_api_policer_classify_dump_t * mp) +{ + unix_shared_memory_queue_t *q; + policer_classify_main_t *pcm = &policer_classify_main; + u32 *vec_tbl; + int i; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + return; + + vec_tbl = pcm->classify_table_index_by_sw_if_index[mp->type]; + + if (vec_len (vec_tbl)) + { + for (i = 0; i < vec_len (vec_tbl); i++) + { + if (vec_elt (vec_tbl, i) == ~0) + continue; + + send_policer_classify_details (i, vec_elt (vec_tbl, i), q, + mp->context); + } + } +} + +static void +vl_api_netmap_create_t_handler (vl_api_netmap_create_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + vl_api_netmap_create_reply_t *rmp; + int rv = 0; + u8 *if_name = NULL; + + if_name = format (0, "%s", mp->netmap_if_name); + vec_add1 (if_name, 0); + + rv = + netmap_create_if (vm, if_name, mp->use_random_hw_addr ? 
0 : mp->hw_addr, + mp->is_pipe, mp->is_master, 0); + + vec_free (if_name); + + REPLY_MACRO (VL_API_NETMAP_CREATE_REPLY); +} + +static void +vl_api_netmap_delete_t_handler (vl_api_netmap_delete_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + vl_api_netmap_delete_reply_t *rmp; + int rv = 0; + u8 *if_name = NULL; + + if_name = format (0, "%s", mp->netmap_if_name); + vec_add1 (if_name, 0); + + rv = netmap_delete_if (vm, if_name); + + vec_free (if_name); + + REPLY_MACRO (VL_API_NETMAP_DELETE_REPLY); +} + +static void +vl_api_mpls_tunnel_details_t_handler (vl_api_mpls_fib_details_t * mp) +{ + clib_warning ("BUG"); +} + +typedef struct mpls_tunnel_send_walk_ctx_t_ +{ + unix_shared_memory_queue_t *q; + u32 index; + u32 context; +} mpls_tunnel_send_walk_ctx_t; + +static void +send_mpls_tunnel_entry (u32 mti, void *arg) +{ + mpls_tunnel_send_walk_ctx_t *ctx; + vl_api_mpls_tunnel_details_t *mp; + const mpls_tunnel_t *mt; + u32 nlabels; + + ctx = arg; + + if (~0 != ctx->index && mti != ctx->index) + return; + + mt = mpls_tunnel_get (mti); + nlabels = vec_len (mt->mt_label_stack); + + mp = vl_msg_api_alloc (sizeof (*mp) + nlabels * sizeof (u32)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_MPLS_TUNNEL_DETAILS); + mp->context = ctx->context; + + mp->tunnel_index = ntohl (mti); + memcpy (mp->mt_next_hop_out_labels, + mt->mt_label_stack, nlabels * sizeof (u32)); + + // FIXME + + vl_msg_api_send_shmem (ctx->q, (u8 *) & mp); +} + +static void +vl_api_mpls_tunnel_dump_t_handler (vl_api_mpls_tunnel_dump_t * mp) +{ + unix_shared_memory_queue_t *q; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + return; + + mpls_tunnel_send_walk_ctx_t ctx = { + .q = q, + .index = ntohl (mp->tunnel_index), + .context = mp->context, + }; + mpls_tunnel_walk (send_mpls_tunnel_entry, &ctx); +} + +static void +vl_api_mpls_fib_details_t_handler (vl_api_mpls_fib_details_t * mp) +{ + clib_warning ("BUG"); +} + +static void +vl_api_mpls_fib_details_t_endian 
(vl_api_mpls_fib_details_t * mp) +{ + clib_warning ("BUG"); +} + +static void +vl_api_mpls_fib_details_t_print (vl_api_mpls_fib_details_t * mp) +{ + clib_warning ("BUG"); +} + +static void +send_mpls_fib_details (vpe_api_main_t * am, + unix_shared_memory_queue_t * q, + u32 table_id, u32 label, u32 eos, + fib_route_path_encode_t * api_rpaths, u32 context) +{ + vl_api_mpls_fib_details_t *mp; + fib_route_path_encode_t *api_rpath; + vl_api_fib_path2_t *fp; + int path_count; + + path_count = vec_len (api_rpaths); + mp = vl_msg_api_alloc (sizeof (*mp) + path_count * sizeof (*fp)); + if (!mp) + return; + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_MPLS_FIB_DETAILS); + mp->context = context; + + mp->table_id = htonl (table_id); + mp->eos_bit = eos; + mp->label = htonl (label); + + mp->count = htonl (path_count); + fp = mp->path; + vec_foreach (api_rpath, api_rpaths) + { + memset (fp, 0, sizeof (*fp)); + fp->weight = htonl (api_rpath->rpath.frp_weight); + fp->sw_if_index = htonl (api_rpath->rpath.frp_sw_if_index); + copy_fib_next_hop (api_rpath, fp); + fp++; + } + + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +static void +vl_api_mpls_fib_dump_t_handler (vl_api_mpls_fib_dump_t * mp) +{ + vpe_api_main_t *am = &vpe_api_main; + unix_shared_memory_queue_t *q; + mpls_main_t *mm = &mpls_main; + fib_table_t *fib_table; + fib_node_index_t lfei, *lfeip, *lfeis = NULL; + mpls_label_t key; + fib_prefix_t pfx; + u32 fib_index; + fib_route_path_encode_t *api_rpaths; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + return; + + /* *INDENT-OFF* */ + pool_foreach (fib_table, mm->fibs, + ({ + hash_foreach(key, lfei, fib_table->mpls.mf_entries, + ({ + vec_add1(lfeis, lfei); + })); + })); + vec_sort_with_function(lfeis, fib_entry_cmp_for_sort); + + vec_foreach(lfeip, lfeis) + { + fib_entry_get_prefix(*lfeip, &pfx); + fib_index = fib_entry_get_fib_index(*lfeip); + fib_table = fib_table_get(fib_index, pfx.fp_proto); + api_rpaths = NULL; + 
fib_entry_encode(*lfeip, &api_rpaths); + send_mpls_fib_details (am, q, + fib_table->ft_table_id, + pfx.fp_label, + pfx.fp_eos, + api_rpaths, + mp->context); + vec_free(api_rpaths); + } + + vec_free (lfeis); +} + +static void +vl_api_classify_table_ids_t_handler (vl_api_classify_table_ids_t * mp) +{ + unix_shared_memory_queue_t *q; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + return; + + vnet_classify_main_t *cm = &vnet_classify_main; + vnet_classify_table_t *t; + u32 *table_ids = 0; + u32 count; + + /* *INDENT-OFF* */ + pool_foreach (t, cm->tables, + ({ + vec_add1 (table_ids, ntohl(t - cm->tables)); + })); + /* *INDENT-ON* */ + count = vec_len (table_ids); + + vl_api_classify_table_ids_reply_t *rmp; + rmp = vl_msg_api_alloc_as_if_client (sizeof (*rmp) + count * sizeof (u32)); + rmp->_vl_msg_id = ntohs (VL_API_CLASSIFY_TABLE_IDS_REPLY); + rmp->context = mp->context; + rmp->count = ntohl (count); + clib_memcpy (rmp->ids, table_ids, count * sizeof (u32)); + rmp->retval = 0; + + vl_msg_api_send_shmem (q, (u8 *) & rmp); + + vec_free (table_ids); +} + +static void + vl_api_classify_table_by_interface_t_handler + (vl_api_classify_table_by_interface_t * mp) +{ + vl_api_classify_table_by_interface_reply_t *rmp; + int rv = 0; + + u32 sw_if_index = ntohl (mp->sw_if_index); + u32 *acl = 0; + + vec_validate (acl, INPUT_ACL_N_TABLES - 1); + vec_set (acl, ~0); + + VALIDATE_SW_IF_INDEX (mp); + + input_acl_main_t *am = &input_acl_main; + + int if_idx; + u32 type; + + for (type = 0; type < INPUT_ACL_N_TABLES; type++) + { + u32 *vec_tbl = am->classify_table_index_by_sw_if_index[type]; + if (vec_len (vec_tbl)) + { + for (if_idx = 0; if_idx < vec_len (vec_tbl); if_idx++) + { + if (vec_elt (vec_tbl, if_idx) == ~0 || sw_if_index != if_idx) + { + continue; + } + acl[type] = vec_elt (vec_tbl, if_idx); + } + } + } + + BAD_SW_IF_INDEX_LABEL; + + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_CLASSIFY_TABLE_BY_INTERFACE_REPLY, + ({ + rmp->sw_if_index = 
ntohl(sw_if_index); + rmp->l2_table_id = ntohl(acl[INPUT_ACL_TABLE_L2]); + rmp->ip4_table_id = ntohl(acl[INPUT_ACL_TABLE_IP4]); + rmp->ip6_table_id = ntohl(acl[INPUT_ACL_TABLE_IP6]); + })); + /* *INDENT-ON* */ + vec_free (acl); +} + +static void +vl_api_classify_table_info_t_handler (vl_api_classify_table_info_t * mp) +{ + unix_shared_memory_queue_t *q; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + return; + + vl_api_classify_table_info_reply_t *rmp = 0; + + vnet_classify_main_t *cm = &vnet_classify_main; + u32 table_id = ntohl (mp->table_id); + vnet_classify_table_t *t; + + /* *INDENT-OFF* */ + pool_foreach (t, cm->tables, + ({ + if (table_id == t - cm->tables) + { + rmp = vl_msg_api_alloc_as_if_client + (sizeof (*rmp) + t->match_n_vectors * sizeof (u32x4)); + rmp->_vl_msg_id = ntohs (VL_API_CLASSIFY_TABLE_INFO_REPLY); + rmp->context = mp->context; + rmp->table_id = ntohl(table_id); + rmp->nbuckets = ntohl(t->nbuckets); + rmp->match_n_vectors = ntohl(t->match_n_vectors); + rmp->skip_n_vectors = ntohl(t->skip_n_vectors); + rmp->active_sessions = ntohl(t->active_elements); + rmp->next_table_index = ntohl(t->next_table_index); + rmp->miss_next_index = ntohl(t->miss_next_index); + rmp->mask_length = ntohl(t->match_n_vectors * sizeof (u32x4)); + clib_memcpy(rmp->mask, t->mask, t->match_n_vectors * sizeof(u32x4)); + rmp->retval = 0; + break; + } + })); + /* *INDENT-ON* */ + + if (rmp == 0) + { + rmp = vl_msg_api_alloc (sizeof (*rmp)); + rmp->_vl_msg_id = ntohs ((VL_API_CLASSIFY_TABLE_INFO_REPLY)); + rmp->context = mp->context; + rmp->retval = ntohl (VNET_API_ERROR_CLASSIFY_TABLE_NOT_FOUND); + } + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void +vl_api_classify_session_details_t_handler (vl_api_classify_session_details_t * + mp) +{ + clib_warning ("BUG"); +} + +static void +send_classify_session_details (unix_shared_memory_queue_t * q, + u32 table_id, + u32 match_length, + vnet_classify_entry_t * e, u32 context) +{ + 
vl_api_classify_session_details_t *rmp; + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_CLASSIFY_SESSION_DETAILS); + rmp->context = context; + rmp->table_id = ntohl (table_id); + rmp->hit_next_index = ntohl (e->next_index); + rmp->advance = ntohl (e->advance); + rmp->opaque_index = ntohl (e->opaque_index); + rmp->match_length = ntohl (match_length); + clib_memcpy (rmp->match, e->key, match_length); + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void +vl_api_classify_session_dump_t_handler (vl_api_classify_session_dump_t * mp) +{ + vnet_classify_main_t *cm = &vnet_classify_main; + unix_shared_memory_queue_t *q; + + u32 table_id = ntohl (mp->table_id); + vnet_classify_table_t *t; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (!q) + return; + + /* *INDENT-OFF* */ + pool_foreach (t, cm->tables, + ({ + if (table_id == t - cm->tables) + { + vnet_classify_bucket_t * b; + vnet_classify_entry_t * v, * save_v; + int i, j, k; + + for (i = 0; i < t->nbuckets; i++) + { + b = &t->buckets [i]; + if (b->offset == 0) + continue; + + save_v = vnet_classify_get_entry (t, b->offset); + for (j = 0; j < (1<log2_pages); j++) + { + for (k = 0; k < t->entries_per_page; k++) + { + v = vnet_classify_entry_at_index + (t, save_v, j*t->entries_per_page + k); + if (vnet_classify_entry_is_free (v)) + continue; + + send_classify_session_details + (q, table_id, t->match_n_vectors * sizeof (u32x4), + v, mp->context); + } + } + } + break; + } + })); + /* *INDENT-ON* */ +} + +static void +vl_api_set_ipfix_exporter_t_handler (vl_api_set_ipfix_exporter_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + flow_report_main_t *frm = &flow_report_main; + vl_api_set_ipfix_exporter_reply_t *rmp; + ip4_address_t collector, src; + u16 collector_port = UDP_DST_PORT_ipfix; + u32 path_mtu; + u32 template_interval; + u8 udp_checksum; + u32 fib_id; + u32 fib_index = ~0; + int rv = 0; + + memcpy (collector.data, 
mp->collector_address, sizeof (collector.data)); + collector_port = ntohs (mp->collector_port); + if (collector_port == (u16) ~ 0) + collector_port = UDP_DST_PORT_ipfix; + memcpy (src.data, mp->src_address, sizeof (src.data)); + fib_id = ntohl (mp->vrf_id); + + ip4_main_t *im = &ip4_main; + if (fib_id == ~0) + { + fib_index = ~0; + } + else + { + uword *p = hash_get (im->fib_index_by_table_id, fib_id); + if (!p) + { + rv = VNET_API_ERROR_NO_SUCH_FIB; + goto out; + } + fib_index = p[0]; + } + + path_mtu = ntohl (mp->path_mtu); + if (path_mtu == ~0) + path_mtu = 512; // RFC 7011 section 10.3.3. + template_interval = ntohl (mp->template_interval); + if (template_interval == ~0) + template_interval = 20; + udp_checksum = mp->udp_checksum; + + if (collector.as_u32 == 0) + { + rv = VNET_API_ERROR_INVALID_VALUE; + goto out; + } + + if (src.as_u32 == 0) + { + rv = VNET_API_ERROR_INVALID_VALUE; + goto out; + } + + if (path_mtu > 1450 /* vpp does not support fragmentation */ ) + { + rv = VNET_API_ERROR_INVALID_VALUE; + goto out; + } + + if (path_mtu < 68) + { + rv = VNET_API_ERROR_INVALID_VALUE; + goto out; + } + + /* Reset report streams if we are reconfiguring IP addresses */ + if (frm->ipfix_collector.as_u32 != collector.as_u32 || + frm->src_address.as_u32 != src.as_u32 || + frm->collector_port != collector_port) + vnet_flow_reports_reset (frm); + + frm->ipfix_collector.as_u32 = collector.as_u32; + frm->collector_port = collector_port; + frm->src_address.as_u32 = src.as_u32; + frm->fib_index = fib_index; + frm->path_mtu = path_mtu; + frm->template_interval = template_interval; + frm->udp_checksum = udp_checksum; + + /* Turn on the flow reporting process */ + vlib_process_signal_event (vm, flow_report_process_node.index, 1, 0); + +out: + REPLY_MACRO (VL_API_SET_IPFIX_EXPORTER_REPLY); +} + +static void +vl_api_ipfix_exporter_dump_t_handler (vl_api_ipfix_exporter_dump_t * mp) +{ + flow_report_main_t *frm = &flow_report_main; + unix_shared_memory_queue_t *q; + 
vl_api_ipfix_exporter_details_t *rmp; + ip4_main_t *im = &ip4_main; + u32 vrf_id; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (!q) + return; + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_IPFIX_EXPORTER_DETAILS); + rmp->context = mp->context; + memcpy (rmp->collector_address, frm->ipfix_collector.data, + sizeof (frm->ipfix_collector.data)); + rmp->collector_port = htons (frm->collector_port); + memcpy (rmp->src_address, frm->src_address.data, + sizeof (frm->src_address.data)); + if (frm->fib_index == ~0) + vrf_id = ~0; + else + vrf_id = im->fibs[frm->fib_index].ft_table_id; + rmp->vrf_id = htonl (vrf_id); + rmp->path_mtu = htonl (frm->path_mtu); + rmp->template_interval = htonl (frm->template_interval); + rmp->udp_checksum = (frm->udp_checksum != 0); + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void + vl_api_set_ipfix_classify_stream_t_handler + (vl_api_set_ipfix_classify_stream_t * mp) +{ + vl_api_set_ipfix_classify_stream_reply_t *rmp; + flow_report_classify_main_t *fcm = &flow_report_classify_main; + flow_report_main_t *frm = &flow_report_main; + u32 domain_id = 0; + u32 src_port = UDP_DST_PORT_ipfix; + int rv = 0; + + domain_id = ntohl (mp->domain_id); + src_port = ntohs (mp->src_port); + + if (fcm->src_port != 0 && + (fcm->domain_id != domain_id || fcm->src_port != (u16) src_port)) + { + int rv = vnet_stream_change (frm, fcm->domain_id, fcm->src_port, + domain_id, (u16) src_port); + ASSERT (rv == 0); + } + + fcm->domain_id = domain_id; + fcm->src_port = (u16) src_port; + + REPLY_MACRO (VL_API_SET_IPFIX_CLASSIFY_STREAM_REPLY); +} + +static void + vl_api_ipfix_classify_stream_dump_t_handler + (vl_api_ipfix_classify_stream_dump_t * mp) +{ + flow_report_classify_main_t *fcm = &flow_report_classify_main; + unix_shared_memory_queue_t *q; + vl_api_ipfix_classify_stream_details_t *rmp; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (!q) + return; 
+ + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_IPFIX_CLASSIFY_STREAM_DETAILS); + rmp->context = mp->context; + rmp->domain_id = htonl (fcm->domain_id); + rmp->src_port = htons (fcm->src_port); + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void + vl_api_ipfix_classify_table_add_del_t_handler + (vl_api_ipfix_classify_table_add_del_t * mp) +{ + vl_api_ipfix_classify_table_add_del_reply_t *rmp; + flow_report_classify_main_t *fcm = &flow_report_classify_main; + flow_report_main_t *frm = &flow_report_main; + vnet_flow_report_add_del_args_t args; + ipfix_classify_table_t *table; + int is_add; + u32 classify_table_index; + u8 ip_version; + u8 transport_protocol; + int rv = 0; + + classify_table_index = ntohl (mp->table_id); + ip_version = mp->ip_version; + transport_protocol = mp->transport_protocol; + is_add = mp->is_add; + + if (fcm->src_port == 0) + { + /* call set_ipfix_classify_stream first */ + rv = VNET_API_ERROR_UNSPECIFIED; + goto out; + } + + memset (&args, 0, sizeof (args)); + + table = 0; + int i; + for (i = 0; i < vec_len (fcm->tables); i++) + if (ipfix_classify_table_index_valid (i)) + if (fcm->tables[i].classify_table_index == classify_table_index) + { + table = &fcm->tables[i]; + break; + } + + if (is_add) + { + if (table) + { + rv = VNET_API_ERROR_VALUE_EXIST; + goto out; + } + table = ipfix_classify_add_table (); + table->classify_table_index = classify_table_index; + } + else + { + if (!table) + { + rv = VNET_API_ERROR_NO_SUCH_ENTRY; + goto out; + } + } + + table->ip_version = ip_version; + table->transport_protocol = transport_protocol; + + args.opaque.as_uword = table - fcm->tables; + args.rewrite_callback = ipfix_classify_template_rewrite; + args.flow_data_callback = ipfix_classify_send_flows; + args.is_add = is_add; + args.domain_id = fcm->domain_id; + args.src_port = fcm->src_port; + + rv = vnet_flow_report_add_del (frm, &args); + + /* If deleting, or add failed */ + if 
(is_add == 0 || (rv && is_add)) + ipfix_classify_delete_table (table - fcm->tables); + +out: + REPLY_MACRO (VL_API_SET_IPFIX_CLASSIFY_STREAM_REPLY); +} + +static void +send_ipfix_classify_table_details (u32 table_index, + unix_shared_memory_queue_t * q, + u32 context) +{ + flow_report_classify_main_t *fcm = &flow_report_classify_main; + vl_api_ipfix_classify_table_details_t *mp; + + ipfix_classify_table_t *table = &fcm->tables[table_index]; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_IPFIX_CLASSIFY_TABLE_DETAILS); + mp->context = context; + mp->table_id = htonl (table->classify_table_index); + mp->ip_version = table->ip_version; + mp->transport_protocol = table->transport_protocol; + + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +static void + vl_api_ipfix_classify_table_dump_t_handler + (vl_api_ipfix_classify_table_dump_t * mp) +{ + flow_report_classify_main_t *fcm = &flow_report_classify_main; + unix_shared_memory_queue_t *q; + u32 i; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (!q) + return; + + for (i = 0; i < vec_len (fcm->tables); i++) + if (ipfix_classify_table_index_valid (i)) + send_ipfix_classify_table_details (i, q, mp->context); +} + +static void +vl_api_pg_create_interface_t_handler (vl_api_pg_create_interface_t * mp) +{ + vl_api_pg_create_interface_reply_t *rmp; + int rv = 0; + + pg_main_t *pg = &pg_main; + u32 pg_if_id = pg_interface_add_or_get (pg, ntohl (mp->interface_id)); + pg_interface_t *pi = pool_elt_at_index (pg->interfaces, pg_if_id); + + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_PG_CREATE_INTERFACE_REPLY, + ({ + rmp->sw_if_index = ntohl(pi->sw_if_index); + })); + /* *INDENT-ON* */ +} + +static void +vl_api_pg_capture_t_handler (vl_api_pg_capture_t * mp) +{ + vl_api_pg_capture_reply_t *rmp; + int rv = 0; + + vnet_main_t *vnm = vnet_get_main (); + vnet_interface_main_t *im = &vnm->interface_main; + vnet_hw_interface_t *hi = 0; + + u8 *intf_name = format (0, 
"pg%d", ntohl (mp->interface_id), 0); + u32 hw_if_index = ~0; + uword *p = hash_get_mem (im->hw_interface_by_name, intf_name); + if (p) + hw_if_index = *p; + vec_free (intf_name); + + if (hw_if_index != ~0) + { + pg_capture_args_t _a, *a = &_a; + + u32 len = ntohl (mp->pcap_name_length); + u8 *pcap_file_name = vec_new (u8, len); + clib_memcpy (pcap_file_name, mp->pcap_file_name, len); + + hi = vnet_get_sup_hw_interface (vnm, hw_if_index); + a->hw_if_index = hw_if_index; + a->dev_instance = hi->dev_instance; + a->is_enabled = mp->is_enabled; + a->pcap_file_name = pcap_file_name; + a->count = ntohl (mp->count); + + clib_error_t *e = pg_capture (a); + if (e) + { + clib_error_report (e); + rv = VNET_API_ERROR_CANNOT_CREATE_PCAP_FILE; + } + + vec_free (pcap_file_name); + } + REPLY_MACRO (VL_API_PG_CAPTURE_REPLY); +} + +static void +vl_api_pg_enable_disable_t_handler (vl_api_pg_enable_disable_t * mp) +{ + vl_api_pg_enable_disable_reply_t *rmp; + int rv = 0; + + pg_main_t *pg = &pg_main; + u32 stream_index = ~0; + + int is_enable = mp->is_enabled != 0; + u32 len = ntohl (mp->stream_name_length) - 1; + + if (len > 0) + { + u8 *stream_name = vec_new (u8, len); + clib_memcpy (stream_name, mp->stream_name, len); + uword *p = hash_get_mem (pg->stream_index_by_name, stream_name); + if (p) + stream_index = *p; + vec_free (stream_name); + } + + pg_enable_disable (stream_index, is_enable); + + REPLY_MACRO (VL_API_PG_ENABLE_DISABLE_REPLY); +} + +static void + vl_api_ip_source_and_port_range_check_add_del_t_handler + (vl_api_ip_source_and_port_range_check_add_del_t * mp) +{ + vl_api_ip_source_and_port_range_check_add_del_reply_t *rmp; + int rv = 0; + + u8 is_ipv6 = mp->is_ipv6; + u8 is_add = mp->is_add; + u8 mask_length = mp->mask_length; + ip4_address_t ip4_addr; + ip6_address_t ip6_addr; + u16 *low_ports = 0; + u16 *high_ports = 0; + u32 vrf_id; + u16 tmp_low, tmp_high; + u8 num_ranges; + int i; + + // Validate port range + num_ranges = mp->number_of_ranges; + if (num_ranges > 32) 
+ { // This is size of array in VPE.API + rv = VNET_API_ERROR_EXCEEDED_NUMBER_OF_RANGES_CAPACITY; + goto reply; + } + + vec_reset_length (low_ports); + vec_reset_length (high_ports); + + for (i = 0; i < num_ranges; i++) + { + tmp_low = mp->low_ports[i]; + tmp_high = mp->high_ports[i]; + // If tmp_low <= tmp_high then only need to check tmp_low = 0 + // If tmp_low <= tmp_high then only need to check tmp_high > 65535 + if (tmp_low > tmp_high || tmp_low == 0 || tmp_high > 65535) + { + rv = VNET_API_ERROR_INVALID_VALUE; + goto reply; + } + vec_add1 (low_ports, tmp_low); + vec_add1 (high_ports, tmp_high + 1); + } + + // Validate mask_length + if ((is_ipv6 && mask_length > 128) || (!is_ipv6 && mask_length > 32)) + { + rv = VNET_API_ERROR_ADDRESS_LENGTH_MISMATCH; + goto reply; + } + + vrf_id = ntohl (mp->vrf_id); + + if (vrf_id < 1) + { + rv = VNET_API_ERROR_INVALID_VALUE; + goto reply; + } + + + if (is_ipv6) + { + clib_memcpy (ip6_addr.as_u8, mp->address, sizeof (ip6_addr.as_u8)); + rv = ip6_source_and_port_range_check_add_del (&ip6_addr, + mask_length, + vrf_id, + low_ports, + high_ports, is_add); + } + else + { + clib_memcpy (ip4_addr.data, mp->address, sizeof (ip4_addr)); + rv = ip4_source_and_port_range_check_add_del (&ip4_addr, + mask_length, + vrf_id, + low_ports, + high_ports, is_add); + } + +reply: + vec_free (low_ports); + vec_free (high_ports); + REPLY_MACRO (VL_API_IP_SOURCE_AND_PORT_RANGE_CHECK_ADD_DEL_REPLY); +} + +static void + vl_api_ip_source_and_port_range_check_interface_add_del_t_handler + (vl_api_ip_source_and_port_range_check_interface_add_del_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + vl_api_ip_source_and_port_range_check_interface_add_del_reply_t *rmp; + ip4_main_t *im = &ip4_main; + int rv; + u32 sw_if_index; + u32 fib_index[IP_SOURCE_AND_PORT_RANGE_CHECK_N_PROTOCOLS]; + u32 vrf_id[IP_SOURCE_AND_PORT_RANGE_CHECK_N_PROTOCOLS]; + uword *p = 0; + int i; + + vrf_id[IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_TCP_OUT] = + ntohl 
(mp->tcp_out_vrf_id); + vrf_id[IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_UDP_OUT] = + ntohl (mp->udp_out_vrf_id); + vrf_id[IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_TCP_IN] = + ntohl (mp->tcp_in_vrf_id); + vrf_id[IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_UDP_IN] = + ntohl (mp->udp_in_vrf_id); + + + for (i = 0; i < IP_SOURCE_AND_PORT_RANGE_CHECK_N_PROTOCOLS; i++) + { + if (vrf_id[i] != 0 && vrf_id[i] != ~0) + { + p = hash_get (im->fib_index_by_table_id, vrf_id[i]); + + if (p == 0) + { + rv = VNET_API_ERROR_INVALID_VALUE; + goto reply; + } + + fib_index[i] = p[0]; + } + else + fib_index[i] = ~0; + } + sw_if_index = ntohl (mp->sw_if_index); + + VALIDATE_SW_IF_INDEX (mp); + + rv = + set_ip_source_and_port_range_check (vm, fib_index, sw_if_index, + mp->is_add); + + BAD_SW_IF_INDEX_LABEL; +reply: + + REPLY_MACRO (VL_API_IP_SOURCE_AND_PORT_RANGE_CHECK_INTERFACE_ADD_DEL_REPLY); +} + +static void +vl_api_ipsec_gre_add_del_tunnel_t_handler (vl_api_ipsec_gre_add_del_tunnel_t * + mp) +{ + vl_api_ipsec_gre_add_del_tunnel_reply_t *rmp; + int rv = 0; + vnet_ipsec_gre_add_del_tunnel_args_t _a, *a = &_a; + u32 sw_if_index = ~0; + + /* Check src & dst are different */ + if (memcmp (mp->src_address, mp->dst_address, 4) == 0) + { + rv = VNET_API_ERROR_SAME_SRC_DST; + goto out; + } + + memset (a, 0, sizeof (*a)); + + /* ip addresses sent in network byte order */ + clib_memcpy (&(a->src), mp->src_address, 4); + clib_memcpy (&(a->dst), mp->dst_address, 4); + a->is_add = mp->is_add; + a->lsa = ntohl (mp->local_sa_id); + a->rsa = ntohl (mp->remote_sa_id); + + rv = vnet_ipsec_gre_add_del_tunnel (a, &sw_if_index); + +out: + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_GRE_ADD_DEL_TUNNEL_REPLY, + ({ + rmp->sw_if_index = ntohl (sw_if_index); + })); + /* *INDENT-ON* */ +} + +static void send_ipsec_gre_tunnel_details + (ipsec_gre_tunnel_t * t, unix_shared_memory_queue_t * q, u32 context) +{ + vl_api_ipsec_gre_tunnel_details_t *rmp; + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof 
(*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_IPSEC_GRE_TUNNEL_DETAILS); + clib_memcpy (rmp->src_address, &(t->tunnel_src), 4); + clib_memcpy (rmp->dst_address, &(t->tunnel_dst), 4); + rmp->sw_if_index = htonl (t->sw_if_index); + rmp->local_sa_id = htonl (t->local_sa_id); + rmp->remote_sa_id = htonl (t->remote_sa_id); + rmp->context = context; + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void vl_api_ipsec_gre_tunnel_dump_t_handler + (vl_api_ipsec_gre_tunnel_dump_t * mp) +{ + unix_shared_memory_queue_t *q; + ipsec_gre_main_t *igm = &ipsec_gre_main; + ipsec_gre_tunnel_t *t; + u32 sw_if_index; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + { + return; + } + + sw_if_index = ntohl (mp->sw_if_index); + + if (~0 == sw_if_index) + { + /* *INDENT-OFF* */ + pool_foreach (t, igm->tunnels, + ({ + send_ipsec_gre_tunnel_details(t, q, mp->context); + })); + /* *INDENT-ON* */ + } + else + { + if ((sw_if_index >= vec_len (igm->tunnel_index_by_sw_if_index)) || + (~0 == igm->tunnel_index_by_sw_if_index[sw_if_index])) + { + return; + } + t = &igm->tunnels[igm->tunnel_index_by_sw_if_index[sw_if_index]]; + send_ipsec_gre_tunnel_details (t, q, mp->context); + } +} + +static void +vl_api_delete_subif_t_handler (vl_api_delete_subif_t * mp) +{ + vl_api_delete_subif_reply_t *rmp; + int rv; + + rv = vnet_delete_sub_interface (ntohl (mp->sw_if_index)); + + REPLY_MACRO (VL_API_DELETE_SUBIF_REPLY); +} + +static void + vl_api_l2_interface_pbb_tag_rewrite_t_handler + (vl_api_l2_interface_pbb_tag_rewrite_t * mp) +{ + vl_api_l2_interface_pbb_tag_rewrite_reply_t *rmp; + vnet_main_t *vnm = vnet_get_main (); + vlib_main_t *vm = vlib_get_main (); + u32 vtr_op; + int rv = 0; + + VALIDATE_SW_IF_INDEX (mp); + + vtr_op = ntohl (mp->vtr_op); + + switch (vtr_op) + { + case L2_VTR_DISABLED: + case L2_VTR_PUSH_2: + case L2_VTR_POP_2: + case L2_VTR_TRANSLATE_2_1: + break; + + default: + rv = VNET_API_ERROR_INVALID_VALUE; + goto bad_sw_if_index; + } + + rv = 
l2pbb_configure (vm, vnm, ntohl (mp->sw_if_index), vtr_op, + mp->b_dmac, mp->b_smac, ntohs (mp->b_vlanid), + ntohl (mp->i_sid), ntohs (mp->outer_tag)); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_L2_INTERFACE_PBB_TAG_REWRITE_REPLY); + +} + +static void +vl_api_bfd_udp_add_t_handler (vl_api_bfd_udp_add_t * mp) +{ + vl_api_bfd_udp_add_reply_t *rmp; + int rv; + + VALIDATE_SW_IF_INDEX (mp); + + ip46_address_t local_addr; + memset (&local_addr, 0, sizeof (local_addr)); + ip46_address_t peer_addr; + memset (&peer_addr, 0, sizeof (peer_addr)); + if (mp->is_ipv6) + { + clib_memcpy (&local_addr.ip6, mp->local_addr, sizeof (local_addr.ip6)); + clib_memcpy (&peer_addr.ip6, mp->peer_addr, sizeof (peer_addr.ip6)); + } + else + { + clib_memcpy (&local_addr.ip4, mp->local_addr, sizeof (local_addr.ip4)); + clib_memcpy (&peer_addr.ip4, mp->peer_addr, sizeof (peer_addr.ip4)); + } + + rv = bfd_udp_add_session (clib_net_to_host_u32 (mp->sw_if_index), + clib_net_to_host_u32 (mp->desired_min_tx), + clib_net_to_host_u32 (mp->required_min_rx), + mp->detect_mult, &local_addr, &peer_addr); + + BAD_SW_IF_INDEX_LABEL; + REPLY_MACRO (VL_API_BFD_UDP_ADD_REPLY); +} + +static void +vl_api_bfd_udp_del_t_handler (vl_api_bfd_udp_del_t * mp) +{ + vl_api_bfd_udp_del_reply_t *rmp; + int rv; + + VALIDATE_SW_IF_INDEX (mp); + + ip46_address_t local_addr; + memset (&local_addr, 0, sizeof (local_addr)); + ip46_address_t peer_addr; + memset (&peer_addr, 0, sizeof (peer_addr)); + if (mp->is_ipv6) + { + clib_memcpy (&local_addr.ip6, mp->local_addr, sizeof (local_addr.ip6)); + clib_memcpy (&peer_addr.ip6, mp->peer_addr, sizeof (peer_addr.ip6)); + } + else + { + clib_memcpy (&local_addr.ip4, mp->local_addr, sizeof (local_addr.ip4)); + clib_memcpy (&peer_addr.ip4, mp->peer_addr, sizeof (peer_addr.ip4)); + } + + rv = + bfd_udp_del_session (clib_net_to_host_u32 (mp->sw_if_index), &local_addr, + &peer_addr); + + BAD_SW_IF_INDEX_LABEL; + REPLY_MACRO (VL_API_BFD_UDP_DEL_REPLY); +} + +void 
+send_bfd_udp_session_details (unix_shared_memory_queue_t * q, u32 context, + bfd_session_t * bs) +{ + if (bs->transport != BFD_TRANSPORT_UDP4 && + bs->transport != BFD_TRANSPORT_UDP6) + { + return; + } + + vl_api_bfd_udp_session_details_t *mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_BFD_UDP_SESSION_DETAILS); + mp->context = context; + mp->bs_index = clib_host_to_net_u32 (bs->bs_idx); + mp->state = bs->local_state; + bfd_udp_session_t *bus = &bs->udp; + bfd_udp_key_t *key = &bus->key; + mp->sw_if_index = clib_host_to_net_u32 (key->sw_if_index); + mp->is_ipv6 = !(ip46_address_is_ip4 (&key->local_addr)); + if (mp->is_ipv6) + { + clib_memcpy (mp->local_addr, &key->local_addr, + sizeof (key->local_addr)); + clib_memcpy (mp->peer_addr, &key->peer_addr, sizeof (key->peer_addr)); + } + else + { + clib_memcpy (mp->local_addr, key->local_addr.ip4.data, + sizeof (key->local_addr.ip4.data)); + clib_memcpy (mp->peer_addr, key->peer_addr.ip4.data, + sizeof (key->peer_addr.ip4.data)); + } + + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +void +bfd_event (bfd_main_t * bm, bfd_session_t * bs) +{ + vpe_api_main_t *vam = &vpe_api_main; + vpe_client_registration_t *reg; + unix_shared_memory_queue_t *q; + /* *INDENT-OFF* */ + pool_foreach (reg, vam->bfd_events_registrations, ({ + q = vl_api_client_index_to_input_queue (reg->client_index); + if (q) + { + switch (bs->transport) + { + case BFD_TRANSPORT_UDP4: + /* fallthrough */ + case BFD_TRANSPORT_UDP6: + send_bfd_udp_session_details (q, 0, bs); + } + } + })); + /* *INDENT-ON* */ +} + +static void +vl_api_bfd_udp_session_dump_t_handler (vl_api_bfd_udp_session_dump_t * mp) +{ + unix_shared_memory_queue_t *q; + + q = vl_api_client_index_to_input_queue (mp->client_index); + + if (q == 0) + return; + + bfd_session_t *bs = NULL; + /* *INDENT-OFF* */ + pool_foreach (bs, bfd_main.sessions, ({ + if (bs->transport == BFD_TRANSPORT_UDP4 || + bs->transport == BFD_TRANSPORT_UDP6) + 
send_bfd_udp_session_details (q, mp->context, bs); + })); + /* *INDENT-ON* */ +} + +static void +vl_api_bfd_session_set_flags_t_handler (vl_api_bfd_session_set_flags_t * mp) +{ + vl_api_bfd_session_set_flags_reply_t *rmp; + int rv; + + rv = + bfd_session_set_flags (clib_net_to_host_u32 (mp->bs_index), + mp->admin_up_down); + + REPLY_MACRO (VL_API_BFD_SESSION_SET_FLAGS_REPLY); +} + +static void +vl_api_punt_t_handler (vl_api_punt_t * mp) +{ + vl_api_punt_reply_t *rmp; + vlib_main_t *vm = vlib_get_main (); + int rv = 0; + clib_error_t *error; + + error = vnet_punt_add_del (vm, mp->ipv, mp->l4_protocol, + ntohs (mp->l4_port), mp->is_add); + if (error) + { + rv = -1; + clib_error_report (error); + } + + REPLY_MACRO (VL_API_PUNT_REPLY); +} + +static void + vl_api_flow_classify_set_interface_t_handler + (vl_api_flow_classify_set_interface_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + vl_api_flow_classify_set_interface_reply_t *rmp; + int rv; + u32 sw_if_index, ip4_table_index, ip6_table_index; + + ip4_table_index = ntohl (mp->ip4_table_index); + ip6_table_index = ntohl (mp->ip6_table_index); + sw_if_index = ntohl (mp->sw_if_index); + + VALIDATE_SW_IF_INDEX (mp); + + rv = vnet_set_flow_classify_intfc (vm, sw_if_index, ip4_table_index, + ip6_table_index, mp->is_add); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_FLOW_CLASSIFY_SET_INTERFACE_REPLY); +} + +static void +send_flow_classify_details (u32 sw_if_index, + u32 table_index, + unix_shared_memory_queue_t * q, u32 context) +{ + vl_api_flow_classify_details_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_FLOW_CLASSIFY_DETAILS); + mp->context = context; + mp->sw_if_index = htonl (sw_if_index); + mp->table_index = htonl (table_index); + + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +static void +vl_api_flow_classify_dump_t_handler (vl_api_flow_classify_dump_t * mp) +{ + unix_shared_memory_queue_t *q; + flow_classify_main_t *pcm = 
&flow_classify_main; + u32 *vec_tbl; + int i; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + return; + + vec_tbl = pcm->classify_table_index_by_sw_if_index[mp->type]; + + if (vec_len (vec_tbl)) + { + for (i = 0; i < vec_len (vec_tbl); i++) + { + if (vec_elt (vec_tbl, i) == ~0) + continue; + + send_flow_classify_details (i, vec_elt (vec_tbl, i), q, + mp->context); + } + } +} + +static void +send_ipsec_spd_details (ipsec_policy_t * p, unix_shared_memory_queue_t * q, + u32 context) +{ + vl_api_ipsec_spd_details_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_IPSEC_SPD_DETAILS); + mp->context = context; + + mp->spd_id = htonl (p->id); + mp->priority = htonl (p->priority); + mp->is_outbound = p->is_outbound; + mp->is_ipv6 = p->is_ipv6; + if (p->is_ipv6) + { + memcpy (mp->local_start_addr, &p->laddr.start.ip6, 16); + memcpy (mp->local_stop_addr, &p->laddr.stop.ip6, 16); + memcpy (mp->remote_start_addr, &p->raddr.start.ip6, 16); + memcpy (mp->remote_stop_addr, &p->raddr.stop.ip6, 16); + } + else + { + memcpy (mp->local_start_addr, &p->laddr.start.ip4, 4); + memcpy (mp->local_stop_addr, &p->laddr.stop.ip4, 4); + memcpy (mp->remote_start_addr, &p->raddr.start.ip4, 4); + memcpy (mp->remote_stop_addr, &p->raddr.stop.ip4, 4); + } + mp->local_start_port = htons (p->lport.start); + mp->local_stop_port = htons (p->lport.stop); + mp->remote_start_port = htons (p->rport.start); + mp->remote_stop_port = htons (p->rport.stop); + mp->protocol = p->protocol; + mp->policy = p->policy; + mp->sa_id = htonl (p->sa_id); + mp->bytes = clib_host_to_net_u64 (p->counter.bytes); + mp->packets = clib_host_to_net_u64 (p->counter.packets); + + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +static void +vl_api_ipsec_spd_dump_t_handler (vl_api_ipsec_spd_dump_t * mp) +{ + unix_shared_memory_queue_t *q; + ipsec_main_t *im = &ipsec_main; + ipsec_policy_t *policy; + ipsec_spd_t *spd; + uword *p; + u32 
spd_index; +#if IPSEC > 0 + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + return; + + p = hash_get (im->spd_index_by_spd_id, ntohl (mp->spd_id)); + if (!p) + return; + + spd_index = p[0]; + spd = pool_elt_at_index (im->spds, spd_index); + + /* *INDENT-OFF* */ + pool_foreach (policy, spd->policies, + ({ + if (mp->sa_id == ~(0) || ntohl (mp->sa_id) == policy->sa_id) + send_ipsec_spd_details (policy, q, + mp->context);} + )); + /* *INDENT-ON* */ +#else + clib_warning ("unimplemented"); +#endif +} + +static void +vl_api_feature_enable_disable_t_handler (vl_api_feature_enable_disable_t * mp) +{ + vl_api_feature_enable_disable_reply_t *rmp; + int rv = 0; + u8 *arc_name, *feature_name; + + VALIDATE_SW_IF_INDEX (mp); + + arc_name = format (0, "%s%c", mp->arc_name, 0); + feature_name = format (0, "%s%c", mp->feature_name, 0); + + vnet_feature_registration_t *reg; + reg = + vnet_get_feature_reg ((const char *) arc_name, + (const char *) feature_name); + if (reg == 0) + rv = VNET_API_ERROR_INVALID_VALUE; + else + { + u32 sw_if_index; + clib_error_t *error = 0; + + sw_if_index = ntohl (mp->sw_if_index); + if (reg->enable_disable_cb) + error = reg->enable_disable_cb (sw_if_index, mp->enable); + if (!error) + vnet_feature_enable_disable ((const char *) arc_name, + (const char *) feature_name, + sw_if_index, mp->enable, 0, 0); + else + { + clib_error_report (error); + rv = VNET_API_ERROR_CANNOT_ENABLE_DISABLE_FEATURE; + } + } + + vec_free (feature_name); + vec_free (arc_name); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_FEATURE_ENABLE_DISABLE_REPLY); +} + +#define BOUNCE_HANDLER(nn) \ +static void vl_api_##nn##_t_handler ( \ + vl_api_##nn##_t *mp) \ +{ \ + vpe_client_registration_t *reg; \ + vpe_api_main_t * vam = &vpe_api_main; \ + unix_shared_memory_queue_t * q; \ + \ + /* One registration only... 
*/ \ + pool_foreach(reg, vam->nn##_registrations, \ + ({ \ + q = vl_api_client_index_to_input_queue (reg->client_index); \ + if (q) { \ + /* \ + * If the queue is stuffed, turf the msg and complain \ + * It's unlikely that the intended recipient is \ + * alive; avoid deadlock at all costs. \ + */ \ + if (q->cursize == q->maxsize) { \ + clib_warning ("ERROR: receiver queue full, drop msg"); \ + vl_msg_api_free (mp); \ + return; \ + } \ + vl_msg_api_send_shmem (q, (u8 *)&mp); \ + return; \ + } \ + })); \ + vl_msg_api_free (mp); \ +} + +static void setup_message_id_table (api_main_t * am); + +/* + * vpe_api_hookup + * Add vpe's API message handlers to the table. + * vlib has alread mapped shared memory and + * added the client registration handlers. + * See .../open-repo/vlib/memclnt_vlib.c:memclnt_process() + */ +static clib_error_t * +vpe_api_hookup (vlib_main_t * vm) +{ + api_main_t *am = &api_main; + +#define _(N,n) \ + vl_msg_api_set_handlers(VL_API_##N, #n, \ + vl_api_##n##_t_handler, \ + vl_noop_handler, \ + vl_api_##n##_t_endian, \ + vl_api_##n##_t_print, \ + sizeof(vl_api_##n##_t), 1); + foreach_vpe_api_msg; +#undef _ + + /* + * Manually register the sr tunnel add del msg, so we trace + * enough bytes to capture a typical segment list + */ + vl_msg_api_set_handlers (VL_API_SR_TUNNEL_ADD_DEL, + "sr_tunnel_add_del", + vl_api_sr_tunnel_add_del_t_handler, + vl_noop_handler, + vl_api_sr_tunnel_add_del_t_endian, + vl_api_sr_tunnel_add_del_t_print, 256, 1); + + + /* + * Manually register the sr policy add del msg, so we trace + * enough bytes to capture a typical tunnel name list + */ + vl_msg_api_set_handlers (VL_API_SR_POLICY_ADD_DEL, + "sr_policy_add_del", + vl_api_sr_policy_add_del_t_handler, + vl_noop_handler, + vl_api_sr_policy_add_del_t_endian, + vl_api_sr_policy_add_del_t_print, 256, 1); + + /* + * Trace space for 8 MPLS encap labels, classifier mask+match + */ + am->api_trace_cfg[VL_API_MPLS_TUNNEL_ADD_DEL].size += 8 * sizeof (u32); + 
am->api_trace_cfg[VL_API_CLASSIFY_ADD_DEL_TABLE].size += 5 * sizeof (u32x4); + am->api_trace_cfg[VL_API_CLASSIFY_ADD_DEL_SESSION].size + += 5 * sizeof (u32x4); + am->api_trace_cfg[VL_API_VXLAN_ADD_DEL_TUNNEL].size += 16 * sizeof (u32); + + /* + * Thread-safe API messages + */ + am->is_mp_safe[VL_API_IP_ADD_DEL_ROUTE] = 1; + am->is_mp_safe[VL_API_GET_NODE_GRAPH] = 1; + + /* + * Set up the (msg_name, crc, message-id) table + */ + setup_message_id_table (am); + + return 0; +} + +VLIB_API_INIT_FUNCTION (vpe_api_hookup); + +static clib_error_t * +vpe_api_init (vlib_main_t * vm) +{ + vpe_api_main_t *am = &vpe_api_main; + + am->vlib_main = vm; + am->vnet_main = vnet_get_main (); + am->interface_events_registration_hash = hash_create (0, sizeof (uword)); + am->to_netconf_server_registration_hash = hash_create (0, sizeof (uword)); + am->from_netconf_server_registration_hash = hash_create (0, sizeof (uword)); + am->to_netconf_client_registration_hash = hash_create (0, sizeof (uword)); + am->from_netconf_client_registration_hash = hash_create (0, sizeof (uword)); + am->oam_events_registration_hash = hash_create (0, sizeof (uword)); + am->bfd_events_registration_hash = hash_create (0, sizeof (uword)); + + vl_api_init (vm); + vl_set_memory_region_name ("/vpe-api"); + vl_enable_disable_memory_api (vm, 1 /* enable it */ ); + + return 0; +} + +VLIB_INIT_FUNCTION (vpe_api_init); + + +static clib_error_t * +api_segment_config (vlib_main_t * vm, unformat_input_t * input) +{ + u8 *chroot_path; + u64 baseva, size, pvt_heap_size; + int uid, gid, rv; + const int max_buf_size = 4096; + char *s, *buf; + struct passwd _pw, *pw; + struct group _grp, *grp; + clib_error_t *e; + buf = vec_new (char, 128); + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "prefix %s", &chroot_path)) + { + vec_add1 (chroot_path, 0); + vl_set_memory_root_path ((char *) chroot_path); + } + else if (unformat (input, "uid %d", &uid)) + vl_set_memory_uid (uid); + else if 
(unformat (input, "gid %d", &gid)) + vl_set_memory_gid (gid); + else if (unformat (input, "baseva %llx", &baseva)) + vl_set_global_memory_baseva (baseva); + else if (unformat (input, "global-size %lldM", &size)) + vl_set_global_memory_size (size * (1ULL << 20)); + else if (unformat (input, "global-size %lldG", &size)) + vl_set_global_memory_size (size * (1ULL << 30)); + else if (unformat (input, "global-size %lld", &size)) + vl_set_global_memory_size (size); + else if (unformat (input, "global-pvt-heap-size %lldM", &pvt_heap_size)) + vl_set_global_pvt_heap_size (pvt_heap_size * (1ULL << 20)); + else if (unformat (input, "global-pvt-heap-size size %lld", + &pvt_heap_size)) + vl_set_global_pvt_heap_size (pvt_heap_size); + else if (unformat (input, "api-pvt-heap-size %lldM", &pvt_heap_size)) + vl_set_api_pvt_heap_size (pvt_heap_size * (1ULL << 20)); + else if (unformat (input, "api-pvt-heap-size size %lld", + &pvt_heap_size)) + vl_set_api_pvt_heap_size (pvt_heap_size); + else if (unformat (input, "api-size %lldM", &size)) + vl_set_api_memory_size (size * (1ULL << 20)); + else if (unformat (input, "api-size %lldG", &size)) + vl_set_api_memory_size (size * (1ULL << 30)); + else if (unformat (input, "api-size %lld", &size)) + vl_set_api_memory_size (size); + else if (unformat (input, "uid %s", &s)) + { + /* lookup the username */ + pw = NULL; + while (((rv = + getpwnam_r (s, &_pw, buf, vec_len (buf), &pw)) == ERANGE) + && (vec_len (buf) <= max_buf_size)) + { + vec_resize (buf, vec_len (buf) * 2); + } + if (rv < 0) + { + e = clib_error_return_code (0, rv, + CLIB_ERROR_ERRNO_VALID | + CLIB_ERROR_FATAL, + "cannot fetch username %s", s); + vec_free (s); + vec_free (buf); + return e; + } + if (pw == NULL) + { + e = + clib_error_return_fatal (0, "username %s does not exist", s); + vec_free (s); + vec_free (buf); + return e; + } + vec_free (s); + vl_set_memory_uid (pw->pw_uid); + } + else if (unformat (input, "gid %s", &s)) + { + /* lookup the group name */ + grp = NULL; + 
while (((rv = + getgrnam_r (s, &_grp, buf, vec_len (buf), &grp)) == ERANGE) + && (vec_len (buf) <= max_buf_size)) + { + vec_resize (buf, vec_len (buf) * 2); + } + if (rv != 0) + { + e = clib_error_return_code (0, rv, + CLIB_ERROR_ERRNO_VALID | + CLIB_ERROR_FATAL, + "cannot fetch group %s", s); + vec_free (s); + vec_free (buf); + return e; + } + if (grp == NULL) + { + e = clib_error_return_fatal (0, "group %s does not exist", s); + vec_free (s); + vec_free (buf); + return e; + } + vec_free (s); + vec_free (buf); + vl_set_memory_gid (grp->gr_gid); + } + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + return 0; +} + +VLIB_EARLY_CONFIG_FUNCTION (api_segment_config, "api-segment"); + +void * +get_unformat_vnet_sw_interface (void) +{ + return (void *) &unformat_vnet_sw_interface; +} + +static u8 * +format_arp_event (u8 * s, va_list * args) +{ + vl_api_ip4_arp_event_t *event = va_arg (*args, vl_api_ip4_arp_event_t *); + + s = format (s, "pid %d: ", event->pid); + if (event->mac_ip) + s = format (s, "bd mac/ip4 binding events"); + else + s = format (s, "resolution for %U", format_ip4_address, &event->address); + return s; +} + +static u8 * +format_nd_event (u8 * s, va_list * args) +{ + vl_api_ip6_nd_event_t *event = va_arg (*args, vl_api_ip6_nd_event_t *); + + s = format (s, "pid %d: ", event->pid); + if (event->mac_ip) + s = format (s, "bd mac/ip6 binding events"); + else + s = format (s, "resolution for %U", format_ip6_address, event->address); + return s; +} + +static clib_error_t * +show_ip_arp_nd_events_fn (vlib_main_t * vm, + unformat_input_t * input, vlib_cli_command_t * cmd) +{ + vpe_api_main_t *am = &vpe_api_main; + vl_api_ip4_arp_event_t *arp_event; + vl_api_ip6_nd_event_t *nd_event; + + if ((pool_elts (am->arp_events) == 0) && (pool_elts (am->nd_events) == 0)) + { + vlib_cli_output (vm, "No active arp or nd event registrations"); + return 0; + } + + /* *INDENT-OFF* */ + pool_foreach (arp_event, am->arp_events, + 
({ + vlib_cli_output (vm, "%U", format_arp_event, arp_event); + })); + + pool_foreach (nd_event, am->nd_events, + ({ + vlib_cli_output (vm, "%U", format_nd_event, nd_event); + })); + /* *INDENT-ON* */ + + return 0; +} + +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (show_ip_arp_nd_events, static) = { + .path = "show arp-nd-event registrations", + .function = show_ip_arp_nd_events_fn, + .short_help = "Show ip4 arp and ip6 nd event registrations", +}; +/* *INDENT-ON* */ + +#define vl_msg_name_crc_list +#include +#undef vl_msg_name_crc_list + +static void +setup_message_id_table (api_main_t * am) +{ +#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id); + foreach_vl_msg_name_crc_memclnt; + foreach_vl_msg_name_crc_vpe; +#undef _ +} + + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/vpp/vpp/vpp-api/custom_dump.c b/vpp/vpp/vpp-api/custom_dump.c new file mode 100644 index 00000000..bfebf49f --- /dev/null +++ b/vpp/vpp/vpp-api/custom_dump.c @@ -0,0 +1,3139 @@ +/* + *------------------------------------------------------------------ + * custom_dump.c - pretty-print API messages for replay + * + * Copyright (c) 2014-2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *------------------------------------------------------------------ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include + +#define vl_typedefs /* define message structures */ +#include +#undef vl_typedefs + +#define vl_endianfun /* define message structures */ +#include +#undef vl_endianfun + +#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__) + +#define FINISH \ + vec_add1 (s, 0); \ + vl_print (handle, (char *)s); \ + vec_free (s); \ + return handle; + + +static void *vl_api_create_loopback_t_print + (vl_api_create_loopback_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: create_loopback "); + s = format (s, "mac %U ", format_ethernet_address, &mp->mac_address); + + FINISH; +} + +static void *vl_api_delete_loopback_t_print + (vl_api_delete_loopback_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: delete_loopback "); + s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index)); + + FINISH; +} + +static void *vl_api_sw_interface_set_flags_t_print + (vl_api_sw_interface_set_flags_t * mp, void *handle) +{ + u8 *s; + s = format (0, "SCRIPT: sw_interface_set_flags "); + + s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index)); + + if (mp->admin_up_down) + s = format (s, "admin-up "); + else + s = format (s, "admin-down "); + + if (mp->link_up_down) + s = format (s, "link-up"); + else + s = format (s, "link-down"); + + FINISH; +} + +static void *vl_api_sw_interface_add_del_address_t_print + (vl_api_sw_interface_add_del_address_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: sw_interface_add_del_address "); + + s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index)); + + if (mp->is_ipv6) + s = format (s, "%U/%d ", format_ip6_address, + (ip6_address_t *) mp->address, mp->address_length); + else + s = format (s, 
"%U/%d ", format_ip4_address, + (ip4_address_t *) mp->address, mp->address_length); + + if (mp->is_add == 0) + s = format (s, "del "); + if (mp->del_all) + s = format (s, "del-all "); + + FINISH; +} + +static void *vl_api_sw_interface_set_table_t_print + (vl_api_sw_interface_set_table_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: sw_interface_set_table "); + + s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index)); + + if (mp->vrf_id) + s = format (s, "vrf %d ", ntohl (mp->vrf_id)); + + if (mp->is_ipv6) + s = format (s, "ipv6 "); + + FINISH; +} + +static void *vl_api_sw_interface_set_mpls_enable_t_print + (vl_api_sw_interface_set_mpls_enable_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: sw_interface_set_mpls_enable "); + + s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index)); + + if (mp->enable == 0) + s = format (s, "disable"); + + FINISH; +} + +static void *vl_api_sw_interface_set_vpath_t_print + (vl_api_sw_interface_set_vpath_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: sw_interface_set_vpath "); + + s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index)); + + if (mp->enable) + s = format (s, "enable "); + else + s = format (s, "disable "); + + FINISH; +} + +static void *vl_api_sw_interface_set_vxlan_bypass_t_print + (vl_api_sw_interface_set_vxlan_bypass_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: sw_interface_set_vxlan_bypass "); + + s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index)); + + if (mp->is_ipv6) + s = format (s, "ip6"); + + if (mp->enable) + s = format (s, "enable "); + else + s = format (s, "disable "); + + FINISH; +} + +static void *vl_api_sw_interface_set_l2_xconnect_t_print + (vl_api_sw_interface_set_l2_xconnect_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: sw_interface_set_l2_xconnect "); + + s = format (s, "sw_if_index %d ", ntohl (mp->rx_sw_if_index)); + + if (mp->enable) + { + s = format (s, "tx_sw_if_index %d ", ntohl (mp->tx_sw_if_index)); 
+ } + else + s = format (s, "delete "); + + FINISH; +} + +static void *vl_api_sw_interface_set_l2_bridge_t_print + (vl_api_sw_interface_set_l2_bridge_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: sw_interface_set_l2_bridge "); + + s = format (s, "sw_if_index %d ", ntohl (mp->rx_sw_if_index)); + + if (mp->enable) + { + s = format (s, "bd_id %d shg %d %senable ", ntohl (mp->bd_id), + mp->shg, ((mp->bvi) ? "bvi " : " ")); + } + else + s = format (s, "disable "); + + FINISH; +} + +static void *vl_api_sw_interface_set_dpdk_hqos_pipe_t_print + (vl_api_sw_interface_set_dpdk_hqos_pipe_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: sw_interface_set_dpdk_hqos_pipe "); + + s = format (s, "sw_if_index %u ", ntohl (mp->sw_if_index)); + + s = format (s, "subport %u pipe %u profile %u ", + ntohl (mp->subport), ntohl (mp->pipe), ntohl (mp->profile)); + + FINISH; +} + +static void *vl_api_sw_interface_set_dpdk_hqos_subport_t_print + (vl_api_sw_interface_set_dpdk_hqos_subport_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: sw_interface_set_dpdk_hqos_subport "); + + s = format (s, "sw_if_index %u ", ntohl (mp->sw_if_index)); + + s = + format (s, + "subport %u rate %u bkt_size %u tc0 %u tc1 %u tc2 %u tc3 %u period %u", + ntohl (mp->subport), ntohl (mp->tb_rate), ntohl (mp->tb_size), + ntohl (mp->tc_rate[0]), ntohl (mp->tc_rate[1]), + ntohl (mp->tc_rate[2]), ntohl (mp->tc_rate[3]), + ntohl (mp->tc_period)); + + FINISH; +} + +static void *vl_api_sw_interface_set_dpdk_hqos_tctbl_t_print + (vl_api_sw_interface_set_dpdk_hqos_tctbl_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: sw_interface_set_dpdk_hqos_tctbl "); + + s = format (s, "sw_if_index %u ", ntohl (mp->sw_if_index)); + + s = format (s, "entry %u tc %u queue %u", + ntohl (mp->entry), ntohl (mp->tc), ntohl (mp->queue)); + + FINISH; +} + +static void *vl_api_bridge_domain_add_del_t_print + (vl_api_bridge_domain_add_del_t * mp, void *handle) +{ + u8 *s; + + s = format (0, 
"SCRIPT: bridge_domain_add_del "); + + s = format (s, "bd_id %d ", ntohl (mp->bd_id)); + + if (mp->is_add) + { + s = format (s, "flood %d uu-flood %d forward %d learn %d arp-term %d", + mp->flood, mp->uu_flood, mp->forward, mp->learn, + mp->arp_term); + } + else + s = format (s, "del "); + + FINISH; +} + +static void *vl_api_bridge_domain_dump_t_print + (vl_api_bridge_domain_dump_t * mp, void *handle) +{ + u8 *s; + u32 bd_id = ntohl (mp->bd_id); + + s = format (0, "SCRIPT: bridge_domain_dump "); + + if (bd_id != ~0) + s = format (s, "bd_id %d ", bd_id); + + FINISH; +} + +static void *vl_api_l2fib_add_del_t_print + (vl_api_l2fib_add_del_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: l2fib_add_del "); + + s = format (s, "mac %U ", format_ethernet_address, &mp->mac); + + s = format (s, "bd_id %d ", ntohl (mp->bd_id)); + + + if (mp->is_add) + { + s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index)); + if (mp->static_mac) + s = format (s, "%s", "static "); + if (mp->filter_mac) + s = format (s, "%s", "filter "); + if (mp->bvi_mac) + s = format (s, "%s", "bvi "); + } + else + { + s = format (s, "del "); + } + + FINISH; +} + +static void * +vl_api_l2_flags_t_print (vl_api_l2_flags_t * mp, void *handle) +{ + u8 *s; + u32 flags = ntohl (mp->feature_bitmap); + + s = format (0, "SCRIPT: l2_flags "); + + s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index)); + +#define _(a,b) \ + if (flags & L2INPUT_FEAT_ ## a) s = format (s, #a " "); + foreach_l2input_feat; +#undef _ + + FINISH; +} + +static void *vl_api_bridge_flags_t_print + (vl_api_bridge_flags_t * mp, void *handle) +{ + u8 *s; + u32 flags = ntohl (mp->feature_bitmap); + + s = format (0, "SCRIPT: bridge_flags "); + + s = format (s, "bd_id %d ", ntohl (mp->bd_id)); + + if (flags & L2_LEARN) + s = format (s, "learn "); + if (flags & L2_FWD) + s = format (s, "forward "); + if (flags & L2_FLOOD) + s = format (s, "flood "); + if (flags & L2_UU_FLOOD) + s = format (s, "uu-flood "); + if (flags & 
 L2_ARP_TERM)
+ s = format (s, "arp-term ");
+
+ if (mp->is_set == 0)
+ s = format (s, "clear ");
+
+ FINISH;
+}
+
+/* Render a bridge-domain IP-to-MAC (ARP termination) entry request. */
+static void *vl_api_bd_ip_mac_add_del_t_print
+ (vl_api_bd_ip_mac_add_del_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: bd_ip_mac_add_del ");
+ s = format (s, "bd_id %d ", ntohl (mp->bd_id));
+
+ if (mp->is_ipv6)
+ s = format (s, "%U ", format_ip6_address,
+ (ip6_address_t *) mp->ip_address);
+ else
+ s = format (s, "%U ", format_ip4_address,
+ (ip4_address_t *) mp->ip_address);
+
+ s = format (s, "%U ", format_ethernet_address, mp->mac_address);
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+/* Render a tap_connect request; the MAC is shown only if non-zero. */
+static void *vl_api_tap_connect_t_print
+ (vl_api_tap_connect_t * mp, void *handle)
+{
+ u8 *s;
+ u8 null_mac[6];
+
+ memset (null_mac, 0, sizeof (null_mac));
+
+ s = format (0, "SCRIPT: tap_connect ");
+ s = format (s, "tapname %s ", mp->tap_name);
+ if (mp->use_random_mac)
+ s = format (s, "random-mac ");
+ if (mp->tag[0])
+ s = format (s, "tag %s ", mp->tag);
+ if (memcmp (mp->mac_address, null_mac, 6))
+ s = format (s, "mac %U ", format_ethernet_address, mp->mac_address);
+
+ FINISH;
+}
+
+/* Render a tap_modify request; same zero-MAC suppression as tap_connect. */
+static void *vl_api_tap_modify_t_print
+ (vl_api_tap_modify_t * mp, void *handle)
+{
+ u8 *s;
+ u8 null_mac[6];
+
+ memset (null_mac, 0, sizeof (null_mac));
+
+ s = format (0, "SCRIPT: tap_modify ");
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+ s = format (s, "tapname %s ", mp->tap_name);
+ if (mp->use_random_mac)
+ s = format (s, "random-mac ");
+
+ if (memcmp (mp->mac_address, null_mac, 6))
+ s = format (s, "mac %U ", format_ethernet_address, mp->mac_address);
+
+ FINISH;
+}
+
+/* Render a tap_delete request. */
+static void *vl_api_tap_delete_t_print
+ (vl_api_tap_delete_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: tap_delete ");
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ FINISH;
+}
+
+/* Render a tap interface dump request (no arguments). */
+static void *vl_api_sw_interface_tap_dump_t_print
+ (vl_api_sw_interface_tap_dump_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format
(0, "SCRIPT: sw_interface_tap_dump "); + + FINISH; +} + + +static void *vl_api_ip_add_del_route_t_print + (vl_api_ip_add_del_route_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: ip_add_del_route "); + if (mp->is_add == 0) + s = format (s, "del "); + + if (mp->next_hop_sw_if_index) + s = format (s, "sw_if_index %d ", ntohl (mp->next_hop_sw_if_index)); + + if (mp->is_ipv6) + s = format (s, "%U/%d ", format_ip6_address, mp->dst_address, + mp->dst_address_length); + else + s = format (s, "%U/%d ", format_ip4_address, mp->dst_address, + mp->dst_address_length); + if (mp->is_local) + s = format (s, "local "); + else if (mp->is_drop) + s = format (s, "drop "); + else if (mp->is_classify) + s = format (s, "classify %d", ntohl (mp->classify_table_index)); + else + { + if (mp->is_ipv6) + s = format (s, "via %U ", format_ip6_address, mp->next_hop_address); + else + s = format (s, "via %U ", format_ip4_address, mp->next_hop_address); + } + + if (mp->table_id != 0) + s = format (s, "vrf %d ", ntohl (mp->table_id)); + + if (mp->create_vrf_if_needed) + s = format (s, "create-vrf "); + + if (mp->next_hop_weight != 1) + s = format (s, "weight %d ", mp->next_hop_weight); + + if (mp->not_last) + s = format (s, "not-last "); + + if (mp->is_multipath) + s = format (s, "multipath "); + + if (mp->is_multipath) + s = format (s, "multipath "); + + if (mp->next_hop_table_id) + s = format (s, "lookup-in-vrf %d ", ntohl (mp->next_hop_table_id)); + + FINISH; +} + +static void *vl_api_proxy_arp_add_del_t_print + (vl_api_proxy_arp_add_del_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: proxy_arp_add_del "); + + s = format (s, "%U - %U ", format_ip4_address, mp->low_address, + format_ip4_address, mp->hi_address); + + if (mp->vrf_id) + s = format (s, "vrf %d ", ntohl (mp->vrf_id)); + + if (mp->is_add == 0) + s = format (s, "del "); + + FINISH; +} + +static void *vl_api_proxy_arp_intfc_enable_disable_t_print + (vl_api_proxy_arp_intfc_enable_disable_t * mp, void *handle) 
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: proxy_arp_intfc_enable_disable ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ s = format (s, "enable %d ", mp->enable_disable);
+
+ FINISH;
+}
+
+/* Render an MPLS tunnel add/delete request. */
+static void *vl_api_mpls_tunnel_add_del_t_print
+ (vl_api_mpls_tunnel_add_del_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: mpls_tunnel_add_del ");
+
+ if (mp->mt_next_hop_sw_if_index)
+ s = format (s, "sw_if_index %d ", ntohl (mp->mt_next_hop_sw_if_index));
+
+ if (mp->mt_next_hop_proto_is_ip4)
+ s = format (s, "%U ", format_ip4_address, mp->mt_next_hop)
+ else
+ s = format (s, "%U ", format_ip6_address, mp->mt_next_hop);
+
+ if (mp->mt_l2_only)
+ s = format (s, "l2-only ");
+
+ if (mp->mt_is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+/* Render an unnumbered-interface set/unset request. */
+static void *vl_api_sw_interface_set_unnumbered_t_print
+ (vl_api_sw_interface_set_unnumbered_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: sw_interface_set_unnumbered ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ s = format (s, "unnum_if_index %d ", ntohl (mp->unnumbered_sw_if_index));
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+/* Render an IP neighbor (ARP/ND) add/delete request; MAC is shown only
+ * if non-zero. */
+static void *vl_api_ip_neighbor_add_del_t_print
+ (vl_api_ip_neighbor_add_del_t * mp, void *handle)
+{
+ u8 *s;
+ u8 null_mac[6];
+
+ memset (null_mac, 0, sizeof (null_mac));
+
+ s = format (0, "SCRIPT: ip_neighbor_add_del ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ if (mp->is_static)
+ s = format (s, "is_static ");
+
+ s = format (s, "vrf_id %d ", ntohl (mp->vrf_id));
+
+ if (memcmp (mp->mac_address, null_mac, 6))
+ s = format (s, "mac %U ", format_ethernet_address, mp->mac_address);
+
+ if (mp->is_ipv6)
+ s =
+ format (s, "dst %U ", format_ip6_address,
+ (ip6_address_t *) mp->dst_address);
+ else
+ s =
+ format (s, "dst %U ", format_ip4_address,
+ (ip4_address_t *) mp->dst_address);
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *
+vl_api_reset_vrf_t_print (vl_api_reset_vrf_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: reset_vrf ");
+
+ if (mp->vrf_id)
+ s = format (s, "vrf %d ", ntohl (mp->vrf_id));
+
+ if (mp->is_ipv6 != 0)
+ s = format (s, "ipv6 ");
+
+ FINISH;
+}
+
+/* Render a create-VLAN-subinterface request. */
+static void *vl_api_create_vlan_subif_t_print
+ (vl_api_create_vlan_subif_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: create_vlan_subif ");
+
+ if (mp->sw_if_index)
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ if (mp->vlan_id)
+ s = format (s, "vlan_id %d ", ntohl (mp->vlan_id));
+
+ FINISH;
+}
+
+/* Boolean option flags carried in a create_subif message. */
+#define foreach_create_subif_bit \
+_(no_tags) \
+_(one_tag) \
+_(two_tags) \
+_(dot1ad) \
+_(exact_match) \
+_(default_sub) \
+_(outer_vlan_id_any) \
+_(inner_vlan_id_any)
+
+/* Render a generic create-subinterface request, including each set
+ * option bit by name. */
+static void *vl_api_create_subif_t_print
+ (vl_api_create_subif_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: create_subif ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ s = format (s, "sub_id %d ", ntohl (mp->sub_id));
+
+ if (mp->outer_vlan_id)
+ s = format (s, "outer_vlan_id %d ", ntohs (mp->outer_vlan_id));
+
+ if (mp->inner_vlan_id)
+ s = format (s, "inner_vlan_id %d ", ntohs (mp->inner_vlan_id));
+
+#define _(a) if (mp->a) s = format (s, "%s ", #a);
+ foreach_create_subif_bit;
+#undef _
+
+ FINISH;
+}
+
+/* Render a delete-subinterface request. */
+static void *vl_api_delete_subif_t_print
+ (vl_api_delete_subif_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: delete_subif ");
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ FINISH;
+}
+
+/* Render an OAM target add/delete request. */
+static void *vl_api_oam_add_del_t_print
+ (vl_api_oam_add_del_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: oam_add_del ");
+
+ if (mp->vrf_id)
+ s = format (s, "vrf %d ", ntohl (mp->vrf_id));
+
+ s = format (s, "src %U ", format_ip4_address, mp->src_address);
+
+ s = format (s, "dst %U ", format_ip4_address, mp->dst_address);
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *
+vl_api_reset_fib_t_print (vl_api_reset_fib_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: reset_fib ");
+
+ if (mp->vrf_id)
+ s = format (s, "vrf %d ", ntohl (mp->vrf_id));
+
+ if (mp->is_ipv6 != 0)
+ s = format (s, "ipv6 ");
+
+ FINISH;
+}
+
+/* Render a DHCP proxy configuration request (v4 or v6 server/source). */
+static void *vl_api_dhcp_proxy_config_t_print
+ (vl_api_dhcp_proxy_config_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: dhcp_proxy_config ");
+
+ s = format (s, "vrf_id %d ", ntohl (mp->vrf_id));
+
+ if (mp->is_ipv6)
+ {
+ s = format (s, "svr %U ", format_ip6_address,
+ (ip6_address_t *) mp->dhcp_server);
+ s = format (s, "src %U ", format_ip6_address,
+ (ip6_address_t *) mp->dhcp_src_address);
+ }
+ else
+ {
+ s = format (s, "svr %U ", format_ip4_address,
+ (ip4_address_t *) mp->dhcp_server);
+ s = format (s, "src %U ", format_ip4_address,
+ (ip4_address_t *) mp->dhcp_src_address);
+ }
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ s = format (s, "insert-cid %d ", mp->insert_circuit_id);
+
+ FINISH;
+}
+
+/* Render the two-VRF variant of the DHCP proxy configuration request. */
+static void *vl_api_dhcp_proxy_config_2_t_print
+ (vl_api_dhcp_proxy_config_2_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: dhcp_proxy_config_2 ");
+
+ s = format (s, "rx_vrf_id %d ", ntohl (mp->rx_vrf_id));
+ s = format (s, "server_vrf_id %d ", ntohl (mp->server_vrf_id));
+
+ if (mp->is_ipv6)
+ {
+ s = format (s, "svr %U ", format_ip6_address,
+ (ip6_address_t *) mp->dhcp_server);
+ s = format (s, "src %U ", format_ip6_address,
+ (ip6_address_t *) mp->dhcp_src_address);
+ }
+ else
+ {
+ s = format (s, "svr %U ", format_ip4_address,
+ (ip4_address_t *) mp->dhcp_server);
+ s = format (s, "src %U ", format_ip4_address,
+ (ip4_address_t *) mp->dhcp_src_address);
+ }
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ s = format (s, "insert-cid %d ", mp->insert_circuit_id);
+
+ FINISH;
+}
+
+/* Render a DHCP proxy VSS (VPN source selection) request. */
+static void *vl_api_dhcp_proxy_set_vss_t_print
+ (vl_api_dhcp_proxy_set_vss_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: dhcp_proxy_set_vss ");
+
+ s = format (s,
 "tbl_id %d ", ntohl (mp->tbl_id));
+
+ s = format (s, "fib_id %d ", ntohl (mp->fib_id));
+
+ s = format (s, "oui %d ", ntohl (mp->oui));
+
+ if (mp->is_ipv6 != 0)
+ s = format (s, "ipv6 ");
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+/* Render a DHCP client configuration request.
+ * NOTE(review): pid is printed without byte-order conversion, unlike the
+ * other u32 fields here — confirm intended wire format. */
+static void *vl_api_dhcp_client_config_t_print
+ (vl_api_dhcp_client_config_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: dhcp_client_config ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ s = format (s, "hostname %s ", mp->hostname);
+
+ s = format (s, "want_dhcp_event %d ", mp->want_dhcp_event);
+
+ s = format (s, "pid %d ", mp->pid);
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+
+/* Render a flow-hash configuration request: one keyword per enabled
+ * hash input field. */
+static void *vl_api_set_ip_flow_hash_t_print
+ (vl_api_set_ip_flow_hash_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: set_ip_flow_hash ");
+
+ s = format (s, "vrf_id %d ", ntohl (mp->vrf_id));
+
+ if (mp->src)
+ s = format (s, "src ");
+
+ if (mp->dst)
+ s = format (s, "dst ");
+
+ if (mp->sport)
+ s = format (s, "sport ");
+
+ if (mp->dport)
+ s = format (s, "dport ");
+
+ if (mp->proto)
+ s = format (s, "proto ");
+
+ if (mp->reverse)
+ s = format (s, "reverse ");
+
+ if (mp->is_ipv6 != 0)
+ s = format (s, "ipv6 ");
+
+ FINISH;
+}
+
+/* Render an IPv6 link-local address assignment request. */
+static void *vl_api_sw_interface_ip6_set_link_local_address_t_print
+ (vl_api_sw_interface_ip6_set_link_local_address_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: sw_interface_ip6_set_link_local_address ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ s = format (s, "%U/%d ", format_ip6_address, mp->address,
+ mp->address_length);
+
+ FINISH;
+}
+
+/* Render an IPv6 ND router-advertisement prefix request. */
+static void *vl_api_sw_interface_ip6nd_ra_prefix_t_print
+ (vl_api_sw_interface_ip6nd_ra_prefix_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: sw_interface_ip6nd_ra_prefix ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ s = format (s, "%U/%d ", format_ip6_address, mp->address,
+ mp->address_length);
+
+ s = format (s, "val_life %d ", ntohl (mp->val_lifetime));
+
+ s = format (s, "pref_life %d ", ntohl (mp->pref_lifetime));
+
+ if (mp->use_default)
+ s = format (s, "def ");
+
+ if (mp->no_advertise)
+ s = format (s, "noadv ");
+
+ if (mp->off_link)
+ s = format (s, "offl ");
+
+ if (mp->no_autoconfig)
+ s = format (s, "noauto ");
+
+ if (mp->no_onlink)
+ s = format (s, "nolink ");
+
+ if (mp->is_no)
+ s = format (s, "isno ");
+
+ FINISH;
+}
+
+/* Render an IPv6 ND router-advertisement configuration request:
+ * timer values first, then one keyword per set boolean flag. */
+static void *vl_api_sw_interface_ip6nd_ra_config_t_print
+ (vl_api_sw_interface_ip6nd_ra_config_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: sw_interface_ip6nd_ra_config ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ s = format (s, "maxint %d ", ntohl (mp->max_interval));
+
+ s = format (s, "minint %d ", ntohl (mp->min_interval));
+
+ s = format (s, "life %d ", ntohl (mp->lifetime));
+
+ s = format (s, "count %d ", ntohl (mp->initial_count));
+
+ s = format (s, "interval %d ", ntohl (mp->initial_interval));
+
+ if (mp->suppress)
+ s = format (s, "suppress ");
+
+ if (mp->managed)
+ s = format (s, "managed ");
+
+ if (mp->other)
+ s = format (s, "other ");
+
+ if (mp->ll_option)
+ s = format (s, "ll ");
+
+ if (mp->send_unicast)
+ s = format (s, "send ");
+
+ if (mp->cease)
+ s = format (s, "cease ");
+
+ if (mp->is_no)
+ s = format (s, "isno ");
+
+ if (mp->default_router)
+ s = format (s, "def ");
+
+ FINISH;
+}
+
+/* Render a set-ARP-neighbor-limit request. */
+static void *vl_api_set_arp_neighbor_limit_t_print
+ (vl_api_set_arp_neighbor_limit_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: set_arp_neighbor_limit ");
+
+ s = format (s, "arp_nbr_limit %d ", ntohl (mp->arp_neighbor_limit));
+
+ if (mp->is_ipv6 != 0)
+ s = format (s, "ipv6 ");
+
+ FINISH;
+}
+
+/* Render an L2 patch (rx->tx cross-connect) add/delete request. */
+static void *vl_api_l2_patch_add_del_t_print
+ (vl_api_l2_patch_add_del_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: l2_patch_add_del ");
+
+ s = format (s, "rx_sw_if_index %d ", ntohl (mp->rx_sw_if_index));
+
+ s = format (s, "tx_sw_if_index
 %d ", ntohl (mp->tx_sw_if_index));
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+/* Render a segment-routing tunnel add/delete request: segment list, tag
+ * list, header flags, and up to four policy-list element roles. */
+static void *vl_api_sr_tunnel_add_del_t_print
+ (vl_api_sr_tunnel_add_del_t * mp, void *handle)
+{
+ u8 *s;
+ ip6_address_t *this_address;
+ int i;
+ u16 flags_host_byte_order;
+ u8 pl_flag;
+
+ s = format (0, "SCRIPT: sr_tunnel_add_del ");
+
+ if (mp->name[0])
+ s = format (s, "name %s ", mp->name);
+
+ s = format (s, "src %U dst %U/%d ", format_ip6_address,
+ (ip6_address_t *) mp->src_address,
+ format_ip6_address,
+ (ip6_address_t *) mp->dst_address, mp->dst_mask_width);
+
+ /* segs_and_tags carries n_segments segment addresses followed by
+ * n_tags tag addresses, back to back. */
+ this_address = (ip6_address_t *) mp->segs_and_tags;
+ for (i = 0; i < mp->n_segments; i++)
+ {
+ s = format (s, "next %U ", format_ip6_address, this_address);
+ this_address++;
+ }
+ for (i = 0; i < mp->n_tags; i++)
+ {
+ s = format (s, "tag %U ", format_ip6_address, this_address);
+ this_address++;
+ }
+
+ flags_host_byte_order = clib_net_to_host_u16 (mp->flags_net_byte_order);
+
+ if (flags_host_byte_order & IP6_SR_HEADER_FLAG_CLEANUP)
+ s = format (s, " clean ");
+
+ if (flags_host_byte_order & IP6_SR_HEADER_FLAG_PROTECTED)
+ s = format (s, "protected ");
+
+ for (i = 1; i <= 4; i++)
+ {
+ pl_flag = ip6_sr_policy_list_flags (flags_host_byte_order, i);
+
+ switch (pl_flag)
+ {
+ case IP6_SR_HEADER_FLAG_PL_ELT_NOT_PRESENT:
+ continue;
+
+ case IP6_SR_HEADER_FLAG_PL_ELT_INGRESS_PE:
+ s = format (s, "InPE %d ", i);
+ break;
+
+ case IP6_SR_HEADER_FLAG_PL_ELT_EGRESS_PE:
+ s = format (s, "EgPE %d ", i);
+ break;
+
+ case IP6_SR_HEADER_FLAG_PL_ELT_ORIG_SRC_ADDR:
+ s = format (s, "OrgSrc %d ", i);
+ break;
+
+ default:
+ clib_warning ("BUG: pl elt %d value %d", i, pl_flag);
+ break;
+ }
+ }
+
+ if (mp->policy_name[0])
+ s = format (s, "policy_name %s ", mp->policy_name);
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+/* Render a segment-routing policy add/delete request. */
+static void *vl_api_sr_policy_add_del_t_print
+ (vl_api_sr_policy_add_del_t * mp, void *handle)
+{
+ u8 *s;
+ int i;
+
+ s = format (0, "SCRIPT:
 sr_policy_add_del ");
+
+ if (mp->name[0])
+ s = format (s, "name %s ", mp->name);
+
+
+ if (mp->tunnel_names[0])
+ {
+ // start deserializing tunnel_names
+ // layout: [count][len0][name0 bytes][len1][name1 bytes]...
+ int num_tunnels = mp->tunnel_names[0]; //number of tunnels
+ u8 *deser_tun_names = mp->tunnel_names;
+ deser_tun_names += 1; //moving along
+
+ u8 *tun_name = 0;
+ int tun_name_len = 0;
+
+ for (i = 0; i < num_tunnels; i++)
+ {
+ tun_name_len = *deser_tun_names;
+ deser_tun_names += 1;
+ vec_resize (tun_name, tun_name_len);
+ memcpy (tun_name, deser_tun_names, tun_name_len);
+ s = format (s, "tunnel %s ", tun_name);
+ deser_tun_names += tun_name_len;
+ // NOTE(review): tun_name is reset without vec_free, so each
+ // iteration leaks the previous vector — confirm and free.
+ tun_name = 0;
+ }
+ }
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+/* Render a segment-routing multicast map add/delete request. */
+static void *vl_api_sr_multicast_map_add_del_t_print
+ (vl_api_sr_multicast_map_add_del_t * mp, void *handle)
+{
+
+ u8 *s = 0;
+ /* int i; */
+
+ s = format (0, "SCRIPT: sr_multicast_map_add_del ");
+
+ if (mp->multicast_address[0])
+ s = format (s, "address %U ", format_ip6_address, &mp->multicast_address);
+
+ if (mp->policy_name[0])
+ s = format (s, "sr-policy %s ", &mp->policy_name);
+
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+
+/* Render a classifier table add/delete request; on add, the mask is
+ * dumped as hex. */
+static void *vl_api_classify_add_del_table_t_print
+ (vl_api_classify_add_del_table_t * mp, void *handle)
+{
+ u8 *s;
+ int i;
+
+ s = format (0, "SCRIPT: classify_add_del_table ");
+
+ if (mp->is_add == 0)
+ {
+ s = format (s, "table %d ", ntohl (mp->table_index));
+ s = format (s, "%s ", mp->del_chain ?
 "del-chain" : "del");
+ }
+ else
+ {
+ s = format (s, "nbuckets %d ", ntohl (mp->nbuckets));
+ s = format (s, "memory_size %d ", ntohl (mp->memory_size));
+ s = format (s, "skip %d ", ntohl (mp->skip_n_vectors));
+ s = format (s, "match %d ", ntohl (mp->match_n_vectors));
+ s = format (s, "next-table %d ", ntohl (mp->next_table_index));
+ s = format (s, "miss-next %d ", ntohl (mp->miss_next_index));
+ s = format (s, "current-data-flag %d ", ntohl (mp->current_data_flag));
+ if (mp->current_data_flag)
+ s = format (s, "current-data-offset %d ",
+ ntohl (mp->current_data_offset));
+ s = format (s, "mask hex ");
+ for (i = 0; i < ntohl (mp->match_n_vectors) * sizeof (u32x4); i++)
+ s = format (s, "%02x", mp->mask[i]);
+ vec_add1 (s, ' ');
+ }
+
+ FINISH;
+}
+
+/* Render a classifier session add/delete request; the match key is
+ * dumped as hex, trimmed to the last non-zero byte. */
+static void *vl_api_classify_add_del_session_t_print
+ (vl_api_classify_add_del_session_t * mp, void *handle)
+{
+ u8 *s;
+ int i, limit = 0;
+
+ s = format (0, "SCRIPT: classify_add_del_session ");
+
+ s = format (s, "table_index %d ", ntohl (mp->table_index));
+ s = format (s, "hit_next_index %d ", ntohl (mp->hit_next_index));
+ s = format (s, "opaque_index %d ", ntohl (mp->opaque_index));
+ s = format (s, "advance %d ", ntohl (mp->advance));
+ s = format (s, "action %d ", mp->action);
+ if (mp->action)
+ s = format (s, "metadata %d ", ntohl (mp->metadata));
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ s = format (s, "match hex ");
+ /* scan back from the end to find the last non-zero match byte */
+ for (i = 5 * sizeof (u32x4) - 1; i > 0; i--)
+ {
+ if (mp->match[i] != 0)
+ {
+ limit = i + 1;
+ break;
+ }
+ }
+
+ for (i = 0; i < limit; i++)
+ s = format (s, "%02x", mp->match[i]);
+
+ FINISH;
+}
+
+/* Render a per-interface IP classifier table binding request. */
+static void *vl_api_classify_set_interface_ip_table_t_print
+ (vl_api_classify_set_interface_ip_table_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: classify_set_interface_ip_table ");
+
+ if (mp->is_ipv6)
+ s = format (s, "ipv6 ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+ s = format (s, "table %d ", ntohl (mp->table_index));
+
+
FINISH; +} + +static void *vl_api_classify_set_interface_l2_tables_t_print + (vl_api_classify_set_interface_l2_tables_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: classify_set_interface_l2_tables "); + + s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index)); + s = format (s, "ip4-table %d ", ntohl (mp->ip4_table_index)); + s = format (s, "ip6-table %d ", ntohl (mp->ip6_table_index)); + s = format (s, "other-table %d ", ntohl (mp->other_table_index)); + s = format (s, "is-input %d ", mp->is_input); + + FINISH; +} + +static void *vl_api_add_node_next_t_print + (vl_api_add_node_next_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: add_node_next "); + + s = format (0, "node %s next %s ", mp->node_name, mp->next_name); + + FINISH; +} + +static void *vl_api_l2tpv3_create_tunnel_t_print + (vl_api_l2tpv3_create_tunnel_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: l2tpv3_create_tunnel "); + + s = format (s, "client_address %U our_address %U ", + format_ip6_address, (ip6_address_t *) (mp->client_address), + format_ip6_address, (ip6_address_t *) (mp->our_address)); + s = format (s, "local_session_id %d ", ntohl (mp->local_session_id)); + s = format (s, "remote_session_id %d ", ntohl (mp->remote_session_id)); + s = format (s, "local_cookie %lld ", + clib_net_to_host_u64 (mp->local_cookie)); + s = format (s, "remote_cookie %lld ", + clib_net_to_host_u64 (mp->remote_cookie)); + if (mp->l2_sublayer_present) + s = format (s, "l2-sublayer-present "); + + FINISH; +} + +static void *vl_api_l2tpv3_set_tunnel_cookies_t_print + (vl_api_l2tpv3_set_tunnel_cookies_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: l2tpv3_set_tunnel_cookies "); + + s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index)); + + s = format (s, "new_local_cookie %llu ", + clib_net_to_host_u64 (mp->new_local_cookie)); + + s = format (s, "new_remote_cookie %llu ", + clib_net_to_host_u64 (mp->new_remote_cookie)); + + FINISH; +} + +static void 
 *vl_api_l2tpv3_interface_enable_disable_t_print
+ (vl_api_l2tpv3_interface_enable_disable_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: l2tpv3_interface_enable_disable ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ if (mp->enable_disable == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+/* Render an L2TPv3 session-lookup-key selection request. */
+static void *vl_api_l2tpv3_set_lookup_key_t_print
+ (vl_api_l2tpv3_set_lookup_key_t * mp, void *handle)
+{
+ u8 *s;
+ char *str = "unknown";
+
+ s = format (0, "SCRIPT: l2tpv3_set_lookup_key ");
+
+ switch (mp->key)
+ {
+ case L2T_LOOKUP_SRC_ADDRESS:
+ str = "lookup_v6_src";
+ break;
+ case L2T_LOOKUP_DST_ADDRESS:
+ str = "lookup_v6_dst";
+ break;
+ case L2T_LOOKUP_SESSION_ID:
+ str = "lookup_session_id";
+ break;
+ default:
+ break;
+ }
+
+ s = format (s, "%s ", str);
+
+ FINISH;
+}
+
+/* Render an L2TPv3 tunnel dump request (no arguments). */
+static void *vl_api_sw_if_l2tpv3_tunnel_dump_t_print
+ (vl_api_sw_if_l2tpv3_tunnel_dump_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: sw_if_l2tpv3_tunnel_dump ");
+
+ FINISH;
+}
+
+/* Render a VXLAN tunnel add/delete request; a multicast destination is
+ * labelled "group" and adds the mcast sw_if_index. */
+static void *vl_api_vxlan_add_del_tunnel_t_print
+ (vl_api_vxlan_add_del_tunnel_t * mp, void *handle)
+{
+ u8 *s;
+ s = format (0, "SCRIPT: vxlan_add_del_tunnel ");
+
+ ip46_address_t src, dst;
+
+ ip46_from_addr_buf (mp->is_ipv6, mp->dst_address, &dst);
+ ip46_from_addr_buf (mp->is_ipv6, mp->src_address, &src);
+
+ u8 is_grp = ip46_address_is_multicast (&dst);
+ char *dst_name = is_grp ?
"group" : "dst"; + + s = format (s, "src %U ", format_ip46_address, &src, IP46_TYPE_ANY); + s = format (s, "%s %U ", dst_name, format_ip46_address, + &dst, IP46_TYPE_ANY); + + if (is_grp) + s = format (s, "mcast_sw_if_index %d ", ntohl (mp->mcast_sw_if_index)); + + if (mp->encap_vrf_id) + s = format (s, "encap-vrf-id %d ", ntohl (mp->encap_vrf_id)); + + s = format (s, "decap-next %d ", ntohl (mp->decap_next_index)); + + s = format (s, "vni %d ", ntohl (mp->vni)); + + if (mp->is_add == 0) + s = format (s, "del "); + + if (mp->is_add == 0) + s = format (s, "del "); + + FINISH; +} + +static void *vl_api_vxlan_tunnel_dump_t_print + (vl_api_vxlan_tunnel_dump_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: vxlan_tunnel_dump "); + + s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index)); + + FINISH; +} + +static void *vl_api_gre_add_del_tunnel_t_print + (vl_api_gre_add_del_tunnel_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: gre_add_del_tunnel "); + + s = format (s, "dst %U ", format_ip46_address, + (ip46_address_t *) & (mp->dst_address), + mp->is_ipv6 ? IP46_TYPE_IP6 : IP46_TYPE_IP4); + + s = format (s, "src %U ", format_ip46_address, + (ip46_address_t *) & (mp->src_address), + mp->is_ipv6 ? 
 IP46_TYPE_IP6 : IP46_TYPE_IP4);
+
+ if (mp->teb)
+ s = format (s, "teb ");
+
+ if (mp->outer_fib_id)
+ s = format (s, "outer-fib-id %d ", ntohl (mp->outer_fib_id));
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+/* Render a GRE tunnel dump request. */
+static void *vl_api_gre_tunnel_dump_t_print
+ (vl_api_gre_tunnel_dump_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: gre_tunnel_dump ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ FINISH;
+}
+
+/* Render an L2 FIB clear-table request (no arguments). */
+static void *vl_api_l2_fib_clear_table_t_print
+ (vl_api_l2_fib_clear_table_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: l2_fib_clear_table ");
+
+ FINISH;
+}
+
+/* Render an EFP filter enable/disable request. */
+static void *vl_api_l2_interface_efp_filter_t_print
+ (vl_api_l2_interface_efp_filter_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: l2_interface_efp_filter ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+ if (mp->enable_disable)
+ s = format (s, "enable ");
+ else
+ s = format (s, "disable ");
+
+ FINISH;
+}
+
+/* Render a VLAN tag-rewrite configuration request. */
+static void *vl_api_l2_interface_vlan_tag_rewrite_t_print
+ (vl_api_l2_interface_vlan_tag_rewrite_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: l2_interface_vlan_tag_rewrite ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+ s = format (s, "vtr_op %d ", ntohl (mp->vtr_op));
+ s = format (s, "push_dot1q %d ", ntohl (mp->push_dot1q));
+ s = format (s, "tag1 %d ", ntohl (mp->tag1));
+ s = format (s, "tag2 %d ", ntohl (mp->tag2));
+
+ FINISH;
+}
+
+/* Render a vhost-user interface create request. */
+static void *vl_api_create_vhost_user_if_t_print
+ (vl_api_create_vhost_user_if_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: create_vhost_user_if ");
+
+ s = format (s, "socket %s ", mp->sock_filename);
+ if (mp->is_server)
+ s = format (s, "server ");
+ if (mp->renumber)
+ s = format (s, "renumber %d ", ntohl (mp->custom_dev_instance));
+ if (mp->tag[0])
+ s = format (s, "tag %s", mp->tag);
+
+ FINISH;
+}
+
+/* Render a vhost-user interface modify request. */
+static void *vl_api_modify_vhost_user_if_t_print
+ (vl_api_modify_vhost_user_if_t * mp,
void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: modify_vhost_user_if "); + + s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index)); + s = format (s, "socket %s ", mp->sock_filename); + if (mp->is_server) + s = format (s, "server "); + if (mp->renumber) + s = format (s, "renumber %d ", ntohl (mp->custom_dev_instance)); + + FINISH; +} + +static void *vl_api_delete_vhost_user_if_t_print + (vl_api_delete_vhost_user_if_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: delete_vhost_user_if "); + s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index)); + + FINISH; +} + +static void *vl_api_sw_interface_vhost_user_dump_t_print + (vl_api_sw_interface_vhost_user_dump_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: sw_interface_vhost_user_dump "); + + FINISH; +} + +static void *vl_api_sw_interface_dump_t_print + (vl_api_sw_interface_dump_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: sw_interface_dump "); + + if (mp->name_filter_valid) + s = format (s, "name_filter %s ", mp->name_filter); + else + s = format (s, "all "); + + FINISH; +} + +static void *vl_api_l2_fib_table_dump_t_print + (vl_api_l2_fib_table_dump_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: l2_fib_table_dump "); + + s = format (s, "bd_id %d ", ntohl (mp->bd_id)); + + FINISH; +} + +static void *vl_api_control_ping_t_print + (vl_api_control_ping_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: control_ping "); + + FINISH; +} + +static void *vl_api_want_interface_events_t_print + (vl_api_want_interface_events_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: want_interface_events pid %d enable %d ", + ntohl (mp->pid), ntohl (mp->enable_disable)); + + FINISH; +} + +static void *vl_api_cli_request_t_print + (vl_api_cli_request_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: cli_request "); + + FINISH; +} + +static void *vl_api_cli_inband_t_print + (vl_api_cli_inband_t * mp, void *handle) +{ + u8 *s; + + s = 
format (0, "SCRIPT: cli_inband "); + + FINISH; +} + +static void *vl_api_memclnt_create_t_print + (vl_api_memclnt_create_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: memclnt_create name %s ", mp->name); + + FINISH; +} + +static void *vl_api_show_version_t_print + (vl_api_show_version_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: show_version "); + + FINISH; +} + +static void *vl_api_vxlan_gpe_add_del_tunnel_t_print + (vl_api_vxlan_gpe_add_del_tunnel_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: vxlan_gpe_add_del_tunnel "); + + s = format (s, "local %U ", format_ip46_address, &mp->local, mp->is_ipv6); + + s = format (s, "remote %U ", format_ip46_address, &mp->remote, mp->is_ipv6); + + s = format (s, "protocol %d ", ntohl (mp->protocol)); + + s = format (s, "vni %d ", ntohl (mp->vni)); + + if (mp->is_add == 0) + s = format (s, "del "); + + if (mp->encap_vrf_id) + s = format (s, "encap-vrf-id %d ", ntohl (mp->encap_vrf_id)); + + if (mp->decap_vrf_id) + s = format (s, "decap-vrf-id %d ", ntohl (mp->decap_vrf_id)); + + FINISH; +} + +static void *vl_api_vxlan_gpe_tunnel_dump_t_print + (vl_api_vxlan_gpe_tunnel_dump_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: vxlan_gpe_tunnel_dump "); + + s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index)); + + FINISH; +} + +static void *vl_api_interface_name_renumber_t_print + (vl_api_interface_name_renumber_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: interface_renumber "); + + s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index)); + + s = format (s, "new_show_dev_instance %d ", + ntohl (mp->new_show_dev_instance)); + + FINISH; +} + +static void *vl_api_want_ip4_arp_events_t_print + (vl_api_want_ip4_arp_events_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: want_ip4_arp_events "); + s = format (s, "pid %d address %U ", mp->pid, + format_ip4_address, &mp->address); + if (mp->enable_disable == 0) + s = format (s, "del "); + + 
FINISH; +} + +static void *vl_api_want_ip6_nd_events_t_print + (vl_api_want_ip6_nd_events_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: want_ip6_nd_events "); + s = format (s, "pid %d address %U ", mp->pid, + format_ip6_address, mp->address); + if (mp->enable_disable == 0) + s = format (s, "del "); + + FINISH; +} + +static void *vl_api_input_acl_set_interface_t_print + (vl_api_input_acl_set_interface_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: input_acl_set_interface "); + + s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index)); + s = format (s, "ip4-table %d ", ntohl (mp->ip4_table_index)); + s = format (s, "ip6-table %d ", ntohl (mp->ip6_table_index)); + s = format (s, "l2-table %d ", ntohl (mp->l2_table_index)); + + if (mp->is_add == 0) + s = format (s, "del "); + + FINISH; +} + +static void *vl_api_ip_address_dump_t_print + (vl_api_ip_address_dump_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: ip6_address_dump "); + s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index)); + s = format (s, "is_ipv6 %d ", mp->is_ipv6 != 0); + + FINISH; +} + +static void * +vl_api_ip_dump_t_print (vl_api_ip_dump_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: ip_dump "); + s = format (s, "is_ipv6 %d ", mp->is_ipv6 != 0); + + FINISH; +} + +static void *vl_api_cop_interface_enable_disable_t_print + (vl_api_cop_interface_enable_disable_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: cop_interface_enable_disable "); + s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index)); + if (mp->enable_disable) + s = format (s, "enable "); + else + s = format (s, "disable "); + + FINISH; +} + +static void *vl_api_cop_whitelist_enable_disable_t_print + (vl_api_cop_whitelist_enable_disable_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: cop_whitelist_enable_disable "); + s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index)); + s = format (s, "fib-id %d ", ntohl (mp->fib_id)); + if (mp->ip4) + 
s = format (s, "ip4 "); + if (mp->ip6) + s = format (s, "ip6 "); + if (mp->default_cop) + s = format (s, "default "); + + FINISH; +} + +static void *vl_api_af_packet_create_t_print + (vl_api_af_packet_create_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: af_packet_create "); + s = format (s, "host_if_name %s ", mp->host_if_name); + if (mp->use_random_hw_addr) + s = format (s, "hw_addr random "); + else + s = format (s, "hw_addr %U ", format_ethernet_address, mp->hw_addr); + + FINISH; +} + +static void *vl_api_af_packet_delete_t_print + (vl_api_af_packet_delete_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: af_packet_delete "); + s = format (s, "host_if_name %s ", mp->host_if_name); + + FINISH; +} + +static u8 * +format_policer_action (u8 * s, va_list * va) +{ + u32 action = va_arg (*va, u32); + u32 dscp = va_arg (*va, u32); + char *t = 0; + + if (action == SSE2_QOS_ACTION_DROP) + s = format (s, "drop"); + else if (action == SSE2_QOS_ACTION_TRANSMIT) + s = format (s, "transmit"); + else if (action == SSE2_QOS_ACTION_MARK_AND_TRANSMIT) + { + s = format (s, "mark-and-transmit "); + switch (dscp) + { +#define _(v,f,str) case VNET_DSCP_##f: t = str; break; + foreach_vnet_dscp +#undef _ + default: + break; + } + s = format (s, "%s", t); + } + + return s; +} + +static void *vl_api_policer_add_del_t_print + (vl_api_policer_add_del_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: policer_add_del "); + s = format (s, "name %s ", mp->name); + s = format (s, "cir %d ", mp->cir); + s = format (s, "eir %d ", mp->eir); + s = format (s, "cb %d ", mp->cb); + s = format (s, "eb %d ", mp->eb); + + switch (mp->rate_type) + { + case SSE2_QOS_RATE_KBPS: + s = format (s, "rate_type kbps "); + break; + case SSE2_QOS_RATE_PPS: + s = format (s, "rate_type pps "); + break; + default: + break; + } + + switch (mp->round_type) + { + case SSE2_QOS_ROUND_TO_CLOSEST: + s = format (s, "round_type closest "); + break; + case SSE2_QOS_ROUND_TO_UP: + s = 
format (s, "round_type up "); + break; + case SSE2_QOS_ROUND_TO_DOWN: + s = format (s, "round_type down "); + break; + default: + break; + } + + switch (mp->type) + { + case SSE2_QOS_POLICER_TYPE_1R2C: + s = format (s, "type 1r2c "); + break; + case SSE2_QOS_POLICER_TYPE_1R3C_RFC_2697: + s = format (s, "type 1r3c "); + break; + case SSE2_QOS_POLICER_TYPE_2R3C_RFC_2698: + s = format (s, "type 2r3c-2698 "); + break; + case SSE2_QOS_POLICER_TYPE_2R3C_RFC_4115: + s = format (s, "type 2r3c-4115 "); + break; + case SSE2_QOS_POLICER_TYPE_2R3C_RFC_MEF5CF1: + s = format (s, "type 2r3c-mef5cf1 "); + break; + default: + break; + } + + s = format (s, "conform_action %U ", format_policer_action, + mp->conform_action_type, mp->conform_dscp); + s = format (s, "exceed_action %U ", format_policer_action, + mp->exceed_action_type, mp->exceed_dscp); + s = format (s, "violate_action %U ", format_policer_action, + mp->violate_action_type, mp->violate_dscp); + + if (mp->color_aware) + s = format (s, "color-aware "); + if (mp->is_add == 0) + s = format (s, "del "); + + FINISH; +} + +static void *vl_api_policer_dump_t_print + (vl_api_policer_dump_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: policer_dump "); + if (mp->match_name_valid) + s = format (s, "name %s ", mp->match_name); + + FINISH; +} + +static void *vl_api_policer_classify_set_interface_t_print + (vl_api_policer_classify_set_interface_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: policer_classify_set_interface "); + s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index)); + if (mp->ip4_table_index != ~0) + s = format (s, "ip4-table %d ", ntohl (mp->ip4_table_index)); + if (mp->ip6_table_index != ~0) + s = format (s, "ip6-table %d ", ntohl (mp->ip6_table_index)); + if (mp->l2_table_index != ~0) + s = format (s, "l2-table %d ", ntohl (mp->l2_table_index)); + if (mp->is_add == 0) + s = format (s, "del "); + + FINISH; +} + +static void *vl_api_policer_classify_dump_t_print + 
(vl_api_policer_classify_dump_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: policer_classify_dump "); + switch (mp->type) + { + case POLICER_CLASSIFY_TABLE_IP4: + s = format (s, "type ip4 "); + break; + case POLICER_CLASSIFY_TABLE_IP6: + s = format (s, "type ip6 "); + break; + case POLICER_CLASSIFY_TABLE_L2: + s = format (s, "type l2 "); + break; + default: + break; + } + + FINISH; +} + +static void *vl_api_sw_interface_clear_stats_t_print + (vl_api_sw_interface_clear_stats_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: sw_interface_clear_stats "); + if (mp->sw_if_index != ~0) + s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index)); + + FINISH; +} + +static void *vl_api_mpls_tunnel_dump_t_print + (vl_api_mpls_tunnel_dump_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: mpls_tunnel_dump "); + + s = format (s, "tunnel_index %d ", ntohl (mp->tunnel_index)); + + FINISH; +} + +static void *vl_api_mpls_fib_dump_t_print + (vl_api_mpls_fib_dump_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: mpls_fib_decap_dump "); + + FINISH; +} + +static void *vl_api_ip_fib_dump_t_print + (vl_api_ip_fib_dump_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: ip_fib_dump "); + + FINISH; +} + +static void *vl_api_ip6_fib_dump_t_print + (vl_api_ip6_fib_dump_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: ip6_fib_dump "); + + FINISH; +} + +static void *vl_api_classify_table_ids_t_print + (vl_api_classify_table_ids_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: classify_table_ids "); + + FINISH; +} + +static void *vl_api_classify_table_by_interface_t_print + (vl_api_classify_table_by_interface_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: classify_table_by_interface "); + if (mp->sw_if_index != ~0) + s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index)); + + FINISH; +} + +static void *vl_api_classify_table_info_t_print + (vl_api_classify_table_info_t * mp, void *handle) 
+{ + u8 *s; + + s = format (0, "SCRIPT: classify_table_info "); + if (mp->table_id != ~0) + s = format (s, "table_id %d ", ntohl (mp->table_id)); + + FINISH; +} + +static void *vl_api_classify_session_dump_t_print + (vl_api_classify_session_dump_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: classify_session_dump "); + if (mp->table_id != ~0) + s = format (s, "table_id %d ", ntohl (mp->table_id)); + + FINISH; +} + +static void *vl_api_set_ipfix_exporter_t_print + (vl_api_set_ipfix_exporter_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: set_ipfix_exporter "); + + s = format (s, "collector-address %U ", format_ip4_address, + (ip4_address_t *) mp->collector_address); + s = format (s, "collector-port %d ", ntohs (mp->collector_port)); + s = format (s, "src-address %U ", format_ip4_address, + (ip4_address_t *) mp->src_address); + s = format (s, "vrf-id %d ", ntohl (mp->vrf_id)); + s = format (s, "path-mtu %d ", ntohl (mp->path_mtu)); + s = format (s, "template-interval %d ", ntohl (mp->template_interval)); + s = format (s, "udp-checksum %d ", mp->udp_checksum); + + FINISH; +} + +static void *vl_api_ipfix_exporter_dump_t_print + (vl_api_ipfix_exporter_dump_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: ipfix_exporter_dump "); + + FINISH; +} + +static void *vl_api_set_ipfix_classify_stream_t_print + (vl_api_set_ipfix_classify_stream_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: set_ipfix_classify_stream "); + + s = format (s, "domain-id %d ", ntohl (mp->domain_id)); + s = format (s, "src-port %d ", ntohs (mp->src_port)); + + FINISH; +} + +static void *vl_api_ipfix_classify_stream_dump_t_print + (vl_api_ipfix_classify_stream_dump_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: ipfix_classify_stream_dump "); + + FINISH; +} + +static void *vl_api_ipfix_classify_table_add_del_t_print + (vl_api_ipfix_classify_table_add_del_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: 
ipfix_classify_table_add_del "); + + s = format (s, "table-id %d ", ntohl (mp->table_id)); + s = format (s, "ip-version %d ", mp->ip_version); + s = format (s, "transport-protocol %d ", mp->transport_protocol); + + FINISH; +} + +static void *vl_api_ipfix_classify_table_dump_t_print + (vl_api_ipfix_classify_table_dump_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: ipfix_classify_table_dump "); + + FINISH; +} + +static void *vl_api_sw_interface_span_enable_disable_t_print + (vl_api_sw_interface_span_enable_disable_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: sw_interface_span_enable_disable "); + s = format (s, "src_sw_if_index %u ", ntohl (mp->sw_if_index_from)); + s = format (s, "dst_sw_if_index %u ", ntohl (mp->sw_if_index_to)); + + switch (mp->state) + { + case 0: + s = format (s, "disable "); + break; + case 1: + s = format (s, "rx "); + break; + case 2: + s = format (s, "tx "); + break; + case 3: + default: + s = format (s, "both "); + break; + } + + FINISH; +} + +static void * +vl_api_sw_interface_span_dump_t_print (vl_api_sw_interface_span_dump_t * mp, + void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: sw_interface_span_dump "); + + FINISH; +} + +static void *vl_api_get_next_index_t_print + (vl_api_get_next_index_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: get_next_index "); + s = format (s, "node-name %s ", mp->node_name); + s = format (s, "next-node-name %s ", mp->next_name); + + FINISH; +} + +static void *vl_api_pg_create_interface_t_print + (vl_api_pg_create_interface_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: pg_create_interface "); + s = format (0, "if_id %d", ntohl (mp->interface_id)); + + FINISH; +} + +static void *vl_api_pg_capture_t_print + (vl_api_pg_capture_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: pg_capture "); + s = format (0, "if_id %d ", ntohl (mp->interface_id)); + s = format (0, "pcap %s", mp->pcap_file_name); + if (mp->count != ~0) + s = format 
(s, "count %d ", ntohl (mp->count)); + if (!mp->is_enabled) + s = format (s, "disable"); + + FINISH; +} + +static void *vl_api_pg_enable_disable_t_print + (vl_api_pg_enable_disable_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: pg_enable_disable "); + if (ntohl (mp->stream_name_length) > 0) + s = format (s, "stream %s", mp->stream_name); + if (!mp->is_enabled) + s = format (s, "disable"); + + FINISH; +} + +static void *vl_api_ip_source_and_port_range_check_add_del_t_print + (vl_api_ip_source_and_port_range_check_add_del_t * mp, void *handle) +{ + u8 *s; + int i; + + s = format (0, "SCRIPT: ip_source_and_port_range_check_add_del "); + if (mp->is_ipv6) + s = format (s, "%U/%d ", format_ip6_address, mp->address, + mp->mask_length); + else + s = format (s, "%U/%d ", format_ip4_address, mp->address, + mp->mask_length); + + for (i = 0; i < mp->number_of_ranges; i++) + { + s = format (s, "range %d - %d ", mp->low_ports[i], mp->high_ports[i]); + } + + s = format (s, "vrf %d ", ntohl (mp->vrf_id)); + + if (mp->is_add == 0) + s = format (s, "del "); + + FINISH; +} + +static void *vl_api_ip_source_and_port_range_check_interface_add_del_t_print + (vl_api_ip_source_and_port_range_check_interface_add_del_t * mp, + void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: ip_source_and_port_range_check_interface_add_del "); + + s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index)); + + if (mp->tcp_out_vrf_id != ~0) + s = format (s, "tcp-out-vrf %d ", ntohl (mp->tcp_out_vrf_id)); + + if (mp->udp_out_vrf_id != ~0) + s = format (s, "udp-out-vrf %d ", ntohl (mp->udp_out_vrf_id)); + + if (mp->tcp_in_vrf_id != ~0) + s = format (s, "tcp-in-vrf %d ", ntohl (mp->tcp_in_vrf_id)); + + if (mp->udp_in_vrf_id != ~0) + s = format (s, "udp-in-vrf %d ", ntohl (mp->udp_in_vrf_id)); + + if (mp->is_add == 0) + s = format (s, "del "); + + FINISH; +} + +static void *vl_api_lisp_enable_disable_t_print + (vl_api_lisp_enable_disable_t * mp, void *handle) +{ + u8 *s; + + s = format (0, 
"SCRIPT: lisp_enable_disable %s", + mp->is_en ? "enable" : "disable"); + + FINISH; +} + +static void *vl_api_lisp_gpe_add_del_iface_t_print + (vl_api_lisp_gpe_add_del_iface_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: lisp_gpe_add_del_iface "); + + s = format (s, "%s ", mp->is_add ? "up" : "down"); + s = format (s, "vni %d ", mp->vni); + s = format (s, "%s %d ", mp->is_l2 ? "bd_id" : "table_id", mp->dp_table); + + FINISH; +} + +static void *vl_api_lisp_pitr_set_locator_set_t_print + (vl_api_lisp_pitr_set_locator_set_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: lisp_pitr_set_locator_set "); + + if (mp->is_add) + s = format (s, "locator-set %s ", mp->ls_name); + else + s = format (s, "del"); + + FINISH; +} + +static u8 * +format_lisp_flat_eid (u8 * s, va_list * args) +{ + u32 type = va_arg (*args, u32); + u8 *eid = va_arg (*args, u8 *); + u32 eid_len = va_arg (*args, u32); + + switch (type) + { + case 0: + return format (s, "%U/%d", format_ip4_address, eid, eid_len); + case 1: + return format (s, "%U/%d", format_ip6_address, eid, eid_len); + case 3: + return format (s, "%U", format_ethernet_address, eid); + } + return 0; +} + +/** Used for transferring locators via VPP API */ +typedef CLIB_PACKED (struct + { + u8 is_ip4; + /**< is locator an IPv4 address */ + u8 priority; + /**< locator priority */ + u8 weight; + /**< locator weight */ + u8 addr[16]; + /**< IPv4/IPv6 address */ + }) rloc_t; + +static u8 * +format_rloc (u8 * s, va_list * args) +{ + rloc_t *rloc = va_arg (*args, rloc_t *); + + if (rloc->is_ip4) + s = format (s, "%U ", format_ip4_address, rloc->addr); + else + s = format (s, "%U ", format_ip6_address, rloc->addr); + + s = format (s, "p %d w %d", rloc->priority, rloc->weight); + + return s; +} + +static void *vl_api_lisp_add_del_remote_mapping_t_print + (vl_api_lisp_add_del_remote_mapping_t * mp, void *handle) +{ + u8 *s; + u32 i, rloc_num = 0; + + s = format (0, "SCRIPT: lisp_add_del_remote_mapping "); + + if 
(mp->del_all) + s = format (s, "del-all "); + + s = format (s, "%s ", mp->is_add ? "add" : "del"); + s = format (s, "vni %d ", clib_net_to_host_u32 (mp->vni)); + + s = format (s, "eid %U ", format_lisp_flat_eid, + mp->eid_type, mp->eid, mp->eid_len); + + if (mp->is_src_dst) + { + s = format (s, "seid %U ", format_lisp_flat_eid, + mp->eid_type, mp->seid, mp->seid_len); + } + + rloc_num = clib_net_to_host_u32 (mp->rloc_num); + + if (0 == rloc_num) + s = format (s, "action %d", mp->action); + else + { + rloc_t *rloc = (rloc_t *) mp->rlocs; + for (i = 0; i < rloc_num; i++) + s = format (s, "%U ", format_rloc, &rloc[i]); + } + + FINISH; +} + +static void *vl_api_lisp_add_del_adjacency_t_print + (vl_api_lisp_add_del_adjacency_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: lisp_add_del_adjacency "); + + s = format (s, "%s ", mp->is_add ? "add" : "del"); + s = format (s, "vni %d ", clib_net_to_host_u32 (mp->vni)); + s = format (s, "reid %U leid %U ", + format_lisp_flat_eid, mp->eid_type, mp->reid, mp->reid_len, + format_lisp_flat_eid, mp->eid_type, mp->leid, mp->leid_len); + + FINISH; +} + +static void *vl_api_lisp_add_del_map_request_itr_rlocs_t_print + (vl_api_lisp_add_del_map_request_itr_rlocs_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: lisp_add_del_map_request_itr_rlocs "); + + if (mp->is_add) + s = format (s, "%s", mp->locator_set_name); + else + s = format (s, "del"); + + FINISH; +} + +static void *vl_api_lisp_eid_table_add_del_map_t_print + (vl_api_lisp_eid_table_add_del_map_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: lisp_eid_table_add_del_map "); + + if (!mp->is_add) + s = format (s, "del "); + + s = format (s, "vni %d ", clib_net_to_host_u32 (mp->vni)); + s = format (s, "%s %d ", + mp->is_l2 ? 
"bd_index" : "vrf", + clib_net_to_host_u32 (mp->dp_table)); + FINISH; +} + +static void *vl_api_lisp_add_del_local_eid_t_print + (vl_api_lisp_add_del_local_eid_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: lisp_add_del_local_eid "); + + if (!mp->is_add) + s = format (s, "del "); + + s = format (s, "vni %d ", clib_net_to_host_u32 (mp->vni)); + s = format (s, "eid %U ", format_lisp_flat_eid, mp->eid_type, mp->eid, + mp->prefix_len); + s = format (s, "locator-set %s ", mp->locator_set_name); + if (*mp->key) + { + u32 key_id = mp->key_id; + s = format (s, "key-id %U", format_hmac_key_id, key_id); + s = format (s, "secret-key %s", mp->key); + } + FINISH; +} + +static void *vl_api_lisp_gpe_add_del_fwd_entry_t_print + (vl_api_lisp_gpe_add_del_fwd_entry_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: lisp_gpe_add_del_fwd_entry TODO"); + + FINISH; +} + +static void *vl_api_lisp_add_del_map_resolver_t_print + (vl_api_lisp_add_del_map_resolver_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: lisp_add_del_map_resolver "); + + if (!mp->is_add) + s = format (s, "del "); + + if (mp->is_ipv6) + s = format (s, "%U ", format_ip6_address, mp->ip_address); + else + s = format (s, "%U ", format_ip4_address, mp->ip_address); + + FINISH; +} + +static void *vl_api_lisp_gpe_enable_disable_t_print + (vl_api_lisp_gpe_enable_disable_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: lisp_gpe_enable_disable "); + + s = format (s, "%s ", mp->is_en ? 
"enable" : "disable"); + + FINISH; +} + +typedef CLIB_PACKED (struct + { + u32 sw_if_index; + /**< locator sw_if_index */ + u8 priority; + /**< locator priority */ + u8 weight; + /**< locator weight */ + }) ls_locator_t; + +static u8 * +format_locator (u8 * s, va_list * args) +{ + ls_locator_t *l = va_arg (*args, ls_locator_t *); + + return format (s, "sw_if_index %d p %d w %d", + l->sw_if_index, l->priority, l->weight); +} + +static void *vl_api_lisp_add_del_locator_set_t_print + (vl_api_lisp_add_del_locator_set_t * mp, void *handle) +{ + u8 *s; + u32 loc_num = 0, i; + ls_locator_t *locs; + + s = format (0, "SCRIPT: lisp_add_del_locator_set "); + + if (!mp->is_add) + s = format (s, "del "); + + s = format (s, "locator-set %s ", mp->locator_set_name); + + loc_num = clib_net_to_host_u32 (mp->locator_num); + locs = (ls_locator_t *) mp->locators; + + for (i = 0; i < loc_num; i++) + s = format (s, "%U ", format_locator, &locs[i]); + + FINISH; +} + +static void *vl_api_lisp_add_del_locator_t_print + (vl_api_lisp_add_del_locator_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: lisp_add_del_locator "); + + if (!mp->is_add) + s = format (s, "del "); + + s = format (s, "locator-set %s ", mp->locator_set_name); + s = format (s, "sw_if_index %d ", mp->sw_if_index); + s = format (s, "p %d w %d ", mp->priority, mp->weight); + + FINISH; +} + +static void *vl_api_lisp_locator_set_dump_t_print + (vl_api_lisp_locator_set_dump_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: lisp_locator_set_dump "); + if (mp->filter == 1) + s = format (s, "local"); + else if (mp->filter == 2) + s = format (s, "remote"); + + FINISH; +} + +static void *vl_api_lisp_locator_dump_t_print + (vl_api_lisp_locator_dump_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: lisp_locator_dump "); + if (mp->is_index_set) + s = format (s, "ls_index %d", clib_net_to_host_u32 (mp->ls_index)); + else + s = format (s, "ls_name %s", mp->ls_name); + + FINISH; +} + +static void 
*vl_api_lisp_map_request_mode_t_print + (vl_api_lisp_map_request_mode_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: lisp_map_request_mode "); + + switch (mp->mode) + { + case 0: + s = format (s, "dst-only"); + break; + case 1: + s = format (s, "src-dst"); + default: + break; + } + + FINISH; +} + +static void *vl_api_lisp_eid_table_dump_t_print + (vl_api_lisp_eid_table_dump_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: lisp_eid_table_dump "); + + if (mp->eid_set) + { + s = format (s, "vni %d ", clib_net_to_host_u32 (mp->vni)); + s = format (s, "eid %U ", format_lisp_flat_eid, mp->eid_type, + mp->eid, mp->prefix_length); + switch (mp->filter) + { + case 1: + s = format (s, "local "); + break; + case 2: + s = format (s, "remote "); + break; + } + } + + FINISH; +} + +static void *vl_api_lisp_rloc_probe_enable_disable_t_print + (vl_api_lisp_rloc_probe_enable_disable_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: lisp_rloc_probe_enable_disable "); + if (mp->is_enabled) + s = format (s, "enable"); + else + s = format (s, "disable"); + + FINISH; +} + +static void *vl_api_lisp_map_register_enable_disable_t_print + (vl_api_lisp_map_register_enable_disable_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: lisp_map_register_enable_disable "); + if (mp->is_enabled) + s = format (s, "enable"); + else + s = format (s, "disable"); + + FINISH; +} + +static void *vl_api_lisp_adjacencies_get_t_print + (vl_api_lisp_adjacencies_get_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: lisp_adjacencies_get "); + s = format (s, "vni %d", clib_net_to_host_u32 (mp->vni)); + + FINISH; +} + +static void *vl_api_lisp_eid_table_map_dump_t_print + (vl_api_lisp_eid_table_map_dump_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: lisp_eid_table_map_dump "); + + if (mp->is_l2) + s = format (s, "l2"); + else + s = format (s, "l3"); + + FINISH; +} + +static void *vl_api_ipsec_gre_add_del_tunnel_t_print + 
(vl_api_ipsec_gre_add_del_tunnel_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: ipsec_gre_add_del_tunnel "); + + s = format (s, "dst %U ", format_ip4_address, + (ip4_address_t *) & (mp->dst_address)); + + s = format (s, "src %U ", format_ip4_address, + (ip4_address_t *) & (mp->src_address)); + + s = format (s, "local_sa %d ", ntohl (mp->local_sa_id)); + + s = format (s, "remote_sa %d ", ntohl (mp->remote_sa_id)); + + if (mp->is_add == 0) + s = format (s, "del "); + + FINISH; +} + +static void *vl_api_ipsec_gre_tunnel_dump_t_print + (vl_api_ipsec_gre_tunnel_dump_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: ipsec_gre_tunnel_dump "); + + if (mp->sw_if_index != ~0) + s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index)); + + FINISH; +} + +static void *vl_api_l2_interface_pbb_tag_rewrite_t_print + (vl_api_l2_interface_pbb_tag_rewrite_t * mp, void *handle) +{ + u8 *s; + u32 vtr_op = ntohl (mp->vtr_op); + + s = format (0, "SCRIPT: l2_interface_pbb_tag_rewrite "); + + s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index)); + s = format (s, "vtr_op %d ", vtr_op); + if (vtr_op != L2_VTR_DISABLED && vtr_op != L2_VTR_POP_2) + { + if (vtr_op == L2_VTR_TRANSLATE_2_2) + s = format (s, "%d ", ntohs (mp->outer_tag)); + s = format (s, "dmac %U ", format_ethernet_address, &mp->b_dmac); + s = format (s, "smac %U ", format_ethernet_address, &mp->b_smac); + s = format (s, "sid %d ", ntohl (mp->i_sid)); + s = format (s, "vlanid %d ", ntohs (mp->b_vlanid)); + } + + FINISH; +} + +static void *vl_api_flow_classify_set_interface_t_print + (vl_api_flow_classify_set_interface_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: flow_classify_set_interface "); + s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index)); + if (mp->ip4_table_index != ~0) + s = format (s, "ip4-table %d ", ntohl (mp->ip4_table_index)); + if (mp->ip6_table_index != ~0) + s = format (s, "ip6-table %d ", ntohl (mp->ip6_table_index)); + if (mp->is_add == 0) + s = 
format (s, "del "); + + FINISH; +} + +static void * +vl_api_punt_t_print (vl_api_punt_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: punt "); + + if (mp->ipv != (u8) ~ 0) + s = format (s, "ip %d ", mp->ipv); + + s = format (s, "protocol %d ", mp->l4_protocol); + + if (mp->l4_port != (u16) ~ 0) + s = format (s, "port %d ", ntohs (mp->l4_port)); + + if (!mp->is_add) + s = format (s, "del "); + + FINISH; +} + +static void *vl_api_flow_classify_dump_t_print + (vl_api_flow_classify_dump_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: flow_classify_dump "); + switch (mp->type) + { + case FLOW_CLASSIFY_TABLE_IP4: + s = format (s, "type ip4 "); + break; + case FLOW_CLASSIFY_TABLE_IP6: + s = format (s, "type ip6 "); + break; + default: + break; + } + + FINISH; +} + +static void *vl_api_get_first_msg_id_t_print + (vl_api_get_first_msg_id_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: get_first_msg_id %s ", mp->name); + + FINISH; +} + +static void *vl_api_ioam_enable_t_print + (vl_api_ioam_enable_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: ioam_enable "); + + if (mp->trace_enable) + s = format (s, "trace enabled"); + + if (mp->pot_enable) + s = format (s, "POT enabled"); + + if (mp->seqno) + s = format (s, "Seqno enabled"); + + if (mp->analyse) + s = format (s, "Analyse enabled"); + + FINISH; +} + +static void *vl_api_ioam_disable_t_print + (vl_api_ioam_disable_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: ioam_disable "); + s = format (s, "trace disabled"); + s = format (s, "POT disabled"); + s = format (s, "Seqno disabled"); + s = format (s, "Analyse disabled"); + + FINISH; +} + +static void *vl_api_feature_enable_disable_t_print + (vl_api_feature_enable_disable_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: feature_enable_disable "); + s = format (s, "arc_name %s ", mp->arc_name); + s = format (s, "feature_name %s ", mp->feature_name); + s = format (s, "sw_if_index %d ", ntohl 
(mp->sw_if_index)); + if (!mp->enable) + s = format (s, "disable"); + + FINISH; +} + +static void *vl_api_sw_interface_tag_add_del_t_print + (vl_api_sw_interface_tag_add_del_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: sw_interface_tag_add_del "); + s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index)); + if (mp->is_add) + s = format (s, "tag %s ", mp->tag); + else + s = format (s, "del "); + + FINISH; +} + +static void *vl_api_sw_interface_set_mtu_t_print + (vl_api_sw_interface_set_mtu_t * mp, void *handle) +{ + u8 *s; + + s = format (0, "SCRIPT: sw_interface_set_mtu "); + s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index)); + s = format (s, "tag %d ", ntohs (mp->mtu)); + + FINISH; +} + +#define foreach_custom_print_no_arg_function \ +_(lisp_eid_table_vni_dump) \ +_(lisp_map_resolver_dump) \ +_(lisp_map_server_dump) \ +_(show_lisp_rloc_probe_state) \ +_(show_lisp_map_register_state) \ +_(show_lisp_map_request_mode) \ +_(lisp_gpe_tunnel_dump) + +#define _(f) \ +static void * vl_api_ ## f ## _t_print \ + (vl_api_ ## f ## _t * mp, void * handle) \ +{ \ + u8 * s; \ + s = format (0, "SCRIPT: " #f ); \ + FINISH; \ +} +foreach_custom_print_no_arg_function +#undef _ +#define foreach_custom_print_function \ +_(CREATE_LOOPBACK, create_loopback) \ +_(SW_INTERFACE_SET_FLAGS, sw_interface_set_flags) \ +_(SW_INTERFACE_ADD_DEL_ADDRESS, sw_interface_add_del_address) \ +_(SW_INTERFACE_SET_TABLE, sw_interface_set_table) \ +_(SW_INTERFACE_SET_MPLS_ENABLE, sw_interface_set_mpls_enable) \ +_(SW_INTERFACE_SET_VPATH, sw_interface_set_vpath) \ +_(SW_INTERFACE_SET_VXLAN_BYPASS, sw_interface_set_vxlan_bypass) \ +_(TAP_CONNECT, tap_connect) \ +_(TAP_MODIFY, tap_modify) \ +_(TAP_DELETE, tap_delete) \ +_(SW_INTERFACE_TAP_DUMP, sw_interface_tap_dump) \ +_(IP_ADD_DEL_ROUTE, ip_add_del_route) \ +_(PROXY_ARP_ADD_DEL, proxy_arp_add_del) \ +_(PROXY_ARP_INTFC_ENABLE_DISABLE, proxy_arp_intfc_enable_disable) \ +_(MPLS_TUNNEL_ADD_DEL, mpls_tunnel_add_del) \ 
+_(SW_INTERFACE_SET_UNNUMBERED, sw_interface_set_unnumbered) \ +_(IP_NEIGHBOR_ADD_DEL, ip_neighbor_add_del) \ +_(RESET_VRF, reset_vrf) \ +_(CREATE_VLAN_SUBIF, create_vlan_subif) \ +_(CREATE_SUBIF, create_subif) \ +_(OAM_ADD_DEL, oam_add_del) \ +_(RESET_FIB, reset_fib) \ +_(DHCP_PROXY_CONFIG, dhcp_proxy_config) \ +_(DHCP_PROXY_SET_VSS, dhcp_proxy_set_vss) \ +_(SET_IP_FLOW_HASH, set_ip_flow_hash) \ +_(SW_INTERFACE_IP6_SET_LINK_LOCAL_ADDRESS, \ + sw_interface_ip6_set_link_local_address) \ +_(SW_INTERFACE_IP6ND_RA_PREFIX, sw_interface_ip6nd_ra_prefix) \ +_(SW_INTERFACE_IP6ND_RA_CONFIG, sw_interface_ip6nd_ra_config) \ +_(SET_ARP_NEIGHBOR_LIMIT, set_arp_neighbor_limit) \ +_(L2_PATCH_ADD_DEL, l2_patch_add_del) \ +_(SR_TUNNEL_ADD_DEL, sr_tunnel_add_del) \ +_(SR_POLICY_ADD_DEL, sr_policy_add_del) \ +_(SR_MULTICAST_MAP_ADD_DEL, sr_multicast_map_add_del) \ +_(SW_INTERFACE_SET_L2_XCONNECT, sw_interface_set_l2_xconnect) \ +_(L2FIB_ADD_DEL, l2fib_add_del) \ +_(L2_FLAGS, l2_flags) \ +_(BRIDGE_FLAGS, bridge_flags) \ +_(CLASSIFY_ADD_DEL_TABLE, classify_add_del_table) \ +_(CLASSIFY_ADD_DEL_SESSION, classify_add_del_session) \ +_(SW_INTERFACE_SET_L2_BRIDGE, sw_interface_set_l2_bridge) \ +_(SW_INTERFACE_SET_DPDK_HQOS_PIPE, sw_interface_set_dpdk_hqos_pipe) \ +_(SW_INTERFACE_SET_DPDK_HQOS_SUBPORT, sw_interface_set_dpdk_hqos_subport)\ +_(SW_INTERFACE_SET_DPDK_HQOS_TCTBL, sw_interface_set_dpdk_hqos_tctbl) \ +_(BRIDGE_DOMAIN_ADD_DEL, bridge_domain_add_del) \ +_(BRIDGE_DOMAIN_DUMP, bridge_domain_dump) \ +_(CLASSIFY_SET_INTERFACE_IP_TABLE, classify_set_interface_ip_table) \ +_(CLASSIFY_SET_INTERFACE_L2_TABLES, classify_set_interface_l2_tables) \ +_(ADD_NODE_NEXT, add_node_next) \ +_(DHCP_PROXY_CONFIG_2, dhcp_proxy_config_2) \ +_(DHCP_CLIENT_CONFIG, dhcp_client_config) \ +_(L2TPV3_CREATE_TUNNEL, l2tpv3_create_tunnel) \ +_(L2TPV3_SET_TUNNEL_COOKIES, l2tpv3_set_tunnel_cookies) \ +_(L2TPV3_INTERFACE_ENABLE_DISABLE, l2tpv3_interface_enable_disable) \ +_(L2TPV3_SET_LOOKUP_KEY, 
l2tpv3_set_lookup_key) \ +_(SW_IF_L2TPV3_TUNNEL_DUMP, sw_if_l2tpv3_tunnel_dump) \ +_(VXLAN_ADD_DEL_TUNNEL, vxlan_add_del_tunnel) \ +_(VXLAN_TUNNEL_DUMP, vxlan_tunnel_dump) \ +_(GRE_ADD_DEL_TUNNEL, gre_add_del_tunnel) \ +_(GRE_TUNNEL_DUMP, gre_tunnel_dump) \ +_(L2_FIB_CLEAR_TABLE, l2_fib_clear_table) \ +_(L2_INTERFACE_EFP_FILTER, l2_interface_efp_filter) \ +_(L2_INTERFACE_VLAN_TAG_REWRITE, l2_interface_vlan_tag_rewrite) \ +_(CREATE_VHOST_USER_IF, create_vhost_user_if) \ +_(MODIFY_VHOST_USER_IF, modify_vhost_user_if) \ +_(DELETE_VHOST_USER_IF, delete_vhost_user_if) \ +_(SW_INTERFACE_DUMP, sw_interface_dump) \ +_(CONTROL_PING, control_ping) \ +_(WANT_INTERFACE_EVENTS, want_interface_events) \ +_(CLI_REQUEST, cli_request) \ +_(CLI_INBAND, cli_inband) \ +_(MEMCLNT_CREATE, memclnt_create) \ +_(SW_INTERFACE_VHOST_USER_DUMP, sw_interface_vhost_user_dump) \ +_(SHOW_VERSION, show_version) \ +_(L2_FIB_TABLE_DUMP, l2_fib_table_dump) \ +_(VXLAN_GPE_ADD_DEL_TUNNEL, vxlan_gpe_add_del_tunnel) \ +_(VXLAN_GPE_TUNNEL_DUMP, vxlan_gpe_tunnel_dump) \ +_(INTERFACE_NAME_RENUMBER, interface_name_renumber) \ +_(WANT_IP4_ARP_EVENTS, want_ip4_arp_events) \ +_(WANT_IP6_ND_EVENTS, want_ip6_nd_events) \ +_(INPUT_ACL_SET_INTERFACE, input_acl_set_interface) \ +_(IP_ADDRESS_DUMP, ip_address_dump) \ +_(IP_DUMP, ip_dump) \ +_(DELETE_LOOPBACK, delete_loopback) \ +_(BD_IP_MAC_ADD_DEL, bd_ip_mac_add_del) \ +_(COP_INTERFACE_ENABLE_DISABLE, cop_interface_enable_disable) \ +_(COP_WHITELIST_ENABLE_DISABLE, cop_whitelist_enable_disable) \ +_(AF_PACKET_CREATE, af_packet_create) \ +_(AF_PACKET_DELETE, af_packet_delete) \ +_(SW_INTERFACE_CLEAR_STATS, sw_interface_clear_stats) \ +_(MPLS_FIB_DUMP, mpls_fib_dump) \ +_(MPLS_TUNNEL_DUMP, mpls_tunnel_dump) \ +_(CLASSIFY_TABLE_IDS,classify_table_ids) \ +_(CLASSIFY_TABLE_BY_INTERFACE, classify_table_by_interface) \ +_(CLASSIFY_TABLE_INFO,classify_table_info) \ +_(CLASSIFY_SESSION_DUMP,classify_session_dump) \ +_(SET_IPFIX_EXPORTER, set_ipfix_exporter) \ 
+_(IPFIX_EXPORTER_DUMP, ipfix_exporter_dump) \ +_(SET_IPFIX_CLASSIFY_STREAM, set_ipfix_classify_stream) \ +_(IPFIX_CLASSIFY_STREAM_DUMP, ipfix_classify_stream_dump) \ +_(IPFIX_CLASSIFY_TABLE_ADD_DEL, ipfix_classify_table_add_del) \ +_(IPFIX_CLASSIFY_TABLE_DUMP, ipfix_classify_table_dump) \ +_(SW_INTERFACE_SPAN_ENABLE_DISABLE, sw_interface_span_enable_disable) \ +_(SW_INTERFACE_SPAN_DUMP, sw_interface_span_dump) \ +_(GET_NEXT_INDEX, get_next_index) \ +_(PG_CREATE_INTERFACE,pg_create_interface) \ +_(PG_CAPTURE, pg_capture) \ +_(PG_ENABLE_DISABLE, pg_enable_disable) \ +_(POLICER_ADD_DEL, policer_add_del) \ +_(POLICER_DUMP, policer_dump) \ +_(POLICER_CLASSIFY_SET_INTERFACE, policer_classify_set_interface) \ +_(POLICER_CLASSIFY_DUMP, policer_classify_dump) \ +_(IP_SOURCE_AND_PORT_RANGE_CHECK_ADD_DEL, \ + ip_source_and_port_range_check_add_del) \ +_(IP_SOURCE_AND_PORT_RANGE_CHECK_INTERFACE_ADD_DEL, \ + ip_source_and_port_range_check_interface_add_del) \ +_(LISP_ENABLE_DISABLE, lisp_enable_disable) \ +_(LISP_GPE_ENABLE_DISABLE, lisp_gpe_enable_disable) \ +_(LISP_GPE_ADD_DEL_IFACE, lisp_gpe_add_del_iface) \ +_(LISP_PITR_SET_LOCATOR_SET, lisp_pitr_set_locator_set) \ +_(LISP_MAP_REQUEST_MODE, lisp_map_request_mode) \ +_(SHOW_LISP_MAP_REQUEST_MODE, show_lisp_map_request_mode) \ +_(LISP_ADD_DEL_REMOTE_MAPPING, lisp_add_del_remote_mapping) \ +_(LISP_ADD_DEL_ADJACENCY, lisp_add_del_adjacency) \ +_(LISP_ADD_DEL_MAP_REQUEST_ITR_RLOCS, \ + lisp_add_del_map_request_itr_rlocs) \ +_(LISP_EID_TABLE_ADD_DEL_MAP, lisp_eid_table_add_del_map) \ +_(LISP_ADD_DEL_LOCAL_EID, lisp_add_del_local_eid) \ +_(LISP_GPE_ADD_DEL_FWD_ENTRY, lisp_gpe_add_del_fwd_entry) \ +_(LISP_ADD_DEL_LOCATOR_SET, lisp_add_del_locator_set) \ +_(LISP_ADD_DEL_MAP_RESOLVER, lisp_add_del_map_resolver) \ +_(LISP_ADD_DEL_LOCATOR, lisp_add_del_locator) \ +_(LISP_EID_TABLE_DUMP, lisp_eid_table_dump) \ +_(LISP_EID_TABLE_MAP_DUMP, lisp_eid_table_map_dump) \ +_(LISP_EID_TABLE_VNI_DUMP, lisp_eid_table_vni_dump) \ 
+_(LISP_GPE_TUNNEL_DUMP, lisp_gpe_tunnel_dump) \ +_(LISP_MAP_RESOLVER_DUMP, lisp_map_resolver_dump) \ +_(LISP_MAP_SERVER_DUMP, lisp_map_server_dump) \ +_(LISP_LOCATOR_SET_DUMP, lisp_locator_set_dump) \ +_(LISP_LOCATOR_DUMP, lisp_locator_dump) \ +_(LISP_ADJACENCIES_GET, lisp_adjacencies_get) \ +_(SHOW_LISP_RLOC_PROBE_STATE, show_lisp_rloc_probe_state) \ +_(SHOW_LISP_MAP_REGISTER_STATE, show_lisp_map_register_state) \ +_(LISP_RLOC_PROBE_ENABLE_DISABLE, lisp_rloc_probe_enable_disable) \ +_(LISP_MAP_REGISTER_ENABLE_DISABLE, lisp_map_register_enable_disable) \ +_(IPSEC_GRE_ADD_DEL_TUNNEL, ipsec_gre_add_del_tunnel) \ +_(IPSEC_GRE_TUNNEL_DUMP, ipsec_gre_tunnel_dump) \ +_(DELETE_SUBIF, delete_subif) \ +_(L2_INTERFACE_PBB_TAG_REWRITE, l2_interface_pbb_tag_rewrite) \ +_(PUNT, punt) \ +_(FLOW_CLASSIFY_SET_INTERFACE, flow_classify_set_interface) \ +_(FLOW_CLASSIFY_DUMP, flow_classify_dump) \ +_(GET_FIRST_MSG_ID, get_first_msg_id) \ +_(IOAM_ENABLE, ioam_enable) \ +_(IOAM_DISABLE, ioam_disable) \ +_(IP_FIB_DUMP, ip_fib_dump) \ +_(IP6_FIB_DUMP, ip6_fib_dump) \ +_(FEATURE_ENABLE_DISABLE, feature_enable_disable) \ +_(SW_INTERFACE_TAG_ADD_DEL, sw_interface_tag_add_del) \ +_(SW_INTERFACE_SET_MTU, sw_interface_set_mtu) + void +vl_msg_api_custom_dump_configure (api_main_t * am) +{ +#define _(n,f) am->msg_print_handlers[VL_API_##n] \ + = (void *) vl_api_##f##_t_print; + foreach_custom_print_function; +#undef _ +} + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/vpp/vpp/vpp-api/gmon.c b/vpp/vpp/vpp-api/gmon.c new file mode 100644 index 00000000..e5cb1271 --- /dev/null +++ b/vpp/vpp/vpp-api/gmon.c @@ -0,0 +1,319 @@ +/* + * Copyright (c) 2012 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +typedef struct +{ + svmdb_client_t *svmdb_client; + f64 *vector_rate_ptr; + f64 *input_rate_ptr; + f64 *sig_error_rate_ptr; + pid_t *vpef_pid_ptr; + u64 last_sig_errors; + u64 current_sig_errors; + uword *sig_error_bitmap; + vlib_main_t *vlib_main; + vlib_main_t **my_vlib_mains; + +} gmon_main_t; + +#if DPDK == 0 +static inline u64 +vnet_get_aggregate_rx_packets (void) +{ + return 0; +} +#else +#include +#include +#include +#endif + +gmon_main_t gmon_main; + +static u64 +get_significant_errors (gmon_main_t * gm) +{ + vlib_main_t *this_vlib_main; + vlib_error_main_t *em; + uword code; + int vm_index; + u64 significant_errors = 0; + + /* *INDENT-OFF* */ + clib_bitmap_foreach (code, gm->sig_error_bitmap, + ({ + for (vm_index = 0; vm_index < vec_len (gm->my_vlib_mains); vm_index++) + { + this_vlib_main = gm->my_vlib_mains[vm_index]; + em = &this_vlib_main->error_main; + significant_errors += em->counters[code] - + ((vec_len(em->counters_last_clear) > code) ? 
+ em->counters_last_clear[code] : 0); + } + })); + /* *INDENT-ON* */ + + return (significant_errors); +} + +static clib_error_t * +publish_pid (vlib_main_t * vm) +{ + gmon_main_t *gm = &gmon_main; + + *gm->vpef_pid_ptr = getpid (); + + return 0; +} + +VLIB_API_INIT_FUNCTION (publish_pid); + + +static uword +gmon_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f) +{ + f64 vector_rate; + u64 input_packets, last_input_packets, new_sig_errors; + f64 last_runtime, dt, now; + gmon_main_t *gm = &gmon_main; + int i; + + last_runtime = 0.0; + last_input_packets = 0; + + last_runtime = 0.0; + last_input_packets = 0; + + /* Initial wait for the world to settle down */ + vlib_process_suspend (vm, 5.0); + + if (vec_len (vlib_mains) == 0) + vec_add1 (gm->my_vlib_mains, &vlib_global_main); + else + { + for (i = 0; i < vec_len (vlib_mains); i++) + vec_add1 (gm->my_vlib_mains, vlib_mains[i]); + } + + while (1) + { + vlib_process_suspend (vm, 5.0); + vector_rate = vlib_last_vector_length_per_node (vm); + *gm->vector_rate_ptr = vector_rate; + now = vlib_time_now (vm); + dt = now - last_runtime; + input_packets = vnet_get_aggregate_rx_packets (); + *gm->input_rate_ptr = (f64) (input_packets - last_input_packets) / dt; + last_runtime = now; + last_input_packets = input_packets; + + new_sig_errors = get_significant_errors (gm); + *gm->sig_error_rate_ptr = + ((f64) (new_sig_errors - gm->last_sig_errors)) / dt; + gm->last_sig_errors = new_sig_errors; + } + + return 0; /* not so much */ +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (gmon_process_node,static) = { + .function = gmon_process, + .type = VLIB_NODE_TYPE_PROCESS, + .name = "gmon-process", +}; +/* *INDENT-ON* */ + +static clib_error_t * +gmon_init (vlib_main_t * vm) +{ + gmon_main_t *gm = &gmon_main; + api_main_t *am = &api_main; + pid_t *swp = 0; + f64 *v = 0; + clib_error_t *error; + svmdb_map_args_t _ma, *ma = &_ma; + + if ((error = vlib_call_init_function (vm, vpe_api_init))) + return (error); + + if ((error 
= vlib_call_init_function (vm, vlibmemory_init))) + return (error); + + gm->vlib_main = vm; + + memset (ma, 0, sizeof (*ma)); + ma->root_path = am->root_path; + ma->uid = am->api_uid; + ma->gid = am->api_gid; + + gm->svmdb_client = svmdb_map (ma); + + /* Find or create, set to zero */ + vec_add1 (v, 0.0); + svmdb_local_set_vec_variable (gm->svmdb_client, + "vpp_vector_rate", (char *) v, sizeof (*v)); + vec_free (v); + vec_add1 (v, 0.0); + svmdb_local_set_vec_variable (gm->svmdb_client, + "vpp_input_rate", (char *) v, sizeof (*v)); + vec_free (v); + vec_add1 (v, 0.0); + svmdb_local_set_vec_variable (gm->svmdb_client, + "vpp_sig_error_rate", + (char *) v, sizeof (*v)); + vec_free (v); + + vec_add1 (swp, 0.0); + svmdb_local_set_vec_variable (gm->svmdb_client, + "vpp_pid", (char *) swp, sizeof (*swp)); + vec_free (swp); + + /* the value cells will never move, so acquire references to them */ + gm->vector_rate_ptr = + svmdb_local_get_variable_reference (gm->svmdb_client, + SVMDB_NAMESPACE_VEC, + "vpp_vector_rate"); + gm->input_rate_ptr = + svmdb_local_get_variable_reference (gm->svmdb_client, + SVMDB_NAMESPACE_VEC, + "vpp_input_rate"); + gm->sig_error_rate_ptr = + svmdb_local_get_variable_reference (gm->svmdb_client, + SVMDB_NAMESPACE_VEC, + "vpp_sig_error_rate"); + gm->vpef_pid_ptr = + svmdb_local_get_variable_reference (gm->svmdb_client, + SVMDB_NAMESPACE_VEC, "vpp_pid"); + return 0; +} + +VLIB_INIT_FUNCTION (gmon_init); + +static clib_error_t * +gmon_exit (vlib_main_t * vm) +{ + gmon_main_t *gm = &gmon_main; + + if (gm->vector_rate_ptr) + { + *gm->vector_rate_ptr = 0.0; + *gm->vpef_pid_ptr = 0; + *gm->input_rate_ptr = 0.0; + *gm->sig_error_rate_ptr = 0.0; + svm_region_unmap ((void *) gm->svmdb_client->db_rp); + vec_free (gm->svmdb_client); + } + return 0; +} + +VLIB_MAIN_LOOP_EXIT_FUNCTION (gmon_exit); + +static int +significant_error_enable_disable (gmon_main_t * gm, u32 index, int enable) +{ + vlib_main_t *vm = gm->vlib_main; + vlib_error_main_t *em = 
&vm->error_main; + + if (index >= vec_len (em->counters)) + return VNET_API_ERROR_NO_SUCH_ENTRY; + + gm->sig_error_bitmap = + clib_bitmap_set (gm->sig_error_bitmap, index, enable); + return 0; +} + +static clib_error_t * +set_significant_error_command_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + u32 index; + int enable = 1; + int rv; + gmon_main_t *gm = &gmon_main; + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "%d", &index)) + ; + else if (unformat (input, "disable")) + enable = 0; + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + + rv = significant_error_enable_disable (gm, index, enable); + + switch (rv) + { + case 0: + break; + + default: + return clib_error_return + (0, "significant_error_enable_disable returned %d", rv); + } + + return 0; +} + +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (set_significant_error_command, static) = { + .path = "set significant error", + .short_help = "set significant error [disable]", + .function = set_significant_error_command_fn, +}; +/* *INDENT-ON* */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/vpp/vpp/vpp-api/summary_stats_client.c b/vpp/vpp/vpp-api/summary_stats_client.c new file mode 100644 index 00000000..54e16669 --- /dev/null +++ b/vpp/vpp/vpp-api/summary_stats_client.c @@ -0,0 +1,302 @@ +/* + *------------------------------------------------------------------ + * summary_stats_client - + * + * Copyright (c) 2010 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + *------------------------------------------------------------------ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include + +#define f64_endian(a) +#define f64_print(a,b) + +#define vl_typedefs /* define message structures */ +#include +#undef vl_typedefs + +#define vl_endianfun /* define message structures */ +#include +#undef vl_endianfun + +/* instantiate all the print functions we know about */ +#define vl_print(handle, ...) +#define vl_printfun +#include +#undef vl_printfun + +vl_shmem_hdr_t *shmem_hdr; + +typedef struct +{ + volatile int sigterm_received; + + struct sockaddr_in send_data_addr; + int send_data_socket; + u8 *display_name; + + /* convenience */ + unix_shared_memory_queue_t *vl_input_queue; + u32 my_client_index; +} test_main_t; + +test_main_t test_main; + +/* + * Satisfy external references when -lvlib is not available. + */ +vlib_main_t vlib_global_main; +vlib_main_t **vlib_mains; + +void +vlib_cli_output (struct vlib_main_t *vm, char *fmt, ...) 
+{ + clib_warning ("vlib_cli_output callled..."); +} + + +static void +vl_api_vnet_summary_stats_reply_t_handler (vl_api_vnet_summary_stats_reply_t * + mp) +{ + test_main_t *tm = &test_main; + static u8 *sb; + int n; + + printf ("total rx pkts %llu, total rx bytes %llu\n", + (unsigned long long) mp->total_pkts[0], + (unsigned long long) mp->total_bytes[0]); + printf ("total tx pkts %llu, total tx bytes %llu\n", + (unsigned long long) mp->total_pkts[1], + (unsigned long long) mp->total_bytes[1]); + printf ("vector rate %.2f\n", mp->vector_rate); + + vec_reset_length (sb); + sb = format (sb, "%v,%.0f,%llu,%llu,%llu,%llu\n%c", + tm->display_name, mp->vector_rate, + (unsigned long long) mp->total_pkts[0], + (unsigned long long) mp->total_bytes[0], + (unsigned long long) mp->total_pkts[1], + (unsigned long long) mp->total_bytes[1], 0); + + n = sendto (tm->send_data_socket, sb, vec_len (sb), + 0, (struct sockaddr *) &tm->send_data_addr, + sizeof (tm->send_data_addr)); + + if (n != vec_len (sb)) + clib_unix_warning ("sendto"); + +} + +#define foreach_api_msg \ +_(VNET_SUMMARY_STATS_REPLY, vnet_summary_stats_reply) + +int +connect_to_vpe (char *name) +{ + int rv = 0; + + rv = vl_client_connect_to_vlib ("/vpe-api", name, 32); + +#define _(N,n) \ + vl_msg_api_set_handlers(VL_API_##N, #n, \ + vl_api_##n##_t_handler, \ + vl_noop_handler, \ + vl_api_##n##_t_endian, \ + vl_api_##n##_t_print, \ + sizeof(vl_api_##n##_t), 1); + foreach_api_msg; +#undef _ + + shmem_hdr = api_main.shmem_hdr; + + return rv; +} + +int +disconnect_from_vpe (void) +{ + vl_client_disconnect_from_vlib (); + return 0; +} + +static void +sigterm_handler (int sig) +{ + test_main_t *tm = &test_main; + tm->sigterm_received = 1; +} + +/* Parse an IP4 address %d.%d.%d.%d. 
*/ +uword +unformat_ip4_address (unformat_input_t * input, va_list * args) +{ + u8 *result = va_arg (*args, u8 *); + unsigned a[4]; + + if (!unformat (input, "%d.%d.%d.%d", &a[0], &a[1], &a[2], &a[3])) + return 0; + + if (a[0] >= 256 || a[1] >= 256 || a[2] >= 256 || a[3] >= 256) + return 0; + + result[0] = a[0]; + result[1] = a[1]; + result[2] = a[2]; + result[3] = a[3]; + + return 1; +} + +int +main (int argc, char **argv) +{ + api_main_t *am = &api_main; + test_main_t *tm = &test_main; + vl_api_vnet_get_summary_stats_t *mp; + unformat_input_t _input, *input = &_input; + clib_error_t *error = 0; + ip4_address_t collector_ip; + u8 *display_name = 0; + u16 collector_port = 7654; + + collector_ip.as_u32 = (u32) ~ 0; + + unformat_init_command_line (input, argv); + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "collector-ip %U", + unformat_ip4_address, &collector_ip)) + ; + else if (unformat (input, "display-name %v", &display_name)) + ; + else if (unformat (input, "collector-port %d", &collector_port)) + ; + else + { + error = + clib_error_return + (0, "Usage: %s collector-ip \n" + " [display-name ] [collector-port ]\n" + " port defaults to 7654", argv[0]); + break; + } + } + + if (error == 0 && collector_ip.as_u32 == (u32) ~ 0) + error = clib_error_return (0, "collector-ip not set...\n"); + + + if (error) + { + clib_error_report (error); + exit (1); + } + + if (display_name == 0) + { + display_name = format (0, "vpe-to-%d.%d.%d.%d", + collector_ip.as_u8[0], + collector_ip.as_u8[1], + collector_ip.as_u8[2], collector_ip.as_u8[3]); + } + + + connect_to_vpe ("test_client"); + + tm->vl_input_queue = shmem_hdr->vl_input_queue; + tm->my_client_index = am->my_client_index; + tm->display_name = display_name; + + signal (SIGTERM, sigterm_handler); + signal (SIGINT, sigterm_handler); + signal (SIGQUIT, sigterm_handler); + + /* data (multicast) RX socket */ + tm->send_data_socket = socket (PF_INET, SOCK_DGRAM, IPPROTO_UDP); + if 
(tm->send_data_socket < 0) + { + clib_unix_warning (0, "data_rx_socket"); + exit (1); + } + + memset (&tm->send_data_addr, 0, sizeof (tm->send_data_addr)); + tm->send_data_addr.sin_family = AF_INET; + tm->send_data_addr.sin_addr.s_addr = collector_ip.as_u32; + tm->send_data_addr.sin_port = htons (collector_port); + + fformat (stdout, "Send SIGINT or SIGTERM to quit...\n"); + + while (1) + { + sleep (5); + + if (tm->sigterm_received) + break; + /* Poll for stats */ + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_VNET_GET_SUMMARY_STATS); + mp->client_index = tm->my_client_index; + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); + } + + fformat (stdout, "Exiting...\n"); + + disconnect_from_vpe (); + exit (0); +} + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/vpp/vpp/vpp-api/test_client.c b/vpp/vpp/vpp-api/test_client.c new file mode 100644 index 00000000..e0d7054a --- /dev/null +++ b/vpp/vpp/vpp-api/test_client.c @@ -0,0 +1,1531 @@ +/* + *------------------------------------------------------------------ + * api.c - message handler registration + * + * Copyright (c) 2010 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *------------------------------------------------------------------ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include +#include + +#define f64_endian(a) +#define f64_print(a,b) + +#define vl_typedefs /* define message structures */ +#include +#undef vl_typedefs + +#define vl_endianfun /* define message structures */ +#include +#undef vl_endianfun + +/* instantiate all the print functions we know about */ +#define vl_print(handle, ...) +#define vl_printfun +#include +#undef vl_printfun + +vl_shmem_hdr_t *shmem_hdr; + +typedef struct +{ + int link_events_on; + int stats_on; + int oam_events_on; + + /* convenience */ + unix_shared_memory_queue_t *vl_input_queue; + u32 my_client_index; +} test_main_t; + +test_main_t test_main; + +/* + * Satisfy external references when -lvlib is not available. + */ +vlib_main_t vlib_global_main; +vlib_main_t **vlib_mains; + +void +vlib_cli_output (struct vlib_main_t *vm, char *fmt, ...) 
+{ + clib_warning ("vlib_cli_output callled..."); +} + +u8 * +format_ethernet_address (u8 * s, va_list * args) +{ + u8 *a = va_arg (*args, u8 *); + + return format (s, "%02x:%02x:%02x:%02x:%02x:%02x", + a[0], a[1], a[2], a[3], a[4], a[5]); +} + +static void +vl_api_sw_interface_details_t_handler (vl_api_sw_interface_details_t * mp) +{ + char *duplex, *speed; + + switch (mp->link_duplex << VNET_HW_INTERFACE_FLAG_DUPLEX_SHIFT) + { + case VNET_HW_INTERFACE_FLAG_HALF_DUPLEX: + duplex = "half"; + break; + case VNET_HW_INTERFACE_FLAG_FULL_DUPLEX: + duplex = "full"; + break; + default: + duplex = "bogus"; + break; + } + switch (mp->link_speed << VNET_HW_INTERFACE_FLAG_SPEED_SHIFT) + { + case VNET_HW_INTERFACE_FLAG_SPEED_10M: + speed = "10Mbps"; + break; + case VNET_HW_INTERFACE_FLAG_SPEED_100M: + speed = "100Mbps"; + break; + case VNET_HW_INTERFACE_FLAG_SPEED_1G: + speed = "1Gbps"; + break; + case VNET_HW_INTERFACE_FLAG_SPEED_10G: + speed = "10Gbps"; + break; + case VNET_HW_INTERFACE_FLAG_SPEED_40G: + speed = "40Gbps"; + break; + case VNET_HW_INTERFACE_FLAG_SPEED_100G: + speed = "100Gbps"; + break; + default: + speed = "bogus"; + break; + } + fformat (stdout, "details: %s sw_if_index %d sup_sw_if_index %d " + "link_duplex %s link_speed %s", + mp->interface_name, ntohl (mp->sw_if_index), + ntohl (mp->sup_sw_if_index), duplex, speed); + + if (mp->l2_address_length) + fformat (stdout, " l2 address: %U\n", + format_ethernet_address, mp->l2_address); + else + fformat (stdout, "\n"); +} + +static void +vl_api_sw_interface_set_flags_t_handler (vl_api_sw_interface_set_flags_t * mp) +{ + fformat (stdout, "set flags: sw_if_index %d, admin %s link %s\n", + ntohl (mp->sw_if_index), + mp->admin_up_down ? "up" : "down", + mp->link_up_down ? 
"up" : "down"); +} + +static void + vl_api_sw_interface_set_flags_reply_t_handler + (vl_api_sw_interface_set_flags_reply_t * mp) +{ + fformat (stdout, "set flags reply: reply %d\n", ntohl (mp->retval)); +} + +static void + vl_api_want_interface_events_reply_t_handler + (vl_api_want_interface_events_reply_t * mp) +{ +} + +static void +vl_api_want_stats_reply_t_handler (vl_api_want_stats_reply_t * mp) +{ + fformat (stdout, "want stats reply %d\n", ntohl (mp->retval)); +} + +static void +vl_api_want_oam_events_reply_t_handler (vl_api_want_oam_events_reply_t * mp) +{ + fformat (stdout, "want oam reply %d\n", ntohl (mp->retval)); +} + +static void +vl_api_ip_add_del_route_reply_t_handler (vl_api_ip_add_del_route_reply_t * mp) +{ + fformat (stdout, "add_route reply %d\n", ntohl (mp->retval)); +} + +static void + vl_api_sw_interface_add_del_address_reply_t_handler + (vl_api_sw_interface_add_del_address_reply_t * mp) +{ + fformat (stdout, "add_del_address reply %d\n", ntohl (mp->retval)); +} + +static void + vl_api_sw_interface_set_table_reply_t_handler + (vl_api_sw_interface_set_table_reply_t * mp) +{ + fformat (stdout, "set_table reply %d\n", ntohl (mp->retval)); +} + +static void +vl_api_tap_connect_reply_t_handler (vl_api_tap_connect_reply_t * mp) +{ + fformat (stdout, "tap connect reply %d, sw_if_index %d\n", + ntohl (mp->retval), ntohl (mp->sw_if_index)); +} + +static void +vl_api_create_vlan_subif_reply_t_handler (vl_api_create_vlan_subif_reply_t * + mp) +{ + fformat (stdout, "create vlan subif reply %d, sw_if_index %d\n", + ntohl (mp->retval), ntohl (mp->sw_if_index)); +} + +static void vl_api_proxy_arp_add_del_reply_t_handler + (vl_api_proxy_arp_add_del_reply_t * mp) +{ + fformat (stdout, "add del proxy arp reply %d\n", ntohl (mp->retval)); +} + +static void vl_api_proxy_arp_intfc_enable_disable_reply_t_handler + (vl_api_proxy_arp_intfc_enable_disable_reply_t * mp) +{ + fformat (stdout, "proxy arp intfc ena/dis reply %d\n", ntohl (mp->retval)); +} + +static void 
vl_api_ip_neighbor_add_del_reply_t_handler + (vl_api_ip_neighbor_add_del_reply_t * mp) +{ + fformat (stdout, "ip neighbor add del reply %d\n", ntohl (mp->retval)); +} + +static void +vl_api_vnet_interface_counters_t_handler (vl_api_vnet_interface_counters_t * + mp) +{ + char *counter_name; + u32 count, sw_if_index; + int i; + + count = ntohl (mp->count); + sw_if_index = ntohl (mp->first_sw_if_index); + if (mp->is_combined == 0) + { + u64 *vp, v; + vp = (u64 *) mp->data; + + switch (mp->vnet_counter_type) + { + case VNET_INTERFACE_COUNTER_DROP: + counter_name = "drop"; + break; + case VNET_INTERFACE_COUNTER_PUNT: + counter_name = "punt"; + break; + case VNET_INTERFACE_COUNTER_IP4: + counter_name = "ip4"; + break; + case VNET_INTERFACE_COUNTER_IP6: + counter_name = "ip6"; + break; + case VNET_INTERFACE_COUNTER_RX_NO_BUF: + counter_name = "rx-no-buf"; + break; + case VNET_INTERFACE_COUNTER_RX_MISS: + counter_name = "rx-miss"; + break; + case VNET_INTERFACE_COUNTER_RX_ERROR: + counter_name = "rx-error"; + break; + case VNET_INTERFACE_COUNTER_TX_ERROR: + counter_name = "tx-error (fifo-full)"; + break; + default: + counter_name = "bogus"; + break; + } + for (i = 0; i < count; i++) + { + v = clib_mem_unaligned (vp, u64); + v = clib_net_to_host_u64 (v); + vp++; + fformat (stdout, "%d.%s %lld\n", sw_if_index, counter_name, v); + sw_if_index++; + } + } + else + { + vlib_counter_t *vp; + u64 packets, bytes; + vp = (vlib_counter_t *) mp->data; + + switch (mp->vnet_counter_type) + { + case VNET_INTERFACE_COUNTER_RX: + counter_name = "rx"; + break; + case VNET_INTERFACE_COUNTER_TX: + counter_name = "tx"; + break; + default: + counter_name = "bogus"; + break; + } + for (i = 0; i < count; i++) + { + packets = clib_mem_unaligned (&vp->packets, u64); + packets = clib_net_to_host_u64 (packets); + bytes = clib_mem_unaligned (&vp->bytes, u64); + bytes = clib_net_to_host_u64 (bytes); + vp++; + fformat (stdout, "%d.%s.packets %lld\n", + sw_if_index, counter_name, packets); + fformat 
(stdout, "%d.%s.bytes %lld\n", + sw_if_index, counter_name, bytes); + sw_if_index++; + } + } +} + +/* Format an IP4 address. */ +u8 * +format_ip4_address (u8 * s, va_list * args) +{ + u8 *a = va_arg (*args, u8 *); + return format (s, "%d.%d.%d.%d", a[0], a[1], a[2], a[3]); +} + +/* Format an IP4 route destination and length. */ +u8 * +format_ip4_address_and_length (u8 * s, va_list * args) +{ + u8 *a = va_arg (*args, u8 *); + u8 l = va_arg (*args, u32); + return format (s, "%U/%d", format_ip4_address, a, l); +} + +static void +vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp) +{ + int i; + vl_api_ip4_fib_counter_t *ctrp; + u32 count; + + count = ntohl (mp->count); + + fformat (stdout, "fib id %d, count this msg %d\n", + ntohl (mp->vrf_id), count); + + ctrp = mp->c; + for (i = 0; i < count; i++) + { + fformat (stdout, "%U: %lld packets, %lld bytes\n", + format_ip4_address_and_length, &ctrp->address, + (u32) ctrp->address_length, + clib_net_to_host_u64 (ctrp->packets), + clib_net_to_host_u64 (ctrp->bytes)); + ctrp++; + } +} + +/* Format an IP6 address. 
*/ +u8 * +format_ip6_address (u8 * s, va_list * args) +{ + ip6_address_t *a = va_arg (*args, ip6_address_t *); + u32 i, i_max_n_zero, max_n_zeros, i_first_zero, n_zeros, last_double_colon; + + i_max_n_zero = ARRAY_LEN (a->as_u16); + max_n_zeros = 0; + i_first_zero = i_max_n_zero; + n_zeros = 0; + for (i = 0; i < ARRAY_LEN (a->as_u16); i++) + { + u32 is_zero = a->as_u16[i] == 0; + if (is_zero && i_first_zero >= ARRAY_LEN (a->as_u16)) + { + i_first_zero = i; + n_zeros = 0; + } + n_zeros += is_zero; + if ((!is_zero && n_zeros > max_n_zeros) + || (i + 1 >= ARRAY_LEN (a->as_u16) && n_zeros > max_n_zeros)) + { + i_max_n_zero = i_first_zero; + max_n_zeros = n_zeros; + i_first_zero = ARRAY_LEN (a->as_u16); + n_zeros = 0; + } + } + + last_double_colon = 0; + for (i = 0; i < ARRAY_LEN (a->as_u16); i++) + { + if (i == i_max_n_zero && max_n_zeros > 1) + { + s = format (s, "::"); + i += max_n_zeros - 1; + last_double_colon = 1; + } + else + { + s = format (s, "%s%x", + (last_double_colon || i == 0) ? "" : ":", + clib_net_to_host_u16 (a->as_u16[i])); + last_double_colon = 0; + } + } + + return s; +} + +/* Format an IP6 route destination and length. 
*/ +u8 * +format_ip6_address_and_length (u8 * s, va_list * args) +{ + ip6_address_t *a = va_arg (*args, ip6_address_t *); + u8 l = va_arg (*args, u32); + return format (s, "%U/%d", format_ip6_address, a, l); +} + +static void +vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp) +{ + int i; + vl_api_ip6_fib_counter_t *ctrp; + u32 count; + + count = ntohl (mp->count); + + fformat (stdout, "fib id %d, count this msg %d\n", + ntohl (mp->vrf_id), count); + + ctrp = mp->c; + for (i = 0; i < count; i++) + { + fformat (stdout, "%U: %lld packets, %lld bytes\n", + format_ip6_address_and_length, &ctrp->address, + (u32) ctrp->address_length, + clib_net_to_host_u64 (ctrp->packets), + clib_net_to_host_u64 (ctrp->bytes)); + ctrp++; + } +} + +static void +vl_api_oam_event_t_handler (vl_api_oam_event_t * mp) +{ + fformat (stdout, "OAM: %U now %s\n", + format_ip4_address, &mp->dst_address, + mp->state == 1 ? "alive" : "dead"); +} + +static void +vl_api_oam_add_del_reply_t_handler (vl_api_oam_add_del_reply_t * mp) +{ + fformat (stdout, "oam add del reply %d\n", ntohl (mp->retval)); +} + +static void +vl_api_reset_fib_reply_t_handler (vl_api_reset_fib_reply_t * mp) +{ + fformat (stdout, "fib reset reply %d\n", ntohl (mp->retval)); +} + +static void +vl_api_dhcp_proxy_set_vss_reply_t_handler (vl_api_dhcp_proxy_set_vss_reply_t * + mp) +{ + fformat (stdout, "dhcp proxy set vss reply %d\n", ntohl (mp->retval)); +} + +static void +vl_api_dhcp_proxy_config_reply_t_handler (vl_api_dhcp_proxy_config_reply_t * + mp) +{ + fformat (stdout, "dhcp proxy config reply %d\n", ntohl (mp->retval)); +} + +static void +vl_api_set_ip_flow_hash_reply_t_handler (vl_api_set_ip_flow_hash_reply_t * mp) +{ + fformat (stdout, "set ip flow hash reply %d\n", ntohl (mp->retval)); +} + +static void + vl_api_sw_interface_ip6nd_ra_config_reply_t_handler + (vl_api_sw_interface_ip6nd_ra_config_reply_t * mp) +{ + fformat (stdout, "ip6 nd ra-config reply %d\n", ntohl (mp->retval)); +} + +static 
void + vl_api_sw_interface_ip6nd_ra_prefix_reply_t_handler + (vl_api_sw_interface_ip6nd_ra_prefix_reply_t * mp) +{ + fformat (stdout, "ip6 nd ra-prefix reply %d\n", ntohl (mp->retval)); +} + +static void + vl_api_sw_interface_ip6_enable_disable_reply_t_handler + (vl_api_sw_interface_ip6_enable_disable_reply_t * mp) +{ + fformat (stdout, "ip6 enable/disable reply %d\n", ntohl (mp->retval)); +} + +static void + vl_api_sw_interface_ip6_set_link_local_address_reply_t_handler + (vl_api_sw_interface_ip6_set_link_local_address_reply_t * mp) +{ + fformat (stdout, "ip6 set link-local address reply %d\n", + ntohl (mp->retval)); +} + +static void vl_api_create_loopback_reply_t_handler + (vl_api_create_loopback_reply_t * mp) +{ + fformat (stdout, "create loopback status %d, sw_if_index %d\n", + ntohl (mp->retval), ntohl (mp->sw_if_index)); +} + +static void +vl_api_sr_tunnel_add_del_reply_t_handler (vl_api_sr_tunnel_add_del_reply_t * + mp) +{ + fformat (stdout, "sr tunnel add/del reply %d\n", ntohl (mp->retval)); +} + +static void vl_api_l2_patch_add_del_reply_t_handler + (vl_api_l2_patch_add_del_reply_t * mp) +{ + fformat (stdout, "l2 patch reply %d\n", ntohl (mp->retval)); +} + +static void vl_api_sw_interface_set_l2_xconnect_reply_t_handler + (vl_api_sw_interface_set_l2_xconnect_reply_t * mp) +{ + fformat (stdout, "l2_xconnect reply %d\n", ntohl (mp->retval)); +} + +static void vl_api_sw_interface_set_l2_bridge_reply_t_handler + (vl_api_sw_interface_set_l2_bridge_reply_t * mp) +{ + fformat (stdout, "l2_bridge reply %d\n", ntohl (mp->retval)); +} + +static void +noop_handler (void *notused) +{ +} + +#define vl_api_vnet_ip4_fib_counters_t_endian noop_handler +#define vl_api_vnet_ip4_fib_counters_t_print noop_handler +#define vl_api_vnet_ip6_fib_counters_t_endian noop_handler +#define vl_api_vnet_ip6_fib_counters_t_print noop_handler + +#define foreach_api_msg \ +_(SW_INTERFACE_DETAILS, sw_interface_details) \ +_(SW_INTERFACE_SET_FLAGS, sw_interface_set_flags) \ 
+_(SW_INTERFACE_SET_FLAGS_REPLY, sw_interface_set_flags_reply) \ +_(WANT_INTERFACE_EVENTS_REPLY, want_interface_events_reply) \ +_(WANT_STATS_REPLY, want_stats_reply) \ +_(WANT_OAM_EVENTS_REPLY, want_oam_events_reply) \ +_(OAM_EVENT, oam_event) \ +_(OAM_ADD_DEL_REPLY, oam_add_del_reply) \ +_(VNET_INTERFACE_COUNTERS, vnet_interface_counters) \ +_(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \ +_(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \ +_(IP_ADD_DEL_ROUTE_REPLY, ip_add_del_route_reply) \ +_(SW_INTERFACE_ADD_DEL_ADDRESS_REPLY, sw_interface_add_del_address_reply) \ +_(SW_INTERFACE_SET_TABLE_REPLY, sw_interface_set_table_reply) \ +_(TAP_CONNECT_REPLY, tap_connect_reply) \ +_(CREATE_VLAN_SUBIF_REPLY, create_vlan_subif_reply) \ +_(PROXY_ARP_ADD_DEL_REPLY, proxy_arp_add_del_reply) \ +_(PROXY_ARP_INTFC_ENABLE_DISABLE_REPLY, proxy_arp_intfc_enable_disable_reply) \ +_(IP_NEIGHBOR_ADD_DEL_REPLY, ip_neighbor_add_del_reply) \ +_(RESET_FIB_REPLY, reset_fib_reply) \ +_(DHCP_PROXY_CONFIG_REPLY, dhcp_proxy_config_reply) \ +_(DHCP_PROXY_SET_VSS_REPLY, dhcp_proxy_set_vss_reply) \ +_(SET_IP_FLOW_HASH_REPLY, set_ip_flow_hash_reply) \ +_(SW_INTERFACE_IP6ND_RA_CONFIG_REPLY, sw_interface_ip6nd_ra_config_reply) \ +_(SW_INTERFACE_IP6ND_RA_PREFIX_REPLY, sw_interface_ip6nd_ra_prefix_reply) \ +_(SW_INTERFACE_IP6_ENABLE_DISABLE_REPLY, sw_interface_ip6_enable_disable_reply) \ +_(SW_INTERFACE_IP6_SET_LINK_LOCAL_ADDRESS_REPLY, sw_interface_ip6_set_link_local_address_reply) \ + _(CREATE_LOOPBACK_REPLY, create_loopback_reply) \ +_(L2_PATCH_ADD_DEL_REPLY, l2_patch_add_del_reply) \ +_(SR_TUNNEL_ADD_DEL_REPLY,sr_tunnel_add_del_reply) \ +_(SW_INTERFACE_SET_L2_XCONNECT_REPLY, sw_interface_set_l2_xconnect_reply) \ +_(SW_INTERFACE_SET_L2_BRIDGE_REPLY, sw_interface_set_l2_bridge_reply) + +int +connect_to_vpe (char *name) +{ + int rv = 0; + + rv = vl_client_connect_to_vlib ("/vpe-api", name, 32); + +#define _(N,n) \ + vl_msg_api_set_handlers(VL_API_##N, #n, \ + vl_api_##n##_t_handler, \ + 
noop_handler, \ + vl_api_##n##_t_endian, \ + vl_api_##n##_t_print, \ + sizeof(vl_api_##n##_t), 1); + foreach_api_msg; +#undef _ + + shmem_hdr = api_main.shmem_hdr; + + return rv; +} + +int +disconnect_from_vpe (void) +{ + vl_client_disconnect_from_vlib (); + return 0; +} + +void +link_up_down_enable_disable (test_main_t * tm, int enable) +{ + vl_api_want_interface_events_t *mp; + + /* Request admin / link up down messages */ + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_EVENTS); + mp->client_index = tm->my_client_index; + mp->context = 0xdeadbeef; + mp->enable_disable = enable; + mp->pid = getpid (); + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); + tm->link_events_on = enable; +} + +void +stats_enable_disable (test_main_t * tm, int enable) +{ + vl_api_want_stats_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_WANT_STATS); + mp->client_index = tm->my_client_index; + mp->context = 0xdeadbeef; + mp->enable_disable = enable; + mp->pid = getpid (); + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); + tm->stats_on = enable; +} + +void +oam_events_enable_disable (test_main_t * tm, int enable) +{ + vl_api_want_oam_events_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_WANT_OAM_EVENTS); + mp->client_index = tm->my_client_index; + mp->context = 0xdeadbeef; + mp->enable_disable = enable; + mp->pid = getpid (); + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); + tm->oam_events_on = enable; +} + +void +oam_add_del (test_main_t * tm, int is_add) +{ + vl_api_oam_add_del_t *mp; + ip4_address_t tmp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_OAM_ADD_DEL); + mp->client_index = tm->my_client_index; + mp->context = 0xdeadbeef; + mp->is_add = is_add; + + tmp.as_u32 = ntohl (0xc0a80101); /* 
192.168.1.1 */ + clib_memcpy (mp->src_address, tmp.as_u8, 4); + + tmp.as_u32 = ntohl (0xc0a80103); /* 192.168.1.3 */ + clib_memcpy (mp->dst_address, tmp.as_u8, 4); + + mp->vrf_id = 0; + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); +} + +void +dump (test_main_t * tm) +{ + vl_api_sw_interface_dump_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_DUMP); + mp->client_index = tm->my_client_index; + mp->name_filter_valid = 1; + strncpy ((char *) mp->name_filter, "eth", sizeof (mp->name_filter) - 1); + + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); +} + +void +add_del_ip4_route (test_main_t * tm, int enable_disable) +{ + vl_api_ip_add_del_route_t *mp; + u32 tmp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_IP_ADD_DEL_ROUTE); + mp->client_index = tm->my_client_index; + mp->context = 0xdeadbeef; + mp->table_id = ntohl (0); + mp->create_vrf_if_needed = 1; + + mp->next_hop_sw_if_index = ntohl (5); + mp->is_add = enable_disable; + mp->next_hop_weight = 1; + + /* Next hop: 6.0.0.1 */ + tmp = ntohl (0x06000001); + clib_memcpy (mp->next_hop_address, &tmp, sizeof (tmp)); + + /* Destination: 10.0.0.1/32 */ + tmp = ntohl (0x0); + clib_memcpy (mp->dst_address, &tmp, sizeof (tmp)); + mp->dst_address_length = 0; + + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); +} + +void +add_del_ip6_route (test_main_t * tm, int enable_disable) +{ + vl_api_ip_add_del_route_t *mp; + u64 tmp[2]; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_IP_ADD_DEL_ROUTE); + mp->client_index = tm->my_client_index; + mp->context = 0xdeadbeef; + mp->next_hop_sw_if_index = ntohl (5); + mp->is_add = enable_disable; + mp->is_ipv6 = 1; + mp->next_hop_weight = 1; + mp->dst_address_length = 64; + + /* add/del dabe::/64 via db01::11 */ + + tmp[0] = clib_host_to_net_u64 (0xdabe000000000000ULL); 
+ tmp[1] = clib_host_to_net_u64 (0x0ULL); + clib_memcpy (mp->dst_address, &tmp[0], 8); + clib_memcpy (&mp->dst_address[8], &tmp[1], 8); + + tmp[0] = clib_host_to_net_u64 (0xdb01000000000000ULL); + tmp[1] = clib_host_to_net_u64 (0x11ULL); + clib_memcpy (mp->next_hop_address, &tmp[0], 8); + clib_memcpy (&mp->next_hop_address[8], &tmp[1], 8); + + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); +} + +void +add_del_interface_address (test_main_t * tm, int enable_disable) +{ + vl_api_sw_interface_add_del_address_t *mp; + u32 tmp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_ADD_DEL_ADDRESS); + mp->client_index = tm->my_client_index; + mp->context = 0xdeadbeef; + mp->sw_if_index = ntohl (5); + mp->is_add = enable_disable; + mp->address_length = 8; + + tmp = ntohl (0x01020304); + clib_memcpy (mp->address, &tmp, 4); + + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); +} + +void +add_del_v6_interface_address (test_main_t * tm, int enable_disable) +{ + vl_api_sw_interface_add_del_address_t *mp; + u64 tmp[2]; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_ADD_DEL_ADDRESS); + mp->client_index = tm->my_client_index; + mp->context = 0xdeadbeef; + mp->is_ipv6 = 1; + mp->sw_if_index = ntohl (5); + mp->is_add = enable_disable; + mp->address_length = 64; + + tmp[0] = clib_host_to_net_u64 (0xdb01000000000000ULL); + tmp[1] = clib_host_to_net_u64 (0x11ULL); + + clib_memcpy (mp->address, &tmp[0], 8); + clib_memcpy (&mp->address[8], &tmp[1], 8); + + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); +} + +void +del_all_interface_addresses (test_main_t * tm) +{ + vl_api_sw_interface_add_del_address_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_ADD_DEL_ADDRESS); + mp->client_index = tm->my_client_index; + mp->context = 0xdeadbeef; + 
mp->sw_if_index = ntohl (5); + mp->del_all = 1; + + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); +} + +void +set_interface_table (test_main_t * tm, int is_ipv6, u32 vrf_id) +{ + vl_api_sw_interface_set_table_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_SET_TABLE); + mp->client_index = tm->my_client_index; + mp->context = 0xdeadbeef; + mp->sw_if_index = ntohl (5); + mp->is_ipv6 = is_ipv6; + mp->vrf_id = ntohl (vrf_id); + + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); +} + +void +connect_unix_tap (test_main_t * tm, char *name) +{ + vl_api_tap_connect_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_TAP_CONNECT); + mp->client_index = tm->my_client_index; + mp->context = 0xdeadbeef; + strncpy ((char *) mp->tap_name, name, sizeof (mp->tap_name) - 1); + mp->use_random_mac = 1; + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); +} + +void +create_vlan_subif (test_main_t * tm, u32 vlan_id) +{ + vl_api_create_vlan_subif_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_CREATE_VLAN_SUBIF); + mp->client_index = tm->my_client_index; + mp->context = 0xdeadbeef; + mp->sw_if_index = ntohl (5); + mp->vlan_id = ntohl (vlan_id); + + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); +} + +void +add_del_proxy_arp (test_main_t * tm, int is_add) +{ + vl_api_proxy_arp_add_del_t *mp; + u32 tmp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_PROXY_ARP_ADD_DEL); + mp->client_index = tm->my_client_index; + mp->context = 0xdeadbeef; + mp->vrf_id = ntohl (11); + mp->is_add = is_add; + + /* proxy fib 11, 1.1.1.1 -> 1.1.1.10 */ + tmp = ntohl (0x01010101); + clib_memcpy (mp->low_address, &tmp, 4); + + tmp = ntohl (0x0101010a); + clib_memcpy (mp->hi_address, &tmp, 4); + + vl_msg_api_send_shmem 
(tm->vl_input_queue, (u8 *) & mp); +} + +void +proxy_arp_intfc_enable_disable (test_main_t * tm, int enable_disable) +{ + vl_api_proxy_arp_intfc_enable_disable_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_PROXY_ARP_INTFC_ENABLE_DISABLE); + mp->client_index = tm->my_client_index; + mp->context = 0xdeadbeef; + mp->sw_if_index = ntohl (6); + mp->enable_disable = enable_disable; + + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); +} + +void +add_ip4_neighbor (test_main_t * tm, int add_del) +{ + vl_api_ip_neighbor_add_del_t *mp; + u32 tmp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_IP_NEIGHBOR_ADD_DEL); + mp->client_index = tm->my_client_index; + mp->context = 0xdeadbeef; + mp->vrf_id = ntohl (11); + mp->sw_if_index = ntohl (6); + mp->is_add = add_del; + + memset (mp->mac_address, 0xbe, sizeof (mp->mac_address)); + + tmp = ntohl (0x0101010a); + clib_memcpy (mp->dst_address, &tmp, 4); + + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); +} + +void +add_ip6_neighbor (test_main_t * tm, int add_del) +{ + vl_api_ip_neighbor_add_del_t *mp; + u64 tmp[2]; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_IP_NEIGHBOR_ADD_DEL); + mp->client_index = tm->my_client_index; + mp->context = 0xdeadbeef; + mp->vrf_id = ntohl (11); + mp->sw_if_index = ntohl (6); + mp->is_add = add_del; + mp->is_ipv6 = 1; + + memset (mp->mac_address, 0xbe, sizeof (mp->mac_address)); + + tmp[0] = clib_host_to_net_u64 (0xdb01000000000000ULL); + tmp[1] = clib_host_to_net_u64 (0x11ULL); + + clib_memcpy (mp->dst_address, &tmp[0], 8); + clib_memcpy (&mp->dst_address[8], &tmp[1], 8); + + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); +} + +void +reset_fib (test_main_t * tm, u8 is_ip6) +{ + vl_api_reset_fib_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + 
mp->_vl_msg_id = ntohs (VL_API_RESET_FIB); + mp->client_index = tm->my_client_index; + mp->context = 0xdeadbeef; + mp->vrf_id = ntohl (11); + mp->is_ipv6 = is_ip6; + + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); +} + +void +dhcpv6_set_vss (test_main_t * tm) +{ + vl_api_dhcp_proxy_set_vss_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_DHCP_PROXY_SET_VSS); + mp->client_index = tm->my_client_index; + mp->context = 0xdeadbeef; + mp->oui = ntohl (6); + mp->fib_id = ntohl (60); + mp->is_add = 1; + mp->is_ipv6 = 1; + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); +} + +void +dhcpv4_set_vss (test_main_t * tm) +{ + vl_api_dhcp_proxy_set_vss_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_DHCP_PROXY_SET_VSS); + mp->client_index = tm->my_client_index; + mp->context = 0xdeadbeef; + mp->oui = ntohl (4); + mp->fib_id = ntohl (40); + mp->is_add = 1; + mp->is_ipv6 = 0; + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); +} + +void +dhcp_set_vss (test_main_t * tm) +{ + dhcpv4_set_vss (tm); + dhcpv6_set_vss (tm); +} + +void +dhcp_set_proxy (test_main_t * tm, int ipv6) +{ + vl_api_dhcp_proxy_config_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_DHCP_PROXY_CONFIG); + mp->client_index = tm->my_client_index; + mp->context = 0xdeadbeef; + mp->vrf_id = ntohl (0); + mp->is_ipv6 = ipv6; + mp->insert_circuit_id = 1; + mp->is_add = 1; + mp->dhcp_server[0] = 0x20; + mp->dhcp_server[1] = 0x01; + mp->dhcp_server[2] = 0xab; + mp->dhcp_server[3] = 0xcd; + mp->dhcp_server[4] = 0x12; + mp->dhcp_server[5] = 0x34; + mp->dhcp_server[6] = 0xfe; + mp->dhcp_server[7] = 0xdc; + mp->dhcp_server[14] = 0; + mp->dhcp_server[15] = 0x2; + + mp->dhcp_src_address[0] = 0x20; + mp->dhcp_src_address[1] = 0x01; + mp->dhcp_src_address[2] = 0xab; + mp->dhcp_src_address[3] = 0xcd; + 
mp->dhcp_src_address[4] = 0x12; + mp->dhcp_src_address[5] = 0x34; + mp->dhcp_src_address[6] = 0x56; + mp->dhcp_src_address[7] = 0x78; + mp->dhcp_src_address[14] = 0; + mp->dhcp_src_address[15] = 0x2; + + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); +} + +void +set_ip_flow_hash (test_main_t * tm, u8 is_ip6) +{ + vl_api_set_ip_flow_hash_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_SET_IP_FLOW_HASH); + mp->client_index = tm->my_client_index; + mp->context = 0xdeadbeef; + mp->vrf_id = 0; + mp->is_ipv6 = is_ip6; + mp->dst = 1; + mp->reverse = 1; + + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); +} + +void +ip6nd_ra_config (test_main_t * tm, int is_no) +{ + vl_api_sw_interface_ip6nd_ra_config_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + + mp->client_index = tm->my_client_index; + mp->context = 0xdeadbeef; + mp->sw_if_index = ntohl (5); + mp->is_no = is_no; + + mp->suppress = 1; + + + mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_IP6ND_RA_CONFIG); + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); +} + +void +ip6nd_ra_prefix (test_main_t * tm, int is_no) +{ + vl_api_sw_interface_ip6nd_ra_prefix_t *mp; + u64 tmp[2]; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + + mp->client_index = tm->my_client_index; + mp->context = 0xdeadbeef; + mp->sw_if_index = ntohl (5); + mp->is_no = is_no; + + mp->use_default = 1; + + + tmp[0] = clib_host_to_net_u64 (0xdb01000000000000ULL); + tmp[1] = clib_host_to_net_u64 (0x11ULL); + + + clib_memcpy (mp->address, &tmp[0], 8); + clib_memcpy (&mp->address[8], &tmp[1], 8); + + mp->address_length = 64; + + + mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_IP6ND_RA_PREFIX); + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); +} + +void +ip6_enable_disable (test_main_t * tm, int enable) +{ + vl_api_sw_interface_ip6_enable_disable_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset 
(mp, 0, sizeof (*mp)); + + mp->client_index = tm->my_client_index; + mp->context = 0xdeadbeef; + mp->sw_if_index = ntohl (5); + mp->enable = (enable == 1);; + + mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_IP6_ENABLE_DISABLE); + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); +} + +void +loop_create (test_main_t * tm) +{ + vl_api_create_loopback_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + + mp->_vl_msg_id = ntohs (VL_API_CREATE_LOOPBACK); + mp->client_index = tm->my_client_index; + mp->context = 0xdeadbeef; + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); +} + +void +ip6_set_link_local_address (test_main_t * tm) +{ + vl_api_sw_interface_ip6_set_link_local_address_t *mp; + u64 tmp[2]; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + + mp->client_index = tm->my_client_index; + mp->context = 0xdeadbeef; + mp->sw_if_index = ntohl (5); + + tmp[0] = clib_host_to_net_u64 (0xfe80000000000000ULL); + tmp[1] = clib_host_to_net_u64 (0x11ULL); + + clib_memcpy (mp->address, &tmp[0], 8); + clib_memcpy (&mp->address[8], &tmp[1], 8); + + mp->address_length = 64; + + mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_IP6_SET_LINK_LOCAL_ADDRESS); + + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); +} + + +void +set_flags (test_main_t * tm, int up_down) +{ + vl_api_sw_interface_set_flags_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + + mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_SET_FLAGS); + mp->client_index = tm->my_client_index; + mp->context = 0xdeadbeef; + mp->sw_if_index = ntohl (5); + mp->admin_up_down = up_down; + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); + +} + +void +l2_patch_add_del (test_main_t * tm, int is_add) +{ + vl_api_l2_patch_add_del_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_L2_PATCH_ADD_DEL); + mp->client_index = tm->my_client_index; + mp->context = 
0xdeadbeef; + mp->is_add = is_add; + mp->rx_sw_if_index = ntohl (1); + mp->tx_sw_if_index = ntohl (2); + + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); +} + +void +l2_xconnect (test_main_t * tm) +{ + vl_api_sw_interface_set_l2_xconnect_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_SET_L2_XCONNECT); + mp->client_index = tm->my_client_index; + mp->context = 0xdeadbeef; + mp->rx_sw_if_index = ntohl (5); + mp->tx_sw_if_index = ntohl (6); + mp->enable = 1; + + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); +} + +void +l2_bridge (test_main_t * tm) +{ + vl_api_sw_interface_set_l2_bridge_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_SET_L2_BRIDGE); + mp->client_index = tm->my_client_index; + mp->context = 0xdeadbeef; + mp->rx_sw_if_index = ntohl (5); + mp->bd_id = ntohl (6); + mp->bvi = ntohl (1); + mp->shg = ntohl (0); + mp->enable = 1; + + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); +} + +int +main (int argc, char **argv) +{ + api_main_t *am = &api_main; + test_main_t *tm = &test_main; + int ch; + + connect_to_vpe ("test_client"); + + tm->vl_input_queue = shmem_hdr->vl_input_queue; + tm->my_client_index = am->my_client_index; + + fformat (stdout, "Type 'h' for help, 'q' to quit...\n"); + + while (1) + { + ch = getchar (); + switch (ch) + { + case 'q': + goto done; + case 'd': + dump (tm); + break; + case 'L': + link_up_down_enable_disable (tm, 1 /* enable_disable */ ); + break; + case 'l': + link_up_down_enable_disable (tm, 0 /* enable_disable */ ); + break; + case 'S': + stats_enable_disable (tm, 1 /* enable_disable */ ); + break; + case 's': + stats_enable_disable (tm, 0 /* enable_disable */ ); + break; + case '3': + add_del_ip4_route (tm, 0 /* add */ ); + break; + case '4': + add_del_ip4_route (tm, 1 /* add */ ); + break; + case '5': + add_del_ip6_route (tm, 0 /* add */ ); + 
break; + case '6': + add_del_ip6_route (tm, 1 /* add */ ); + break; + case 'A': + add_del_interface_address (tm, 1 /* add */ ); + break; + case 'a': + add_del_interface_address (tm, 0 /* add */ ); + break; + case 'B': + add_del_v6_interface_address (tm, 1 /* add */ ); + break; + case 'b': + add_del_v6_interface_address (tm, 0 /* add */ ); + break; + case 'E': + l2_patch_add_del (tm, 1 /* is_add */ ); + break; + case 'e': + l2_patch_add_del (tm, 0 /* is_add */ ); + break; + case 'z': + del_all_interface_addresses (tm); + break; + case 't': + set_interface_table (tm, 0 /* is_ipv6 */ , + 11 /* my amp goes to 11 */ ); + break; + case 'T': + set_interface_table (tm, 1 /* is_ipv6 */ , + 12 /* my amp goes to 12 */ ); + break; + + case 'u': + create_vlan_subif (tm, 123); + break; + + case 'c': + connect_unix_tap (tm, "foo"); + break; + + case 'n': + add_ip4_neighbor (tm, 1 /* is_add */ ); + add_ip6_neighbor (tm, 1 /* is_add */ ); + break; + + case 'N': + add_ip4_neighbor (tm, 0 /* is_add */ ); + add_ip6_neighbor (tm, 0 /* is_add */ ); + break; + + case 'p': + add_del_proxy_arp (tm, 1 /* add */ ); + break; + + case 'i': + proxy_arp_intfc_enable_disable (tm, 1 /* enable */ ); + break; + + case 'O': + oam_events_enable_disable (tm, 0 /* enable */ ); + break; + + case 'o': + oam_events_enable_disable (tm, 1 /* enable */ ); + break; + + case '0': + oam_add_del (tm, 0 /* is_add */ ); + break; + + case '1': + oam_add_del (tm, 1 /* is_add */ ); + break; + + case 'r': + reset_fib (tm, 0 /* is_ip6 */ ); + break; + + case 'R': + reset_fib (tm, 1 /* is_ip6 */ ); + break; + + case 'j': + dhcp_set_vss (tm); + break; + + case 'k': + dhcp_set_proxy (tm, 0); + break; + + case 'K': + dhcp_set_proxy (tm, 1 /*ipv6 */ ); + break; + + case 'v': + set_ip_flow_hash (tm, 0 /* is_ip6 */ ); + break; + + case 'V': + ip6_set_link_local_address (tm); + break; + + case 'w': + ip6_enable_disable (tm, 1 /* enable */ ); + break; + + case 'W': + ip6_enable_disable (tm, 0 /* disable */ ); + break; + + case 
'x': + ip6nd_ra_config (tm, 0 /* is_no */ ); + break; + case 'X': + ip6nd_ra_config (tm, 1 /* is_no */ ); + break; + case 'y': + ip6nd_ra_prefix (tm, 0 /* is_no */ ); + break; + case 'Y': + ip6nd_ra_prefix (tm, 1 /* is_no */ ); + break; + + case '7': + loop_create (tm); + break; + + case 'F': + set_flags (tm, 1 /* up_down */ ); + break; + + case 'f': + set_flags (tm, 0 /* up_down */ ); + break; + + case '@': + l2_xconnect (tm); + break; + + case '#': + l2_bridge (tm); + break; + + case 'h': + fformat (stdout, "q=quit,d=dump,L=link evts on,l=link evts off\n"); + fformat (stdout, "S=stats on,s=stats off\n"); + fformat (stdout, "4=add v4 route, 3=del v4 route\n"); + fformat (stdout, "6=add v6 route, 5=del v6 route\n"); + fformat (stdout, "A=add v4 intfc route, a=del v4 intfc route\n"); + fformat (stdout, "B=add v6 intfc route, b=del v6 intfc route\n"); + fformat (stdout, "z=del all intfc routes\n"); + fformat (stdout, "t=set v4 intfc table, T=set v6 intfc table\n"); + fformat (stdout, "c=connect unix tap\n"); + fformat (stdout, + "j=set dhcpv4 and v6 link-address/option-82 params\n"); + fformat (stdout, "k=set dhcpv4 relay agent params\n"); + fformat (stdout, "K=set dhcpv6 relay agent params\n"); + fformat (stdout, "E=add l2 patch, e=del l2 patch\n"); + fformat (stdout, "V=ip6 set link-local address \n"); + fformat (stdout, "w=ip6 enable \n"); + fformat (stdout, "W=ip6 disable \n"); + fformat (stdout, "x=ip6 nd config \n"); + fformat (stdout, "X=no ip6 nd config\n"); + fformat (stdout, "y=ip6 nd prefix \n"); + fformat (stdout, "Y=no ip6 nd prefix\n"); + fformat (stdout, "@=l2 xconnect\n"); + fformat (stdout, "#=l2 bridge\n"); + + default: + break; + } + + } + +done: + + if (tm->link_events_on) + link_up_down_enable_disable (tm, 0 /* enable */ ); + if (tm->stats_on) + stats_enable_disable (tm, 0 /* enable */ ); + if (tm->oam_events_on) + oam_events_enable_disable (tm, 0 /* enable */ ); + + disconnect_from_vpe (); + exit (0); +} + +#undef vl_api_version +#define 
vl_api_version(n,v) static u32 vpe_api_version = v; +#include +#undef vl_api_version + +void +vl_client_add_api_signatures (vl_api_memclnt_create_t * mp) +{ + /* + * Send the main API signature in slot 0. This bit of code must + * match the checks in ../vpe/api/api.c: vl_msg_api_version_check(). + */ + mp->api_versions[0] = clib_host_to_net_u32 (vpe_api_version); +} + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/vpp/vpp/vpp-api/test_ha.c b/vpp/vpp/vpp-api/test_ha.c new file mode 100644 index 00000000..91364d8b --- /dev/null +++ b/vpp/vpp/vpp-api/test_ha.c @@ -0,0 +1,249 @@ +/* + *------------------------------------------------------------------ + * api.c - message handler registration + * + * Copyright (c) 2010 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *------------------------------------------------------------------ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#define f64_endian(a) +#define f64_print(a,b) + +#define vl_typedefs /* define message structures */ +#include +#undef vl_typedefs + +#define vl_endianfun /* define message structures */ +#include +#undef vl_endianfun + +/* instantiate all the print functions we know about */ +#define vl_print(handle, ...) +#define vl_printfun +#include +#undef vl_printfun + +vl_shmem_hdr_t *shmem_hdr; + +typedef struct +{ + u32 pings_sent; + u32 pings_replied; + volatile u32 signal_received; + + /* convenience */ + unix_shared_memory_queue_t *vl_input_queue; + u32 my_client_index; + svmdb_client_t *svmdb_client; +} test_main_t; + +test_main_t test_main; + +static void vl_api_control_ping_reply_t_handler + (vl_api_control_ping_reply_t * mp) +{ + test_main_t *tm = &test_main; + + fformat (stdout, "control ping reply from pid %d\n", ntohl (mp->vpe_pid)); + tm->pings_replied++; +} + +vlib_main_t vlib_global_main; +vlib_main_t **vlib_mains; + +void +vlib_cli_output (struct vlib_main_t *vm, char *fmt, ...) 
+{ + clib_warning ("BUG: vlib_cli_output callled..."); +} + +#define foreach_api_msg \ +_(CONTROL_PING_REPLY,control_ping_reply) + +void +ping (test_main_t * tm) +{ + vl_api_control_ping_t *mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_CONTROL_PING); + mp->client_index = tm->my_client_index; + mp->context = 0xdeadbeef; + + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp); +} + +static void +noop_handler (void *notused) +{ +} + +int +connect_to_vpe (char *name) +{ + int rv = 0; + test_main_t *tm = &test_main; + api_main_t *am = &api_main; + + rv = vl_client_connect_to_vlib ("/vpe-api", name, 32); + if (rv < 0) + return rv; + +#define _(N,n) \ + vl_msg_api_set_handlers(VL_API_##N, #n, \ + vl_api_##n##_t_handler, \ + noop_handler, \ + vl_api_##n##_t_endian, \ + vl_api_##n##_t_print, \ + sizeof(vl_api_##n##_t), 1); + foreach_api_msg; +#undef _ + + shmem_hdr = api_main.shmem_hdr; + tm->vl_input_queue = shmem_hdr->vl_input_queue; + tm->my_client_index = am->my_client_index; + return 0; +} + +int +disconnect_from_vpe (void) +{ + vl_client_disconnect_from_vlib (); + + return 0; +} + +void +signal_handler (int signo) +{ + test_main_t *tm = &test_main; + + tm->signal_received = 1; +} + + +int +main (int argc, char **argv) +{ + test_main_t *tm = &test_main; + api_main_t *am = &api_main; + u32 swt_pid = 0; + int connected = 0; + + signal (SIGINT, signal_handler); + + while (1) + { + if (tm->signal_received) + break; + + if (am->shmem_hdr) + swt_pid = am->shmem_hdr->vl_pid; + + /* If kill returns 0, the vpe-f process is alive */ + if (kill (swt_pid, 0) == 0) + { + /* Try to connect */ + if (connected == 0) + { + fformat (stdout, "Connect to VPE-f\n"); + if (connect_to_vpe ("test_ha_client") >= 0) + { + tm->pings_sent = 0; + tm->pings_replied = 0; + connected = 1; + } + else + { + fformat (stdout, "Connect failed, sleep and retry...\n"); + sleep (1); + continue; + } + } + tm->pings_sent++; + ping (tm); + + 
sleep (1); + + /* havent heard back in 3 seconds, disco / reco */ + if ((tm->pings_replied + 3) <= tm->pings_sent) + { + fformat (stdout, "VPE-f pid %d not responding\n", swt_pid); + swt_pid = 0; + disconnect_from_vpe (); + connected = 0; + } + } + else + { + if (connected) + { + fformat (stdout, "VPE-f pid %d died\n", swt_pid); + swt_pid = 0; + disconnect_from_vpe (); + connected = 0; + } + sleep (1); + } + } + + fformat (stdout, "Signal received, graceful exit\n"); + disconnect_from_vpe (); + exit (0); +} + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/vpp/vpp/vpp-api/vpe.api b/vpp/vpp/vpp-api/vpe.api new file mode 100644 index 00000000..d5818cb7 --- /dev/null +++ b/vpp/vpp/vpp-api/vpe.api @@ -0,0 +1,4848 @@ +/* + * Copyright (c) 2015-2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** \file + + This file defines vpe control-plane API messages which are generally + called through a shared memory interface. 
+*/ + +/* + * Note: API placement cleanup in progress + * If you're looking for interface APIs, please + * see .../vnet/vnet/{interface.api,interface_api.c} + * IP APIs: see .../vnet/vnet/ip/{ip.api, ip_api.c} + * TAP APIs: see .../vnet/vnet/unix/{tap.api, tap_api.c} + */ + +/** \brief Create a new subinterface with the given vlan id + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - software index of the new vlan's parent interface + @param vlan_id - vlan tag of the new interface +*/ +define create_vlan_subif +{ + u32 client_index; + u32 context; + u32 sw_if_index; + u32 vlan_id; +}; + +/** \brief Reply for the vlan subinterface create request + @param context - returned sender context, to match reply w/ request + @param retval - return code + @param sw_if_index - software index allocated for the new subinterface +*/ +define create_vlan_subif_reply +{ + u32 context; + i32 retval; + u32 sw_if_index; +}; + +/** \brief Enable or Disable MPLS on and interface + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - index of the interface + @param enable - if non-zero enable, else disable +*/ +define sw_interface_set_mpls_enable +{ + u32 client_index; + u32 context; + u32 sw_if_index; + u8 enable; +}; + +/** \brief Reply for MPLS state on an interface + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define sw_interface_set_mpls_enable_reply +{ + u32 context; + i32 retval; +}; + +/** \brief MPLS Route Add / del route + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param mr_label - The MPLS label value + @param mr_eos - The End of stack bit + @param mr_table_id - The MPLS table-id the route is added in + @param mr_classify_table_index - If this is a classify 
route, + this is the classify table index + @param mr_create_table_if_needed - If the MPLS or IP tables do not exist, + create them + @param mr_is_add - Is this a route add or delete + @param mr_is_classify - Is this route result a classify + @param mr_is_multipath - Is this route update a multipath - i.e. is this + a path addition to an existing route + @param mr_is_resolve_host - Recurse resolution constraint via a host prefix + @param mr_is_resolve_attached - Recurse resolution constraint via attached prefix + @param mr_next_hop_proto_is_ip4 - The next-hop is IPV4 + @param mr_next_hop_weight - The weight, for UCMP + @param mr_next_hop[16] - the nextop address + @param mr_next_hop_sw_if_index - the next-hop SW interface + @param mr_next_hop_table_id - the next-hop table-id (if appropriate) + @param mr_next_hop_n_out_labels - the number of labels in the label stack + @param mr_next_hop_out_label_stack - the next-hop output label stack, outer most first + @param next_hop_via_label - The next-hop is a resolved via a local label +*/ +define mpls_route_add_del +{ + u32 client_index; + u32 context; + u32 mr_label; + u8 mr_eos; + u32 mr_table_id; + u32 mr_classify_table_index; + u8 mr_create_table_if_needed; + u8 mr_is_add; + u8 mr_is_classify; + u8 mr_is_multipath; + u8 mr_is_resolve_host; + u8 mr_is_resolve_attached; + u8 mr_next_hop_proto_is_ip4; + u8 mr_next_hop_weight; + u8 mr_next_hop[16]; + u8 mr_next_hop_n_out_labels; + u32 mr_next_hop_sw_if_index; + u32 mr_next_hop_table_id; + u32 mr_next_hop_via_label; + u32 mr_next_hop_out_label_stack[mr_next_hop_n_out_labels]; +}; + +/** \brief Reply for MPLS route add / del request + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define mpls_route_add_del_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Dump MPLS fib table + @param client_index - opaque cookie to identify the sender +*/ +define mpls_fib_dump +{ + u32 client_index; + u32 context; +}; + +/** 
\brief FIB path + @param sw_if_index - index of the interface + @param weight - The weight, for UCMP + @param is_local - local if non-zero, else remote + @param is_drop - Drop the packet + @param is_unreach - Drop the packet and rate limit send ICMP unreachable + @param is_prohibit - Drop the packet and rate limit send ICMP prohibited + @param afi - the afi of the next hop, IP46_TYPE_IP4=1, IP46_TYPE_IP6=2 + @param next_hop[16] - the next hop address + + WARNING: this type is replicated, pending cleanup completion + +*/ +typeonly manual_print manual_endian define fib_path2 +{ + u32 sw_if_index; + u32 weight; + u8 is_local; + u8 is_drop; + u8 is_unreach; + u8 is_prohibit; + u8 afi; + u8 next_hop[16]; +}; + +/** \brief mpls FIB table response + @param table_id - MPLS fib table id + @param s_bit - End-of-stack bit + @param label - MPLS label value + @param count - the number of fib_path in path + @param path - array of of fib_path structures +*/ +manual_endian manual_print define mpls_fib_details +{ + u32 context; + u32 table_id; + u8 eos_bit; + u32 label; + u32 count; + vl_api_fib_path2_t path[count]; +}; + +/** \brief Bind/Unbind an MPLS local label to an IP prefix. i.e. create + a per-prefix label entry. + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param mb_mpls_table_id - The MPLS table-id the MPLS entry will be added in + @param mb_label - The MPLS label value to bind + @param mb_ip_table_id - The IP table-id of the IP prefix to bind to. + @param mb_create_table_if_needed - Create either/both tables if required. 
+ @param mb_is_bind - Bind or unbind + @param mb_is_ip4 - The prefix to bind to is IPv4 + @param mb_address_length - Length of IP prefix + @param mb_address[16] - IP prefix/ +*/ +define mpls_ip_bind_unbind +{ + u32 client_index; + u32 context; + u32 mb_mpls_table_id; + u32 mb_label; + u32 mb_ip_table_id; + u8 mb_create_table_if_needed; + u8 mb_is_bind; + u8 mb_is_ip4; + u8 mb_address_length; + u8 mb_address[16]; +}; + +/** \brief Reply for MPLS IP bind/unbind request + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define mpls_ip_bind_unbind_reply +{ + u32 context; + i32 retval; +}; + +/** \brief MPLS tunnel Add / del route + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param mt_is_add - Is this a route add or delete + @param mt_sw_if_index - The SW interface index of the tunnel to delete + @param mt_next_hop_proto_is_ip4 - The next-hop is IPV4 + @param mt_next_hop_weight - The weight, for UCMP + @param mt_next_hop[16] - the nextop address + @param mt_next_hop_sw_if_index - the next-hop SW interface + @param mt_next_hop_table_id - the next-hop table-id (if appropriate) + @param mt_next_hop_n_out_labels - the number of next-hop output labels + @param mt_next_hop_out_label_stack - the next-hop output label stack, outer most first +*/ +define mpls_tunnel_add_del +{ + u32 client_index; + u32 context; + u32 mt_sw_if_index; + u8 mt_is_add; + u8 mt_l2_only; + u8 mt_next_hop_proto_is_ip4; + u8 mt_next_hop_weight; + u8 mt_next_hop[16]; + u8 mt_next_hop_n_out_labels; + u32 mt_next_hop_sw_if_index; + u32 mt_next_hop_table_id; + u32 mt_next_hop_out_label_stack[mt_next_hop_n_out_labels]; +}; + +/** \brief Reply for MPLS tunnel add / del request + @param context - returned sender context, to match reply w/ request + @param retval - return code + @param sw_if_index - SW interface index of the tunnel created +*/ +define mpls_tunnel_add_del_reply +{ 
+ u32 context; + i32 retval; + u32 sw_if_index; +}; + +/** \brief Dump mpls eth tunnel table + @param client_index - opaque cookie to identify the sender + @param tunnel_index - eth tunnel identifier or -1 in case of all tunnels +*/ +define mpls_tunnel_dump +{ + u32 client_index; + u32 context; + i32 tunnel_index; +}; + +/** \brief mpls eth tunnel operational state response + @param tunnel_index - eth tunnel identifier + @param intfc_address - interface ipv4 addr + @param mask_width - interface ipv4 addr mask + @param hw_if_index - interface id + @param l2_only - + @param tunnel_dst_mac - + @param tx_sw_if_index - + @param encap_index - reference to mpls label table + @param nlabels - number of resolved labels + @param labels - resolved labels +*/ +define mpls_tunnel_details +{ + u32 context; + u32 tunnel_index; + u8 mt_l2_only; + u8 mt_sw_if_index; + u8 mt_next_hop_proto_is_ip4; + u8 mt_next_hop[16]; + u32 mt_next_hop_sw_if_index; + u32 mt_next_hop_table_id; + u32 mt_next_hop_n_labels; + u32 mt_next_hop_out_labels[mt_next_hop_n_labels]; +}; + +/** \brief Proxy ARP add / del request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param vrf_id - VRF / Fib table ID + @param is_add - 1 if adding the Proxy ARP range, 0 if deleting + @param low_address[4] - Low address of the Proxy ARP range + @param hi_address[4] - High address of the Proxy ARP range +*/ +define proxy_arp_add_del +{ + u32 client_index; + u32 context; + u32 vrf_id; + u8 is_add; + u8 low_address[4]; + u8 hi_address[4]; +}; + +/** \brief Reply for proxy arp add / del request + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define proxy_arp_add_del_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Proxy ARP add / del request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - 
Which interface to enable / disable Proxy Arp on + @param enable_disable - 1 to enable Proxy ARP on interface, 0 to disable +*/ +define proxy_arp_intfc_enable_disable +{ + u32 client_index; + u32 context; + u32 sw_if_index; + /* 1 = on, 0 = off */ + u8 enable_disable; +}; + +/** \brief Reply for Proxy ARP interface enable / disable request + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define proxy_arp_intfc_enable_disable_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Reset VRF (remove all routes etc) request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_ipv6 - 1 for IPv6 neighbor, 0 for IPv4 + @param vrf_id - ID of th FIB table / VRF to reset +*/ +define reset_vrf +{ + u32 client_index; + u32 context; + u8 is_ipv6; + u32 vrf_id; +}; + +/** \brief Reply for Reset VRF request + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define reset_vrf_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Is Address Reachable request - DISABLED + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param next_hop_sw_if_index - index of interface used to get to next hop + @param is_ipv6 - 1 for IPv6, 0 for IPv4 + @param is_error - address not found or does not match intf + @param address[] - Address in question +*/ +define is_address_reachable +{ + u32 client_index; /* (api_main_t *) am->my_client_index */ + u32 context; + u32 next_hop_sw_if_index; + u8 is_known; /* on reply, this is the answer */ + u8 is_ipv6; + u8 is_error; /* address not found or does not match intf */ + u8 address[16]; +}; + +/** \brief Want Stats, register for stats updates + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param enable_disable - 1 = 
enable stats, 0 = disable + @param pid - pid of process requesting stats updates +*/ +define want_stats +{ + u32 client_index; + u32 context; + u32 enable_disable; + u32 pid; +}; + +/** \brief Reply for Want Stats request + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define want_stats_reply +{ + u32 context; + i32 retval; +}; + +typeonly manual_print manual_endian define ip4_fib_counter +{ + u32 address; + u8 address_length; + u64 packets; + u64 bytes; +}; + +manual_print manual_endian define vnet_ip4_fib_counters +{ + u32 vrf_id; + u32 count; + vl_api_ip4_fib_counter_t c[count]; +}; + +typeonly manual_print manual_endian define ip6_fib_counter +{ + u64 address[2]; + u8 address_length; + u64 packets; + u64 bytes; +}; + +manual_print manual_endian define vnet_ip6_fib_counters +{ + u32 vrf_id; + u32 count; + vl_api_ip6_fib_counter_t c[count]; +}; + +/** \brief Request for a single block of summary stats + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request +*/ +define vnet_get_summary_stats +{ + u32 client_index; + u32 context; +}; + +/** \brief Reply for vnet_get_summary_stats request + @param context - sender context, to match reply w/ request + @param retval - return code for request + @param total_pkts - + @param total_bytes - + @param vector_rate - +*/ +define vnet_summary_stats_reply +{ + u32 context; + i32 retval; + u64 total_pkts[2]; + u64 total_bytes[2]; + f64 vector_rate; +}; + +/** \brief OAM event structure + @param dst_address[] - + @param state +*/ +define oam_event +{ + u8 dst_address[4]; + u8 state; +}; + +/** \brief Want OAM events request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param enable_disable- enable if non-zero, else disable + @param pid - pid of the requesting process +*/ +define want_oam_events +{ + u32 client_index; + u32 context; 
+ u32 enable_disable; + u32 pid; +}; + +/** \brief Want OAM events response + @param context - sender context, to match reply w/ request + @param retval - return code for the want oam stats request +*/ +define want_oam_events_reply +{ + u32 context; + i32 retval; +}; + +/** \brief OAM add / del target request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param vrf_id - vrf_id of the target + @param src_address[] - source address to use for the updates + @param dst_address[] - destination address of the target + @param is_add - add target if non-zero, else delete +*/ +define oam_add_del +{ + u32 client_index; + u32 context; + u32 vrf_id; + u8 src_address[4]; + u8 dst_address[4]; + u8 is_add; +}; + +/** \brief OAM add / del target response + @param context - sender context, to match reply w/ request + @param retval - return code of the request +*/ +define oam_add_del_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Reset fib table request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param vrf_id - vrf/table id of the fib table to reset + @param is_ipv6 - an ipv6 fib to reset if non-zero, else ipv4 +*/ +define reset_fib +{ + u32 client_index; + u32 context; + u32 vrf_id; + u8 is_ipv6; +}; + +/** \brief Reset fib response + @param context - sender context, to match reply w/ request + @param retval - return code for the reset bfib request +*/ +define reset_fib_reply +{ + u32 context; + i32 retval; +}; + +/** \brief DHCP Proxy config add / del request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param vrf_id - vrf id + @param if_ipv6 - ipv6 if non-zero, else ipv4 + @param is_add - add the config if non-zero, else delete + @param insert_circuit_id - option82 suboption 1 fib number + @param dhcp_server[] - server address + @param 
dhcp_src_address[] - +*/ +define dhcp_proxy_config +{ + u32 client_index; + u32 context; + u32 vrf_id; + u8 is_ipv6; + u8 is_add; + u8 insert_circuit_id; + u8 dhcp_server[16]; + u8 dhcp_src_address[16]; +}; + +/** \brief DHCP Proxy config response + @param context - sender context, to match reply w/ request + @param retval - return code for the request +*/ +define dhcp_proxy_config_reply +{ + u32 context; + i32 retval; +}; + +/** \brief DHCP Proxy set / unset vss request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param tbl_id - table id + @param oui - first part of vpn id + @param fib_id - second part of vpn id + @param is_ipv6 - ip6 if non-zero, else ip4 + @param is_add - set vss if non-zero, else delete +*/ +define dhcp_proxy_set_vss +{ + u32 client_index; + u32 context; + u32 tbl_id; + u32 oui; + u32 fib_id; + u8 is_ipv6; + u8 is_add; +}; + +/** \brief DHCP proxy set / unset vss response + @param context - sender context, to match reply w/ request + @param retval - return code for the request +*/ +define dhcp_proxy_set_vss_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Create loopback interface request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param mac_address - mac addr to assign to the interface if none-zero +*/ +define create_loopback +{ + u32 client_index; + u32 context; + u8 mac_address[6]; +}; + +/** \brief Create loopback interface response + @param context - sender context, to match reply w/ request + @param sw_if_index - sw index of the interface that was created + @param retval - return code for the request +*/ +define create_loopback_reply +{ + u32 context; + i32 retval; + u32 sw_if_index; +}; + +/** \brief Delete loopback interface request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param 
sw_if_index - sw index of the interface that was created +*/ +define delete_loopback +{ + u32 client_index; + u32 context; + u32 sw_if_index; +}; + +/** \brief Delete loopback interface response + @param context - sender context, to match reply w/ request + @param retval - return code for the request +*/ +define delete_loopback_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Control ping from client to api server request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request +*/ +define control_ping +{ + u32 client_index; + u32 context; +}; + +/** \brief Control ping from the client to the server response + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param retval - return code for the request + @param vpe_pid - the pid of the vpe, returned by the server +*/ +define control_ping_reply +{ + u32 context; + i32 retval; + u32 client_index; + u32 vpe_pid; +}; + +/** \brief Process a vpe parser cli string request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param cmd_in_shmem - pointer to cli command string +*/ +define cli_request +{ + u32 client_index; + u32 context; + u64 cmd_in_shmem; +}; +define cli_inband +{ + u32 client_index; + u32 context; + u32 length; + u8 cmd[length]; +}; + +/** \brief vpe parser cli string response + @param context - sender context, to match reply w/ request + @param retval - return code for request + @param reply_in_shmem - Reply string from cli processing if any +*/ +define cli_reply +{ + u32 context; + i32 retval; + u64 reply_in_shmem; +}; +define cli_inband_reply +{ + u32 context; + i32 retval; + u32 length; + u8 reply[length]; +}; + +/** \brief Set max allowed ARP or ip6 neighbor entries request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ 
request + @param is_ipv6 - neighbor limit if non-zero, else ARP limit + @param arp_neighbor_limit - the new limit, defaults are ~ 50k +*/ +define set_arp_neighbor_limit +{ + u32 client_index; + u32 context; + u8 is_ipv6; + u32 arp_neighbor_limit; +}; + +/** \brief Set max allowed ARP or ip6 neighbor entries response + @param context - sender context, to match reply w/ request + @param retval - return code for request +*/ +define set_arp_neighbor_limit_reply +{ + u32 context; + i32 retval; +}; + +/** \brief L2 interface patch add / del request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param rx_sw_if_index - receive side interface + @param tx_sw_if_index - transmit side interface + @param is_add - if non-zero set up the interface patch, else remove it +*/ +define l2_patch_add_del +{ + u32 client_index; + u32 context; + u32 rx_sw_if_index; + u32 tx_sw_if_index; + u8 is_add; +}; + +/** \brief L2 interface patch add / del response + @param context - sender context, to match reply w/ request + @param retval - return code for the request +*/ +define l2_patch_add_del_reply +{ + u32 context; + i32 retval; +}; + +/** \brief IPv6 segment routing tunnel add / del request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_add - add the tunnel if non-zero, else delete it + @param name[] - tunnel name (len. 64) + @param src_address[] - + @param dst_address[] - + @param dst_mask_width - + @param inner_vrf_id - + @param outer_vrf_id - + @param flags_net_byte_order - + @param n_segments - + @param n_tags - + @param segs_and_tags[] - + @param policy_name[] - name of policy to associate this tunnel to (len. 
64) +*/ +define sr_tunnel_add_del +{ + u32 client_index; + u32 context; + u8 is_add; + u8 name[64]; + u8 src_address[16]; + u8 dst_address[16]; + u8 dst_mask_width; + u32 inner_vrf_id; + u32 outer_vrf_id; + u16 flags_net_byte_order; + u8 n_segments; + u8 n_tags; + u8 policy_name[64]; + u8 segs_and_tags[0]; +}; + +/** \brief IPv6 segment routing tunnel add / del response + @param context - sender context, to match reply w/ request + @param retval - return value for request +*/ +define sr_tunnel_add_del_reply +{ + u32 context; + i32 retval; +}; + +/** \brief IPv6 segment routing policy add / del request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_add - add the tunnel if non-zero, else delete it + @param name[] - policy name (len. 64) + @param tunnel_names[] - +*/ +define sr_policy_add_del +{ + u32 client_index; + u32 context; + u8 is_add; + u8 name[64]; + u8 tunnel_names[0]; +}; + +/** \brief IPv6 segment routing policy add / del response + @param context - sender context, to match reply w/ request + @param retval - return value for request +*/ +define sr_policy_add_del_reply +{ + u32 context; + i32 retval; +}; + +/** \brief IPv6 segment routing multicast map to policy add / del request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_add - add the tunnel if non-zero, else delete it + @param multicast_address[] - IP6 multicast address + @param policy_name[] = policy name (len.64) +*/ +define sr_multicast_map_add_del +{ + u32 client_index; + u32 context; + u8 is_add; + u8 multicast_address[16]; + u8 policy_name[64]; +}; + +/** \brief IPv6 segment routing multicast map to policy add / del response + @param context - sender context, to match reply w/ request + @param retval - return value for request +*/ +define sr_multicast_map_add_del_reply +{ + u32 context; + i32 retval; +}; + +/** \brief 
Interface set vpath request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - interface used to reach neighbor + @param enable - if non-zero enable, else disable +*/ +define sw_interface_set_vpath +{ + u32 client_index; + u32 context; + u32 sw_if_index; + u8 enable; +}; + +/** \brief Interface set vpath response + @param context - sender context, to match reply w/ request + @param retval - return code for the request +*/ +define sw_interface_set_vpath_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Interface set vxlan-bypass request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - interface used to reach neighbor + @param is_ipv6 - if non-zero, enable ipv6-vxlan-bypass, else ipv4-vxlan-bypass + @param enable - if non-zero enable, else disable +*/ +define sw_interface_set_vxlan_bypass +{ + u32 client_index; + u32 context; + u32 sw_if_index; + u8 is_ipv6; + u8 enable; +}; + +/** \brief Interface set vxlan-bypass response + @param context - sender context, to match reply w/ request + @param retval - return code for the request +*/ +define sw_interface_set_vxlan_bypass_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Set L2 XConnect between two interfaces request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param rx_sw_if_index - Receive interface index + @param tx_sw_if_index - Transmit interface index + @param enable - enable xconnect if not 0, else set to L3 mode +*/ +define sw_interface_set_l2_xconnect +{ + u32 client_index; + u32 context; + u32 rx_sw_if_index; + u32 tx_sw_if_index; + u8 enable; +}; + +/** \brief Set L2 XConnect response + @param context - sender context, to match reply w/ request + @param retval - L2 XConnect request return code +*/ +define 
sw_interface_set_l2_xconnect_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Interface bridge mode request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param rx_sw_if_index - the interface + @param bd_id - bridge domain id + @param bvi - Setup interface as a bvi, bridge mode only + @param shg - Shared horizon group, for bridge mode only + @param enable - Enable beige mode if not 0, else set to L3 mode +*/ +define sw_interface_set_l2_bridge +{ + u32 client_index; + u32 context; + u32 rx_sw_if_index; + u32 bd_id; + u8 shg; + u8 bvi; + u8 enable; +}; + +/** \brief Interface bridge mode response + @param context - sender context, to match reply w/ request + @param retval - Bridge mode request return code +*/ +define sw_interface_set_l2_bridge_reply +{ + u32 context; + i32 retval; +}; + +/** \brief L2 FIB add entry request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param mac - the entry's mac address + @param bd_id - the entry's bridge domain id + @param sw_if_index - the interface + @param is_add - If non zero add the entry, else delete it + @param static_mac - + @param filter_mac - +*/ +define l2fib_add_del +{ + u32 client_index; + u32 context; + u64 mac; + u32 bd_id; + u32 sw_if_index; + u8 is_add; + u8 static_mac; + u8 filter_mac; + u8 bvi_mac; +}; + +/** \brief L2 FIB add entry response + @param context - sender context, to match reply w/ request + @param retval - return code for the add l2fib entry request +*/ +define l2fib_add_del_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Set L2 flags request !!! 
TODO - need more info, feature bits in l2_input.h + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - interface + @param is_set - if non-zero, set the bits, else clear them + @param feature_bitmap - non-zero bits to set or clear +*/ +define l2_flags +{ + u32 client_index; + u32 context; + u32 sw_if_index; + u8 is_set; + u32 feature_bitmap; +}; + +/** \brief Set L2 bits response + @param context - sender context, to match reply w/ request + @param retval - return code for the set l2 bits request +*/ +define l2_flags_reply +{ + u32 context; + i32 retval; + u32 resulting_feature_bitmap; +}; + +/** \brief Set bridge flags (such as L2_LEARN, L2_FWD, L2_FLOOD, + L2_UU_FLOOD, or L2_ARP_TERM) request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param bd_id - the bridge domain to set the flags for + @param is_set - if non-zero, set the flags, else clear them + @param feature_bitmap - bits that are non-zero to set or clear +*/ +define bridge_flags +{ + u32 client_index; + u32 context; + u32 bd_id; + u8 is_set; + u32 feature_bitmap; +}; + +/** \brief Set bridge flags response + @param context - sender context, to match reply w/ request + @param retval - return code for the set bridge flags request + @param resulting_feature_bitmap - the feature bitmap value after the request is implemented +*/ +define bridge_flags_reply +{ + u32 context; + i32 retval; + u32 resulting_feature_bitmap; +}; + +/** \brief Set bridge domain ip to mac entry request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param bd_id - the bridge domain to set the flags for + @param is_add - if non-zero, add the entry, else clear it + @param is_ipv6 - if non-zero, ipv6 address, else ipv4 address + @param mac_address - MAC address + @param +*/ +define 
bd_ip_mac_add_del +{ + u32 client_index; + u32 context; + u32 bd_id; + u8 is_add; + u8 is_ipv6; + u8 ip_address[16]; + u8 mac_address[6]; +}; + +/** \brief Set bridge domain ip to mac entry response + @param context - sender context, to match reply w/ request + @param retval - return code for the set bridge flags request +*/ +define bd_ip_mac_add_del_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Add/Delete classification table request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_add- if non-zero add the table, else delete it + @param del_chain - if non-zero delete the whole chain of tables + @param table_index - if add, reuturns index of the created table, else specifies the table to delete + @param nbuckets - number of buckets when adding a table + @param memory_size - memory size when adding a table + @param match_n_vectors - number of match vectors + @param next_table_index - index of next table + @param miss_next_index - index of miss table + @param current_data_flag - option to use current node's packet payload + as the starting point from where packets are classified, + This option is only valid for L2/L3 input ACL for now. + 0: by default, classify data from the buffer's start location + 1: classify packets from VPP node’s current data pointer + @param current_data_offset - a signed value to shift the start location of + the packet to be classified + For example, if input IP ACL node is used, L2 header’s first byte + can be accessible by configuring current_data_offset to -14 + if there is no vlan tag. + This is valid only if current_data_flag is set to 1. 
+ @param mask[] - match mask +*/ +define classify_add_del_table +{ + u32 client_index; + u32 context; + u8 is_add; + u8 del_chain; + u32 table_index; + u32 nbuckets; + u32 memory_size; + u32 skip_n_vectors; + u32 match_n_vectors; + u32 next_table_index; + u32 miss_next_index; + u32 current_data_flag; + i32 current_data_offset; + u8 mask[0]; +}; + +/** \brief Add/Delete classification table response + @param context - sender context, to match reply w/ request + @param retval - return code for the table add/del requst + @param new_table_index - for add, returned index of the new table + @param skip_n_vectors - for add, returned value of skip_n_vectors in table + @param match_n_vectors -for add, returned value of match_n_vectors in table +*/ +define classify_add_del_table_reply +{ + u32 context; + i32 retval; + u32 new_table_index; + u32 skip_n_vectors; + u32 match_n_vectors; +}; + +/** \brief Classify add / del session request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_add - add session if non-zero, else delete + @param table_index - index of the table to add/del the session, required + @param hit_next_index - for add, hit_next_index of new session, required + @param opaque_index - for add, opaque_index of new session + @param advance -for add, advance value for session + @param action - + 0: no action (by default) + metadata is not used. + 1: Classified IP packets will be looked up from the + specified ipv4 fib table (configured by metadata as VRF id). + Only valid for L3 input ACL node + 2: Classified IP packets will be looked up from the + specified ipv6 fib table (configured by metadata as VRF id). + Only valid for L3 input ACL node + @param metadata - valid only if action != 0 + VRF id if action is 1 or 2. 
+ @param match[] - for add, match value for session, required +*/ +define classify_add_del_session +{ + u32 client_index; + u32 context; + u8 is_add; + u32 table_index; + u32 hit_next_index; + u32 opaque_index; + i32 advance; + u8 action; + u32 metadata; + u8 match[0]; +}; + +/** \brief Classify add / del session response + @param context - sender context, to match reply w/ request + @param retval - return code for the add/del session request +*/ +define classify_add_del_session_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Set/unset the classification table for an interface request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_ipv6 - ipv6 if non-zero, else ipv4 + @param sw_if_index - interface to associate with the table + @param table_index - index of the table, if ~0 unset the table +*/ +define classify_set_interface_ip_table +{ + u32 client_index; + u32 context; + u8 is_ipv6; + u32 sw_if_index; + u32 table_index; /* ~0 => off */ +}; + +/** \brief Set/unset interface classification table response + @param context - sender context, to match reply w/ request + @param retval - return code +*/ +define classify_set_interface_ip_table_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Set/unset l2 classification tables for an interface request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - interface to set/unset tables for + @param ip4_table_index - ip4 index, use ~0 for all 3 indexes to unset + @param ip6_table_index - ip6 index + @param other_table_index - other index +*/ +define classify_set_interface_l2_tables +{ + u32 client_index; + u32 context; + u32 sw_if_index; + /* 3 x ~0 => off */ + u32 ip4_table_index; + u32 ip6_table_index; + u32 other_table_index; + u8 is_input; +}; + +/** \brief Set/unset l2 classification tables for an interface response + @param context 
- sender context, to match reply w/ request + @param retval - return code for the request +*/ +define classify_set_interface_l2_tables_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Get node index using name request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param node_name[] - name of the node +*/ +define get_node_index +{ + u32 client_index; + u32 context; + u8 node_name[64]; +}; + +/** \brief Get node index using name request + @param context - sender context, to match reply w/ request + @param retval - return code for the request + @param node_index - index of the desired node if found, else ~0 +*/ +define get_node_index_reply +{ + u32 context; + i32 retval; + u32 node_index; +}; + +/** \brief Set the next node for a given node request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param node_name[] - node to add the next node to + @param next_name[] - node to add as the next node +*/ +define add_node_next +{ + u32 client_index; + u32 context; + u8 node_name[64]; + u8 next_name[64]; +}; + +/** \brief IP Set the next node for a given node response + @param context - sender context, to match reply w/ request + @param retval - return code for the add next node request + @param next_index - the index of the next node if success, else ~0 +*/ +define add_node_next_reply +{ + u32 context; + i32 retval; + u32 next_index; +}; + +/** \brief DHCP Proxy config 2 add / del request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param rx_vrf_id - receive vrf id + @param server_vrf_id - server vrf id + @param if_ipv6 - ipv6 if non-zero, else ipv4 + @param is_add - add the config if non-zero, else delete + @param insert_circuit_id - option82 suboption 1 fib number + @param dhcp_server[] - server address + @param dhcp_src_address[] - 
+*/ +define dhcp_proxy_config_2 +{ + u32 client_index; + u32 context; + u32 rx_vrf_id; + u32 server_vrf_id; + u8 is_ipv6; + u8 is_add; + u8 insert_circuit_id; + u8 dhcp_server[16]; + u8 dhcp_src_address[16]; +}; + +/** \brief DHCP Proxy config 2 add / del response + @param context - sender context, to match reply w/ request + @param retval - return code for request +*/ +define dhcp_proxy_config_2_reply +{ + u32 context; + i32 retval; +}; + +/** \brief l2tpv3 tunnel interface create request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param client_address - remote client tunnel ip address + @param our_address - local tunnel ip address + @param is_ipv6 - ipv6 if non-zero, else ipv4 + @param local_session_id - local tunnel session id + @param remote_session_id - remote tunnel session id + @param local_cookie - local tunnel cookie + @param l2_sublayer_present - l2 sublayer is present in packets if non-zero + @param encap_vrf_id - fib identifier used for outgoing encapsulated packets +*/ +define l2tpv3_create_tunnel +{ + u32 client_index; + u32 context; + u8 client_address[16]; + u8 our_address[16]; + u8 is_ipv6; + u32 local_session_id; + u32 remote_session_id; + u64 local_cookie; + u64 remote_cookie; + u8 l2_sublayer_present; + u32 encap_vrf_id; +}; + +/** \brief l2tpv3 tunnel interface create response + @param context - sender context, to match reply w/ request + @param retval - return code for the request + @param sw_if_index - index of the new tunnel interface +*/ +define l2tpv3_create_tunnel_reply +{ + u32 context; + i32 retval; + u32 sw_if_index; +}; + +define l2tpv3_set_tunnel_cookies +{ + u32 client_index; + u32 context; + u32 sw_if_index; + u64 new_local_cookie; + u64 new_remote_cookie; +}; + +/** \brief L2TP tunnel set cookies response + @param context - sender context, to match reply w/ request + @param retval - return code for the request +*/ +define 
l2tpv3_set_tunnel_cookies_reply +{ + u32 context; + i32 retval; +}; + +define sw_if_l2tpv3_tunnel_details +{ + u32 context; + u32 sw_if_index; + u8 interface_name[64]; + u8 client_address[16]; + u8 our_address[16]; + u32 local_session_id; + u32 remote_session_id; + u64 local_cookie[2]; + u64 remote_cookie; + u8 l2_sublayer_present; +}; + +define sw_if_l2tpv3_tunnel_dump +{ + u32 client_index; + u32 context; +}; + +/** \brief L2 fib clear table request, clear all mac entries in the l2 fib + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request +*/ +define l2_fib_clear_table +{ + u32 client_index; + u32 context; +}; + +/** \brief L2 fib clear table response + @param context - sender context, to match reply w/ request + @param retval - return code for the request +*/ +define l2_fib_clear_table_reply +{ + u32 context; + i32 retval; +}; + +/** \brief L2 interface ethernet flow point filtering enable/disable request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - interface to enable/disable filtering on + @param enable_disable - if non-zero enable filtering, else disable +*/ +define l2_interface_efp_filter +{ + u32 client_index; + u32 context; + u32 sw_if_index; + u32 enable_disable; +}; + +/** \brief L2 interface ethernet flow point filtering response + @param context - sender context, to match reply w/ request + @param retval - return code for the request +*/ +define l2_interface_efp_filter_reply +{ + u32 context; + i32 retval; +}; + +define l2tpv3_interface_enable_disable +{ + u32 client_index; + u32 context; + u8 enable_disable; + u32 sw_if_index; +}; + +define l2tpv3_interface_enable_disable_reply +{ + u32 context; + i32 retval; +}; + +define l2tpv3_set_lookup_key +{ + u32 client_index; + u32 context; + /* 0 = ip6 src_address, 1 = ip6 dst_address, 2 = session_id */ + u8 key; +}; + +define 
l2tpv3_set_lookup_key_reply +{ + u32 context; + i32 retval; +}; + +define vxlan_add_del_tunnel +{ + u32 client_index; + u32 context; + u8 is_add; + u8 is_ipv6; + u8 src_address[16]; + u8 dst_address[16]; + u32 mcast_sw_if_index; + u32 encap_vrf_id; + u32 decap_next_index; + u32 vni; +}; + +define vxlan_add_del_tunnel_reply +{ + u32 context; + i32 retval; + u32 sw_if_index; +}; + +define vxlan_tunnel_dump +{ + u32 client_index; + u32 context; + u32 sw_if_index; +}; + +define vxlan_tunnel_details +{ + u32 context; + u32 sw_if_index; + u8 src_address[16]; + u8 dst_address[16]; + u32 mcast_sw_if_index; + u32 encap_vrf_id; + u32 decap_next_index; + u32 vni; + u8 is_ipv6; +}; + +define gre_add_del_tunnel +{ + u32 client_index; + u32 context; + u8 is_add; + u8 is_ipv6; + u8 teb; + u8 src_address[16]; + u8 dst_address[16]; + u32 outer_fib_id; +}; + +define gre_add_del_tunnel_reply +{ + u32 context; + i32 retval; + u32 sw_if_index; +}; + +define gre_tunnel_dump +{ + u32 client_index; + u32 context; + u32 sw_if_index; +}; + +define gre_tunnel_details +{ + u32 context; + u32 sw_if_index; + u8 is_ipv6; + u8 teb; + u8 src_address[16]; + u8 dst_address[16]; + u32 outer_fib_id; +}; + +/** \brief L2 interface vlan tag rewrite configure request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - interface the operation is applied to + @param vtr_op - Choose from l2_vtr_op_t enum values + @param push_dot1q - first pushed flag dot1q id set, else dot1ad + @param tag1 - Needed for any push or translate vtr op + @param tag2 - Needed for any push 2 or translate x-2 vtr ops +*/ +define l2_interface_vlan_tag_rewrite +{ + u32 client_index; + u32 context; + u32 sw_if_index; + u32 vtr_op; + u32 push_dot1q; // ethertype of first pushed tag is dot1q/dot1ad + u32 tag1; // first pushed tag + u32 tag2; // second pushed tag +}; + +/** \brief L2 interface vlan tag rewrite response + @param context - sender 
context, to match reply w/ request + @param retval - return code for the request +*/ +define l2_interface_vlan_tag_rewrite_reply +{ + u32 context; + i32 retval; +}; + +/** \brief vhost-user interface create request + @param client_index - opaque cookie to identify the sender + @param is_server - our side is socket server + @param sock_filename - unix socket filename, used to speak with frontend + @param use_custom_mac - enable or disable the use of the provided hardware address + @param mac_address - hardware address to use if 'use_custom_mac' is set +*/ +define create_vhost_user_if +{ + u32 client_index; + u32 context; + u8 is_server; + u8 sock_filename[256]; + u8 renumber; + u32 custom_dev_instance; + u8 use_custom_mac; + u8 mac_address[6]; + u8 tag[64]; +}; + +/** \brief vhost-user interface create response + @param context - sender context, to match reply w/ request + @param retval - return code for the request + @param sw_if_index - interface the operation is applied to +*/ +define create_vhost_user_if_reply +{ + u32 context; + i32 retval; + u32 sw_if_index; +}; + +/** \brief vhost-user interface modify request + @param client_index - opaque cookie to identify the sender + @param is_server - our side is socket server + @param sock_filename - unix socket filename, used to speak with frontend +*/ +define modify_vhost_user_if +{ + u32 client_index; + u32 context; + u32 sw_if_index; + u8 is_server; + u8 sock_filename[256]; + u8 renumber; + u32 custom_dev_instance; +}; + +/** \brief vhost-user interface modify response + @param context - sender context, to match reply w/ request + @param retval - return code for the request +*/ +define modify_vhost_user_if_reply +{ + u32 context; + i32 retval; +}; + +/** \brief vhost-user interface delete request + @param client_index - opaque cookie to identify the sender +*/ +define delete_vhost_user_if +{ + u32 client_index; + u32 context; + u32 sw_if_index; +}; + +/** \brief vhost-user interface delete response + @param context 
- sender context, to match reply w/ request + @param retval - return code for the request +*/ +define delete_vhost_user_if_reply +{ + u32 context; + i32 retval; +}; + +define create_subif +{ + u32 client_index; + u32 context; + u32 sw_if_index; + u32 sub_id; + + /* These fields map directly onto the subif template */ + u8 no_tags; + u8 one_tag; + u8 two_tags; + u8 dot1ad; // 0 = dot1q, 1=dot1ad + u8 exact_match; + u8 default_sub; + u8 outer_vlan_id_any; + u8 inner_vlan_id_any; + u16 outer_vlan_id; + u16 inner_vlan_id; +}; + +define create_subif_reply +{ + u32 context; + i32 retval; + u32 sw_if_index; +}; + +/** \brief show version + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request +*/ +define show_version +{ + u32 client_index; + u32 context; +}; + +/** \brief show version response + @param context - sender context, to match reply w/ request + @param retval - return code for the request + @param program - name of the program (vpe) + @param version - version of the program + @param build_directory - root of the workspace where the program was built +*/ +define show_version_reply +{ + u32 context; + i32 retval; + u8 program[32]; + u8 version[32]; + u8 build_date[32]; + u8 build_directory[256]; +}; + +/** \brief Vhost-user interface details structure (fix this) + @param sw_if_index - index of the interface + @param interface_name - name of interface + @param virtio_net_hdr_sz - net header size + @param features - interface features + @param is_server - vhost-user server socket + @param sock_filename - socket filename + @param num_regions - number of used memory regions +*/ +define sw_interface_vhost_user_details +{ + u32 context; + u32 sw_if_index; + u8 interface_name[64]; + u32 virtio_net_hdr_sz; + u64 features; + u8 is_server; + u8 sock_filename[256]; + u32 num_regions; + i32 sock_errno; +}; + +/* works */ +define sw_interface_vhost_user_dump +{ + u32 client_index; + u32 context; +}; + +/** 
\brief l2 fib table entry structure + @param bd_id - the l2 fib / bridge domain table id + @param mac - the entry's mac address + @param sw_if_index - index of the interface + @param static_mac - the entry is statically configured. + @param filter_mac - the entry is a mac filter entry. + @param bvi_mac - the mac address is a bridge virtual interface +*/ +define l2_fib_table_entry +{ + u32 context; + u32 bd_id; + u64 mac; + u32 sw_if_index; + u8 static_mac; + u8 filter_mac; + u8 bvi_mac; +}; + +/** \brief Dump l2 fib (aka bridge domain) table + @param client_index - opaque cookie to identify the sender + @param bd_id - the l2 fib / bridge domain table identifier +*/ +define l2_fib_table_dump +{ + u32 client_index; + u32 context; + u32 bd_id; +}; + +define vxlan_gpe_add_del_tunnel +{ + u32 client_index; + u32 context; + u8 is_ipv6; + u8 local[16]; + u8 remote[16]; + u32 encap_vrf_id; + u32 decap_vrf_id; + u8 protocol; + u32 vni; + u8 is_add; +}; + +define vxlan_gpe_add_del_tunnel_reply +{ + u32 context; + i32 retval; + u32 sw_if_index; +}; + +define vxlan_gpe_tunnel_dump +{ + u32 client_index; + u32 context; + u32 sw_if_index; +}; + +define vxlan_gpe_tunnel_details +{ + u32 context; + u32 sw_if_index; + u8 local[16]; + u8 remote[16]; + u32 vni; + u8 protocol; + u32 encap_vrf_id; + u32 decap_vrf_id; + u8 is_ipv6; +}; + +/** \brief add or delete locator_set + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_add - add address if non-zero, else delete + @param locator_set_name - locator name + @param locator_num - number of locators + @param locators - LISP locator records + Structure of one locator record is as follows: + + define locator_t { + u32 sw_if_index; + u8 priority; + u8 weight; + } +*/ +define lisp_add_del_locator_set +{ + u32 client_index; + u32 context; + u8 is_add; + u8 locator_set_name[64]; + u32 locator_num; + u8 locators[0]; +}; + +/** \brief Reply for locator_set add/del 
+ @param context - returned sender context, to match reply w/ request + @param retval - return code + @param ls_index - locator set index +*/ +define lisp_add_del_locator_set_reply +{ + u32 context; + i32 retval; + u32 ls_index; +}; + +/** \brief add or delete locator for locator_set + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_add - add address if non-zero, else delete + @param locator_set_name - name of locator_set to add/del locator + @param sw_if_index - index of the interface + @param priority - priority of the lisp locator + @param weight - weight of the lisp locator +*/ +define lisp_add_del_locator +{ + u32 client_index; + u32 context; + u8 is_add; + u8 locator_set_name[64]; + u32 sw_if_index; + u8 priority; + u8 weight; +}; + +/** \brief Reply for locator add/del + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define lisp_add_del_locator_reply +{ + u32 context; + i32 retval; +}; + +/** \brief add or delete lisp eid-table + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_add - add address if non-zero, else delete + @param eid_type: + 0 : ipv4 + 1 : ipv6 + 2 : mac + @param eid - EID can be ip4, ip6 or mac + @param prefix_len - prefix len + @param locator_set_name - name of locator_set to add/del eid-table + @param vni - virtual network instance + @param key_id + HMAC_NO_KEY 0 + HMAC_SHA_1_96 1 + HMAC_SHA_256_128 2 + @param key - secret key +*/ +define lisp_add_del_local_eid +{ + u32 client_index; + u32 context; + u8 is_add; + u8 eid_type; + u8 eid[16]; + u8 prefix_len; + u8 locator_set_name[64]; + u32 vni; + u16 key_id; + u8 key[64]; +}; + +/** \brief Reply for local_eid add/del + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define lisp_add_del_local_eid_reply +{ + u32 
context; + i32 retval; +}; + +/** \brief add or delete lisp gpe tunnel + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_add - add address if non-zero, else delete + @param eid_type - + 0 : ipv4 + 1 : ipv6 + 2 : mac + @param rmt_eid - remote eid + @param lcl_eid - local eid + @param rmt_len - remote prefix len + @param lcl_len - local prefix len + @param vni - virtual network identifier + @param dp_table - vrf/bridge domain id + @param loc_num - number of locators + @param lcl_locs - array of local locators + @param rmt_locs - array of remote locators + @param action - negative action when 0 locators configured +*/ +define lisp_gpe_add_del_fwd_entry +{ + u32 client_index; + u32 context; + u8 is_add; + u8 eid_type; + u8 rmt_eid[16]; + u8 lcl_eid[16]; + u8 rmt_len; + u8 lcl_len; + u32 vni; + u32 dp_table; + u32 loc_num; + u8 lcl_locs[loc_num]; + u8 rmt_locs[loc_num]; + u8 action; +}; + +/** \brief Reply for gpe_fwd_entry add/del + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define lisp_gpe_add_del_fwd_entry_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Add/delete map server + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_add - add address if non-zero; delete otherwise + @param is_ipv6 - if non-zero the address is ipv6, else ipv4 + @param ip_address - map server IP address +*/ +define lisp_add_del_map_server +{ + u32 client_index; + u32 context; + u8 is_add; + u8 is_ipv6; + u8 ip_address[16]; +}; + +/** \brief Reply for lisp_add_del_map_server + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define lisp_add_del_map_server_reply +{ + u32 context; + i32 retval; +}; + +/** \brief add or delete map-resolver + @param client_index - opaque cookie to identify the sender + @param 
context - sender context, to match reply w/ request + @param is_add - add address if non-zero, else delete + @param is_ipv6 - if non-zero the address is ipv6, else ipv4 + @param ip_address - array of address bytes +*/ +define lisp_add_del_map_resolver +{ + u32 client_index; + u32 context; + u8 is_add; + u8 is_ipv6; + u8 ip_address[16]; +}; + +/** \brief Reply for map_resolver add/del + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define lisp_add_del_map_resolver_reply +{ + u32 context; + i32 retval; +}; + +/** \brief enable or disable lisp-gpe protocol + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_en - enable protocol if non-zero, else disable +*/ +define lisp_gpe_enable_disable +{ + u32 client_index; + u32 context; + u8 is_en; +}; + +/** \brief Reply for gpe enable/disable + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define lisp_gpe_enable_disable_reply +{ + u32 context; + i32 retval; +}; + +/** \brief enable or disable LISP feature + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_en - enable protocol if non-zero, else disable +*/ +define lisp_enable_disable +{ + u32 client_index; + u32 context; + u8 is_en; +}; + +/** \brief Reply for gpe enable/disable + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define lisp_enable_disable_reply +{ + u32 context; + i32 retval; +}; + +/** \brief add or delete gpe_iface + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_add - add address if non-zero, else delete +*/ +define lisp_gpe_add_del_iface +{ + u32 client_index; + u32 context; + u8 is_add; + u8 is_l2; + u32 dp_table; + u32 vni; +}; + +/** \brief 
Reply for gpe_iface add/del + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define lisp_gpe_add_del_iface_reply +{ + u32 context; + i32 retval; +}; + +/** \brief configure or disable LISP PITR node + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param ls_name - locator set name + @param is_add - add locator set if non-zero, else disable pitr +*/ +define lisp_pitr_set_locator_set +{ + u32 client_index; + u32 context; + u8 is_add; + u8 ls_name[64]; +}; + +/** \brief Reply for lisp_pitr_set_locator_set + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define lisp_pitr_set_locator_set_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Get state of LISP RLOC probing + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request +*/ +define show_lisp_rloc_probe_state +{ + u32 client_index; + u32 context; +}; + +/** \brief Reply for show_lisp_rloc_probe_state + @param context - returned sender context, to match reply w/ request + @param retval - return code + @param is_enabled - state of RLOC probing +*/ +define show_lisp_rloc_probe_state_reply +{ + u32 context; + i32 retval; + u8 is_enabled; +}; + +/** \brief enable/disable LISP RLOC probing + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_enable - enable if non-zero; disable otherwise +*/ +define lisp_rloc_probe_enable_disable +{ + u32 client_index; + u32 context; + u8 is_enabled; +}; + +/** \brief Reply for lisp_rloc_probe_enable_disable + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define lisp_rloc_probe_enable_disable_reply +{ + u32 context; + i32 retval; +}; + +/** \brief enable/disable LISP map-register + @param client_index 
- opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_enable - enable if non-zero; disable otherwise +*/ +define lisp_map_register_enable_disable +{ + u32 client_index; + u32 context; + u8 is_enabled; +}; + +/** \brief Reply for lisp_map_register_enable_disable + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define lisp_map_register_enable_disable_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Get state of LISP map-register + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request +*/ +define show_lisp_map_register_state +{ + u32 client_index; + u32 context; +}; + +/** \brief Reply for show_lisp_map_register_state + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define show_lisp_map_register_state_reply +{ + u32 context; + i32 retval; + u8 is_enabled; +}; + +/** \brief set LISP map-request mode. Based on configuration VPP will send + src/dest or just normal destination map requests. + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param mode - new map-request mode. 
Supported values are: + 0 - destination only + 1 - source/destination +*/ +define lisp_map_request_mode +{ + u32 client_index; + u32 context; + u8 mode; +}; + +/** \brief Reply for lisp_map_request_mode + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define lisp_map_request_mode_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Request for LISP map-request mode + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request +*/ +define show_lisp_map_request_mode +{ + u32 client_index; + u32 context; +}; + +/** \brief Reply for show_lisp_map_request_mode + @param context - returned sender context, to match reply w/ request + @param retval - return code + @param mode - map-request mode +*/ +define show_lisp_map_request_mode_reply +{ + u32 context; + i32 retval; + u8 mode; +}; + +/** \brief add or delete remote static mapping + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_add - add address if non-zero, else delete + @param is_src_dst - flag indicating src/dst based routing policy + @param del_all - if set, delete all remote mappings + @param vni - virtual network instance + @param action - negative map-reply action + @param eid_type - + 0 : ipv4 + 1 : ipv6 + 2 : mac + @param deid - dst EID + @param seid - src EID, valid only if is_src_dst is enabled + @param rloc_num - number of remote locators + @param rlocs - remote locator records + Structure of remote locator: + + define rloc_t { + u8 is_ip4; + u8 priority; + u8 weight; + u8 addr[16]; + } +*/ +define lisp_add_del_remote_mapping +{ + u32 client_index; + u32 context; + u8 is_add; + u8 is_src_dst; + u8 del_all; + u32 vni; + u8 action; + u8 eid_type; + u8 eid[16]; + u8 eid_len; + u8 seid[16]; + u8 seid_len; + u32 rloc_num; + u8 rlocs[0]; +}; + +/** \brief Reply for lisp_add_del_remote_mapping + @param context - 
returned sender context, to match reply w/ request + @param retval - return code +*/ +define lisp_add_del_remote_mapping_reply +{ + u32 context; + i32 retval; +}; + +/** \brief add or delete LISP adjacency + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_add - add address if non-zero, else delete + @param vni - virtual network instance + @param eid_type - + 0 : ipv4 + 1 : ipv6 + 2 : mac + @param reid - remote EID + @param leid - local EID +*/ +define lisp_add_del_adjacency +{ + u32 client_index; + u32 context; + u8 is_add; + u32 vni; + u8 eid_type; + u8 reid[16]; + u8 leid[16]; + u8 reid_len; + u8 leid_len; +}; + +/** \brief Reply for lisp_add_del_adjacency + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define lisp_add_del_adjacency_reply +{ + u32 context; + i32 retval; +}; + +/** \brief add or delete map request itr rlocs + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_add - add address if non-zero, else delete + @param locator_set_name - locator set name +*/ +define lisp_add_del_map_request_itr_rlocs +{ + u32 client_index; + u32 context; + u8 is_add; + u8 locator_set_name[64]; +}; + +/** \brief Reply for lisp_add_del_map_request_itr_rlocs + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ + +define lisp_add_del_map_request_itr_rlocs_reply +{ + u32 context; + i32 retval; +}; + +/** \brief map/unmap vni/bd_index to vrf + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_add - add or delete mapping + @param dp_table - virtual network id/bridge domain index + @param vrf - vrf +*/ +define lisp_eid_table_add_del_map +{ + u32 client_index; + u32 context; + u8 is_add; + u32 vni; + u32 dp_table; + u8 
is_l2; +}; + +/** \brief Reply for lisp_eid_table_add_del_map + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define lisp_eid_table_add_del_map_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Request for map lisp locator status + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param locator_set_index - index of locator_set + @param ls_name - locator set name + @param is_index_set - flag indicating whether ls_name or ls_index is set + */ +define lisp_locator_dump +{ + u32 client_index; + u32 context; + u32 ls_index; + u8 ls_name[64]; + u8 is_index_set; +}; + +/** \brief LISP locator_set status + @param local - if is set, then locator is local + @param locator_set_name - name of the locator_set + @param sw_if_index - sw_if_index of the locator + @param priority - locator priority + @param weight - locator weight + */ +define lisp_locator_details +{ + u32 context; + u8 local; + u32 sw_if_index; + u8 is_ipv6; + u8 ip_address[16]; + u8 priority; + u8 weight; +}; + +/** \brief LISP locator_set status + @param context - sender context, to match reply w/ request + @param ls_index - locator set index + @param ls_name - name of the locator set + */ +define lisp_locator_set_details +{ + u32 context; + u32 ls_index; + u8 ls_name[64]; +}; + +/** \brief Request for locator_set summary status + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param filter - filter type + Supported values: + 0: all locator sets + 1: local locator sets + 2: remote locator sets + */ +define lisp_locator_set_dump +{ + u32 client_index; + u32 context; + u8 filter; +}; + +/** \brief Dump lisp eid-table + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param locator_set_index - index of locator_set, if ~0 then the 
mapping + is negative + @param action - negative map request action + @param is_local - local if non-zero, else remote + @param eid_type: + 0 : ipv4 + 1 : ipv6 + 2 : mac + @param is_src_dst - EID is type of source/destination + @param eid - EID can be ip4, ip6 or mac + @param eid_prefix_len - prefix length + @param seid - source EID can be ip4, ip6 or mac + @param seid_prefix_len - source prefix length + @param vni - virtual network instance + @param ttl - time to live + @param authoritative - authoritative + @param key_id + HMAC_NO_KEY 0 + HMAC_SHA_1_96 1 + HMAC_SHA_256_128 2 + @param key - secret key +*/ + +define lisp_eid_table_details +{ + u32 context; + u32 locator_set_index; + u8 action; + u8 is_local; + u8 eid_type; + u8 is_src_dst; + u32 vni; + u8 eid[16]; + u8 eid_prefix_len; + u8 seid[16]; + u8 seid_prefix_len; + u32 ttl; + u8 authoritative; + u16 key_id; + u8 key[64]; +}; + +/** \brief Request for eid table summary status + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param eid_set - if non-zero request info about specific mapping + @param vni - virtual network instance; valid only if eid_set != 0 + @param prefix_length - prefix length if EID is IP address; + valid only if eid_set != 0 + @param eid_type - EID type; valid only if eid_set != 0 + Supported values: + 0: EID is IPv4 + 1: EID is IPv6 + 2: EID is ethernet address + @param eid - endpoint identifier + @param filter - filter type; + Support values: + 0: all eid + 1: local eid + 2: remote eid + */ +define lisp_eid_table_dump +{ + u32 client_index; + u32 context; + u8 eid_set; + u8 prefix_length; + u32 vni; + u8 eid_type; + u8 eid[16]; + u8 filter; +}; + +/** \brief LISP adjacency + @param eid_type - + 0 : ipv4 + 1 : ipv6 + 2 : mac + @param reid - remote EID + @param leid - local EID + @param reid_prefix_len - remote EID IP prefix length + @param leid_prefix_len - local EID IP prefix length + */ +typeonly manual_print 
manual_endian define lisp_adjacency +{ + u8 eid_type; + u8 reid[16]; + u8 leid[16]; + u8 reid_prefix_len; + u8 leid_prefix_len; +}; + +/** \brief LISP adjacency reply + @param count - number of adjacencies + @param adjacencies - array of adjacencies + */ +manual_endian manual_print define lisp_adjacencies_get_reply +{ + u32 context; + i32 retval; + u32 count; + vl_api_lisp_adjacency_t adjacencies[count]; +}; + +/** \brief Request for LISP adjacencies + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param vni - filter adjacencies by VNI + */ +define lisp_adjacencies_get +{ + u32 client_index; + u32 context; + u32 vni; +}; + +/** \brief Shows relationship between vni and vrf/bd + @param dp_table - VRF index or bridge domain index + @param vni - vitual network instance + */ +define lisp_eid_table_map_details +{ + u32 context; + u32 vni; + u32 dp_table; +}; + +/** \brief Request for lisp_eid_table_map_details + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_l2 - if set dump vni/bd mappings else vni/vrf + */ +define lisp_eid_table_map_dump +{ + u32 client_index; + u32 context; + u8 is_l2; +}; + +/** \brief Dumps all VNIs used in mappings + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + */ +define lisp_eid_table_vni_dump +{ + u32 client_index; + u32 context; +}; + +/** \brief reply to lisp_eid_table_vni_dump + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param vni - virtual network instance + */ +define lisp_eid_table_vni_details +{ + u32 client_index; + u32 context; + u32 vni; +}; + +define lisp_gpe_tunnel_details +{ + u32 context; + u32 tunnels; + u8 is_ipv6; + u8 source_ip[16]; + u8 destination_ip[16]; + u32 encap_fib_id; + u32 decap_fib_id; + u32 dcap_next; + 
u8 lisp_ver; + u8 next_protocol; + u8 flags; + u8 ver_res; + u8 res; + u32 iid; +}; + +/** \brief Request for gpe tunnel summary status + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + */ +define lisp_gpe_tunnel_dump +{ + u32 client_index; + u32 context; +}; + +/** \brief LISP map resolver status + @param is_ipv6 - if non-zero the address is ipv6, else ipv4 + @param ip_address - array of address bytes + */ +define lisp_map_resolver_details +{ + u32 context; + u8 is_ipv6; + u8 ip_address[16]; +}; + +/** \brief Request for map resolver summary status + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + */ +define lisp_map_resolver_dump +{ + u32 client_index; + u32 context; +}; + +/** \brief LISP map server details + @param is_ipv6 - if non-zero the address is ipv6, else ipv4 + @param ip_address - array of address bytes + */ +define lisp_map_server_details +{ + u32 context; + u8 is_ipv6; + u8 ip_address[16]; +}; + +/** \brief Request for map server summary status + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + */ +define lisp_map_server_dump +{ + u32 client_index; + u32 context; +}; + +/** \brief Request for lisp-gpe protocol status + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request +*/ +define show_lisp_status +{ + u32 client_index; + u32 context; +}; + +/** \brief Status of lisp, enable or disable + @param context - sender context, to match reply w/ request + @param feature_status - lisp enable if non-zero, else disable + @param gpe_status - lisp enable if non-zero, else disable +*/ +define show_lisp_status_reply +{ + u32 context; + i32 retval; + u8 feature_status; + u8 gpe_status; +}; + +/** \brief Get LISP map request itr rlocs status + @param context - sender context, to 
match reply w/ request + @param locator_set_name - name of the locator_set + */ +define lisp_get_map_request_itr_rlocs +{ + u32 client_index; + u32 context; +}; + +/** \brief Request for map request itr rlocs summary status + */ +define lisp_get_map_request_itr_rlocs_reply +{ + u32 context; + i32 retval; + u8 locator_set_name[64]; +}; + +/** \brief Request for lisp pitr status + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request +*/ +define show_lisp_pitr +{ + u32 client_index; + u32 context; +}; + +/** \brief Status of lisp pitr, enable or disable + @param context - sender context, to match reply w/ request + @param status - lisp pitr enable if non-zero, else disable + @param locator_set_name - name of the locator_set +*/ +define show_lisp_pitr_reply +{ + u32 context; + i32 retval; + u8 status; + u8 locator_set_name[64]; +}; + +/* Gross kludge, DGMS */ +define interface_name_renumber +{ + u32 client_index; + u32 context; + u32 sw_if_index; + u32 new_show_dev_instance; +}; + +define interface_name_renumber_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Register for ip4 arp resolution events + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param enable_disable - 1 => register for events, 0 => cancel registration + @param pid - sender's pid + @param address - the exact ip4 address of interest +*/ +define want_ip4_arp_events +{ + u32 client_index; + u32 context; + u8 enable_disable; + u32 pid; + u32 address; +}; + +/** \brief Reply for interface events registration + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define want_ip4_arp_events_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Tell client about an ip4 arp resolution event + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + 
@param address - the exact ip4 address of interest + @param pid - client pid registered to receive notification + @param sw_if_index - interface which received ARP packet + @param new_mac - the new mac address + @param mac_ip - 0: resolution event, 1: mac/ip binding in bd +*/ +define ip4_arp_event +{ + u32 client_index; + u32 context; + u32 address; + u32 pid; + u32 sw_if_index; + u8 new_mac[6]; + u8 mac_ip; +}; + +/** \brief Register for ip6 nd resolution events + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param enable_disable - 1 => register for events, 0 => cancel registration + @param pid - sender's pid + @param address - the exact ip6 address of interest +*/ +define want_ip6_nd_events +{ + u32 client_index; + u32 context; + u8 enable_disable; + u32 pid; + u8 address[16]; +}; + +/** \brief Reply for ip6 nd resolution events registration + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define want_ip6_nd_events_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Tell client about an ip6 nd resolution or mac/ip event + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param pid - client pid registered to receive notification + @param sw_if_index - interface which received ARP packet + @param address - the exact ip6 address of interest + @param new_mac - the new mac address + @param mac_ip - 0: resolution event, 1: mac/ip binding in bd +*/ +define ip6_nd_event +{ + u32 client_index; + u32 context; + u32 pid; + u32 sw_if_index; + u8 address[16]; + u8 new_mac[6]; + u8 mac_ip; +}; + +/** \brief L2 bridge domain add or delete request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param bd_id - the bridge domain to create + @param flood - enable/disable bcast/mcast flooding in the bd 
+ @param uu_flood - enable/disable uknown unicast flood in the bd + @param forward - enable/disable forwarding on all interfaces in the bd + @param learn - enable/disable learning on all interfaces in the bd + @param arp_term - enable/disable arp termination in the bd + @param mac_age - mac aging time in min, 0 for disabled + @param is_add - add or delete flag +*/ +define bridge_domain_add_del +{ + u32 client_index; + u32 context; + u32 bd_id; + u8 flood; + u8 uu_flood; + u8 forward; + u8 learn; + u8 arp_term; + u8 mac_age; + u8 is_add; +}; + +/** \brief L2 bridge domain add or delete response + @param context - sender context, to match reply w/ request + @param retval - return code for the set bridge flags request +*/ +define bridge_domain_add_del_reply +{ + u32 context; + i32 retval; +}; + +/** \brief L2 bridge domain request operational state details + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param bd_id - the bridge domain id desired or ~0 to request all bds +*/ +define bridge_domain_dump +{ + u32 client_index; + u32 context; + u32 bd_id; +}; + +/** \brief L2 bridge domain operational state response + @param bd_id - the bridge domain id + @param flood - bcast/mcast flooding state on all interfaces in the bd + @param uu_flood - uknown unicast flooding state on all interfaces in the bd + @param forward - forwarding state on all interfaces in the bd + @param learn - learning state on all interfaces in the bd + @param arp_term - arp termination state on all interfaces in the bd + @param mac_age - mac aging time in min, 0 for disabled + @param n_sw_ifs - number of sw_if_index's in the domain +*/ +define bridge_domain_details +{ + u32 context; + u32 bd_id; + u8 flood; + u8 uu_flood; + u8 forward; + u8 learn; + u8 arp_term; + u8 mac_age; + u32 bvi_sw_if_index; + u32 n_sw_ifs; +}; + +/** \brief L2 bridge domain sw interface operational state response + @param bd_id - the bridge domain id 
+ @param sw_if_index - sw_if_index in the domain + @param shg - split horizon group for the interface +*/ +define bridge_domain_sw_if_details +{ + u32 context; + u32 bd_id; + u32 sw_if_index; + u8 shg; +}; + +/** \brief DHCP Client config add / del request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - index of the interface for DHCP client + @param hostname - hostname + @param is_add - add the config if non-zero, else delete + @param want_dhcp_event - DHCP event sent to the sender + via dhcp_compl_event API message if non-zero + @param pid - sender's pid +*/ +define dhcp_client_config +{ + u32 client_index; + u32 context; + u32 sw_if_index; + u8 hostname[64]; + u8 is_add; + u8 want_dhcp_event; + u32 pid; +}; + +/** \brief DHCP Client config response + @param context - sender context, to match reply w/ request + @param retval - return code for the request +*/ +define dhcp_client_config_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Set/unset input ACL interface + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - interface to set/unset input ACL + @param ip4_table_index - ip4 classify table index (~0 for skip) + @param ip6_table_index - ip6 classify table index (~0 for skip) + @param l2_table_index - l2 classify table index (~0 for skip) + @param is_add - Set input ACL if non-zero, else unset + Note: User is recommended to use just one valid table_index per call. 
+ (ip4_table_index, ip6_table_index, or l2_table_index) +*/ +define input_acl_set_interface +{ + u32 client_index; + u32 context; + u32 sw_if_index; + u32 ip4_table_index; + u32 ip6_table_index; + u32 l2_table_index; + u8 is_add; +}; + +/** \brief Set/unset input ACL interface response + @param context - sender context, to match reply w/ request + @param retval - return code for the request +*/ +define input_acl_set_interface_reply +{ + u32 context; + i32 retval; +}; + +/** \brief IPsec: Add/delete Security Policy Database + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_add - add SPD if non-zero, else delete + @param spd_id - SPD instance id (control plane allocated) +*/ + +define ipsec_spd_add_del +{ + u32 client_index; + u32 context; + u8 is_add; + u32 spd_id; +}; + +/** \brief Reply for IPsec: Add/delete Security Policy Database entry + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ + +define ipsec_spd_add_del_reply +{ + u32 context; + i32 retval; +}; + +/** \brief IPsec: Add/delete SPD from interface + + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_add - add security mode if non-zero, else delete + @param sw_if_index - index of the interface + @param spd_id - SPD instance id to use for lookups +*/ + + +define ipsec_interface_add_del_spd +{ + u32 client_index; + u32 context; + + u8 is_add; + u32 sw_if_index; + u32 spd_id; +}; + +/** \brief Reply for IPsec: Add/delete SPD from interface + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ + +define ipsec_interface_add_del_spd_reply +{ + u32 context; + i32 retval; +}; + +/** \brief IPsec: Add/delete Security Policy Database entry + + See RFC 4301, 4.4.1.1 on how to match packet to selectors + + @param client_index - opaque cookie to 
identify the sender + @param context - sender context, to match reply w/ request + @param is_add - add SPD if non-zero, else delete + @param spd_id - SPD instance id (control plane allocated) + @param priority - priority of SPD entry (non-unique value). Used to order SPD matching - higher priorities match before lower + @param is_outbound - entry applies to outbound traffic if non-zero, otherwise applies to inbound traffic + @param is_ipv6 - remote/local address are IPv6 if non-zero, else IPv4 + @param remote_address_start - start of remote address range to match + @param remote_address_stop - end of remote address range to match + @param local_address_start - start of local address range to match + @param local_address_stop - end of local address range to match + @param protocol - protocol type to match [0 means any] + @param remote_port_start - start of remote port range to match ... + @param remote_port_stop - end of remote port range to match [0 to 65535 means ANY, 65535 to 0 means OPAQUE] + @param local_port_start - start of local port range to match ... 
+ @param local_port_stop - end of remote port range to match [0 to 65535 means ANY, 65535 to 0 means OPAQUE] + @param policy - 0 = bypass (no IPsec processing), 1 = discard (discard packet with ICMP processing), 2 = resolve (send request to control plane for SA resolving, and discard without ICMP processing), 3 = protect (apply IPsec policy using following parameters) + @param sa_id - SAD instance id (control plane allocated) + +*/ + +define ipsec_spd_add_del_entry +{ + u32 client_index; + u32 context; + u8 is_add; + + u32 spd_id; + i32 priority; + u8 is_outbound; + + // Selector + u8 is_ipv6; + u8 is_ip_any; + u8 remote_address_start[16]; + u8 remote_address_stop[16]; + u8 local_address_start[16]; + u8 local_address_stop[16]; + + u8 protocol; + + u16 remote_port_start; + u16 remote_port_stop; + u16 local_port_start; + u16 local_port_stop; + + // Policy + u8 policy; + u32 sa_id; +}; + +/** \brief Reply for IPsec: Add/delete Security Policy Database entry + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ + +define ipsec_spd_add_del_entry_reply +{ + u32 context; + i32 retval; +}; + +/** \brief IPsec: Add/delete Security Association Database entry + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_add - add SAD entry if non-zero, else delete + + @param sad_id - sad id + + @param spi - security parameter index + + @param protocol - 0 = AH, 1 = ESP + + @param crypto_algorithm - 0 = Null, 1 = AES-CBC-128, 2 = AES-CBC-192, 3 = AES-CBC-256, 4 = 3DES-CBC + @param crypto_key_length - length of crypto_key in bytes + @param crypto_key - crypto keying material + + @param integrity_algorithm - 0 = None, 1 = MD5-96, 2 = SHA1-96, 3 = SHA-256, 4 = SHA-384, 5=SHA-512 + @param integrity_key_length - length of integrity_key in bytes + @param integrity_key - integrity keying material + + @param use_extended_sequence_number - use ESN when non-zero + + 
@param is_tunnel - IPsec tunnel mode if non-zero, else transport mode + @param is_tunnel_ipv6 - IPsec tunnel mode is IPv6 if non-zero, else IPv4 tunnel only valid if is_tunnel is non-zero + @param tunnel_src_address - IPsec tunnel source address IPv6 if is_tunnel_ipv6 is non-zero, else IPv4. Only valid if is_tunnel is non-zero + @param tunnel_dst_address - IPsec tunnel destination address IPv6 if is_tunnel_ipv6 is non-zero, else IPv4. Only valid if is_tunnel is non-zero + + To be added: + Anti-replay + IPsec tunnel address copy mode (to support GDOI) + */ + +define ipsec_sad_add_del_entry +{ + u32 client_index; + u32 context; + u8 is_add; + + u32 sad_id; + + u32 spi; + + u8 protocol; + + u8 crypto_algorithm; + u8 crypto_key_length; + u8 crypto_key[128]; + + u8 integrity_algorithm; + u8 integrity_key_length; + u8 integrity_key[128]; + + u8 use_extended_sequence_number; + + u8 is_tunnel; + u8 is_tunnel_ipv6; + u8 tunnel_src_address[16]; + u8 tunnel_dst_address[16]; +}; + +/** \brief Reply for IPsec: Add/delete Security Association Database entry + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ + +define ipsec_sad_add_del_entry_reply +{ + u32 context; + i32 retval; +}; + +/** \brief IPsec: Update Security Association keys + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + + @param sa_id - sa id + + @param crypto_key_length - length of crypto_key in bytes + @param crypto_key - crypto keying material + + @param integrity_key_length - length of integrity_key in bytes + @param integrity_key - integrity keying material +*/ + +define ipsec_sa_set_key +{ + u32 client_index; + u32 context; + + u32 sa_id; + + u8 crypto_key_length; + u8 crypto_key[128]; + + u8 integrity_key_length; + u8 integrity_key[128]; +}; + +/** \brief Reply for IPsec: Update Security Association keys + @param context - returned sender context, to match reply w/ request + @param 
retval - return code +*/ + +define ipsec_sa_set_key_reply +{ + u32 context; + i32 retval; +}; + +/** \brief IKEv2: Add/delete profile + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + + @param name - IKEv2 profile name + @param is_add - Add IKEv2 profile if non-zero, else delete +*/ +define ikev2_profile_add_del +{ + u32 client_index; + u32 context; + + u8 name[64]; + u8 is_add; +}; + +/** \brief Reply for IKEv2: Add/delete profile + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define ikev2_profile_add_del_reply +{ + u32 context; + i32 retval; +}; + +/** \brief IKEv2: Set IKEv2 profile authentication method + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + + @param name - IKEv2 profile name + @param auth_method - IKEv2 authentication method (shared-key-mic/rsa-sig) + @param is_hex - Authentication data in hex format if non-zero, else string + @param data_len - Authentication data length + @param data - Authentication data (for rsa-sig cert file path) +*/ +define ikev2_profile_set_auth +{ + u32 client_index; + u32 context; + + u8 name[64]; + u8 auth_method; + u8 is_hex; + u32 data_len; + u8 data[0]; +}; + +/** \brief Reply for IKEv2: Set IKEv2 profile authentication method + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define ikev2_profile_set_auth_reply +{ + u32 context; + i32 retval; +}; + +/** \brief IKEv2: Set IKEv2 profile local/remote identification + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + + @param name - IKEv2 profile name + @param is_local - Identification is local if non-zero, else remote + @param id_type - Identification type + @param data_len - Identification data length + @param data - Identification data +*/ 
+define ikev2_profile_set_id +{ + u32 client_index; + u32 context; + + u8 name[64]; + u8 is_local; + u8 id_type; + u32 data_len; + u8 data[0]; +}; + +/** \brief Reply for IKEv2: + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define ikev2_profile_set_id_reply +{ + u32 context; + i32 retval; +}; + +/** \brief IKEv2: Set IKEv2 profile traffic selector parameters + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + + @param name - IKEv2 profile name + @param is_local - Traffic selector is local if non-zero, else remote + @param proto - Traffic selector IP protocol (if zero not relevant) + @param start_port - The smallest port number allowed by traffic selector + @param end_port - The largest port number allowed by traffic selector + @param start_addr - The smallest address included in traffic selector + @param end_addr - The largest address included in traffic selector +*/ +define ikev2_profile_set_ts +{ + u32 client_index; + u32 context; + + u8 name[64]; + u8 is_local; + u8 proto; + u16 start_port; + u16 end_port; + u32 start_addr; + u32 end_addr; +}; + +/** \brief Reply for IKEv2: Set IKEv2 profile traffic selector parameters + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define ikev2_profile_set_ts_reply +{ + u32 context; + i32 retval; +}; + +/** \brief IKEv2: Set IKEv2 local RSA private key + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + + @param key_file - Key file absolute path +*/ +define ikev2_set_local_key +{ + u32 client_index; + u32 context; + + u8 key_file[256]; +}; + +/** \brief Reply for IKEv2: Set IKEv2 local key + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define ikev2_set_local_key_reply +{ + u32 context; + i32 retval; +}; + +/** 
\brief Tell client about a DHCP completion event + @param client_index - opaque cookie to identify the sender + @param pid - client pid registered to receive notification + @param is_ipv6 - if non-zero the address is ipv6, else ipv4 + @param host_address - Host IP address + @param router_address - Router IP address + @param host_mac - Host MAC address +*/ +define dhcp_compl_event +{ + u32 client_index; + u32 pid; + u8 hostname[64]; + u8 is_ipv6; + u8 host_address[16]; + u8 router_address[16]; + u8 host_mac[6]; +}; + +/** \brief cop: enable/disable junk filtration features on an interface + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - desired interface + @param enable_disable - 1 => enable, 0 => disable +*/ + +define cop_interface_enable_disable +{ + u32 client_index; + u32 context; + u32 sw_if_index; + u8 enable_disable; +}; + +/** \brief cop: interface enable/disable junk filtration reply + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ + +define cop_interface_enable_disable_reply +{ + u32 context; + i32 retval; +}; + +/** \brief cop: enable/disable whitelist filtration features on an interface + Note: the supplied fib_id must match in order to remove the feature! 
+ + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - interface handle, physical interfaces only + @param fib_id - fib identifier for the whitelist / blacklist fib + @param ip4 - 1 => enable ip4 filtration, 0=> disable ip4 filtration + @param ip6 - 1 => enable ip6 filtration, 0=> disable ip6 filtration + @param default_cop - 1 => enable non-ip4, non-ip6 filtration 0=> disable it +*/ + +define cop_whitelist_enable_disable +{ + u32 client_index; + u32 context; + u32 sw_if_index; + u32 fib_id; + u8 ip4; + u8 ip6; + u8 default_cop; +}; + +/** \brief cop: interface enable/disable junk filtration reply + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ + +define cop_whitelist_enable_disable_reply +{ + u32 context; + i32 retval; +}; + +/** \brief get_node_graph - get a copy of the vpp node graph + including the current set of graph arcs. + + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request +*/ + +define get_node_graph +{ + u32 client_index; + u32 context; +}; + +/** \brief get_node_graph_reply + @param context - returned sender context, to match reply w/ request + @param retval - return code + @param reply_in_shmem - result from vlib_node_serialize, in shared + memory. Process with vlib_node_unserialize, remember to switch + heaps and free the result. 
+*/ + +define get_node_graph_reply +{ + u32 context; + i32 retval; + u64 reply_in_shmem; +}; + +/** \brief IOAM enable : Enable in-band OAM + @param id - profile id + @param seqno - To enable Seqno Processing + @param analyse - Enabling analysis of iOAM at decap node + @param pow_enable - Proof of Work enabled or not flag + @param trace_enable - iOAM Trace enabled or not flag +*/ +define ioam_enable +{ + u32 client_index; + u32 context; + u16 id; + u8 seqno; + u8 analyse; + u8 pot_enable; + u8 trace_enable; + u32 node_id; +}; + +/** \brief iOAM Trace profile add / del response + @param context - sender context, to match reply w/ request + @param retval - return value for request +*/ +define ioam_enable_reply +{ + u32 context; + i32 retval; +}; + +/** \brief iOAM disable + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param index - MAP Domain index +*/ +define ioam_disable +{ + u32 client_index; + u32 context; + u16 id; +}; + +/** \brief iOAM disable response + @param context - sender context, to match reply w/ request + @param retval - return value for request +*/ +define ioam_disable_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Create host-interface + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param host_if_name - interface name + @param hw_addr - interface MAC + @param use_random_hw_addr - use random generated MAC +*/ +define af_packet_create +{ + u32 client_index; + u32 context; + + u8 host_if_name[64]; + u8 hw_addr[6]; + u8 use_random_hw_addr; +}; + +/** \brief Create host-interface response + @param context - sender context, to match reply w/ request + @param retval - return value for request +*/ +define af_packet_create_reply +{ + u32 context; + i32 retval; + u32 sw_if_index; +}; + +/** \brief Delete host-interface + @param client_index - opaque cookie to identify the sender + @param context - 
sender context, to match reply w/ request + @param host_if_name - interface name +*/ +define af_packet_delete +{ + u32 client_index; + u32 context; + + u8 host_if_name[64]; +}; + +/** \brief Delete host-interface response + @param context - sender context, to match reply w/ request + @param retval - return value for request +*/ +define af_packet_delete_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Add/del policer + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_add - add policer if non-zero, else delete + @param name - policer name + @param cir - CIR + @param eir - EIR + @param cb - Committed Burst + @param eb - Excess or Peak Burst + @param rate_type - rate type + @param round_type - rounding type + @param type - policer algorithm + @param color_aware - 0=color-blind, 1=color-aware + @param conform_action_type - conform action type + @param conform_dscp - DSCP for conform mar-and-transmit action + @param exceed_action_type - exceed action type + @param exceed_dscp - DSCP for exceed mar-and-transmit action + @param violate_action_type - violate action type + @param violate_dscp - DSCP for violate mar-and-transmit action +*/ +define policer_add_del +{ + u32 client_index; + u32 context; + + u8 is_add; + u8 name[64]; + u32 cir; + u32 eir; + u64 cb; + u64 eb; + u8 rate_type; + u8 round_type; + u8 type; + u8 color_aware; + u8 conform_action_type; + u8 conform_dscp; + u8 exceed_action_type; + u8 exceed_dscp; + u8 violate_action_type; + u8 violate_dscp; +}; + +/** \brief Add/del policer response + @param context - sender context, to match reply w/ request + @param retval - return value for request + @param policer_index - for add, returned index of the new policer +*/ +define policer_add_del_reply +{ + u32 context; + i32 retval; + u32 policer_index; +}; + +/** \brief Get list of policers + @param client_index - opaque cookie to identify the sender + @param context - sender 
context, to match reply w/ request + @param match_name_valid - if 0 request all policers otherwise use match_name + @param match_name - policer name +*/ +define policer_dump +{ + u32 client_index; + u32 context; + + u8 match_name_valid; + u8 match_name[64]; +}; + +/** \brief Policer operational state response. + @param context - sender context, to match reply w/ request + @param name - policer name + @param cir - CIR + @param eir - EIR + @param cb - Committed Burst + @param eb - Excess or Peak Burst + @param rate_type - rate type + @param round_type - rounding type + @param type - policer algorithm + @param conform_action_type - conform action type + @param conform_dscp - DSCP for conform mar-and-transmit action + @param exceed_action_type - exceed action type + @param exceed_dscp - DSCP for exceed mar-and-transmit action + @param violate_action_type - violate action type + @param violate_dscp - DSCP for violate mar-and-transmit action + @param single_rate - 1 = single rate policer, 0 = two rate policer + @param color_aware - for hierarchical policing + @param scale - power-of-2 shift amount for lower rates + @param cir_tokens_per_period - number of tokens for each period + @param pir_tokens_per_period - number of tokens for each period for 2-rate policer + @param current_limit - current limit + @param current_bucket - current bucket + @param extended_limit - extended limit + @param extended_bucket - extended bucket + @param last_update_time - last update time +*/ +define policer_details +{ + u32 context; + + u8 name[64]; + u32 cir; + u32 eir; + u64 cb; + u64 eb; + u8 rate_type; + u8 round_type; + u8 type; + u8 conform_action_type; + u8 conform_dscp; + u8 exceed_action_type; + u8 exceed_dscp; + u8 violate_action_type; + u8 violate_dscp; + u8 single_rate; + u8 color_aware; + u32 scale; + u32 cir_tokens_per_period; + u32 pir_tokens_per_period; + u32 current_limit; + u32 current_bucket; + u32 extended_limit; + u32 extended_bucket; + u64 last_update_time; +}; + +/** 
\brief Set/unset policer classify interface + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - interface to set/unset policer classify + @param ip4_table_index - ip4 classify table index (~0 for skip) + @param ip6_table_index - ip6 classify table index (~0 for skip) + @param l2_table_index - l2 classify table index (~0 for skip) + @param is_add - Set if non-zero, else unset + Note: User is recommended to use just one valid table_index per call. + (ip4_table_index, ip6_table_index, or l2_table_index) +*/ +define policer_classify_set_interface +{ + u32 client_index; + u32 context; + u32 sw_if_index; + u32 ip4_table_index; + u32 ip6_table_index; + u32 l2_table_index; + u8 is_add; +}; + +/** \brief Set/unset policer classify interface response + @param context - sender context, to match reply w/ request + @param retval - return value for request +*/ +define policer_classify_set_interface_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Get list of policer classify interfaces and tables + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param type - classify table type +*/ +define policer_classify_dump +{ + u32 client_index; + u32 context; + u8 type; +}; + +/** \brief Policer classify operational state response. 
+ @param context - sender context, to match reply w/ request + @param sw_if_index - software interface index + @param table_index - classify table index +*/ +define policer_classify_details +{ + u32 context; + u32 sw_if_index; + u32 table_index; +}; + +/** \brief Create netmap + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param netmap_if_name - interface name + @param hw_addr - interface MAC + @param use_random_hw_addr - use random generated MAC + @param is_pipe - is pipe + @param is_master - 0=slave, 1=master +*/ +define netmap_create +{ + u32 client_index; + u32 context; + + u8 netmap_if_name[64]; + u8 hw_addr[6]; + u8 use_random_hw_addr; + u8 is_pipe; + u8 is_master; +}; + +/** \brief Create netmap response + @param context - sender context, to match reply w/ request + @param retval - return value for request +*/ +define netmap_create_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Delete netmap + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param netmap_if_name - interface name +*/ +define netmap_delete +{ + u32 client_index; + u32 context; + + u8 netmap_if_name[64]; +}; + +/** \brief Delete netmap response + @param context - sender context, to match reply w/ request + @param retval - return value for request +*/ +define netmap_delete_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Classify get table IDs request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request +*/ +define classify_table_ids +{ + u32 client_index; + u32 context; +}; + +/** \brief Reply for classify get table IDs request + @param context - sender context which was passed in the request + @param count - number of ids returned in response + @param ids - array of classify table ids +*/ +define classify_table_ids_reply +{ + u32 context; + i32 retval; + u32 
count; + u32 ids[count]; +}; + +/** \brief Classify table ids by interface index request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - index of the interface +*/ +define classify_table_by_interface +{ + u32 client_index; + u32 context; + u32 sw_if_index; +}; + +/** \brief Reply for classify table id by interface index request + @param context - sender context which was passed in the request + @param count - number of ids returned in response + @param sw_if_index - index of the interface + @param l2_table_id - l2 classify table index + @param ip4_table_id - ip4 classify table index + @param ip6_table_id - ip6 classify table index +*/ +define classify_table_by_interface_reply +{ + u32 context; + i32 retval; + u32 sw_if_index; + u32 l2_table_id; + u32 ip4_table_id; + u32 ip6_table_id; +}; + +/** \brief Classify table info + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param table_id - classify table index +*/ +define classify_table_info +{ + u32 client_index; + u32 context; + u32 table_id; +}; + +/** \brief Reply for classify table info request + @param context - sender context which was passed in the request + @param count - number of ids returned in response + @param table_id - classify table index + @param nbuckets - number of buckets when adding a table + @param match_n_vectors - number of match vectors + @param skip_n_vectors - number of skip_n_vectors + @param active_sessions - number of sessions (active entries) + @param next_table_index - index of next table + @param miss_next_index - index of miss table + @param mask[] - match mask +*/ +define classify_table_info_reply +{ + u32 context; + i32 retval; + u32 table_id; + u32 nbuckets; + u32 match_n_vectors; + u32 skip_n_vectors; + u32 active_sessions; + u32 next_table_index; + u32 miss_next_index; + u32 mask_length; + u8 
mask[mask_length]; +}; + +/** \brief Classify sessions dump request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param table_id - classify table index +*/ +define classify_session_dump +{ + u32 client_index; + u32 context; + u32 table_id; +}; + +/** \brief Reply for classify table session dump request + @param context - sender context which was passed in the request + @param count - number of ids returned in response + @param table_id - classify table index + @param hit_next_index - hit_next_index of session + @param opaque_index - for add, opaque_index of session + @param advance - advance value of session + @param match[] - match value for session +*/ +define classify_session_details +{ + u32 context; + i32 retval; + u32 table_id; + u32 hit_next_index; + i32 advance; + u32 opaque_index; + u32 match_length; + u8 match[match_length]; +}; + +/** \brief Configure IPFIX exporter process request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param collector_address - address of IPFIX collector + @param collector_port - port of IPFIX collector + @param src_address - address of IPFIX exporter + @param vrf_id - VRF / fib table ID + @param path_mtu - Path MTU between exporter and collector + @param template_interval - number of seconds after which to resend template + @param udp_checksum - UDP checksum calculation enable flag +*/ +define set_ipfix_exporter +{ + u32 client_index; + u32 context; + u8 collector_address[16]; + u16 collector_port; + u8 src_address[16]; + u32 vrf_id; + u32 path_mtu; + u32 template_interval; + u8 udp_checksum; +}; + +/** \brief Reply to IPFIX exporter configure request + @param context - sender context which was passed in the request +*/ +define set_ipfix_exporter_reply +{ + u32 context; + i32 retval; +}; + +/** \brief IPFIX exporter dump request + @param client_index - opaque cookie to 
identify the sender + @param context - sender context, to match reply w/ request +*/ +define ipfix_exporter_dump +{ + u32 client_index; + u32 context; +}; + +/** \brief Reply to IPFIX exporter dump request + @param context - sender context which was passed in the request + @param collector_address - address of IPFIX collector + @param collector_port - port of IPFIX collector + @param src_address - address of IPFIX exporter + @param fib_index - fib table index + @param path_mtu - Path MTU between exporter and collector + @param template_interval - number of seconds after which to resend template + @param udp_checksum - UDP checksum calculation enable flag +*/ +define ipfix_exporter_details +{ + u32 context; + u8 collector_address[16]; + u16 collector_port; + u8 src_address[16]; + u32 vrf_id; + u32 path_mtu; + u32 template_interval; + u8 udp_checksum; +}; + +/** \brief IPFIX classify stream configure request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param domain_id - domain ID reported in IPFIX messages for classify stream + @param src_port - source port of UDP session for classify stream +*/ +define set_ipfix_classify_stream { + u32 client_index; + u32 context; + u32 domain_id; + u16 src_port; +}; + +/** \brief IPFIX classify stream configure response + @param context - sender context, to match reply w/ request + @param retval - return value for request +*/ +define set_ipfix_classify_stream_reply { + u32 context; + i32 retval; +}; + +/** \brief IPFIX classify stream dump request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request +*/ +define ipfix_classify_stream_dump { + u32 client_index; + u32 context; +}; + +/** \brief Reply to IPFIX classify stream dump request + @param context - sender context, to match reply w/ request + @param domain_id - domain ID reported in IPFIX messages for classify stream + @param 
src_port - source port of UDP session for classify stream +*/ +define ipfix_classify_stream_details { + u32 context; + u32 domain_id; + u16 src_port; +}; + +/** \brief IPFIX add or delete classifier table request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param table_id - classifier table ID + @param ip_version - version of IP used in the classifier table + @param transport_protocol - transport protocol used in the classifier table or 255 for unspecified +*/ +define ipfix_classify_table_add_del { + u32 client_index; + u32 context; + u32 table_id; + u8 ip_version; + u8 transport_protocol; + u8 is_add; +}; + +/** \brief IPFIX add classifier table response + @param context - sender context which was passed in the request +*/ +define ipfix_classify_table_add_del_reply { + u32 context; + i32 retval; +}; + +/** \brief IPFIX classify tables dump request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request +*/ +define ipfix_classify_table_dump { + u32 client_index; + u32 context; +}; + +/** \brief Reply to IPFIX classify tables dump request + @param context - sender context, to match reply w/ request + @param table_id - classifier table ID + @param ip_version - version of IP used in the classifier table + @param transport_protocol - transport protocol used in the classifier table or 255 for unspecified +*/ +define ipfix_classify_table_details { + u32 context; + u32 table_id; + u8 ip_version; + u8 transport_protocol; +}; + +/** \brief Set/unset flow classify interface + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - interface to set/unset flow classify + @param ip4_table_index - ip4 classify table index (~0 for skip) + @param ip6_table_index - ip6 classify table index (~0 for skip) + @param l2_table_index - l2 classify table index 
(~0 for skip) + @param is_add - Set if non-zero, else unset + Note: User is recommeneded to use just one valid table_index per call. + (ip4_table_index, ip6_table_index, or l2_table_index) +*/ +define flow_classify_set_interface { + u32 client_index; + u32 context; + u32 sw_if_index; + u32 ip4_table_index; + u32 ip6_table_index; + u8 is_add; +}; + +/** \brief Set/unset flow classify interface response + @param context - sender context, to match reply w/ request + @param retval - return value for request +*/ +define flow_classify_set_interface_reply { + u32 context; + i32 retval; +}; + +/** \brief Get list of flow classify interfaces and tables + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param type - classify table type +*/ +define flow_classify_dump { + u32 client_index; + u32 context; + u8 type; +}; + +/** \brief Flow classify operational state response. + @param context - sender context, to match reply w/ request + @param sw_if_index - software interface index + @param table_index - classify table index +*/ +define flow_classify_details { + u32 context; + u32 sw_if_index; + u32 table_index; +}; + +/** \brief Query relative index via node names + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param node_name - name of node to find relative index from + @param next_name - next node from node_name to find relative index of +*/ +define get_next_index +{ + u32 client_index; + u32 context; + u8 node_name[64]; + u8 next_name[64]; +}; + +/** \brief Reply for get next node index + @param context - sender context which was passed in the request + @param retval - return value + @param next_index - index of the next_node +*/ +define get_next_index_reply +{ + u32 context; + i32 retval; + u32 next_index; +}; + +/** \brief PacketGenerator create interface request + @param client_index - opaque cookie to identify the sender 
 + @param context - sender context, to match reply w/ request + @param interface_id - interface index +*/ +define pg_create_interface +{ + u32 client_index; + u32 context; + u32 interface_id; +}; + +/** \brief PacketGenerator create interface response + @param context - sender context, to match reply w/ request + @param retval - return value for request +*/ +define pg_create_interface_reply +{ + u32 context; + i32 retval; + u32 sw_if_index; +}; + +/** \brief PacketGenerator capture packets on given interface request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param interface_id - pg interface index + @param is_enabled - 1 if enabling streams, 0 if disabling + @param count - number of packets to be captured + @param pcap_file - pcap file name to store captured packets +*/ +define pg_capture +{ + u32 client_index; + u32 context; + u32 interface_id; + u8 is_enabled; + u32 count; + u32 pcap_name_length; + u8 pcap_file_name[pcap_name_length]; +}; + +/** \brief PacketGenerator capture packets response + @param context - sender context, to match reply w/ request + @param retval - return value for request +*/ +define pg_capture_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Enable / disable packet generator request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_enabled - 1 if enabling streams, 0 if disabling + @param stream - stream name to be enable/disabled, if not specified handle all streams +*/ +define pg_enable_disable +{ + u32 client_index; + u32 context; + u8 is_enabled; + u32 stream_name_length; + u8 stream_name[stream_name_length]; +}; + +/** \brief Reply for enable / disable packet generator + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define pg_enable_disable_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Configure IP 
source and L4 port-range check + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_ip6 - 1 if source address type is IPv6 + @param is_add - 1 if add, 0 if delete + @param mask_length - mask length for address entry + @param address - array of address bytes + @param number_of_ranges - length of low_port and high_port arrays (must match) + @param low_ports[32] - up to 32 low end of port range entries (must have corresponding high_ports entry) + @param high_ports[32] - up to 32 high end of port range entries (must have corresponding low_ports entry) + @param vrf_id - fib table/vrf id to associate the source and port-range check with + @note To specify a single port set low_port and high_port entry the same +*/ +define ip_source_and_port_range_check_add_del +{ + u32 client_index; + u32 context; + u8 is_ipv6; + u8 is_add; + u8 mask_length; + u8 address[16]; + u8 number_of_ranges; + u16 low_ports[32]; + u16 high_ports[32]; + u32 vrf_id; +}; + +/** \brief Configure IP source and L4 port-range check reply + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define ip_source_and_port_range_check_add_del_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Set interface source and L4 port-range request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param interface_id - interface index + @param tcp_vrf_id - VRF associated with source and TCP port-range check + @param udp_vrf_id - VRF associated with source and TCP port-range check +*/ +define ip_source_and_port_range_check_interface_add_del +{ + u32 client_index; + u32 context; + u8 is_add; + u32 sw_if_index; + u32 tcp_in_vrf_id; + u32 tcp_out_vrf_id; + u32 udp_in_vrf_id; + u32 udp_out_vrf_id; +}; + +/** \brief Set interface source and L4 port-range response + @param context - sender context, to match reply w/ 
 request + @param retval - return value for request +*/ +define ip_source_and_port_range_check_interface_add_del_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Add / del ipsec gre tunnel request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param local_sa_id - local SA id + @param remote_sa_id - remote SA id + @param is_add - 1 if adding the tunnel, 0 if deleting + @param src_address - tunnel source address + @param dst_address - tunnel destination address +*/ +define ipsec_gre_add_del_tunnel { + u32 client_index; + u32 context; + u32 local_sa_id; + u32 remote_sa_id; + u8 is_add; + u8 src_address[4]; + u8 dst_address[4]; +}; + +/** \brief Reply for add / del ipsec gre tunnel request + @param context - returned sender context, to match reply w/ request + @param retval - return code + @param sw_if_index - software index of the new ipsec gre tunnel +*/ +define ipsec_gre_add_del_tunnel_reply { + u32 context; + i32 retval; + u32 sw_if_index; +}; + +/** \brief Dump ipsec gre tunnel table + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param tunnel_index - gre tunnel identifier or -1 in case of all tunnels +*/ +define ipsec_gre_tunnel_dump { + u32 client_index; + u32 context; + u32 sw_if_index; +}; + +/** \brief ipsec gre tunnel operational state response + @param context - returned sender context, to match reply w/ request + @param sw_if_index - software index of the ipsec gre tunnel + @param local_sa_id - local SA id + @param remote_sa_id - remote SA id + @param src_address - tunnel source address + @param dst_address - tunnel destination address +*/ +define ipsec_gre_tunnel_details { + u32 context; + u32 sw_if_index; + u32 local_sa_id; + u32 remote_sa_id; + u8 src_address[4]; + u8 dst_address[4]; +}; + +/** \brief Delete sub interface request + @param client_index - opaque cookie to identify the sender + 
@param context - sender context, to match reply w/ request + @param sw_if_index - sw index of the interface that was created by create_subif +*/ +define delete_subif { + u32 client_index; + u32 context; + u32 sw_if_index; +}; + +/** \brief Delete sub interface response + @param context - sender context, to match reply w/ request + @param retval - return code for the request +*/ +define delete_subif_reply { + u32 context; + i32 retval; +}; + +/** \brief DPDK interface HQoS pipe profile set request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - the interface + @param subport - subport ID + @param pipe - pipe ID within its subport + @param profile - pipe profile ID +*/ +define sw_interface_set_dpdk_hqos_pipe { + u32 client_index; + u32 context; + u32 sw_if_index; + u32 subport; + u32 pipe; + u32 profile; +}; + +/** \brief DPDK interface HQoS pipe profile set reply + @param context - sender context, to match reply w/ request + @param retval - request return code +*/ +define sw_interface_set_dpdk_hqos_pipe_reply { + u32 context; + i32 retval; +}; + +/** \brief DPDK interface HQoS subport parameters set request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - the interface + @param subport - subport ID + @param tb_rate - subport token bucket rate (measured in bytes/second) + @param tb_size - subport token bucket size (measured in credits) + @param tc_rate - subport traffic class 0 .. 
3 rates (measured in bytes/second) + @param tc_period - enforcement period for rates (measured in milliseconds) +*/ +define sw_interface_set_dpdk_hqos_subport { + u32 client_index; + u32 context; + u32 sw_if_index; + u32 subport; + u32 tb_rate; + u32 tb_size; + u32 tc_rate[4]; + u32 tc_period; +}; + +/** \brief DPDK interface HQoS subport parameters set reply + @param context - sender context, to match reply w/ request + @param retval - request return code +*/ +define sw_interface_set_dpdk_hqos_subport_reply { + u32 context; + i32 retval; +}; + +/** \brief DPDK interface HQoS tctbl entry set request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - the interface + @param entry - entry index ID + @param tc - traffic class (0 .. 3) + @param queue - traffic class queue (0 .. 3) +*/ +define sw_interface_set_dpdk_hqos_tctbl { + u32 client_index; + u32 context; + u32 sw_if_index; + u32 entry; + u32 tc; + u32 queue; +}; + +/** \brief DPDK interface HQoS tctbl entry set reply + @param context - sender context, to match reply w/ request + @param retval - request return code +*/ +define sw_interface_set_dpdk_hqos_tctbl_reply { + u32 context; + i32 retval; +}; + +/** \brief L2 interface pbb tag rewrite configure request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - interface the operation is applied to + @param vtr_op - Choose from l2_vtr_op_t enum values + @param inner_tag - needed for translate_qinq vtr op only + @param outer_tag - needed for translate_qinq vtr op only + @param b_dmac - B-tag remote mac address, needed for any push or translate_qinq vtr op + @param b_smac - B-tag local mac address, needed for any push or translate qinq vtr op + @param b_vlanid - B-tag vlanid, needed for any push or translate qinq vtr op + @param i_sid - I-tag service id, needed for any push or 
translate qinq vtr op +*/ +define l2_interface_pbb_tag_rewrite +{ + u32 client_index; + u32 context; + u32 sw_if_index; + u32 vtr_op; + u16 outer_tag; + u8 b_dmac[6]; + u8 b_smac[6]; + u16 b_vlanid; + u32 i_sid; +}; + +/** \brief L2 interface pbb tag rewrite response + @param context - sender context, to match reply w/ request + @param retval - return code for the request +*/ +define l2_interface_pbb_tag_rewrite_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Punt traffic to the host + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_add - add punt if non-zero, else delete + @param ipv - L3 protocol 4 - IPv4, 6 - IPv6, ~0 - All + @param l4_protocol - L4 protocol to be punted, only UDP (0x11) is supported + @param l4_port - TCP/UDP port to be punted +*/ +define punt { + u32 client_index; + u32 context; + u8 is_add; + u8 ipv; + u8 l4_protocol; + u16 l4_port; +}; + +/** \brief Reply to the punt request + @param context - sender context which was passed in the request + @param retval - return code of punt request +*/ +define punt_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Dump ipsec policy database data + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param spd_id - SPD instance id + @param sa_id - SA id, optional, set to ~0 to see all policies in SPD +*/ +define ipsec_spd_dump { + u32 client_index; + u32 context; + u32 spd_id; + u32 sa_id; +}; + +/** \brief IPsec policy database response + @param context - sender context which was passed in the request + @param spd_id - SPD instance id + @param priority - numeric value to control policy evaluation order + @param is_outbound - [1|0] to indicate if direction is [out|in]bound + @param is_ipv6 - [1|0] to indicate if address family is ipv[6|4] + @param local_start_addr - first address in local traffic selector range + @param local_stop_addr - 
 last address in local traffic selector range + @param local_start_port - first port in local traffic selector range + @param local_stop_port - last port in local traffic selector range + @param remote_start_addr - first address in remote traffic selector range + @param remote_stop_addr - last address in remote traffic selector range + @param remote_start_port - first port in remote traffic selector range + @param remote_stop_port - last port in remote traffic selector range + @param protocol - traffic selector protocol + @param policy - policy action + @param sa_id - SA id + @param bytes - byte count of packets matching this policy + @param packets - count of packets matching this policy +*/ + +define ipsec_spd_details { + u32 context; + u32 spd_id; + i32 priority; + u8 is_outbound; + u8 is_ipv6; + u8 local_start_addr[16]; + u8 local_stop_addr[16]; + u16 local_start_port; + u16 local_stop_port; + u8 remote_start_addr[16]; + u8 remote_stop_addr[16]; + u16 remote_start_port; + u16 remote_stop_port; + u8 protocol; + u8 policy; + u32 sa_id; + u64 bytes; + u64 packets; +}; + +/** \brief Feature path enable/disable request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - the interface + @param enable - 1 = on, 0 = off +*/ +define feature_enable_disable { + u32 client_index; + u32 context; + u32 sw_if_index; + u8 enable; + u8 arc_name[64]; + u8 feature_name[64]; +}; + +/** \brief Reply to the feature path enable/disable request + @param context - sender context which was passed in the request + @param retval - return code for the request +*/ +define feature_enable_disable_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Configure BFD feature + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param slow_timer - slow timer (seconds) + @param min_tx - desired min tx interval + @param min_rx - desired 
min rx interval + @param detect_mult - desired detection multiplier +*/ +define bfd_set_config { + u32 client_index; + u32 context; + u32 slow_timer; + u32 min_tx; + u32 min_rx; + u8 detect_mult; +}; + +/** \brief Configure BFD feature response + @param context - sender context, to match reply w/ request + @param retval - return code for the request +*/ +define bfd_set_config_reply { + u32 context; + i32 retval; +}; + +/** \brief Get BFD configuration +*/ +define bfd_get_config { + u32 client_index; + u32 context; +}; + +/** \brief Get BFD configuration response + @param context - sender context, to match reply w/ request + @param retval - return code for the request + @param slow_timer - slow timer (seconds) + @param min_tx - desired min tx interval + @param min_rx - desired min rx interval + @param detect_mult - desired detection multiplier +*/ +define bfd_get_config_reply { + u32 client_index; + u32 context; + u32 slow_timer; + u32 min_tx; + u32 min_rx; + u8 detect_mult; +}; + +/** \brief Add UDP BFD session on interface + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - sw index of the interface + @param desired_min_tx - desired min transmit interval (microseconds) + @param required_min_rx - required min receive interval (microseconds) + @param detect_mult - detect multiplier (# of packets missed between connection goes down) + @param local_addr - local address + @param peer_addr - peer address + @param is_ipv6 - local_addr, peer_addr are IPv6 if non-zero, otherwise IPv4 +*/ +define bfd_udp_add { + u32 client_index; + u32 context; + u32 sw_if_index; + u32 desired_min_tx; + u32 required_min_rx; + u8 local_addr[16]; + u8 peer_addr[16]; + u8 is_ipv6; + u8 detect_mult; +}; + +/** \brief Add UDP BFD session response + @param context - sender context, to match reply w/ request + @param retval - return code for the request + @param bs_index - index of the session created +*/ 
+define bfd_udp_add_reply { + u32 context; + i32 retval; + u32 bs_index; +}; + +/** \brief Delete UDP BFD session on interface + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - sw index of the interface + @param local_addr - local address + @param peer_addr - peer address + @param is_ipv6 - local_addr, peer_addr are IPv6 if non-zero, otherwise IPv4 +*/ +define bfd_udp_del { + u32 client_index; + u32 context; + u32 sw_if_index; + u8 local_addr[16]; + u8 peer_addr[16]; + u8 is_ipv6; +}; + +/** \brief Delete UDP BFD session response + @param context - sender context, to match reply w/ request + @param retval - return code for the request +*/ +define bfd_udp_del_reply { + u32 context; + i32 retval; +}; + +/** \brief Get all BFD sessions + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request +*/ +define bfd_udp_session_dump { + u32 client_index; + u32 context; +}; + +/** \brief BFD session details structure + @param context - sender context, to match reply w/ request + @param bs_index - index of the session + @param sw_if_index - sw index of the interface + @param local_addr - local address + @param peer_addr - peer address + @param is_ipv6 - local_addr, peer_addr are IPv6 if non-zero, otherwise IPv4 + @param state - session state +*/ +define bfd_udp_session_details { + u32 context; + u32 bs_index; + u32 sw_if_index; + u8 local_addr[16]; + u8 peer_addr[16]; + u8 is_ipv6; + u8 state; +}; + +/** \brief Set flags of BFD session + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param bs_index - index of the bfd session to set flags on + @param admin_up_down - set the admin state, 1 = up, 0 = down +*/ +define bfd_session_set_flags { + u32 client_index; + u32 context; + u32 bs_index; + u8 admin_up_down; +}; + +/** \brief Reply to 
bfd_session_set_flags + @param context - sender context which was passed in the request + @param retval - return code of the set flags request +*/ +define bfd_session_set_flags_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Register for BFD events + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param enable_disable - 1 => register for events, 0 => cancel registration + @param pid - sender's pid +*/ +define want_bfd_events +{ + u32 client_index; + u32 context; + u32 enable_disable; + u32 pid; +}; + +/** \brief Reply for BFD events registration + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define want_bfd_events_reply +{ + u32 context; + i32 retval; +}; + +/* + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/vpp/vpp/vpp-api/vpe_all_api_h.h b/vpp/vpp/vpp-api/vpe_all_api_h.h new file mode 100644 index 00000000..98717eaa --- /dev/null +++ b/vpp/vpp/vpp-api/vpe_all_api_h.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2015 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/* + * Add to the bottom of the #include list, or elves will steal your + * keyboard in the middle of the night! 
+ */ + +/* Include the (first) vlib-api API definition layer */ +#include + +/* Include the (second) vnet API definition layer */ +#define included_from_layer_3 +#include +#undef included_from_layer_3 + +/* Include the current layer (third) vpp API definition layer */ +#include + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/vpp/vpp/vpp-api/vpe_msg_enum.h b/vpp/vpp/vpp-api/vpe_msg_enum.h new file mode 100644 index 00000000..1da3ec0c --- /dev/null +++ b/vpp/vpp/vpp-api/vpe_msg_enum.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2015 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef included_vpe_msg_enum_h +#define included_vpe_msg_enum_h + +#include + +#define vl_msg_id(n,h) n, +typedef enum +{ + VL_ILLEGAL_MESSAGE_ID = 0, +#include + VL_MSG_FIRST_AVAILABLE, +} vl_msg_id_t; +#undef vl_msg_id + +#endif /* included_vpe_msg_enum_h */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/vpp/vpp/vpp-api/vpp_get_metrics.c b/vpp/vpp/vpp-api/vpp_get_metrics.c new file mode 100644 index 00000000..bbfa605a --- /dev/null +++ b/vpp/vpp/vpp-api/vpp_get_metrics.c @@ -0,0 +1,253 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +svmdb_client_t *c; +volatile int signal_received; + +static void +unix_signal_handler (int signum, siginfo_t * si, ucontext_t * uc) +{ + static int once; + + if (once) + exit (1); + + once = 1; + signal_received = 1; +} + +static void +setup_signal_handlers (void) +{ + uword i; + struct sigaction sa; + + for (i = 1; i < 32; i++) + { + memset (&sa, 0, sizeof (sa)); + sa.sa_sigaction = (void *) unix_signal_handler; + sa.sa_flags = SA_SIGINFO; + + switch (i) + { + /* these signals take the default action */ + case SIGABRT: + case SIGKILL: + case SIGSTOP: + case SIGUSR1: + case SIGUSR2: + continue; + + /* ignore SIGPIPE, SIGCHLD */ + case SIGPIPE: + case SIGCHLD: + sa.sa_sigaction = (void *) SIG_IGN; + break; + + /* catch and handle all other signals */ + default: + break; + } + + if (sigaction (i, &sa, 0) < 0) + return clib_unix_warning (0, "sigaction %U", format_signal, i); + } +} + +int +main (int argc, char **argv) +{ + unformat_input_t input; + char *chroot_path = 0; + u8 *chroot_path_u8; + int interval = 0; + f64 *vector_ratep, *rx_ratep, *sig_error_ratep; + pid_t *vpp_pidp; + svmdb_map_args_t _ma, *ma = &_ma; + int uid, gid, rv; + struct passwd _pw, *pw; + struct group _grp, *grp; + char *s, buf[128]; + + unformat_init_command_line 
(&input, argv); + + uid = geteuid (); + gid = getegid (); + + while (unformat_check_input (&input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (&input, "chroot %s", &chroot_path_u8)) + { + chroot_path = (char *) chroot_path_u8; + } + else if (unformat (&input, "interval %d", &interval)) + ; + else if (unformat (&input, "uid %d", &uid)) + ; + else if (unformat (&input, "gid %d", &gid)) + ; + else if (unformat (&input, "uid %s", &s)) + { + /* lookup the username */ + pw = NULL; + rv = getpwnam_r (s, &_pw, buf, sizeof (buf), &pw); + if (rv < 0) + { + fformat (stderr, "cannot fetch username %s", s); + exit (1); + } + if (pw == NULL) + { + fformat (stderr, "username %s does not exist", s); + exit (1); + } + vec_free (s); + uid = pw->pw_uid; + } + else if (unformat (&input, "gid %s", &s)) + { + /* lookup the group name */ + grp = NULL; + rv = getgrnam_r (s, &_grp, buf, sizeof (buf), &grp); + if (rv != 0) + { + fformat (stderr, "cannot fetch group %s", s); + exit (1); + } + if (grp == NULL) + { + fformat (stderr, "group %s does not exist", s); + exit (1); + } + vec_free (s); + gid = grp->gr_gid; + } + else + { + fformat (stderr, + "usage: vpp_get_metrics [chroot ] [interval ]\n"); + exit (1); + } + } + + setup_signal_handlers (); + + memset (ma, 0, sizeof (*ma)); + ma->root_path = chroot_path; + ma->uid = uid; + ma->gid = gid; + + c = svmdb_map (ma); + + vpp_pidp = + svmdb_local_get_variable_reference (c, SVMDB_NAMESPACE_VEC, "vpp_pid"); + vector_ratep = + svmdb_local_get_variable_reference (c, SVMDB_NAMESPACE_VEC, + "vpp_vector_rate"); + rx_ratep = + svmdb_local_get_variable_reference (c, SVMDB_NAMESPACE_VEC, + "vpp_input_rate"); + sig_error_ratep = + svmdb_local_get_variable_reference (c, SVMDB_NAMESPACE_VEC, + "vpp_sig_error_rate"); + + /* + * Make sure vpp is actually running. 
Otherwise, there's every + * chance that the database region will be wiped out by the + * process monitor script + */ + + if (vpp_pidp == 0 || vector_ratep == 0 || rx_ratep == 0 + || sig_error_ratep == 0) + { + fformat (stdout, "vpp not running\n"); + exit (1); + } + + do + { + /* + * Once vpp exits, the svm db region will be recreated... + * Can't use kill (*vpp_pidp, 0) if running as non-root / + * accessing the shared-VM database via group perms. + */ + if (*vpp_pidp == 0) + { + fformat (stdout, "vpp not running\n"); + exit (1); + } + fformat (stdout, + "%d: vpp_vector_rate=%.2f, vpp_input_rate=%f, vpp_sig_error_rate=%f\n", + *vpp_pidp, *vector_ratep, *rx_ratep, *sig_error_ratep); + + if (interval) + sleep (interval); + if (signal_received) + break; + } + while (interval); + + svmdb_unmap (c); + exit (0); +} + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ -- cgit 1.2.3-korg