author    Dave Barach <dave@barachs.net>  2016-06-27 09:25:13 -0400
committer Keith Burns (alagalah) <alagalah@gmail.com>  2016-06-27 06:54:32 -0700
commit    aa6920e0a80d8271be1dda59f613a1d2b0e1d3e6 (patch)
tree      473efa376151e0b7bce525b97f627e6c67ac6efb /vpp/vpp-api
parent    20c90f765dd0350892421e1dea544752108f4ce9 (diff)
More janitorial work
Install vpp api header files in /usr/include/vpp-api, instead of
/usr/include/api. Someone will eventually complain if we continue with the
status quo.

Generate /usr/bin/vpp_plugin_configure, to correctly configure standalone
plugin compilation against header files installed from the dev package. If a
plugin's CFLAGS don't precisely match the installed vpp engine binary, subtle
misbehavior can and will occur. Example: the ip4/ip6 main_t structure size /
member offsets depend on DPDK=[0|1]. Screw that one up, and your brand-new
configurable ip feature will mysteriously fail to appear, even though the
plugin loads perfectly.

Change-Id: I20c97fe1042808a79935863209d995c31953b98c
Signed-off-by: Dave Barach <dave@barachs.net>
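To make the CFLAGS hazard concrete: a minimal compile-time guard a standalone
plugin could add, sketched with C11 _Static_assert. The constant
VPP_EXPECTED_IP4_MAIN_SIZE is hypothetical (something a build could record
when the dev package is produced); it is not provided by this commit.

    #include <vnet/ip/ip.h>

    /* Hypothetical: the ip4_main_t size recorded at dev-package build time.
       sizeof (ip4_main_t) varies with DPDK=[0|1], so a plugin compiled with
       mismatched CFLAGS fails here instead of loading cleanly and then
       misbehaving. */
    #ifndef VPP_EXPECTED_IP4_MAIN_SIZE
    #define VPP_EXPECTED_IP4_MAIN_SIZE sizeof (ip4_main_t) /* placeholder */
    #endif

    _Static_assert (sizeof (ip4_main_t) == VPP_EXPECTED_IP4_MAIN_SIZE,
                    "ip4_main_t layout differs from the installed vpp engine");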
Diffstat (limited to 'vpp/vpp-api')
-rw-r--r--  vpp/vpp-api/api.c                    7110
-rw-r--r--  vpp/vpp-api/custom_dump.c            1951
-rw-r--r--  vpp/vpp-api/gmon.c                    293
-rw-r--r--  vpp/vpp-api/summary_stats_client.c    279
-rw-r--r--  vpp/vpp-api/test_client.c            1536
-rw-r--r--  vpp/vpp-api/test_ha.c                 219
-rw-r--r--  vpp/vpp-api/vpe.api                  3954
-rw-r--r--  vpp/vpp-api/vpe_all_api_h.h            24
-rw-r--r--  vpp/vpp-api/vpe_msg_enum.h             28
9 files changed, 15394 insertions, 0 deletions
diff --git a/vpp/vpp-api/api.c b/vpp/vpp-api/api.c
new file mode 100644
index 00000000..2ea92df0
--- /dev/null
+++ b/vpp/vpp-api/api.c
@@ -0,0 +1,7110 @@
+/*
+ *------------------------------------------------------------------
+ * api.c - message handler registration
+ *
+ * Copyright (c) 2010 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <netinet/in.h>
+#include <signal.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <time.h>
+#include <fcntl.h>
+#include <pwd.h>
+#include <grp.h>
+
+#include <vppinfra/clib.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/bitmap.h>
+#include <vppinfra/fifo.h>
+#include <vppinfra/time.h>
+#include <vppinfra/mheap.h>
+#include <vppinfra/heap.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/format.h>
+#include <vppinfra/error.h>
+
+#include <vnet/api_errno.h> // alagalah TODO: committers, please take note: is this ok?
+#include <vnet/vnet.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/l2/l2_bd.h>
+#include <vnet/l2tp/l2tp.h>
+#include <vnet/ip/ip.h>
+#include <vnet/unix/tuntap.h>
+#include <vnet/unix/tapcli.h>
+#include <vnet/mpls-gre/mpls.h>
+#include <vnet/dhcp/proxy.h>
+#include <vnet/dhcp/client.h>
+#if IPV6SR > 0
+#include <vnet/sr/sr.h>
+#endif
+#include <vnet/dhcpv6/proxy.h>
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+#include <vnet/classify/vnet_classify.h>
+#include <vnet/classify/input_acl.h>
+#include <vnet/l2/l2_classify.h>
+#include <vnet/vxlan/vxlan.h>
+#include <vnet/gre/gre.h>
+#include <vnet/l2/l2_vtr.h>
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <vnet/lisp-gpe/lisp_gpe.h>
+#include <vnet/lisp-cp/control.h>
+#include <vnet/map/map.h>
+#include <vnet/cop/cop.h>
+#include <vnet/ip/ip6_hop_by_hop.h>
+#include <vnet/devices/af_packet/af_packet.h>
+#include <vnet/policer/policer.h>
+#include <vnet/devices/netmap/netmap.h>
+
+#undef BIHASH_TYPE
+#undef __included_bihash_template_h__
+#include <vnet/l2/l2_fib.h>
+
+#if IPSEC > 0
+#include <vnet/ipsec/ipsec.h>
+#include <vnet/ipsec/ikev2.h>
+#endif /* IPSEC */
+#if DPDK > 0
+#include <vnet/devices/virtio/vhost-user.h>
+#endif
+
+#include <stats/stats.h>
+#include <oam/oam.h>
+
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ethernet/arp_packet.h>
+#include <vnet/interface.h>
+
+#include <vnet/l2/l2_fib.h>
+#include <vnet/l2/l2_bd.h>
+#include <vpp-api/vpe_msg_enum.h>
+
+#define f64_endian(a)
+#define f64_print(a,b)
+
+#define vl_typedefs /* define message structures */
+#include <vpp-api/vpe_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define endian-swap functions */
+#include <vpp-api/vpe_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <vpp-api/vpe_all_api_h.h>
+#undef vl_printfun
+
+#define REPLY_MACRO(t) \
+do { \
+ unix_shared_memory_queue_t * q; \
+ rv = vl_msg_api_pd_handler (mp, rv); \
+ q = vl_api_client_index_to_input_queue (mp->client_index); \
+ if (!q) \
+ return; \
+ \
+ rmp = vl_msg_api_alloc (sizeof (*rmp)); \
+ rmp->_vl_msg_id = ntohs((t)); \
+ rmp->context = mp->context; \
+ rmp->retval = ntohl(rv); \
+ \
+ vl_msg_api_send_shmem (q, (u8 *)&rmp); \
+} while(0);
+
+#define REPLY_MACRO2(t, body) \
+do { \
+ unix_shared_memory_queue_t * q; \
+ rv = vl_msg_api_pd_handler (mp, rv); \
+ q = vl_api_client_index_to_input_queue (mp->client_index); \
+ if (!q) \
+ return; \
+ \
+ rmp = vl_msg_api_alloc (sizeof (*rmp)); \
+ rmp->_vl_msg_id = ntohs((t)); \
+ rmp->context = mp->context; \
+ rmp->retval = ntohl(rv); \
+ do {body;} while (0); \
+ vl_msg_api_send_shmem (q, (u8 *)&rmp); \
+} while(0);
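+
+/*
+ * Usage sketch for the two macros above, assuming a hypothetical FOO
+ * message: a handler declares "vl_api_foo_reply_t *rmp;" and "int rv = 0;",
+ * computes rv, then finishes with REPLY_MACRO (VL_API_FOO_REPLY).
+ * REPLY_MACRO2 additionally runs its body with rmp valid, so reply fields
+ * can be filled in, e.g.:
+ *
+ *   REPLY_MACRO2 (VL_API_FOO_REPLY,
+ *   ({
+ *     rmp->sw_if_index = ntohl (sw_if_index);
+ *   }));
+ */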
+
+#if (1 || CLIB_DEBUG > 0) /* "trust, but verify" */
+
+#define VALIDATE_SW_IF_INDEX(mp) \
+ do { u32 __sw_if_index = ntohl(mp->sw_if_index); \
+ vnet_main_t *__vnm = vnet_get_main(); \
+ if (pool_is_free_index(__vnm->interface_main.sw_interfaces, \
+ __sw_if_index)) { \
+ rv = VNET_API_ERROR_INVALID_SW_IF_INDEX; \
+ goto bad_sw_if_index; \
+ } \
+} while(0);
+
+#define BAD_SW_IF_INDEX_LABEL \
+do { \
+bad_sw_if_index: \
+ ; \
+} while (0);
+
+#define VALIDATE_RX_SW_IF_INDEX(mp) \
+ do { u32 __rx_sw_if_index = ntohl(mp->rx_sw_if_index); \
+ vnet_main_t *__vnm = vnet_get_main(); \
+ if (pool_is_free_index(__vnm->interface_main.sw_interfaces, \
+ __rx_sw_if_index)) { \
+ rv = VNET_API_ERROR_INVALID_SW_IF_INDEX; \
+ goto bad_rx_sw_if_index; \
+ } \
+} while(0);
+
+#define BAD_RX_SW_IF_INDEX_LABEL \
+do { \
+bad_rx_sw_if_index: \
+ ; \
+} while (0);
+
+#define VALIDATE_TX_SW_IF_INDEX(mp) \
+ do { u32 __tx_sw_if_index = ntohl(mp->tx_sw_if_index); \
+ vnet_main_t *__vnm = vnet_get_main(); \
+ if (pool_is_free_index(__vnm->interface_main.sw_interfaces, \
+ __tx_sw_if_index)) { \
+ rv = VNET_API_ERROR_INVALID_SW_IF_INDEX; \
+ goto bad_tx_sw_if_index; \
+ } \
+} while(0);
+
+#define BAD_TX_SW_IF_INDEX_LABEL \
+do { \
+bad_tx_sw_if_index: \
+ ; \
+} while (0);
+
+#else
+
+#define VALIDATE_SW_IF_INDEX(mp)
+#define BAD_SW_IF_INDEX_LABEL
+#define VALIDATE_RX_SW_IF_INDEX(mp)
+#define BAD_RX_SW_IF_INDEX_LABEL
+#define VALIDATE_TX_SW_IF_INDEX(mp)
+#define BAD_TX_SW_IF_INDEX_LABEL
+
+#endif /* CLIB_DEBUG > 0 */
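+
+/*
+ * Usage sketch for the validation macros: VALIDATE_SW_IF_INDEX(mp) jumps
+ * to the label planted by BAD_SW_IF_INDEX_LABEL when the index is stale,
+ * so a typical handler (FOO is a placeholder; see the real handlers below)
+ * reads:
+ *
+ *   int rv = 0;
+ *   VALIDATE_SW_IF_INDEX (mp);
+ *   ... do the actual work ...
+ *   BAD_SW_IF_INDEX_LABEL;
+ *   REPLY_MACRO (VL_API_FOO_REPLY);
+ */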
+
+#define foreach_vpe_api_msg \
+_(WANT_INTERFACE_EVENTS, want_interface_events) \
+_(WANT_OAM_EVENTS, want_oam_events) \
+_(OAM_ADD_DEL, oam_add_del) \
+_(SW_INTERFACE_DUMP, sw_interface_dump) \
+_(SW_INTERFACE_DETAILS, sw_interface_details) \
+_(SW_INTERFACE_SET_FLAGS, sw_interface_set_flags) \
+_(IP_ADD_DEL_ROUTE, ip_add_del_route) \
+_(IS_ADDRESS_REACHABLE, is_address_reachable) \
+_(SW_INTERFACE_ADD_DEL_ADDRESS, sw_interface_add_del_address) \
+_(SW_INTERFACE_SET_TABLE, sw_interface_set_table) \
+_(SW_INTERFACE_SET_VPATH, sw_interface_set_vpath) \
+_(SW_INTERFACE_SET_L2_XCONNECT, sw_interface_set_l2_xconnect) \
+_(SW_INTERFACE_SET_L2_BRIDGE, sw_interface_set_l2_bridge) \
+_(BRIDGE_DOMAIN_ADD_DEL, bridge_domain_add_del) \
+_(BRIDGE_DOMAIN_DUMP, bridge_domain_dump) \
+_(BRIDGE_DOMAIN_DETAILS, bridge_domain_details) \
+_(BRIDGE_DOMAIN_SW_IF_DETAILS, bridge_domain_sw_if_details) \
+_(L2FIB_ADD_DEL, l2fib_add_del) \
+_(L2_FLAGS, l2_flags) \
+_(BRIDGE_FLAGS, bridge_flags) \
+_(TAP_CONNECT, tap_connect) \
+_(TAP_MODIFY, tap_modify) \
+_(TAP_DELETE, tap_delete) \
+_(SW_INTERFACE_TAP_DUMP, sw_interface_tap_dump) \
+_(CREATE_VLAN_SUBIF, create_vlan_subif) \
+_(CREATE_SUBIF, create_subif) \
+_(MPLS_GRE_ADD_DEL_TUNNEL, mpls_gre_add_del_tunnel) \
+_(MPLS_ETHERNET_ADD_DEL_TUNNEL, mpls_ethernet_add_del_tunnel) \
+_(MPLS_ETHERNET_ADD_DEL_TUNNEL_2, mpls_ethernet_add_del_tunnel_2) \
+_(MPLS_ADD_DEL_ENCAP, mpls_add_del_encap) \
+_(MPLS_ADD_DEL_DECAP, mpls_add_del_decap) \
+_(PROXY_ARP_ADD_DEL, proxy_arp_add_del) \
+_(PROXY_ARP_INTFC_ENABLE_DISABLE, proxy_arp_intfc_enable_disable) \
+_(IP_NEIGHBOR_ADD_DEL, ip_neighbor_add_del) \
+_(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats) \
+_(RESET_FIB, reset_fib) \
+_(DHCP_PROXY_CONFIG,dhcp_proxy_config) \
+_(DHCP_PROXY_CONFIG_2,dhcp_proxy_config_2) \
+_(DHCP_PROXY_SET_VSS,dhcp_proxy_set_vss) \
+_(DHCP_CLIENT_CONFIG, dhcp_client_config) \
+_(SET_IP_FLOW_HASH,set_ip_flow_hash) \
+_(SW_INTERFACE_IP6ND_RA_CONFIG, sw_interface_ip6nd_ra_config) \
+_(SW_INTERFACE_IP6ND_RA_PREFIX, sw_interface_ip6nd_ra_prefix) \
+_(SW_INTERFACE_IP6_ENABLE_DISABLE, sw_interface_ip6_enable_disable ) \
+_(SW_INTERFACE_IP6_SET_LINK_LOCAL_ADDRESS, \
+ sw_interface_ip6_set_link_local_address) \
+_(SW_INTERFACE_SET_UNNUMBERED, sw_interface_set_unnumbered) \
+_(CREATE_LOOPBACK, create_loopback) \
+_(CONTROL_PING, control_ping) \
+_(CLI_REQUEST, cli_request) \
+_(SET_ARP_NEIGHBOR_LIMIT, set_arp_neighbor_limit) \
+_(L2_PATCH_ADD_DEL, l2_patch_add_del) \
+_(CLASSIFY_ADD_DEL_TABLE, classify_add_del_table) \
+_(CLASSIFY_ADD_DEL_SESSION, classify_add_del_session) \
+_(CLASSIFY_SET_INTERFACE_IP_TABLE, classify_set_interface_ip_table) \
+_(CLASSIFY_SET_INTERFACE_L2_TABLES, classify_set_interface_l2_tables) \
+_(GET_NODE_INDEX, get_node_index) \
+_(ADD_NODE_NEXT, add_node_next) \
+_(L2TPV3_CREATE_TUNNEL, l2tpv3_create_tunnel) \
+_(L2TPV3_SET_TUNNEL_COOKIES, l2tpv3_set_tunnel_cookies) \
+_(L2TPV3_INTERFACE_ENABLE_DISABLE, l2tpv3_interface_enable_disable) \
+_(L2TPV3_SET_LOOKUP_KEY, l2tpv3_set_lookup_key) \
+_(SW_IF_L2TPV3_TUNNEL_DUMP, sw_if_l2tpv3_tunnel_dump) \
+_(VXLAN_ADD_DEL_TUNNEL, vxlan_add_del_tunnel) \
+_(VXLAN_TUNNEL_DUMP, vxlan_tunnel_dump) \
+_(GRE_ADD_DEL_TUNNEL, gre_add_del_tunnel) \
+_(GRE_TUNNEL_DUMP, gre_tunnel_dump) \
+_(L2_FIB_CLEAR_TABLE, l2_fib_clear_table) \
+_(L2_INTERFACE_EFP_FILTER, l2_interface_efp_filter) \
+_(L2_INTERFACE_VLAN_TAG_REWRITE, l2_interface_vlan_tag_rewrite) \
+_(CREATE_VHOST_USER_IF, create_vhost_user_if) \
+_(MODIFY_VHOST_USER_IF, modify_vhost_user_if) \
+_(DELETE_VHOST_USER_IF, delete_vhost_user_if) \
+_(SW_INTERFACE_VHOST_USER_DUMP, sw_interface_vhost_user_dump) \
+_(IP_ADDRESS_DUMP, ip_address_dump) \
+_(IP_DUMP, ip_dump) \
+_(SW_INTERFACE_VHOST_USER_DETAILS, sw_interface_vhost_user_details) \
+_(SHOW_VERSION, show_version) \
+_(L2_FIB_TABLE_DUMP, l2_fib_table_dump) \
+_(L2_FIB_TABLE_ENTRY, l2_fib_table_entry) \
+_(VXLAN_GPE_ADD_DEL_TUNNEL, vxlan_gpe_add_del_tunnel) \
+_(VXLAN_GPE_TUNNEL_DUMP, vxlan_gpe_tunnel_dump) \
+_(INTERFACE_NAME_RENUMBER, interface_name_renumber) \
+_(WANT_IP4_ARP_EVENTS, want_ip4_arp_events) \
+_(INPUT_ACL_SET_INTERFACE, input_acl_set_interface) \
+_(IPSEC_SPD_ADD_DEL, ipsec_spd_add_del) \
+_(IPSEC_INTERFACE_ADD_DEL_SPD, ipsec_interface_add_del_spd) \
+_(IPSEC_SPD_ADD_DEL_ENTRY, ipsec_spd_add_del_entry) \
+_(IPSEC_SAD_ADD_DEL_ENTRY, ipsec_sad_add_del_entry) \
+_(IPSEC_SA_SET_KEY, ipsec_sa_set_key) \
+_(IKEV2_PROFILE_ADD_DEL, ikev2_profile_add_del) \
+_(IKEV2_PROFILE_SET_AUTH, ikev2_profile_set_auth) \
+_(IKEV2_PROFILE_SET_ID, ikev2_profile_set_id) \
+_(IKEV2_PROFILE_SET_TS, ikev2_profile_set_ts) \
+_(IKEV2_SET_LOCAL_KEY, ikev2_set_local_key) \
+_(DELETE_LOOPBACK, delete_loopback) \
+_(BD_IP_MAC_ADD_DEL, bd_ip_mac_add_del) \
+_(MAP_ADD_DOMAIN, map_add_domain) \
+_(MAP_DEL_DOMAIN, map_del_domain) \
+_(MAP_ADD_DEL_RULE, map_add_del_rule) \
+_(MAP_DOMAIN_DUMP, map_domain_dump) \
+_(MAP_RULE_DUMP, map_rule_dump) \
+_(MAP_SUMMARY_STATS, map_summary_stats) \
+_(COP_INTERFACE_ENABLE_DISABLE, cop_interface_enable_disable) \
+_(COP_WHITELIST_ENABLE_DISABLE, cop_whitelist_enable_disable) \
+_(GET_NODE_GRAPH, get_node_graph) \
+_(SW_INTERFACE_CLEAR_STATS, sw_interface_clear_stats) \
+_(TRACE_PROFILE_ADD, trace_profile_add) \
+_(TRACE_PROFILE_APPLY, trace_profile_apply) \
+_(TRACE_PROFILE_DEL, trace_profile_del) \
+_(LISP_ADD_DEL_LOCATOR_SET, lisp_add_del_locator_set) \
+_(LISP_ADD_DEL_LOCATOR, lisp_add_del_locator) \
+_(LISP_ADD_DEL_LOCAL_EID, lisp_add_del_local_eid) \
+_(LISP_GPE_ADD_DEL_FWD_ENTRY, lisp_gpe_add_del_fwd_entry) \
+_(LISP_ADD_DEL_MAP_RESOLVER, lisp_add_del_map_resolver) \
+_(LISP_GPE_ENABLE_DISABLE, lisp_gpe_enable_disable) \
+_(LISP_ENABLE_DISABLE, lisp_enable_disable) \
+_(LISP_GPE_ADD_DEL_IFACE, lisp_gpe_add_del_iface) \
+_(LISP_ADD_DEL_REMOTE_MAPPING, lisp_add_del_remote_mapping) \
+_(LISP_PITR_SET_LOCATOR_SET, lisp_pitr_set_locator_set) \
+_(LISP_EID_TABLE_ADD_DEL_MAP, lisp_eid_table_add_del_map) \
+_(LISP_LOCATOR_SET_DUMP, lisp_locator_set_dump) \
+_(LISP_LOCAL_EID_TABLE_DUMP, lisp_local_eid_table_dump) \
+_(LISP_GPE_TUNNEL_DUMP, lisp_gpe_tunnel_dump) \
+_(LISP_MAP_RESOLVER_DUMP, lisp_map_resolver_dump) \
+_(LISP_EID_TABLE_MAP_DUMP, lisp_eid_table_map_dump) \
+_(LISP_ENABLE_DISABLE_STATUS_DUMP, \
+ lisp_enable_disable_status_dump) \
+_(LISP_ADD_DEL_MAP_REQUEST_ITR_RLOCS, \
+ lisp_add_del_map_request_itr_rlocs) \
+_(LISP_GET_MAP_REQUEST_ITR_RLOCS, lisp_get_map_request_itr_rlocs) \
+_(SR_MULTICAST_MAP_ADD_DEL, sr_multicast_map_add_del) \
+_(AF_PACKET_CREATE, af_packet_create) \
+_(AF_PACKET_DELETE, af_packet_delete) \
+_(POLICER_ADD_DEL, policer_add_del) \
+_(POLICER_DUMP, policer_dump) \
+_(NETMAP_CREATE, netmap_create) \
+_(NETMAP_DELETE, netmap_delete) \
+_(MPLS_GRE_TUNNEL_DUMP, mpls_gre_tunnel_dump) \
+_(MPLS_GRE_TUNNEL_DETAILS, mpls_gre_tunnel_details) \
+_(MPLS_ETH_TUNNEL_DUMP, mpls_eth_tunnel_dump) \
+_(MPLS_ETH_TUNNEL_DETAILS, mpls_eth_tunnel_details) \
+_(MPLS_FIB_ENCAP_DUMP, mpls_fib_encap_dump) \
+_(MPLS_FIB_ENCAP_DETAILS, mpls_fib_encap_details) \
+_(MPLS_FIB_DECAP_DUMP, mpls_fib_decap_dump) \
+_(MPLS_FIB_DECAP_DETAILS, mpls_fib_decap_details) \
+_(CLASSIFY_TABLE_IDS,classify_table_ids) \
+_(CLASSIFY_TABLE_BY_INTERFACE, classify_table_by_interface) \
+_(CLASSIFY_TABLE_INFO,classify_table_info) \
+_(CLASSIFY_SESSION_DUMP,classify_session_dump) \
+_(CLASSIFY_SESSION_DETAILS,classify_session_details)
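+
+/*
+ * foreach_vpe_api_msg is an X-macro table: each consumer defines _() and
+ * expands the list. A sketch of the handler-registration expansion (the
+ * full argument list is elided here; the actual hookup code appears
+ * further down in api.c):
+ *
+ *   #define _(N,n) vl_msg_api_set_handlers (VL_API_##N, #n, ...);
+ *   foreach_vpe_api_msg;
+ *   #undef _
+ */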
+
+#define QUOTE_(x) #x
+#define QUOTE(x) QUOTE_(x)
+
+#define foreach_registration_hash \
+_(interface_events) \
+_(to_netconf_server) \
+_(from_netconf_server) \
+_(to_netconf_client) \
+_(from_netconf_client) \
+_(oam_events)
+
+typedef enum {
+ RESOLVE_IP4_ADD_DEL_ROUTE=1,
+ RESOLVE_IP6_ADD_DEL_ROUTE,
+ RESOLVE_MPLS_ETHERNET_ADD_DEL,
+} resolve_t;
+
+typedef struct {
+ u8 resolve_type;
+ union {
+ vl_api_ip_add_del_route_t r;
+ vl_api_mpls_ethernet_add_del_tunnel_2_t t;
+ };
+} pending_route_t;
+
+typedef struct {
+
+#define _(a) uword *a##_registration_hash; \
+ vpe_client_registration_t * a##_registrations;
+foreach_registration_hash
+#undef _
+
+ /* notifications happen really early in the game */
+ u8 link_state_process_up;
+
+ /* ip4 pending route adds */
+ pending_route_t * pending_routes;
+
+ /* ip4 arp event registration pool */
+ vl_api_ip4_arp_event_t * arp_events;
+
+ /* convenience */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+} vpe_api_main_t;
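+
+/*
+ * For each name in foreach_registration_hash, the _() expansion above adds
+ * a pair of members to vpe_api_main_t; e.g. for oam_events it produces:
+ *
+ *   uword *oam_events_registration_hash;
+ *   vpe_client_registration_t *oam_events_registrations;
+ */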
+
+static vlib_node_registration_t vpe_resolver_process_node;
+static vpe_api_main_t vpe_api_main;
+
+static void send_sw_interface_flags (vpe_api_main_t * am,
+ unix_shared_memory_queue_t *q,
+ vnet_sw_interface_t * swif);
+static void send_sw_interface_flags_deleted (vpe_api_main_t * am,
+ unix_shared_memory_queue_t *q,
+ u32 sw_if_index);
+
+static int arp_change_delete_callback (u32 pool_index, u8 * notused);
+
+
+/* Clean up all registrations belonging to the indicated client */
+int vl_api_memclnt_delete_callback (u32 client_index)
+{
+ vpe_api_main_t * vam = &vpe_api_main;
+ vpe_client_registration_t *rp;
+ uword * p;
+ int stats_memclnt_delete_callback (u32 client_index);
+
+ stats_memclnt_delete_callback (client_index);
+
+#define _(a) \
+ p = hash_get (vam->a##_registration_hash, client_index); \
+ if (p) { \
+ rp = pool_elt_at_index (vam->a##_registrations, p[0]); \
+ pool_put (vam->a##_registrations, rp); \
+ hash_unset (vam->a##_registration_hash, client_index); \
+ }
+ foreach_registration_hash;
+#undef _
+ return 0;
+}
+
+#define API_LINK_STATE_EVENT 1
+#define API_ADMIN_UP_DOWN_EVENT 2
+
+static int
+event_data_cmp (void * a1, void * a2)
+{
+ uword * e1 = a1;
+ uword * e2 = a2;
+
+ return (word) e1[0] - (word) e2[0];
+}
+
+static uword
+link_state_process (vlib_main_t * vm,
+ vlib_node_runtime_t * rt,
+ vlib_frame_t * f)
+{
+ vpe_api_main_t * vam = &vpe_api_main;
+ vnet_main_t * vnm = vam->vnet_main;
+ vnet_sw_interface_t * swif;
+ uword * event_data = 0;
+ vpe_client_registration_t *reg;
+ int i;
+ u32 prev_sw_if_index;
+ unix_shared_memory_queue_t * q;
+
+ vam->link_state_process_up = 1;
+
+ while (1) {
+ vlib_process_wait_for_event (vm);
+
+ /* Unified list of changed link or admin state sw_if_indices */
+ vlib_process_get_events_with_type
+ (vm, &event_data, API_LINK_STATE_EVENT);
+ vlib_process_get_events_with_type
+ (vm, &event_data, API_ADMIN_UP_DOWN_EVENT);
+
+ /* Sort, so we can eliminate duplicates */
+ vec_sort_with_function (event_data, event_data_cmp);
+
+ prev_sw_if_index = ~0;
+
+ for (i = 0; i < vec_len(event_data); i++) {
+ /* Only one message per swif */
+ if (prev_sw_if_index == event_data[i])
+ continue;
+ prev_sw_if_index = event_data[i];
+
+ pool_foreach(reg, vam->interface_events_registrations,
+ ({
+ q = vl_api_client_index_to_input_queue (reg->client_index);
+ if (q) {
+ // sw_interface may be deleted already
+ if (!pool_is_free_index (vnm->interface_main.sw_interfaces,
+ event_data[i]))
+ {
+ swif = vnet_get_sw_interface (vnm, event_data[i]);
+ send_sw_interface_flags (vam, q, swif);
+ }
+ }
+ }));
+ }
+ vec_reset_length (event_data);
+ }
+
+ return 0;
+}
+
+static clib_error_t *
+link_up_down_function (vnet_main_t *vm, u32 hw_if_index, u32 flags);
+static clib_error_t *
+admin_up_down_function (vnet_main_t *vm, u32 hw_if_index, u32 flags);
+
+VLIB_REGISTER_NODE (link_state_process_node,static) = {
+ .function = link_state_process,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "vpe-link-state-process",
+};
+
+VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (admin_up_down_function);
+VNET_HW_INTERFACE_LINK_UP_DOWN_FUNCTION (link_up_down_function);
+
+static clib_error_t *
+link_up_down_function (vnet_main_t *vm, u32 hw_if_index, u32 flags)
+{
+ vpe_api_main_t * vam = &vpe_api_main;
+ vnet_hw_interface_t *hi = vnet_get_hw_interface (vm, hw_if_index);
+
+ if (vam->link_state_process_up)
+ vlib_process_signal_event (vam->vlib_main,
+ link_state_process_node.index,
+ API_LINK_STATE_EVENT,
+ hi->sw_if_index);
+ return 0;
+}
+
+static clib_error_t *
+admin_up_down_function (vnet_main_t *vm, u32 sw_if_index, u32 flags)
+{
+ vpe_api_main_t * vam = &vpe_api_main;
+
+ /*
+ * Note: it's perfectly fair to set a subif admin up / admin down.
+ * Note the subtle distinction between this routine and the previous
+ * routine.
+ */
+ if (vam->link_state_process_up)
+ vlib_process_signal_event (vam->vlib_main,
+ link_state_process_node.index,
+ API_ADMIN_UP_DOWN_EVENT,
+ sw_if_index);
+ return 0;
+}
+
+#define pub_sub_handler(lca,UCA) \
+static void vl_api_want_##lca##_t_handler ( \
+ vl_api_want_##lca##_t *mp) \
+{ \
+ vpe_api_main_t *vam = &vpe_api_main; \
+ vpe_client_registration_t *rp; \
+ vl_api_want_##lca##_reply_t *rmp; \
+ uword *p; \
+ i32 rv = 0; \
+ \
+ p = hash_get (vam->lca##_registration_hash, mp->client_index); \
+ if (p) { \
+ if (mp->enable_disable) { \
+ clib_warning ("pid %d: already enabled...", mp->pid); \
+ rv = VNET_API_ERROR_INVALID_REGISTRATION; \
+ goto reply; \
+ } else { \
+ rp = pool_elt_at_index (vam->lca##_registrations, p[0]); \
+ pool_put (vam->lca##_registrations, rp); \
+ hash_unset (vam->lca##_registration_hash, \
+ mp->client_index); \
+ goto reply; \
+ } \
+ } \
+ if (mp->enable_disable == 0) { \
+ clib_warning ("pid %d: already disabled...", mp->pid); \
+ rv = VNET_API_ERROR_INVALID_REGISTRATION; \
+ goto reply; \
+ } \
+ pool_get (vam->lca##_registrations, rp); \
+ rp->client_index = mp->client_index; \
+ rp->client_pid = mp->pid; \
+ hash_set (vam->lca##_registration_hash, rp->client_index, \
+ rp - vam->lca##_registrations); \
+ \
+reply: \
+ REPLY_MACRO (VL_API_WANT_##UCA##_REPLY); \
+}
+
+pub_sub_handler (interface_events,INTERFACE_EVENTS)
+pub_sub_handler (oam_events,OAM_EVENTS)
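+
+/*
+ * Each pub_sub_handler() line above expands to a complete handler; e.g.
+ * pub_sub_handler (oam_events, OAM_EVENTS) defines
+ * vl_api_want_oam_events_t_handler(), which adds or removes the calling
+ * client in oam_events_registration_hash and replies with
+ * VL_API_WANT_OAM_EVENTS_REPLY.
+ */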
+
+#define RESOLUTION_EVENT 1
+#define RESOLUTION_PENDING_EVENT 2
+#define IP4_ARP_EVENT 3
+
+static int ip4_add_del_route_t_handler (vl_api_ip_add_del_route_t *mp);
+static int ip6_add_del_route_t_handler (vl_api_ip_add_del_route_t *mp);
+static int mpls_ethernet_add_del_tunnel_2_t_handler
+(vl_api_mpls_ethernet_add_del_tunnel_2_t *mp);
+
+void handle_ip4_arp_event (u32 pool_index)
+{
+ vpe_api_main_t * vam = &vpe_api_main;
+ vnet_main_t * vnm = vam->vnet_main;
+ vlib_main_t * vm = vam->vlib_main;
+ vl_api_ip4_arp_event_t * event;
+ vl_api_ip4_arp_event_t * mp;
+ unix_shared_memory_queue_t * q;
+
+ /* Client can cancel, die, etc. */
+ if (pool_is_free_index (vam->arp_events, pool_index))
+ return;
+
+ event = pool_elt_at_index (vam->arp_events, pool_index);
+
+ q = vl_api_client_index_to_input_queue (event->client_index);
+ if (!q) {
+ (void) vnet_add_del_ip4_arp_change_event
+ (vnm, arp_change_delete_callback,
+ event->pid, &event->address,
+ vpe_resolver_process_node.index, IP4_ARP_EVENT,
+ ~0 /* pool index, notused */, 0 /* is_add */);
+ return;
+ }
+
+ if (q->cursize < q->maxsize) {
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ clib_memcpy (mp, event, sizeof (*mp));
+ vl_msg_api_send_shmem (q, (u8 *)&mp);
+ } else {
+ static f64 last_time;
+ /*
+ * Throttle syslog msgs.
+ * It's pretty tempting to just revoke the registration...
+ */
+ if (vlib_time_now (vm) > last_time + 10.0) {
+ clib_warning ("arp event for %U to pid %d: queue stuffed!",
+ format_ip4_address, &event->address, event->pid);
+ last_time = vlib_time_now(vm);
+ }
+ }
+}
+
+static uword
+resolver_process (vlib_main_t * vm,
+ vlib_node_runtime_t * rt,
+ vlib_frame_t * f)
+{
+ uword event_type;
+ uword *event_data = 0;
+ f64 timeout = 100.0;
+ vpe_api_main_t * vam = &vpe_api_main;
+ pending_route_t * pr;
+ vl_api_ip_add_del_route_t * adr;
+ vl_api_mpls_ethernet_add_del_tunnel_2_t *pme;
+ u32 * resolution_failures = 0;
+ int i, rv;
+ clib_error_t * e;
+
+ while (1) {
+ vlib_process_wait_for_event_or_clock (vm, timeout);
+
+ event_type = vlib_process_get_events (vm, &event_data);
+
+ switch (event_type) {
+ case RESOLUTION_PENDING_EVENT:
+ timeout = 1.0;
+ break;
+
+ case RESOLUTION_EVENT:
+ for (i = 0; i < vec_len(event_data); i++) {
+ /*
+ * Resolution events can occur long after the
+ * original request has timed out. $$$ add a cancel
+ * mechanism..
+ */
+ if (pool_is_free_index (vam->pending_routes, event_data[i]))
+ continue;
+
+ pr = pool_elt_at_index (vam->pending_routes, event_data[i]);
+ adr = &pr->r;
+ pme = &pr->t;
+
+ switch (pr->resolve_type) {
+ case RESOLVE_IP4_ADD_DEL_ROUTE:
+ rv = ip4_add_del_route_t_handler (adr);
+ clib_warning ("resolver: add %U/%d via %U %s",
+ format_ip4_address,
+ (ip4_address_t *)&(adr->dst_address),
+ adr->dst_address_length,
+ format_ip4_address,
+ (ip4_address_t *)&(adr->next_hop_address),
+ (rv >= 0) ? "succeeded" : "failed");
+ break;
+
+ case RESOLVE_IP6_ADD_DEL_ROUTE:
+ rv = ip6_add_del_route_t_handler (adr);
+ clib_warning ("resolver: add %U/%d via %U %s",
+ format_ip6_address,
+ (ip6_address_t *)&(adr->dst_address),
+ adr->dst_address_length,
+ format_ip6_address,
+ (ip6_address_t *)&(adr->next_hop_address),
+ (rv >= 0) ? "succeeded" : "failed");
+ break;
+
+ case RESOLVE_MPLS_ETHERNET_ADD_DEL:
+ rv = mpls_ethernet_add_del_tunnel_2_t_handler (pme);
+ clib_warning ("resolver: add mpls-o-e via %U %s",
+ format_ip4_address,
+ (ip4_address_t *)&(pme->next_hop_ip4_address_in_outer_vrf),
+ (rv >= 0) ? "succeeded" : "failed");
+ break;
+
+ default:
+ clib_warning ("resolver: BOGUS TYPE %d", pr->resolve_type);
+ }
+ pool_put (vam->pending_routes, pr);
+ }
+ break;
+
+ case IP4_ARP_EVENT:
+ for (i = 0; i < vec_len(event_data); i++)
+ handle_ip4_arp_event (event_data[i]);
+ break;
+
+ case ~0: /* timeout, retry pending resolutions */
+ pool_foreach (pr, vam->pending_routes,
+ ({
+ int is_adr = 1;
+ adr = &pr->r;
+ pme = &pr->t;
+
+ /* May fail, e.g. due to interface down */
+ switch (pr->resolve_type) {
+ case RESOLVE_IP4_ADD_DEL_ROUTE:
+ e = ip4_probe_neighbor
+ (vm, (ip4_address_t *)&(adr->next_hop_address),
+ ntohl(adr->next_hop_sw_if_index));
+ break;
+
+ case RESOLVE_IP6_ADD_DEL_ROUTE:
+ e = ip6_probe_neighbor
+ (vm, (ip6_address_t *)&(adr->next_hop_address),
+ ntohl(adr->next_hop_sw_if_index));
+ break;
+
+ case RESOLVE_MPLS_ETHERNET_ADD_DEL:
+ is_adr = 0;
+ e = ip4_probe_neighbor
+ (vm,
+ (ip4_address_t *)&(pme->next_hop_ip4_address_in_outer_vrf),
+ pme->resolve_opaque);
+ break;
+
+ default:
+ e = clib_error_return (0, "resolver: BOGUS TYPE %d",
+ pr->resolve_type);
+ }
+ if (e) {
+ clib_error_report (e);
+ if (is_adr)
+ adr->resolve_attempts = 1;
+ else
+ pme->resolve_attempts = 1;
+
+ }
+ if (is_adr) {
+ adr->resolve_attempts -= 1;
+ if (adr->resolve_attempts == 0)
+ vec_add1 (resolution_failures,
+ pr - vam->pending_routes);
+ } else {
+ pme->resolve_attempts -= 1;
+ if (pme->resolve_attempts == 0)
+ vec_add1 (resolution_failures,
+ pr - vam->pending_routes);
+ }
+
+ }));
+ for (i = 0; i < vec_len (resolution_failures); i++) {
+ pr = pool_elt_at_index (vam->pending_routes,
+ resolution_failures[i]);
+ adr = &pr->r;
+ pme = &pr->t;
+
+ switch (pr->resolve_type) {
+ case RESOLVE_IP4_ADD_DEL_ROUTE:
+ clib_warning ("resolver: add %U/%d via %U retry failure",
+ format_ip4_address,
+ (ip4_address_t *)&(adr->dst_address),
+ adr->dst_address_length,
+ format_ip4_address,
+ (ip4_address_t *)&(adr->next_hop_address));
+ break;
+
+ case RESOLVE_IP6_ADD_DEL_ROUTE:
+ clib_warning ("resolver: add %U/%d via %U retry failure",
+ format_ip6_address,
+ (ip6_address_t *)&(adr->dst_address),
+ adr->dst_address_length,
+ format_ip6_address,
+ (ip6_address_t *)&(adr->next_hop_address));
+ break;
+
+ case RESOLVE_MPLS_ETHERNET_ADD_DEL:
+ clib_warning ("resolver: add mpls-o-e via %U retry failure",
+ format_ip4_address,
+ (ip4_address_t *)&(pme->next_hop_ip4_address_in_outer_vrf));
+ break;
+
+ default:
+ clib_warning ("BUG");
+ }
+ pool_put(vam->pending_routes, pr);
+ }
+ vec_reset_length (resolution_failures);
+ break;
+ }
+ if (pool_elts (vam->pending_routes) == 0)
+ timeout = 100.0;
+ vec_reset_length (event_data);
+ }
+ return 0; /* or not */
+}
+
+VLIB_REGISTER_NODE (vpe_resolver_process_node,static) = {
+ .function = resolver_process,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "vpe-route-resolver-process",
+};
+
+static int ip4_add_del_route_t_handler (vl_api_ip_add_del_route_t *mp)
+{
+ ip4_main_t * im = &ip4_main;
+ ip_lookup_main_t * lm = &im->lookup_main;
+ vnet_classify_main_t * cm = &vnet_classify_main;
+ stats_main_t * sm = &stats_main;
+ ip4_add_del_route_args_t a;
+ ip4_address_t next_hop_address;
+ u32 fib_index;
+ vpe_api_main_t * vam = &vpe_api_main;
+ vnet_main_t * vnm = vam->vnet_main;
+ vlib_main_t * vm = vlib_get_main();
+ pending_route_t * pr;
+ vl_api_ip_add_del_route_t * adr;
+ uword * p;
+ clib_error_t * e;
+ u32 ai;
+ ip_adjacency_t *nh_adj, *add_adj = 0;
+
+ p = hash_get (im->fib_index_by_table_id, ntohl(mp->vrf_id));
+ if (!p) {
+ if (mp->create_vrf_if_needed) {
+ ip4_fib_t * f;
+ f = find_ip4_fib_by_table_index_or_id (im, ntohl(mp->vrf_id),
+ 0 /* flags */);
+ fib_index = f->index;
+ } else {
+ /* No such VRF, and we weren't asked to create one */
+ return VNET_API_ERROR_NO_SUCH_FIB;
+ }
+ } else {
+ fib_index = p[0];
+ }
+
+ if (pool_is_free_index (vnm->interface_main.sw_interfaces,
+ ntohl(mp->next_hop_sw_if_index)))
+ return VNET_API_ERROR_NO_MATCHING_INTERFACE;
+
+ clib_memcpy (next_hop_address.data, mp->next_hop_address,
+ sizeof (next_hop_address.data));
+
+ /* Arp for the next_hop if necessary */
+ if (mp->is_add && mp->resolve_if_needed) {
+ u32 lookup_result;
+ ip_adjacency_t * adj;
+
+ lookup_result = ip4_fib_lookup_with_table
+ (im, fib_index, &next_hop_address, 1 /* disable default route */);
+
+ adj = ip_get_adjacency (lm, lookup_result);
+
+ if (adj->lookup_next_index == IP_LOOKUP_NEXT_ARP) {
+ pool_get (vam->pending_routes, pr);
+ pr->resolve_type = RESOLVE_IP4_ADD_DEL_ROUTE;
+ adr = &pr->r;
+ clib_memcpy (adr, mp, sizeof (*adr));
+ /* recursion block, "just in case" */
+ adr->resolve_if_needed = 0;
+ adr->resolve_attempts = ntohl(mp->resolve_attempts);
+ vnet_register_ip4_arp_resolution_event
+ (vnm, &next_hop_address, vpe_resolver_process_node.index,
+ RESOLUTION_EVENT, pr - vam->pending_routes);
+
+ vlib_process_signal_event
+ (vm, vpe_resolver_process_node.index,
+ RESOLUTION_PENDING_EVENT, 0 /* data */);
+
+ /* The interface may be down, etc. */
+ e = ip4_probe_neighbor
+ (vm, (ip4_address_t *)&(mp->next_hop_address),
+ ntohl(mp->next_hop_sw_if_index));
+
+ if (e)
+ clib_error_report(e);
+
+ return VNET_API_ERROR_IN_PROGRESS;
+ }
+ }
+
+ if (mp->is_multipath) {
+ u32 flags;
+
+ dslock (sm, 1 /* release hint */, 10 /* tag */);
+
+ if (mp->is_add)
+ flags = IP4_ROUTE_FLAG_ADD;
+ else
+ flags = IP4_ROUTE_FLAG_DEL;
+
+ if (mp->not_last)
+ flags |= IP4_ROUTE_FLAG_NOT_LAST_IN_GROUP;
+
+ ip4_add_del_route_next_hop (im, flags,
+ (ip4_address_t *) mp->dst_address,
+ (u32) mp->dst_address_length,
+ (ip4_address_t *) mp->next_hop_address,
+ ntohl(mp->next_hop_sw_if_index),
+ (u32) mp->next_hop_weight,
+ ~0 /* adj_index */,
+ fib_index);
+ dsunlock(sm);
+ return 0;
+ }
+
+ memset (&a, 0, sizeof (a));
+ clib_memcpy (a.dst_address.data, mp->dst_address, sizeof (a.dst_address.data));
+
+ a.dst_address_length = mp->dst_address_length;
+
+ a.flags = (mp->is_add ? IP4_ROUTE_FLAG_ADD : IP4_ROUTE_FLAG_DEL);
+ a.flags |= IP4_ROUTE_FLAG_FIB_INDEX;
+ a.table_index_or_table_id = fib_index;
+ a.add_adj = 0;
+ a.n_add_adj = 0;
+
+ if (mp->not_last)
+ a.flags |= IP4_ROUTE_FLAG_NOT_LAST_IN_GROUP;
+
+ dslock (sm, 1 /* release hint */, 2 /* tag */);
+
+ if (mp->is_add) {
+ if (mp->is_drop)
+ ai = lm->drop_adj_index;
+ else if (mp->is_local)
+ ai = lm->local_adj_index;
+ else if (mp->is_classify) {
+ ip_adjacency_t cadj;
+ memset(&cadj, 0, sizeof(cadj));
+ cadj.lookup_next_index = IP_LOOKUP_NEXT_CLASSIFY;
+ cadj.classify.table_index = ntohl(mp->classify_table_index);
+ if (pool_is_free_index (cm->tables, cadj.classify.table_index)) {
+ dsunlock(sm);
+ return VNET_API_ERROR_NO_SUCH_TABLE;
+ }
+ vec_add1 (add_adj, cadj);
+ goto do_add_del;
+ }
+ else {
+ ai = ip4_fib_lookup_with_table
+ (im, fib_index, &next_hop_address,
+ 1 /* disable default route */);
+ if (ai == lm->miss_adj_index) {
+ dsunlock(sm);
+ return VNET_API_ERROR_NEXT_HOP_NOT_IN_FIB;
+ }
+ }
+
+ nh_adj = ip_get_adjacency (lm, ai);
+ if (nh_adj->lookup_next_index == IP_LOOKUP_NEXT_ARP &&
+ nh_adj->arp.next_hop.ip4.as_u32 == 0) {
+ /* the next-hop resolves via a glean adj. create and use
+ * an ARP adj for the next-hop */
+ a.adj_index = vnet_arp_glean_add(fib_index, &next_hop_address);
+ a.add_adj = NULL;
+ a.n_add_adj = 0;
+ ip4_add_del_route (im, &a);
+
+ goto done;
+ }
+ vec_add1 (add_adj, nh_adj[0]);
+ if (mp->lookup_in_vrf) {
+ p = hash_get (im->fib_index_by_table_id, ntohl(mp->lookup_in_vrf));
+ if (p)
+ add_adj[0].explicit_fib_index = p[0];
+ else {
+ vec_free (add_adj);
+ dsunlock(sm);
+ return VNET_API_ERROR_NO_SUCH_INNER_FIB;
+ }
+ }
+ } else {
+ ip_adjacency_t * adj;
+ int disable_default_route = 1;
+
+ /* Trying to delete the default route? */
+ if (a.dst_address.as_u32 == 0 &&
+ a.dst_address_length == 0)
+ disable_default_route = 0;
+
+ ai = ip4_fib_lookup_with_table
+ (im, fib_index, &a.dst_address, disable_default_route);
+ if (ai == lm->miss_adj_index) {
+ dsunlock(sm);
+ return VNET_API_ERROR_UNKNOWN_DESTINATION;
+ }
+
+ adj = ip_get_adjacency (lm, ai);
+ if (adj->lookup_next_index == IP_LOOKUP_NEXT_ARP) {
+ dsunlock(sm);
+ return VNET_API_ERROR_ADDRESS_MATCHES_INTERFACE_ADDRESS;
+ }
+ }
+
+do_add_del:
+ a.adj_index = ~0;
+ a.add_adj = add_adj;
+ a.n_add_adj = vec_len(add_adj);
+ ip4_add_del_route (im, &a);
+
+ vec_free (add_adj);
+
+done:
+ dsunlock (sm);
+ return 0;
+}
+
+static int ip6_add_del_route_t_handler (vl_api_ip_add_del_route_t *mp)
+{
+ ip6_main_t * im = &ip6_main;
+ ip_lookup_main_t * lm = &im->lookup_main;
+ vnet_main_t * vnm = vnet_get_main();
+ vlib_main_t * vm = vlib_get_main();
+ vpe_api_main_t * vam = &vpe_api_main;
+ stats_main_t * sm = &stats_main;
+ ip6_add_del_route_args_t a;
+ ip6_address_t next_hop_address;
+ pending_route_t * pr;
+ vl_api_ip_add_del_route_t * adr;
+
+ u32 fib_index;
+ uword * p;
+ clib_error_t * e;
+ ip_adjacency_t * nh_adj, * add_adj = 0;
+ u32 ai;
+
+ p = hash_get (im->fib_index_by_table_id, ntohl(mp->vrf_id));
+
+ if (!p) {
+ if (mp->create_vrf_if_needed) {
+ ip6_fib_t * f;
+ f = find_ip6_fib_by_table_index_or_id (im, ntohl(mp->vrf_id),
+ 0 /* flags */);
+ fib_index = f->index;
+ } else {
+ /* No such VRF, and we weren't asked to create one */
+ return VNET_API_ERROR_NO_SUCH_FIB;
+ }
+ } else {
+ fib_index = p[0];
+ }
+
+ if (pool_is_free_index (vnm->interface_main.sw_interfaces,
+ ntohl(mp->next_hop_sw_if_index)))
+ return VNET_API_ERROR_NO_MATCHING_INTERFACE;
+
+ clib_memcpy (next_hop_address.as_u8, mp->next_hop_address,
+ sizeof (next_hop_address.as_u8));
+
+ /* Arp for the next_hop if necessary */
+ if (mp->is_add && mp->resolve_if_needed) {
+ u32 lookup_result;
+ ip_adjacency_t * adj;
+
+ lookup_result = ip6_fib_lookup_with_table
+ (im, fib_index, &next_hop_address);
+
+ adj = ip_get_adjacency (lm, lookup_result);
+
+ if (adj->lookup_next_index == IP_LOOKUP_NEXT_ARP) {
+ pool_get (vam->pending_routes, pr);
+ adr = &pr->r;
+ pr->resolve_type = RESOLVE_IP6_ADD_DEL_ROUTE;
+ clib_memcpy (adr, mp, sizeof (*adr));
+ /* recursion block, "just in case" */
+ adr->resolve_if_needed = 0;
+ adr->resolve_attempts = ntohl(mp->resolve_attempts);
+ vnet_register_ip6_neighbor_resolution_event
+ (vnm, &next_hop_address, vpe_resolver_process_node.index,
+ RESOLUTION_EVENT, pr - vam->pending_routes);
+
+ vlib_process_signal_event
+ (vm, vpe_resolver_process_node.index,
+ RESOLUTION_PENDING_EVENT, 0 /* data */);
+
+ /* The interface may be down, etc. */
+ e = ip6_probe_neighbor
+ (vm, (ip6_address_t *)&(mp->next_hop_address),
+ ntohl(mp->next_hop_sw_if_index));
+
+ if (e)
+ clib_error_report(e);
+
+ return VNET_API_ERROR_IN_PROGRESS;
+ }
+ }
+
+ if (mp->is_multipath) {
+ u32 flags;
+
+ dslock (sm, 1 /* release hint */, 11 /* tag */);
+
+ if (mp->is_add)
+ flags = IP6_ROUTE_FLAG_ADD;
+ else
+ flags = IP6_ROUTE_FLAG_DEL;
+
+ if (mp->not_last)
+ flags |= IP6_ROUTE_FLAG_NOT_LAST_IN_GROUP;
+
+ ip6_add_del_route_next_hop (im, flags, (ip6_address_t *)mp->dst_address,
+ (u32) mp->dst_address_length,
+ (ip6_address_t *)mp->next_hop_address,
+ ntohl(mp->next_hop_sw_if_index),
+ (u32) mp->next_hop_weight,
+ ~0 /* adj_index */,
+ fib_index);
+ dsunlock(sm);
+ return 0;
+ }
+
+ memset (&a, 0, sizeof (a));
+ clib_memcpy (a.dst_address.as_u8, mp->dst_address, sizeof (a.dst_address.as_u8));
+
+ a.dst_address_length = mp->dst_address_length;
+
+ a.flags = (mp->is_add ? IP6_ROUTE_FLAG_ADD : IP6_ROUTE_FLAG_DEL);
+ a.flags |= IP6_ROUTE_FLAG_FIB_INDEX;
+ a.table_index_or_table_id = fib_index;
+ a.add_adj = 0;
+ a.n_add_adj = 0;
+
+ if (mp->not_last)
+ a.flags |= IP6_ROUTE_FLAG_NOT_LAST_IN_GROUP;
+
+ dslock (sm, 1 /* release hint */, 3 /* tag */);
+
+ if (mp->is_add) {
+ if (mp->is_drop)
+ ai = lm->drop_adj_index;
+ else if (mp->is_local)
+ ai = lm->local_adj_index;
+ else {
+ ai = ip6_fib_lookup_with_table
+ (im, fib_index, &next_hop_address);
+ if (ai == lm->miss_adj_index) {
+ dsunlock(sm);
+ return VNET_API_ERROR_NEXT_HOP_NOT_IN_FIB;
+ }
+ }
+
+ nh_adj = ip_get_adjacency (lm, ai);
+ vec_add1 (add_adj, nh_adj[0]);
+ if (mp->lookup_in_vrf) {
+ p = hash_get (im->fib_index_by_table_id, ntohl(mp->lookup_in_vrf));
+ if (p)
+ add_adj[0].explicit_fib_index = p[0];
+ else {
+ vec_free (add_adj);
+ dsunlock(sm);
+ return VNET_API_ERROR_NO_SUCH_INNER_FIB;
+ }
+ }
+ } else {
+ ip_adjacency_t * adj;
+
+ ai = ip6_fib_lookup_with_table
+ (im, fib_index, &a.dst_address);
+ if (ai == lm->miss_adj_index) {
+ dsunlock(sm);
+ return VNET_API_ERROR_UNKNOWN_DESTINATION;
+ }
+ adj = ip_get_adjacency (lm, ai);
+ if (adj->lookup_next_index == IP_LOOKUP_NEXT_ARP) {
+ dsunlock(sm);
+ return VNET_API_ERROR_ADDRESS_MATCHES_INTERFACE_ADDRESS;
+ }
+ }
+
+ a.adj_index = ~0;
+ a.add_adj = add_adj;
+ a.n_add_adj = vec_len(add_adj);
+ ip6_add_del_route (im, &a);
+
+ vec_free (add_adj);
+
+ dsunlock (sm);
+ return 0;
+}
+
+void vl_api_ip_add_del_route_t_handler (
+ vl_api_ip_add_del_route_t *mp)
+{
+ vl_api_ip_add_del_route_reply_t * rmp;
+ int rv;
+ vnet_main_t * vnm = vnet_get_main();
+
+ vnm->api_errno = 0;
+
+ if (mp->is_ipv6)
+ rv = ip6_add_del_route_t_handler (mp);
+ else
+ rv = ip4_add_del_route_t_handler (mp);
+
+ rv = (rv == 0) ? vnm->api_errno : rv;
+
+ REPLY_MACRO(VL_API_IP_ADD_DEL_ROUTE_REPLY);
+}
+
+void api_config_default_ip_route (u8 is_ipv6, u8 is_add, u32 vrf_id,
+ u32 sw_if_index, u8 *next_hop_addr)
+{
+ vl_api_ip_add_del_route_t mp;
+ int rv;
+
+ memset (&mp, 0, sizeof(vl_api_ip_add_del_route_t));
+
+ /*
+ * Configure default IP route:
+ * - ip route add 0.0.0.0/1 via <GW IP>
+ * - ip route add 128.0.0.0/1 via <GW IP>
+ */
+ mp.next_hop_sw_if_index = ntohl(sw_if_index);
+ mp.vrf_id = vrf_id;
+ mp.resolve_attempts = ~0;
+ mp.resolve_if_needed = 1;
+ mp.is_add = is_add;
+ mp.is_ipv6 = is_ipv6;
+ mp.next_hop_weight = 1;
+
+ clib_memcpy (&mp.next_hop_address[0], next_hop_addr, 16);
+
+ if (is_ipv6)
+ rv = ip6_add_del_route_t_handler (&mp);
+ else
+ {
+ mp.dst_address_length = 1;
+
+ mp.dst_address[0] = 0;
+ rv = ip4_add_del_route_t_handler (&mp);
+
+ mp.dst_address[0] = 128;
+ rv |= ip4_add_del_route_t_handler (&mp);
+ }
+
+ if (rv)
+ clib_error_return (0, "failed to config default IP route");
+
+}
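+
+/*
+ * Note: two /1 routes (0.0.0.0/1 and 128.0.0.0/1) cover the same address
+ * space as 0.0.0.0/0 while leaving the real default-route slot untouched,
+ * and they win over any existing default route by longest-prefix match.
+ */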
+
+static void
+vl_api_sw_interface_add_del_address_t_handler
+(vl_api_sw_interface_add_del_address_t *mp)
+{
+ vlib_main_t *vm = vlib_get_main();
+ vl_api_sw_interface_add_del_address_reply_t * rmp;
+ int rv = 0;
+ u32 is_del;
+
+ VALIDATE_SW_IF_INDEX(mp);
+
+ is_del = mp->is_add == 0;
+
+ if (mp->del_all)
+ ip_del_all_interface_addresses (vm, ntohl(mp->sw_if_index));
+ else if (mp->is_ipv6)
+ ip6_add_del_interface_address (vm, ntohl(mp->sw_if_index),
+ (void *)mp->address,
+ mp->address_length, is_del);
+ else
+ ip4_add_del_interface_address (vm, ntohl(mp->sw_if_index),
+ (void *) mp->address,
+ mp->address_length, is_del);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO(VL_API_SW_INTERFACE_ADD_DEL_ADDRESS_REPLY);
+}
+
+static void
+vl_api_sw_interface_set_table_t_handler (vl_api_sw_interface_set_table_t *mp)
+{
+ int rv = 0;
+ u32 table_id = ntohl(mp->vrf_id);
+ u32 sw_if_index = ntohl(mp->sw_if_index);
+ vl_api_sw_interface_set_table_reply_t * rmp;
+ stats_main_t * sm = &stats_main;
+
+ VALIDATE_SW_IF_INDEX(mp);
+
+ dslock (sm, 1 /* release hint */, 4 /* tag */);
+
+ if (mp->is_ipv6) {
+ ip6_main_t * im = &ip6_main;
+ ip6_fib_t * fib =
+ find_ip6_fib_by_table_index_or_id (im, table_id,
+ IP6_ROUTE_FLAG_TABLE_ID);
+ if (fib) {
+ vec_validate (im->fib_index_by_sw_if_index, sw_if_index);
+ im->fib_index_by_sw_if_index[sw_if_index] = fib->index;
+ } else {
+ rv = VNET_API_ERROR_NO_SUCH_FIB;
+ }
+ } else {
+ ip4_main_t * im = &ip4_main;
+ ip4_fib_t * fib = find_ip4_fib_by_table_index_or_id
+ (im, table_id, IP4_ROUTE_FLAG_TABLE_ID);
+
+ /* Truthfully this can't fail */
+ if (fib) {
+ vec_validate (im->fib_index_by_sw_if_index, sw_if_index);
+ im->fib_index_by_sw_if_index[sw_if_index] = fib->index;
+ } else {
+ rv = VNET_API_ERROR_NO_SUCH_FIB;
+ }
+ }
+ dsunlock(sm);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO(VL_API_SW_INTERFACE_SET_TABLE_REPLY);
+}
+
+static void
+vl_api_sw_interface_set_vpath_t_handler (vl_api_sw_interface_set_vpath_t *mp)
+{
+ vlib_main_t *vm = vlib_get_main();
+ ip4_main_t * im4 = &ip4_main;
+ ip6_main_t * im6 = &ip6_main;
+ vl_api_sw_interface_set_vpath_reply_t * rmp;
+ int rv = 0;
+ u32 ci;
+ u32 sw_if_index = ntohl(mp->sw_if_index);
+ ip4_main_t *ip4m = &ip4_main;
+ ip6_main_t *ip6m = &ip6_main;
+ ip_lookup_main_t *ip4lm = &ip4m->lookup_main;
+ ip_lookup_main_t *ip6lm = &ip6m->lookup_main;
+ ip_config_main_t *rx_cm4u = &ip4lm->rx_config_mains[VNET_UNICAST];
+ ip_config_main_t *rx_cm4m = &ip4lm->rx_config_mains[VNET_MULTICAST];
+ ip_config_main_t *rx_cm6u = &ip6lm->rx_config_mains[VNET_UNICAST];
+ ip_config_main_t *rx_cm6m = &ip6lm->rx_config_mains[VNET_MULTICAST];
+
+ VALIDATE_SW_IF_INDEX(mp);
+
+ l2input_intf_bitmap_enable(sw_if_index, L2INPUT_FEAT_VPATH, mp->enable);
+ if (mp->enable) {
+ ci = rx_cm4u->config_index_by_sw_if_index[sw_if_index]; //IP4 unicast
+ ci = vnet_config_add_feature(vm, &rx_cm4u->config_main,
+ ci,
+ im4->ip4_unicast_rx_feature_vpath,
+ 0, 0);
+ rx_cm4u->config_index_by_sw_if_index[sw_if_index] = ci;
+ ci = rx_cm4m->config_index_by_sw_if_index[sw_if_index]; //IP4 mcast
+ ci = vnet_config_add_feature(vm, &rx_cm4m->config_main,
+ ci,
+ im4->ip4_multicast_rx_feature_vpath,
+ 0, 0);
+ rx_cm4m->config_index_by_sw_if_index[sw_if_index] = ci;
+ ci = rx_cm6u->config_index_by_sw_if_index[sw_if_index]; //IP6 unicast
+ ci = vnet_config_add_feature(vm, &rx_cm6u->config_main,
+ ci,
+ im6->ip6_unicast_rx_feature_vpath,
+ 0, 0);
+ rx_cm6u->config_index_by_sw_if_index[sw_if_index] = ci;
+ ci = rx_cm6m->config_index_by_sw_if_index[sw_if_index]; //IP6 mcast
+ ci = vnet_config_add_feature(vm, &rx_cm6m->config_main,
+ ci,
+ im6->ip6_multicast_rx_feature_vpath,
+ 0, 0);
+ rx_cm6m->config_index_by_sw_if_index[sw_if_index] = ci;
+ } else {
+ ci = rx_cm4u->config_index_by_sw_if_index[sw_if_index]; //IP4 unicast
+ ci = vnet_config_del_feature(vm, &rx_cm4u->config_main,
+ ci,
+ im4->ip4_unicast_rx_feature_vpath,
+ 0, 0);
+ rx_cm4u->config_index_by_sw_if_index[sw_if_index] = ci;
+ ci = rx_cm4m->config_index_by_sw_if_index[sw_if_index]; //IP4 mcast
+ ci = vnet_config_del_feature(vm, &rx_cm4m->config_main,
+ ci,
+ im4->ip4_multicast_rx_feature_vpath,
+ 0, 0);
+ rx_cm4m->config_index_by_sw_if_index[sw_if_index] = ci;
+ ci = rx_cm6u->config_index_by_sw_if_index[sw_if_index]; //IP6 unicast
+ ci = vnet_config_del_feature(vm, &rx_cm6u->config_main,
+ ci,
+ im6->ip6_unicast_rx_feature_vpath,
+ 0, 0);
+ rx_cm6u->config_index_by_sw_if_index[sw_if_index] = ci;
+ ci = rx_cm6m->config_index_by_sw_if_index[sw_if_index]; //IP6 mcast
+ ci = vnet_config_del_feature(vm, &rx_cm6m->config_main,
+ ci,
+ im6->ip6_multicast_rx_feature_vpath,
+ 0, 0);
+ rx_cm6m->config_index_by_sw_if_index[sw_if_index] = ci;
+ }
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO(VL_API_SW_INTERFACE_SET_VPATH_REPLY);
+}
+
+static void
+vl_api_sw_interface_set_l2_xconnect_t_handler (
+ vl_api_sw_interface_set_l2_xconnect_t *mp)
+{
+ vl_api_sw_interface_set_l2_xconnect_reply_t * rmp;
+ int rv = 0;
+ u32 rx_sw_if_index = ntohl(mp->rx_sw_if_index);
+ u32 tx_sw_if_index = ntohl(mp->tx_sw_if_index);
+ vlib_main_t *vm = vlib_get_main();
+ vnet_main_t *vnm = vnet_get_main();
+
+ VALIDATE_RX_SW_IF_INDEX(mp);
+
+ if (mp->enable) {
+ VALIDATE_TX_SW_IF_INDEX(mp);
+ rv = set_int_l2_mode(vm, vnm, MODE_L2_XC,
+ rx_sw_if_index, 0, 0, 0, tx_sw_if_index);
+ } else {
+ rv = set_int_l2_mode(vm, vnm, MODE_L3, rx_sw_if_index, 0, 0, 0, 0);
+ }
+
+ BAD_RX_SW_IF_INDEX_LABEL;
+ BAD_TX_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO(VL_API_SW_INTERFACE_SET_L2_XCONNECT_REPLY);
+}
+
+static void
+vl_api_sw_interface_set_l2_bridge_t_handler (
+ vl_api_sw_interface_set_l2_bridge_t *mp)
+{
+ bd_main_t * bdm = &bd_main;
+ vl_api_sw_interface_set_l2_bridge_reply_t * rmp;
+ int rv = 0;
+ u32 rx_sw_if_index = ntohl(mp->rx_sw_if_index);
+ u32 bd_id = ntohl(mp->bd_id);
+ u32 bd_index;
+ u32 bvi = mp->bvi;
+ u8 shg = mp->shg;
+ vlib_main_t *vm = vlib_get_main();
+ vnet_main_t *vnm = vnet_get_main();
+
+ VALIDATE_RX_SW_IF_INDEX(mp);
+
+ bd_index = bd_find_or_add_bd_index (bdm, bd_id);
+
+ if (mp->enable) {
+ //VALIDATE_TX_SW_IF_INDEX(mp);
+ rv = set_int_l2_mode(vm, vnm, MODE_L2_BRIDGE,
+ rx_sw_if_index, bd_index, bvi, shg, 0);
+ } else {
+ rv = set_int_l2_mode(vm, vnm, MODE_L3, rx_sw_if_index, 0, 0, 0, 0);
+ }
+
+ BAD_RX_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO(VL_API_SW_INTERFACE_SET_L2_BRIDGE_REPLY);
+}
+
+static void
+vl_api_bridge_domain_add_del_t_handler (
+ vl_api_bridge_domain_add_del_t *mp)
+{
+ vlib_main_t * vm = vlib_get_main ();
+ bd_main_t * bdm = &bd_main;
+ vl_api_bridge_domain_add_del_reply_t * rmp;
+ int rv = 0;
+ u32 enable_flags = 0, disable_flags = 0;
+ u32 bd_id = ntohl(mp->bd_id);
+ u32 bd_index;
+
+ if (mp->is_add) {
+ bd_index = bd_find_or_add_bd_index (bdm, bd_id);
+
+ if (mp->flood)
+ enable_flags |= L2_FLOOD;
+ else
+ disable_flags |= L2_FLOOD;
+
+ if (mp->uu_flood)
+ enable_flags |= L2_UU_FLOOD;
+ else
+ disable_flags |= L2_UU_FLOOD;
+
+ if (mp->forward)
+ enable_flags |= L2_FWD;
+ else
+ disable_flags |= L2_FWD;
+
+ if (mp->arp_term)
+ enable_flags |= L2_ARP_TERM;
+ else
+ disable_flags |= L2_ARP_TERM;
+
+ if (mp->learn)
+ enable_flags |= L2_LEARN;
+ else
+ disable_flags |= L2_LEARN;
+
+ if (enable_flags)
+ bd_set_flags (vm, bd_index, enable_flags, 1 /* enable */);
+
+ if (disable_flags)
+ bd_set_flags (vm, bd_index, disable_flags, 0 /* disable */);
+
+ } else
+ rv = bd_delete_bd_index(bdm, bd_id);
+
+ REPLY_MACRO(VL_API_BRIDGE_DOMAIN_ADD_DEL_REPLY);
+}
+
+static void vl_api_bridge_domain_details_t_handler (
+ vl_api_bridge_domain_details_t * mp)
+{
+ clib_warning ("BUG");
+}
+
+static void vl_api_bridge_domain_sw_if_details_t_handler (
+ vl_api_bridge_domain_sw_if_details_t * mp)
+{
+ clib_warning ("BUG");
+}
+
+static void send_bridge_domain_details (unix_shared_memory_queue_t *q,
+ l2_bridge_domain_t * bd_config,
+ u32 n_sw_ifs,
+ u32 context)
+{
+ vl_api_bridge_domain_details_t * mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_BRIDGE_DOMAIN_DETAILS);
+ mp->bd_id = ntohl (bd_config->bd_id);
+ mp->flood = bd_feature_flood (bd_config);
+ mp->uu_flood = bd_feature_uu_flood (bd_config);
+ mp->forward = bd_feature_forward (bd_config);
+ mp->learn = bd_feature_learn (bd_config);
+ mp->arp_term = bd_feature_arp_term (bd_config);
+ mp->bvi_sw_if_index = ntohl (bd_config->bvi_sw_if_index);
+ mp->n_sw_ifs = ntohl (n_sw_ifs);
+ mp->context = context;
+
+ vl_msg_api_send_shmem (q, (u8 *)&mp);
+}
+
+static void send_bd_sw_if_details (l2input_main_t * l2im,
+ unix_shared_memory_queue_t *q,
+ l2_flood_member_t * member, u32 bd_id,
+ u32 context)
+{
+ vl_api_bridge_domain_sw_if_details_t * mp;
+ l2_input_config_t * input_cfg;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_BRIDGE_DOMAIN_SW_IF_DETAILS);
+ mp->bd_id = ntohl (bd_id);
+ mp->sw_if_index = ntohl (member->sw_if_index);
+ input_cfg = vec_elt_at_index (l2im->configs, member->sw_if_index);
+ mp->shg = input_cfg->shg;
+ mp->context = context;
+
+ vl_msg_api_send_shmem (q, (u8 *)&mp);
+}
+
+static void vl_api_bridge_domain_dump_t_handler (
+ vl_api_bridge_domain_dump_t *mp)
+{
+ bd_main_t * bdm = &bd_main;
+ l2input_main_t * l2im = &l2input_main;
+ unix_shared_memory_queue_t * q;
+ l2_bridge_domain_t * bd_config;
+ u32 bd_id, bd_index;
+ u32 end;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+
+ if (q == 0)
+ return;
+
+ bd_id = ntohl(mp->bd_id);
+
+ bd_index = (bd_id == ~0) ? 0 : bd_find_or_add_bd_index (bdm, bd_id);
+ end = (bd_id == ~0) ? vec_len (l2im->bd_configs) : bd_index + 1;
+ for (; bd_index < end; bd_index++) {
+ bd_config = l2input_bd_config_from_index (l2im, bd_index);
+ /* skip dummy bd_id 0 */
+ if (bd_config && (bd_config->bd_id > 0)) {
+ u32 n_sw_ifs;
+ l2_flood_member_t * m;
+
+ n_sw_ifs = vec_len (bd_config->members);
+ send_bridge_domain_details (q, bd_config, n_sw_ifs, mp->context);
+
+ vec_foreach (m, bd_config->members) {
+ send_bd_sw_if_details (l2im, q, m, bd_config->bd_id, mp->context);
+ }
+ }
+ }
+}
+
+static void
+vl_api_l2fib_add_del_t_handler (
+ vl_api_l2fib_add_del_t *mp)
+{
+ bd_main_t * bdm = &bd_main;
+ l2input_main_t * l2im = &l2input_main;
+ vl_api_l2fib_add_del_reply_t * rmp;
+ int rv = 0;
+ u64 mac = 0;
+ u32 sw_if_index = ntohl(mp->sw_if_index);
+ u32 bd_id = ntohl(mp->bd_id);
+ u32 bd_index;
+ u32 static_mac;
+ u32 filter_mac;
+ uword * p;
+
+ mac = mp->mac;
+
+ p = hash_get (bdm->bd_index_by_bd_id, bd_id);
+ if (!p) {
+ rv = VNET_API_ERROR_NO_SUCH_ENTRY;
+ goto bad_sw_if_index;
+ }
+ bd_index = p[0];
+
+ if (mp->is_add) {
+ VALIDATE_SW_IF_INDEX(mp);
+ if (vec_len(l2im->configs) <= sw_if_index) {
+ rv = VNET_API_ERROR_INVALID_SW_IF_INDEX;
+ goto bad_sw_if_index;
+ } else {
+ l2_input_config_t * config;
+ config = vec_elt_at_index(l2im->configs, sw_if_index);
+ if (config->bridge == 0) {
+ rv = VNET_API_ERROR_INVALID_SW_IF_INDEX;
+ goto bad_sw_if_index;
+ }
+ }
+ static_mac = mp->static_mac ? 1 : 0;
+ filter_mac = mp->filter_mac ? 1 : 0;
+ l2fib_add_entry(mac, bd_index, sw_if_index, static_mac, filter_mac,
+ 0 /* bvi_mac */);
+ } else {
+ l2fib_del_entry(mac, bd_index);
+ }
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO(VL_API_L2FIB_ADD_DEL_REPLY);
+}
+
+static void
+vl_api_l2_flags_t_handler (
+ vl_api_l2_flags_t *mp)
+{
+ vl_api_l2_flags_reply_t * rmp;
+ int rv = 0;
+ u32 sw_if_index = ntohl(mp->sw_if_index);
+ u32 flags = ntohl(mp->feature_bitmap);
+ u32 rbm = 0;
+
+ VALIDATE_SW_IF_INDEX(mp);
+
+#define _(a,b) \
+ if (flags & L2INPUT_FEAT_ ## a) \
+ rbm = l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_ ## a, mp->is_set);
+ foreach_l2input_feat;
+#undef _
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO2(VL_API_L2_FLAGS_REPLY, rmp->resulting_feature_bitmap = ntohl(rbm));
+}
+
+static void
+vl_api_bridge_flags_t_handler (
+ vl_api_bridge_flags_t *mp)
+{
+ vlib_main_t *vm = vlib_get_main();
+ bd_main_t * bdm = &bd_main;
+ vl_api_bridge_flags_reply_t * rmp;
+ int rv = 0;
+ u32 bd_id = ntohl(mp->bd_id);
+ u32 bd_index;
+ u32 flags = ntohl(mp->feature_bitmap);
+ uword * p;
+
+ p = hash_get (bdm->bd_index_by_bd_id, bd_id);
+ if (p == 0) {
+ rv = VNET_API_ERROR_NO_SUCH_ENTRY;
+ goto out;
+ }
+
+ bd_index = p[0];
+
+ bd_set_flags(vm, bd_index, flags, mp->is_set);
+
+out:
+ REPLY_MACRO2(VL_API_BRIDGE_FLAGS_REPLY,
+ rmp->resulting_feature_bitmap = ntohl(flags));
+}
+
+static void
+vl_api_bd_ip_mac_add_del_t_handler (
+ vl_api_bd_ip_mac_add_del_t *mp)
+{
+ bd_main_t * bdm = &bd_main;
+ vl_api_bd_ip_mac_add_del_reply_t * rmp;
+ int rv = 0;
+ u32 bd_id = ntohl(mp->bd_id);
+ u32 bd_index;
+ uword * p;
+
+ p = hash_get (bdm->bd_index_by_bd_id, bd_id);
+ if (p == 0) {
+ rv = VNET_API_ERROR_NO_SUCH_ENTRY;
+ goto out;
+ }
+
+ bd_index = p[0];
+ if (bd_add_del_ip_mac(bd_index, mp->ip_address,
+ mp->mac_address, mp->is_ipv6, mp->is_add))
+ rv = VNET_API_ERROR_UNSPECIFIED;
+
+out:
+ REPLY_MACRO(VL_API_BD_IP_MAC_ADD_DEL_REPLY);
+}
+
+static void
+vl_api_tap_connect_t_handler (vl_api_tap_connect_t *mp, vlib_main_t *vm)
+{
+ int rv;
+ vl_api_tap_connect_reply_t * rmp;
+ unix_shared_memory_queue_t * q;
+ u32 sw_if_index = (u32)~0;
+
+ rv = vnet_tap_connect_renumber (vm, mp->tap_name,
+ mp->use_random_mac ? 0 : mp->mac_address,
+ &sw_if_index, mp->renumber,
+ ntohl(mp->custom_dev_instance));
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (!q)
+ return;
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs(VL_API_TAP_CONNECT_REPLY);
+ rmp->context = mp->context;
+ rmp->retval = ntohl(rv);
+ rmp->sw_if_index = ntohl(sw_if_index);
+
+ vl_msg_api_send_shmem (q, (u8 *)&rmp);
+}
+
+static void
+vl_api_tap_modify_t_handler (vl_api_tap_modify_t *mp, vlib_main_t *vm)
+{
+ int rv;
+ vl_api_tap_modify_reply_t * rmp;
+ unix_shared_memory_queue_t * q;
+ u32 sw_if_index = (u32)~0;
+
+ rv = vnet_tap_modify (vm, ntohl(mp->sw_if_index), mp->tap_name,
+ mp->use_random_mac ? 0 : mp->mac_address,
+ &sw_if_index, mp->renumber,
+ ntohl(mp->custom_dev_instance));
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (!q)
+ return;
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs(VL_API_TAP_MODIFY_REPLY);
+ rmp->context = mp->context;
+ rmp->retval = ntohl(rv);
+ rmp->sw_if_index = ntohl(sw_if_index);
+
+ vl_msg_api_send_shmem (q, (u8 *)&rmp);
+}
+
+static void
+vl_api_tap_delete_t_handler (vl_api_tap_delete_t *mp, vlib_main_t *vm)
+{
+ int rv;
+ vpe_api_main_t * vam = &vpe_api_main;
+ vl_api_tap_delete_reply_t * rmp;
+ unix_shared_memory_queue_t * q;
+ u32 sw_if_index = ntohl(mp->sw_if_index);
+
+ rv = vnet_tap_delete (vm, sw_if_index);
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (!q)
+ return;
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs(VL_API_TAP_DELETE_REPLY);
+ rmp->context = mp->context;
+ rmp->retval = ntohl(rv);
+
+ vl_msg_api_send_shmem (q, (u8 *)&rmp);
+
+ if (!rv)
+ send_sw_interface_flags_deleted (vam, q, sw_if_index);
+}
+
+static void
+vl_api_create_vlan_subif_t_handler (vl_api_create_vlan_subif_t * mp)
+{
+ vl_api_create_vlan_subif_reply_t * rmp;
+ vnet_main_t * vnm = vnet_get_main();
+ u32 hw_if_index, sw_if_index = (u32)~0;
+ vnet_hw_interface_t * hi;
+ int rv = 0;
+ u32 id;
+ vnet_sw_interface_t template;
+ uword * p;
+ vnet_interface_main_t * im = &vnm->interface_main;
+ u64 sup_and_sub_key;
+ u64 * kp;
+ unix_shared_memory_queue_t * q;
+ clib_error_t * error;
+
+ VALIDATE_SW_IF_INDEX(mp);
+
+ hw_if_index = ntohl(mp->sw_if_index);
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+
+ id = ntohl(mp->vlan_id);
+ if (id == 0 || id > 4095) {
+ rv = VNET_API_ERROR_INVALID_VLAN;
+ goto out;
+ }
+
+ sup_and_sub_key = ((u64)(hi->sw_if_index) << 32) | (u64) id;
+
+ p = hash_get_mem (im->sw_if_index_by_sup_and_sub, &sup_and_sub_key);
+ if (p) {
+ rv = VNET_API_ERROR_VLAN_ALREADY_EXISTS;
+ goto out;
+ }
+
+ kp = clib_mem_alloc (sizeof (*kp));
+ *kp = sup_and_sub_key;
+
+ memset (&template, 0, sizeof (template));
+ template.type = VNET_SW_INTERFACE_TYPE_SUB;
+ template.sup_sw_if_index = hi->sw_if_index;
+ template.sub.id = id;
+ template.sub.eth.raw_flags = 0;
+ template.sub.eth.flags.one_tag = 1;
+ template.sub.eth.outer_vlan_id = id;
+ template.sub.eth.flags.exact_match = 1;
+
+ error = vnet_create_sw_interface (vnm, &template, &sw_if_index);
+ if (error) {
+ clib_error_report(error);
+ rv = VNET_API_ERROR_INVALID_REGISTRATION;
+ goto out;
+ }
+ hash_set (hi->sub_interface_sw_if_index_by_id, id, sw_if_index);
+ hash_set_mem (im->sw_if_index_by_sup_and_sub, kp, sw_if_index);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+out:
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (!q)
+ return;
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs(VL_API_CREATE_VLAN_SUBIF_REPLY);
+ rmp->context = mp->context;
+ rmp->retval = ntohl(rv);
+ rmp->sw_if_index = ntohl(sw_if_index);
+ vl_msg_api_send_shmem (q, (u8 *)&rmp);
+}
+
+static void
+vl_api_create_subif_t_handler (vl_api_create_subif_t * mp)
+{
+ vl_api_create_subif_reply_t * rmp;
+ vnet_main_t * vnm = vnet_get_main();
+ u32 sw_if_index = ~0;
+ int rv = 0;
+ u32 sub_id;
+ vnet_sw_interface_t *si;
+ vnet_hw_interface_t *hi;
+ vnet_sw_interface_t template;
+ uword * p;
+ vnet_interface_main_t * im = &vnm->interface_main;
+ u64 sup_and_sub_key;
+ u64 * kp;
+ clib_error_t * error;
+
+ VALIDATE_SW_IF_INDEX(mp);
+
+ si = vnet_get_sup_sw_interface (vnm, ntohl(mp->sw_if_index));
+ hi = vnet_get_sup_hw_interface (vnm, ntohl(mp->sw_if_index));
+
+ if (hi->bond_info == VNET_HW_INTERFACE_BOND_INFO_SLAVE) {
+ rv = VNET_API_ERROR_BOND_SLAVE_NOT_ALLOWED;
+ goto out;
+ }
+
+ sw_if_index = si->sw_if_index;
+ sub_id = ntohl(mp->sub_id);
+
+ sup_and_sub_key = ((u64)(sw_if_index) << 32) | (u64) sub_id;
+
+ p = hash_get_mem (im->sw_if_index_by_sup_and_sub, &sup_and_sub_key);
+ if (p) {
+ if (CLIB_DEBUG > 0)
+ clib_warning ("sup sw_if_index %d, sub id %d already exists\n",
+ sw_if_index, sub_id);
+ rv = VNET_API_ERROR_SUBIF_ALREADY_EXISTS;
+ goto out;
+ }
+
+ kp = clib_mem_alloc (sizeof (*kp));
+ *kp = sup_and_sub_key;
+
+ memset (&template, 0, sizeof (template));
+ template.type = VNET_SW_INTERFACE_TYPE_SUB;
+ template.sup_sw_if_index = sw_if_index;
+ template.sub.id = sub_id;
+ template.sub.eth.flags.no_tags = mp->no_tags;
+ template.sub.eth.flags.one_tag = mp->one_tag;
+ template.sub.eth.flags.two_tags = mp->two_tags;
+ template.sub.eth.flags.dot1ad = mp->dot1ad;
+ template.sub.eth.flags.exact_match = mp->exact_match;
+ template.sub.eth.flags.default_sub = mp->default_sub;
+ template.sub.eth.flags.outer_vlan_id_any = mp->outer_vlan_id_any;
+ template.sub.eth.flags.inner_vlan_id_any = mp->inner_vlan_id_any;
+ template.sub.eth.outer_vlan_id = ntohs(mp->outer_vlan_id);
+ template.sub.eth.inner_vlan_id = ntohs(mp->inner_vlan_id);
+
+ error = vnet_create_sw_interface (vnm, &template, &sw_if_index);
+ if (error) {
+ clib_error_report (error);
+ rv = VNET_API_ERROR_SUBIF_CREATE_FAILED;
+ goto out;
+ }
+
+ hash_set (hi->sub_interface_sw_if_index_by_id, sub_id, sw_if_index);
+ hash_set_mem (im->sw_if_index_by_sup_and_sub, kp, sw_if_index);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+out:
+
+ REPLY_MACRO2(VL_API_CREATE_SUBIF_REPLY,
+ ({
+ rmp->sw_if_index = ntohl(sw_if_index);
+ }));
+}
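+
+/*
+ * REPLY_MACRO / REPLY_MACRO2 (defined near the top of this file) expand
+ * to the client-queue lookup / vl_msg_api_alloc / vl_msg_api_send_shmem
+ * sequence written out by hand in the tap and vlan-subif handlers above;
+ * REPLY_MACRO2 additionally runs the supplied statement block to fill in
+ * message-specific reply fields before sending.
+ */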
+
+static void
+vl_api_mpls_gre_add_del_tunnel_t_handler (vl_api_mpls_gre_add_del_tunnel_t *mp)
+{
+ vl_api_mpls_gre_add_del_tunnel_reply_t * rmp;
+ int rv = 0;
+ stats_main_t * sm = &stats_main;
+ u32 tunnel_sw_if_index = ~0;
+
+ dslock (sm, 1 /* release hint */, 5 /* tag */);
+
+ rv = vnet_mpls_gre_add_del_tunnel ((ip4_address_t *)(mp->src_address),
+ (ip4_address_t *)(mp->dst_address),
+ (ip4_address_t *)(mp->intfc_address),
+ (u32)(mp->intfc_address_length),
+ ntohl(mp->inner_vrf_id),
+ ntohl(mp->outer_vrf_id),
+ &tunnel_sw_if_index,
+ mp->l2_only,
+ mp->is_add);
+ dsunlock (sm);
+
+ REPLY_MACRO2(VL_API_MPLS_GRE_ADD_DEL_TUNNEL_REPLY,
+ ({
+ rmp->tunnel_sw_if_index = ntohl(tunnel_sw_if_index);
+ }));
+}
+
+static void
+vl_api_mpls_ethernet_add_del_tunnel_t_handler
+(vl_api_mpls_ethernet_add_del_tunnel_t *mp)
+{
+ vl_api_mpls_ethernet_add_del_tunnel_reply_t * rmp;
+ int rv = 0;
+ stats_main_t * sm = &stats_main;
+  u32 tunnel_sw_if_index = ~0;	/* report ~0 if the add fails, as in the gre handler above */
+
+ dslock (sm, 1 /* release hint */, 5 /* tag */);
+
+ rv = vnet_mpls_ethernet_add_del_tunnel
+ (mp->dst_mac_address, (ip4_address_t *)(mp->adj_address),
+ (u32)(mp->adj_address_length), ntohl(mp->vrf_id),
+ ntohl(mp->tx_sw_if_index),
+ &tunnel_sw_if_index,
+ mp->l2_only,
+ mp->is_add);
+
+ dsunlock (sm);
+
+ REPLY_MACRO2(VL_API_MPLS_ETHERNET_ADD_DEL_TUNNEL_REPLY,
+ ({
+ rmp->tunnel_sw_if_index = ntohl(tunnel_sw_if_index);
+ }));
+}
+
+/*
+ * This piece of misery brought to you because the control-plane
+ * can't figure out the tx interface + dst-mac address all by itself
+ */
+static int mpls_ethernet_add_del_tunnel_2_t_handler
+(vl_api_mpls_ethernet_add_del_tunnel_2_t *mp)
+{
+ pending_route_t * pr;
+ vl_api_mpls_ethernet_add_del_tunnel_2_t *pme;
+ vnet_main_t * vnm = vnet_get_main();
+ vlib_main_t * vm = vlib_get_main();
+ stats_main_t * sm = &stats_main;
+ vpe_api_main_t * vam = &vpe_api_main;
+ u32 inner_fib_index, outer_fib_index;
+ ip4_main_t * im = &ip4_main;
+ ip_lookup_main_t * lm = &im->lookup_main;
+ ip_adjacency_t * adj = 0;
+ u32 lookup_result;
+ u32 tx_sw_if_index;
+ u8 * dst_mac_address;
+ clib_error_t * e;
+ uword * p;
+ int rv;
+ u32 tunnel_sw_if_index;
+
+ p = hash_get (im->fib_index_by_table_id, ntohl(mp->outer_vrf_id));
+ if (!p)
+ return VNET_API_ERROR_NO_SUCH_FIB;
+ else
+ outer_fib_index = p[0];
+
+
+ p = hash_get (im->fib_index_by_table_id, ntohl(mp->inner_vrf_id));
+ if (!p)
+ return VNET_API_ERROR_NO_SUCH_INNER_FIB;
+ else
+ inner_fib_index = p[0];
+
+ if (inner_fib_index == outer_fib_index)
+ return VNET_API_ERROR_INVALID_VALUE;
+
+ lookup_result = ip4_fib_lookup_with_table
+ (im, outer_fib_index,
+ (ip4_address_t *)mp->next_hop_ip4_address_in_outer_vrf,
+ 1 /* disable default route */);
+
+ adj = ip_get_adjacency (lm, lookup_result);
+ tx_sw_if_index = adj->rewrite_header.sw_if_index;
+
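+  /*
+   * If the next hop isn't resolved yet: park the request in the
+   * pending-routes pool, register for the ARP resolution event, wake the
+   * resolver process, and actively probe the neighbor. The stored copy
+   * has resolve_if_needed cleared so the retry can't recurse back here.
+   */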
+ if (mp->is_add && mp->resolve_if_needed) {
+ if (adj->lookup_next_index == IP_LOOKUP_NEXT_ARP) {
+ pool_get (vam->pending_routes, pr);
+ pr->resolve_type = RESOLVE_MPLS_ETHERNET_ADD_DEL;
+ pme = &pr->t;
+ clib_memcpy (pme, mp, sizeof (*pme));
+ /* recursion block, "just in case" */
+ pme->resolve_if_needed = 0;
+ pme->resolve_attempts = ntohl(mp->resolve_attempts);
+ pme->resolve_opaque = tx_sw_if_index;
+ vnet_register_ip4_arp_resolution_event
+ (vnm,
+ (ip4_address_t *)&(pme->next_hop_ip4_address_in_outer_vrf),
+ vpe_resolver_process_node.index,
+ RESOLUTION_EVENT, pr - vam->pending_routes);
+
+ vlib_process_signal_event
+ (vm, vpe_resolver_process_node.index,
+ RESOLUTION_PENDING_EVENT, 0 /* data */);
+
+ /* The interface may be down, etc. */
+ e = ip4_probe_neighbor
+ (vm, (ip4_address_t *)&(mp->next_hop_ip4_address_in_outer_vrf),
+ tx_sw_if_index);
+
+ if (e)
+ clib_error_report(e);
+
+ return VNET_API_ERROR_IN_PROGRESS;
+ }
+ }
+
+ if (adj->lookup_next_index != IP_LOOKUP_NEXT_REWRITE)
+ return VNET_API_ERROR_NEXT_HOP_NOT_IN_FIB;
+
+ dst_mac_address =
+ vnet_rewrite_get_data_internal
+ (&adj->rewrite_header, sizeof (adj->rewrite_data));
+
+ dslock (sm, 1 /* release hint */, 10 /* tag */);
+
+ rv = vnet_mpls_ethernet_add_del_tunnel
+ (dst_mac_address, (ip4_address_t *)(mp->adj_address),
+ (u32)(mp->adj_address_length), ntohl(mp->inner_vrf_id),
+ tx_sw_if_index, &tunnel_sw_if_index, mp->l2_only, mp->is_add);
+
+ dsunlock (sm);
+
+ return rv;
+}
+
+static void
+vl_api_mpls_ethernet_add_del_tunnel_2_t_handler
+(vl_api_mpls_ethernet_add_del_tunnel_2_t *mp)
+{
+ vl_api_mpls_ethernet_add_del_tunnel_reply_t * rmp;
+ int rv = 0;
+
+ rv = mpls_ethernet_add_del_tunnel_2_t_handler (mp);
+
+ REPLY_MACRO(VL_API_MPLS_ETHERNET_ADD_DEL_TUNNEL_2_REPLY);
+}
+
+
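+/*
+ * Note the static labels vector below: it is length-reset, not freed,
+ * between calls, so repeated encap operations reuse one heap allocation.
+ */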
+static void
+vl_api_mpls_add_del_encap_t_handler (vl_api_mpls_add_del_encap_t *mp)
+{
+ vl_api_mpls_add_del_encap_reply_t * rmp;
+ int rv;
+ static u32 * labels;
+ int i;
+
+ vec_reset_length (labels);
+
+ for (i = 0; i < mp->nlabels; i++)
+ vec_add1 (labels, ntohl(mp->labels[i]));
+
+ /* $$$$ fixme */
+ rv = vnet_mpls_add_del_encap ((ip4_address_t *)mp->dst_address,
+ ntohl(mp->vrf_id), labels,
+ ~0 /* policy_tunnel_index */,
+ 0 /* no_dst_hash */,
+ 0 /* indexp */,
+ mp->is_add);
+
+ REPLY_MACRO(VL_API_MPLS_ADD_DEL_ENCAP_REPLY);
+}
+
+static void
+vl_api_mpls_add_del_decap_t_handler
+(vl_api_mpls_add_del_decap_t *mp)
+{
+ vl_api_mpls_add_del_decap_reply_t * rmp;
+ int rv;
+
+ rv = vnet_mpls_add_del_decap (ntohl(mp->rx_vrf_id), ntohl(mp->tx_vrf_id),
+ ntohl(mp->label), ntohl(mp->next_index),
+ mp->s_bit, mp->is_add);
+
+ REPLY_MACRO(VL_API_MPLS_ADD_DEL_DECAP_REPLY);
+}
+
+static void
+vl_api_proxy_arp_add_del_t_handler (vl_api_proxy_arp_add_del_t *mp)
+{
+ vl_api_proxy_arp_add_del_reply_t * rmp;
+ u32 fib_index;
+ int rv;
+ ip4_main_t * im = &ip4_main;
+ stats_main_t * sm = &stats_main;
+ int vnet_proxy_arp_add_del (ip4_address_t *lo_addr,
+ ip4_address_t *hi_addr,
+ u32 fib_index, int is_del);
+ uword * p;
+
+ dslock (sm, 1 /* release hint */, 6 /* tag */);
+
+ p = hash_get (im->fib_index_by_table_id, ntohl(mp->vrf_id));
+
+ if (! p) {
+ rv = VNET_API_ERROR_NO_SUCH_FIB;
+ goto out;
+ }
+
+ fib_index = p[0];
+
+ rv = vnet_proxy_arp_add_del ((ip4_address_t *)mp->low_address,
+ (ip4_address_t *)mp->hi_address,
+ fib_index, mp->is_add == 0);
+
+out:
+ dsunlock (sm);
+ REPLY_MACRO(VL_API_PROXY_ARP_ADD_DEL_REPLY);
+}
+
+static void
+vl_api_proxy_arp_intfc_enable_disable_t_handler
+(vl_api_proxy_arp_intfc_enable_disable_t *mp)
+{
+ int rv = 0;
+ vnet_main_t * vnm = vnet_get_main();
+ vl_api_proxy_arp_intfc_enable_disable_reply_t *rmp;
+ vnet_sw_interface_t * si;
+ u32 sw_if_index;
+
+ VALIDATE_SW_IF_INDEX(mp);
+
+ sw_if_index = ntohl(mp->sw_if_index);
+
+ if (pool_is_free_index (vnm->interface_main.sw_interfaces,
+ sw_if_index)) {
+ rv = VNET_API_ERROR_INVALID_SW_IF_INDEX;
+ goto out;
+ }
+
+ si = vnet_get_sw_interface (vnm, sw_if_index);
+
+ ASSERT(si);
+
+ if (mp->enable_disable)
+ si->flags |= VNET_SW_INTERFACE_FLAG_PROXY_ARP;
+ else
+ si->flags &= ~VNET_SW_INTERFACE_FLAG_PROXY_ARP;
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ out:
+ REPLY_MACRO(VL_API_PROXY_ARP_INTFC_ENABLE_DISABLE_REPLY);
+}
+
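+/*
+ * VALIDATE_SW_IF_INDEX / BAD_SW_IF_INDEX_LABEL are a macro pair used by
+ * most handlers here: the former sets rv = VNET_API_ERROR_INVALID_SW_IF_INDEX
+ * and jumps to the label when mp->sw_if_index doesn't name a real
+ * interface, so a bogus index produces an error reply instead of a crash.
+ */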
+static void
+vl_api_ip_neighbor_add_del_t_handler (vl_api_ip_neighbor_add_del_t *mp, vlib_main_t * vm)
+{
+ vl_api_ip_neighbor_add_del_reply_t * rmp;
+ vnet_main_t * vnm = vnet_get_main();
+ u32 fib_index;
+ int rv=0;
+ stats_main_t * sm = &stats_main;
+
+ VALIDATE_SW_IF_INDEX(mp);
+
+ dslock (sm, 1 /* release hint */, 7 /* tag */);
+
+ if (mp->is_ipv6) {
+ if (mp->is_add)
+ rv = vnet_set_ip6_ethernet_neighbor
+ (vm, ntohl(mp->sw_if_index),
+ (ip6_address_t *)(mp->dst_address),
+ mp->mac_address, sizeof (mp->mac_address), mp->is_static);
+ else
+ rv = vnet_unset_ip6_ethernet_neighbor
+ (vm, ntohl(mp->sw_if_index),
+ (ip6_address_t *)(mp->dst_address),
+ mp->mac_address, sizeof(mp->mac_address));
+ } else {
+ ip4_main_t * im = &ip4_main;
+ ip_lookup_main_t * lm = &im->lookup_main;
+ ethernet_arp_ip4_over_ethernet_address_t a;
+ u32 ai;
+ ip_adjacency_t *nh_adj;
+
+ uword * p = hash_get (im->fib_index_by_table_id, ntohl(mp->vrf_id));
+ if (! p) {
+ rv = VNET_API_ERROR_NO_SUCH_FIB;
+ goto out;
+ }
+ fib_index = p[0];
+
+ /*
+ * Unfortunately, folks have a penchant for
+ * adding interface addresses to the ARP cache, and
+ * wondering why the forwarder eventually ASSERTs...
+ */
+ ai = ip4_fib_lookup_with_table
+ (im, fib_index, (ip4_address_t *)(mp->dst_address),
+ 1 /* disable default route */);
+
+ if (ai != 0) {
+ nh_adj = ip_get_adjacency (lm, ai);
+ /* Never allow manipulation of a local adj! */
+ if (nh_adj->lookup_next_index == IP_LOOKUP_NEXT_LOCAL) {
+ clib_warning("%U matches local adj",
+ format_ip4_address,
+ (ip4_address_t *)(mp->dst_address));
+ rv = VNET_API_ERROR_ADDRESS_MATCHES_INTERFACE_ADDRESS;
+ goto out;
+ }
+ }
+
+ clib_memcpy (&a.ethernet, mp->mac_address, 6);
+ clib_memcpy (&a.ip4, mp->dst_address, 4);
+
+ if (mp->is_add)
+ rv = vnet_arp_set_ip4_over_ethernet (vnm, ntohl(mp->sw_if_index),
+ fib_index, &a, mp->is_static);
+ else
+ rv = vnet_arp_unset_ip4_over_ethernet (vnm, ntohl(mp->sw_if_index),
+ fib_index, &a);
+ }
+
+ BAD_SW_IF_INDEX_LABEL;
+ out:
+ dsunlock (sm);
+ REPLY_MACRO(VL_API_IP_NEIGHBOR_ADD_DEL_REPLY);
+}
+
+static void
+vl_api_is_address_reachable_t_handler (vl_api_is_address_reachable_t *mp)
+{
+#if 0
+ vpe_main_t *rm = &vpe_main;
+ ip4_main_t *im4 = &ip4_main;
+ ip6_main_t *im6 = &ip6_main;
+ ip_lookup_main_t * lm;
+ union {
+ ip4_address_t ip4;
+ ip6_address_t ip6;
+ } addr;
+ u32 adj_index, sw_if_index;
+ vl_api_is_address_reachable_t *rmp;
+ ip_adjacency_t * adj;
+ unix_shared_memory_queue_t *q;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (!q) {
+ increment_missing_api_client_counter (rm->vlib_main);
+ return;
+ }
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ clib_memcpy (rmp, mp, sizeof (*rmp));
+
+ sw_if_index = mp->next_hop_sw_if_index;
+ clib_memcpy (&addr, mp->address, sizeof (addr));
+ if (mp->is_ipv6) {
+ lm = &im6->lookup_main;
+ adj_index =
+ ip6_fib_lookup (im6, sw_if_index, &addr.ip6);
+ } else {
+ lm = &im4->lookup_main;
+ adj_index =
+ ip4_fib_lookup (im4, sw_if_index, &addr.ip4);
+ }
+ if (adj_index == ~0) {
+ rmp->is_error = 1;
+ goto send;
+ }
+ adj = ip_get_adjacency (lm, adj_index);
+
+ if (adj->lookup_next_index == IP_LOOKUP_NEXT_REWRITE
+ && adj->rewrite_header.sw_if_index == sw_if_index) {
+ rmp->is_known = 1;
+ } else {
+ if (adj->lookup_next_index == IP_LOOKUP_NEXT_ARP
+ && adj->rewrite_header.sw_if_index == sw_if_index) {
+ if (mp->is_ipv6)
+ ip6_probe_neighbor (rm->vlib_main, &addr.ip6, sw_if_index);
+ else
+ ip4_probe_neighbor (rm->vlib_main, &addr.ip4, sw_if_index);
+ } else if (adj->lookup_next_index == IP_LOOKUP_NEXT_DROP) {
+ rmp->is_known = 1;
+ goto send;
+ }
+ rmp->is_known = 0;
+ }
+
+send:
+ vl_msg_api_send_shmem (q, (u8 *)&rmp);
+#endif
+}
+
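+/*
+ * Details messages normally flow from vpp to the client; one arriving
+ * here means a client sent it by mistake, hence the warning.
+ */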
+static void vl_api_sw_interface_details_t_handler (
+ vl_api_sw_interface_details_t * mp)
+{
+ clib_warning ("BUG");
+}
+
+static void vl_api_sw_interface_set_flags_t_handler (
+ vl_api_sw_interface_set_flags_t * mp)
+{
+ vl_api_sw_interface_set_flags_reply_t *rmp;
+ vnet_main_t * vnm = vnet_get_main();
+ int rv = 0;
+ clib_error_t * error;
+ u16 flags;
+
+ VALIDATE_SW_IF_INDEX(mp);
+
+ flags = mp->admin_up_down ? VNET_SW_INTERFACE_FLAG_ADMIN_UP : 0;
+
+ error = vnet_sw_interface_set_flags (vnm,
+ ntohl(mp->sw_if_index),
+ flags);
+ if (error) {
+ rv = -1;
+ clib_error_report (error);
+ }
+
+ BAD_SW_IF_INDEX_LABEL;
+ REPLY_MACRO(VL_API_SW_INTERFACE_SET_FLAGS_REPLY);
+}
+
+static void vl_api_sw_interface_clear_stats_t_handler (
+ vl_api_sw_interface_clear_stats_t * mp)
+{
+ vl_api_sw_interface_clear_stats_reply_t *rmp;
+
+ vnet_main_t * vnm = vnet_get_main();
+ vnet_interface_main_t * im = &vnm->interface_main;
+ vlib_simple_counter_main_t * sm;
+ vlib_combined_counter_main_t * cm;
+ static vnet_main_t ** my_vnet_mains;
+ int i, j, n_counters;
+
+ int rv = 0;
+
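+  /*
+   * Counters live per vnet_main (one per thread in multi-threaded
+   * configurations); collect whichever mains exist, falling back to the
+   * sole vnet_main, then clear one interface or all of them in each.
+   */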
+ vec_reset_length (my_vnet_mains);
+
+ for (i = 0; i < vec_len (vnet_mains); i++)
+ {
+ if (vnet_mains[i])
+ vec_add1 (my_vnet_mains, vnet_mains[i]);
+ }
+
+ if (vec_len (vnet_mains) == 0)
+ vec_add1 (my_vnet_mains, vnm);
+
+ n_counters = vec_len (im->combined_sw_if_counters);
+
+ for (j = 0; j < n_counters; j++)
+ {
+ for (i = 0; i < vec_len(my_vnet_mains); i++)
+ {
+ im = &my_vnet_mains[i]->interface_main;
+ cm = im->combined_sw_if_counters + j;
+ if (mp->sw_if_index == (u32)~0)
+ vlib_clear_combined_counters (cm);
+ else
+ vlib_zero_combined_counter (cm, ntohl(mp->sw_if_index));
+ }
+ }
+
+ n_counters = vec_len (im->sw_if_counters);
+
+ for (j = 0; j < n_counters; j++)
+ {
+ for (i = 0; i < vec_len(my_vnet_mains); i++)
+ {
+ im = &my_vnet_mains[i]->interface_main;
+ sm = im->sw_if_counters + j;
+ if (mp->sw_if_index == (u32)~0)
+ vlib_clear_simple_counters (sm);
+ else
+ vlib_zero_simple_counter (sm, ntohl(mp->sw_if_index));
+ }
+ }
+
+ REPLY_MACRO(VL_API_SW_INTERFACE_CLEAR_STATS_REPLY);
+}
+
+static void send_sw_interface_details (vpe_api_main_t * am,
+ unix_shared_memory_queue_t *q,
+ vnet_sw_interface_t * swif,
+ u8 * interface_name,
+ u32 context)
+{
+ vl_api_sw_interface_details_t * mp;
+ vnet_hw_interface_t * hi;
+
+ hi = vnet_get_sup_hw_interface (am->vnet_main, swif->sw_if_index);
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs(VL_API_SW_INTERFACE_DETAILS);
+ mp->sw_if_index = ntohl(swif->sw_if_index);
+ mp->sup_sw_if_index = ntohl(swif->sup_sw_if_index);
+ mp->admin_up_down = (swif->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ?
+ 1 : 0;
+ mp->link_up_down = (hi->flags & VNET_HW_INTERFACE_FLAG_LINK_UP) ?
+ 1 : 0;
+ mp->link_duplex = ((hi->flags & VNET_HW_INTERFACE_FLAG_DUPLEX_MASK) >>
+ VNET_HW_INTERFACE_FLAG_DUPLEX_SHIFT);
+ mp->link_speed = ((hi->flags & VNET_HW_INTERFACE_FLAG_SPEED_MASK) >>
+ VNET_HW_INTERFACE_FLAG_SPEED_SHIFT);
+ mp->link_mtu = ntohs(hi->max_packet_bytes);
+ mp->context = context;
+
+ strncpy ((char *) mp->interface_name,
+ (char *) interface_name, ARRAY_LEN(mp->interface_name)-1);
+
+ /* Send the L2 address for ethernet physical intfcs */
+ if (swif->sup_sw_if_index == swif->sw_if_index
+ && hi->hw_class_index == ethernet_hw_interface_class.index) {
+ ethernet_main_t *em = ethernet_get_main (am->vlib_main);
+ ethernet_interface_t *ei;
+
+ ei = pool_elt_at_index (em->interfaces, hi->hw_instance);
+ ASSERT (sizeof (mp->l2_address) >= sizeof (ei->address));
+ clib_memcpy (mp->l2_address, ei->address, sizeof (ei->address));
+ mp->l2_address_length = ntohl(sizeof (ei->address));
+ } else if (swif->sup_sw_if_index != swif->sw_if_index) {
+ vnet_sub_interface_t *sub = &swif->sub;
+ mp->sub_id = ntohl(sub->id);
+ mp->sub_dot1ad = sub->eth.flags.dot1ad;
+ mp->sub_number_of_tags = sub->eth.flags.one_tag + sub->eth.flags.two_tags*2;
+ mp->sub_outer_vlan_id = ntohs(sub->eth.outer_vlan_id);
+ mp->sub_inner_vlan_id = ntohs(sub->eth.inner_vlan_id);
+ mp->sub_exact_match = sub->eth.flags.exact_match;
+ mp->sub_default = sub->eth.flags.default_sub;
+ mp->sub_outer_vlan_id_any = sub->eth.flags.outer_vlan_id_any;
+ mp->sub_inner_vlan_id_any = sub->eth.flags.inner_vlan_id_any;
+
+ /* vlan tag rewrite data */
+ u32 vtr_op = L2_VTR_DISABLED;
+ u32 vtr_push_dot1q = 0, vtr_tag1 = 0, vtr_tag2 = 0;
+
+ if (l2vtr_get(am->vlib_main, am->vnet_main, swif->sw_if_index,
+ &vtr_op, &vtr_push_dot1q, &vtr_tag1, &vtr_tag2) != 0) {
+ // error - default to disabled
+ mp->vtr_op = ntohl(L2_VTR_DISABLED);
+ clib_warning("cannot get vlan tag rewrite for sw_if_index %d",
+ swif->sw_if_index);
+ } else {
+ mp->vtr_op = ntohl(vtr_op);
+ mp->vtr_push_dot1q = ntohl(vtr_push_dot1q);
+ mp->vtr_tag1 = ntohl(vtr_tag1);
+ mp->vtr_tag2 = ntohl(vtr_tag2);
+ }
+ }
+
+ vl_msg_api_send_shmem (q, (u8 *)&mp);
+}
+
+static void send_sw_interface_flags (vpe_api_main_t * am,
+ unix_shared_memory_queue_t *q,
+ vnet_sw_interface_t * swif)
+{
+ vl_api_sw_interface_set_flags_t *mp;
+ vnet_main_t * vnm = am->vnet_main;
+
+ vnet_hw_interface_t *hi = vnet_get_sup_hw_interface (vnm,
+ swif->sw_if_index);
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs(VL_API_SW_INTERFACE_SET_FLAGS);
+ mp->sw_if_index = ntohl(swif->sw_if_index);
+
+ mp->admin_up_down = (swif->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ?
+ 1 : 0;
+ mp->link_up_down = (hi->flags & VNET_HW_INTERFACE_FLAG_LINK_UP) ?
+ 1 : 0;
+ vl_msg_api_send_shmem (q, (u8 *)&mp);
+}
+
+static void send_sw_interface_flags_deleted (vpe_api_main_t * am,
+ unix_shared_memory_queue_t *q,
+ u32 sw_if_index)
+ __attribute__((unused));
+
+static void send_sw_interface_flags_deleted (vpe_api_main_t * am,
+ unix_shared_memory_queue_t *q,
+ u32 sw_if_index)
+{
+ vl_api_sw_interface_set_flags_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs(VL_API_SW_INTERFACE_SET_FLAGS);
+ mp->sw_if_index = ntohl(sw_if_index);
+
+ mp->admin_up_down = 0;
+ mp->link_up_down = 0;
+ mp->deleted = 1;
+ vl_msg_api_send_shmem (q, (u8 *)&mp);
+}
+
+static void vl_api_sw_interface_dump_t_handler (
+ vl_api_sw_interface_dump_t *mp)
+{
+ vpe_api_main_t * am = &vpe_api_main;
+ vnet_sw_interface_t * swif;
+ vnet_interface_main_t * im = &am->vnet_main->interface_main;
+ u8 * filter_string = 0, * name_string = 0;
+ unix_shared_memory_queue_t * q;
+ char * strcasestr (char *, char *); /* lnx hdr file botch */
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+
+ if (q == 0)
+ return;
+
+ if (mp->name_filter_valid) {
+ mp->name_filter [ARRAY_LEN(mp->name_filter)-1] = 0;
+ filter_string = format (0, "%s%c", mp->name_filter, 0);
+ }
+
+ pool_foreach (swif, im->sw_interfaces,
+ ({
+ name_string = format (name_string, "%U%c",
+ format_vnet_sw_interface_name,
+ am->vnet_main, swif, 0);
+
+ if (mp->name_filter_valid == 0 ||
+ strcasestr((char *) name_string, (char *) filter_string)) {
+
+ send_sw_interface_details (am, q, swif, name_string, mp->context);
+ }
+ _vec_len (name_string) = 0;
+ }));
+
+ vec_free (name_string);
+ vec_free (filter_string);
+}
+
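+/*
+ * Fan an OAM state change out to every client registered for OAM events;
+ * registrations whose input queue has gone away are silently skipped.
+ */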
+void send_oam_event (oam_target_t * t)
+{
+ vpe_api_main_t * vam = &vpe_api_main;
+ unix_shared_memory_queue_t * q;
+ vpe_client_registration_t *reg;
+ vl_api_oam_event_t * mp;
+
+ pool_foreach(reg, vam->oam_events_registrations,
+ ({
+ q = vl_api_client_index_to_input_queue (reg->client_index);
+ if (q) {
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_OAM_EVENT);
+ clib_memcpy (mp->dst_address, &t->dst_address, sizeof (mp->dst_address));
+ mp->state = t->state;
+ vl_msg_api_send_shmem (q, (u8 *)&mp);
+ }
+ }));
+}
+
+static void
+vl_api_oam_add_del_t_handler (vl_api_oam_add_del_t *mp)
+{
+ vl_api_oam_add_del_reply_t * rmp;
+ int rv;
+
+ rv = vpe_oam_add_del_target ((ip4_address_t *)mp->src_address,
+ (ip4_address_t *)mp->dst_address,
+ ntohl(mp->vrf_id),
+ (int)(mp->is_add));
+
+ REPLY_MACRO(VL_API_OAM_ADD_DEL_REPLY);
+}
+
+static void
+vl_api_vnet_get_summary_stats_t_handler (
+ vl_api_vnet_get_summary_stats_t *mp)
+{
+ stats_main_t * sm = &stats_main;
+ vnet_interface_main_t * im = sm->interface_main;
+ vl_api_vnet_summary_stats_reply_t *rmp;
+ vlib_combined_counter_main_t * cm;
+ vlib_counter_t v;
+ int i, which;
+ u64 total_pkts[VLIB_N_RX_TX];
+ u64 total_bytes[VLIB_N_RX_TX];
+
+ unix_shared_memory_queue_t * q =
+ vl_api_client_index_to_input_queue (mp->client_index);
+
+ if (!q)
+ return;
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs(VL_API_VNET_SUMMARY_STATS_REPLY);
+ rmp->context = mp->context;
+ rmp->retval = 0;
+
+ memset (total_pkts, 0, sizeof (total_pkts));
+ memset (total_bytes, 0, sizeof (total_bytes));
+
+ vnet_interface_counter_lock (im);
+
+ vec_foreach (cm, im->combined_sw_if_counters) {
+ which = cm - im->combined_sw_if_counters;
+
+ for (i = 0; i < vec_len (cm->maxi); i++) {
+ vlib_get_combined_counter (cm, i, &v);
+ total_pkts[which] += v.packets;
+ total_bytes[which] += v.bytes;
+ }
+ }
+ vnet_interface_counter_unlock (im);
+
+ /* Note: in HOST byte order! */
+ rmp->total_pkts[VLIB_RX] = total_pkts[VLIB_RX];
+ rmp->total_bytes[VLIB_RX] = total_bytes[VLIB_RX];
+ rmp->total_pkts[VLIB_TX] = total_pkts[VLIB_TX];
+ rmp->total_bytes[VLIB_TX] = total_bytes[VLIB_TX];
+ rmp->vector_rate = vlib_last_vector_length_per_node (sm->vlib_main);
+
+ vl_msg_api_send_shmem (q, (u8 *)&rmp);
+}
+
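+/*
+ * Compact route record used while flushing a FIB: a 0..32 mask length
+ * fits in 6 bits, leaving 26 bits for the lookup-result index, so each
+ * packed entry occupies 8 bytes.
+ */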
+typedef CLIB_PACKED (struct {
+ ip4_address_t address;
+
+ u32 address_length : 6;
+
+ u32 index : 26;
+}) ip4_route_t;
+
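+/*
+ * Resetting an ip4 FIB: tear down the tunnels, encap labels and proxy-ARP
+ * entries bound to the table, admin-down every interface in it, then
+ * collect all routes into a vector (so the per-mask-length hashes aren't
+ * modified mid-iteration) and delete them one at a time.
+ */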
+static int ip4_reset_fib_t_handler (vl_api_reset_fib_t *mp)
+{
+ vnet_main_t * vnm = vnet_get_main();
+ vnet_interface_main_t * im = &vnm->interface_main;
+ ip4_main_t * im4 = &ip4_main;
+ static ip4_route_t * routes;
+ static u32 * sw_if_indices_to_shut;
+ stats_main_t * sm = &stats_main;
+ ip4_route_t * r;
+ ip4_fib_t * fib;
+ u32 sw_if_index;
+ int i;
+ int rv = VNET_API_ERROR_NO_SUCH_FIB;
+ u32 target_fib_id = ntohl(mp->vrf_id);
+
+ dslock (sm, 1 /* release hint */, 8 /* tag */);
+
+ vec_foreach (fib, im4->fibs) {
+ vnet_sw_interface_t * si;
+
+ if (fib->table_id != target_fib_id)
+ continue;
+
+ /* remove any mpls/gre tunnels in this fib */
+ vnet_mpls_gre_delete_fib_tunnels (fib->table_id);
+
+ /* remove any mpls encap/decap labels */
+ mpls_fib_reset_labels (fib->table_id);
+
+ /* remove any proxy arps in this fib */
+ vnet_proxy_arp_fib_reset (fib->table_id);
+
+ /* Set the flow hash for this fib to the default */
+ vnet_set_ip4_flow_hash (fib->table_id, IP_FLOW_HASH_DEFAULT);
+
+ vec_reset_length (sw_if_indices_to_shut);
+
+ /* Shut down interfaces in this FIB / clean out intfc routes */
+ pool_foreach (si, im->sw_interfaces,
+ ({
+ u32 sw_if_index = si->sw_if_index;
+
+ if (sw_if_index < vec_len (im4->fib_index_by_sw_if_index)
+ && (im4->fib_index_by_sw_if_index[si->sw_if_index] ==
+ fib - im4->fibs))
+ vec_add1 (sw_if_indices_to_shut, si->sw_if_index);
+ }));
+
+ for (i = 0; i < vec_len (sw_if_indices_to_shut); i++) {
+ sw_if_index = sw_if_indices_to_shut[i];
+ // vec_foreach (sw_if_index, sw_if_indices_to_shut) {
+
+ u32 flags = vnet_sw_interface_get_flags (vnm, sw_if_index);
+ flags &= ~(VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+ vnet_sw_interface_set_flags (vnm, sw_if_index, flags);
+ }
+
+ vec_reset_length (routes);
+
+ for (i = 0; i < ARRAY_LEN (fib->adj_index_by_dst_address); i++) {
+ uword * hash = fib->adj_index_by_dst_address[i];
+ hash_pair_t * p;
+ ip4_route_t x;
+
+ x.address_length = i;
+
+ hash_foreach_pair (p, hash,
+ ({
+ x.address.data_u32 = p->key;
+ vec_add1 (routes, x);
+ }));
+ }
+
+ vec_foreach (r, routes) {
+ ip4_add_del_route_args_t a;
+
+ memset (&a, 0, sizeof (a));
+ a.flags = IP4_ROUTE_FLAG_FIB_INDEX | IP4_ROUTE_FLAG_DEL;
+ a.table_index_or_table_id = fib - im4->fibs;
+ a.dst_address = r->address;
+ a.dst_address_length = r->address_length;
+ a.adj_index = ~0;
+
+ ip4_add_del_route (im4, &a);
+ ip4_maybe_remap_adjacencies (im4, fib - im4->fibs,
+ IP4_ROUTE_FLAG_FIB_INDEX);
+ }
+ rv = 0;
+ break;
+ } /* vec_foreach (fib) */
+
+ dsunlock(sm);
+ return rv;
+}
+
+typedef struct {
+ ip6_address_t address;
+ u32 address_length;
+ u32 index;
+} ip6_route_t;
+
+typedef struct {
+ u32 fib_index;
+ ip6_route_t ** routep;
+} add_routes_in_fib_arg_t;
+
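+/*
+ * Per-entry callback for walking the ip6 lookup bihash: key words 0-1
+ * hold the address, the top half of key word 2 the fib index, and its
+ * low byte the prefix length. Matching entries are appended to the
+ * caller's route vector.
+ */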
+static void add_routes_in_fib (clib_bihash_kv_24_8_t * kvp, void *arg)
+{
+ add_routes_in_fib_arg_t * ap = arg;
+
+ if (kvp->key[2]>>32 == ap->fib_index)
+ {
+ ip6_address_t *addr;
+ ip6_route_t * r;
+ addr = (ip6_address_t *) kvp;
+ vec_add2 (*ap->routep, r, 1);
+ r->address = addr[0];
+ r->address_length = kvp->key[2] & 0xFF;
+ r->index = kvp->value;
+ }
+}
+
+static int ip6_reset_fib_t_handler (vl_api_reset_fib_t *mp)
+{
+ vnet_main_t * vnm = vnet_get_main();
+ vnet_interface_main_t * im = &vnm->interface_main;
+ ip6_main_t * im6 = &ip6_main;
+ stats_main_t * sm = &stats_main;
+ static ip6_route_t * routes;
+ static u32 * sw_if_indices_to_shut;
+ ip6_route_t * r;
+ ip6_fib_t * fib;
+ u32 sw_if_index;
+ int i;
+ int rv = VNET_API_ERROR_NO_SUCH_FIB;
+ u32 target_fib_id = ntohl(mp->vrf_id);
+ add_routes_in_fib_arg_t _a, *a=&_a;
+ clib_bihash_24_8_t * h = &im6->ip6_lookup_table;
+
+ dslock (sm, 1 /* release hint */, 9 /* tag */);
+
+ vec_foreach (fib, im6->fibs) {
+ vnet_sw_interface_t * si;
+
+ if (fib->table_id != target_fib_id)
+ continue;
+
+ vec_reset_length (sw_if_indices_to_shut);
+
+ /* Shut down interfaces in this FIB / clean out intfc routes */
+ pool_foreach (si, im->sw_interfaces,
+ ({
+ if (im6->fib_index_by_sw_if_index[si->sw_if_index] ==
+ fib - im6->fibs)
+ vec_add1 (sw_if_indices_to_shut, si->sw_if_index);
+ }));
+
+ for (i = 0; i < vec_len (sw_if_indices_to_shut); i++) {
+ sw_if_index = sw_if_indices_to_shut[i];
+ // vec_foreach (sw_if_index, sw_if_indices_to_shut) {
+
+ u32 flags = vnet_sw_interface_get_flags (vnm, sw_if_index);
+ flags &= ~(VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+ vnet_sw_interface_set_flags (vnm, sw_if_index, flags);
+ }
+
+ vec_reset_length (routes);
+
+ a->fib_index = fib - im6->fibs;
+ a->routep = &routes;
+
+ clib_bihash_foreach_key_value_pair_24_8 (h, add_routes_in_fib, a);
+
+    vec_foreach (r, routes) {
+      ip6_add_del_route_args_t da;	/* 'da', so the bihash walk arg 'a' above isn't shadowed */
+
+      memset (&da, 0, sizeof (da));
+      da.flags = IP6_ROUTE_FLAG_FIB_INDEX | IP6_ROUTE_FLAG_DEL;
+      da.table_index_or_table_id = fib - im6->fibs;
+      da.dst_address = r->address;
+      da.dst_address_length = r->address_length;
+      da.adj_index = ~0;
+
+      ip6_add_del_route (im6, &da);
+      ip6_maybe_remap_adjacencies (im6, fib - im6->fibs,
+                                   IP6_ROUTE_FLAG_FIB_INDEX);
+    }
+ rv = 0;
+ /* Reinstall the neighbor / router discovery routes */
+ vnet_ip6_fib_init (im6, fib - im6->fibs);
+ break;
+ } /* vec_foreach (fib) */
+
+ dsunlock(sm);
+ return rv;
+}
+
+static void vl_api_reset_fib_t_handler (vl_api_reset_fib_t *mp)
+{
+ int rv;
+ vl_api_reset_fib_reply_t * rmp;
+
+ if (mp->is_ipv6)
+ rv = ip6_reset_fib_t_handler (mp);
+ else
+ rv = ip4_reset_fib_t_handler (mp);
+
+ REPLY_MACRO(VL_API_RESET_FIB_REPLY);
+}
+
+
+static void
+dhcpv4_proxy_config (vl_api_dhcp_proxy_config_t *mp)
+{
+ vl_api_dhcp_proxy_config_reply_t * rmp;
+ int rv;
+
+ rv = dhcp_proxy_set_server ((ip4_address_t *)(&mp->dhcp_server),
+ (ip4_address_t *)(&mp->dhcp_src_address),
+ (u32) ntohl(mp->vrf_id),
+ (int) mp->insert_circuit_id,
+ (int) (mp->is_add == 0));
+
+ REPLY_MACRO(VL_API_DHCP_PROXY_CONFIG_REPLY);
+}
+
+
+static void
+dhcpv6_proxy_config (vl_api_dhcp_proxy_config_t *mp)
+{
+ vl_api_dhcp_proxy_config_reply_t * rmp;
+ int rv = -1;
+
+ rv = dhcpv6_proxy_set_server ((ip6_address_t *)(&mp->dhcp_server),
+ (ip6_address_t *)(&mp->dhcp_src_address),
+ (u32) ntohl(mp->vrf_id),
+ (int) mp->insert_circuit_id,
+ (int) (mp->is_add == 0));
+
+ REPLY_MACRO(VL_API_DHCP_PROXY_CONFIG_REPLY);
+}
+
+static void
+dhcpv4_proxy_config_2 (vl_api_dhcp_proxy_config_2_t *mp)
+{
+ vl_api_dhcp_proxy_config_reply_t * rmp;
+ int rv;
+
+ rv = dhcp_proxy_set_server_2 ((ip4_address_t *)(&mp->dhcp_server),
+ (ip4_address_t *)(&mp->dhcp_src_address),
+ (u32) ntohl(mp->rx_vrf_id),
+ (u32) ntohl(mp->server_vrf_id),
+ (int) mp->insert_circuit_id,
+ (int) (mp->is_add == 0));
+
+ REPLY_MACRO(VL_API_DHCP_PROXY_CONFIG_2_REPLY);
+}
+
+
+static void
+dhcpv6_proxy_config_2 (vl_api_dhcp_proxy_config_2_t *mp)
+{
+ vl_api_dhcp_proxy_config_reply_t * rmp;
+ int rv = -1;
+
+#if 0 // $$$$ FIXME
+ rv = dhcpv6_proxy_set_server_2 ((ip6_address_t *)(&mp->dhcp_server),
+ (ip6_address_t *)(&mp->dhcp_src_address),
+ (u32) ntohl(mp->rx_vrf_id),
+ (u32) ntohl(mp->server_vrf_id),
+ (int) mp->insert_circuit_id,
+ (int) (mp->is_add == 0));
+#else
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+#endif
+
+ REPLY_MACRO(VL_API_DHCP_PROXY_CONFIG_2_REPLY);
+}
+
+
+static void
+vl_api_dhcp_proxy_set_vss_t_handler (vl_api_dhcp_proxy_set_vss_t *mp)
+{
+ vl_api_dhcp_proxy_set_vss_reply_t *rmp;
+ int rv;
+ if (!mp->is_ipv6)
+ rv = dhcp_proxy_set_option82_vss(ntohl(mp->tbl_id),
+ ntohl(mp->oui),
+ ntohl(mp->fib_id),
+ (int)mp->is_add == 0);
+ else
+ rv = dhcpv6_proxy_set_vss( ntohl(mp->tbl_id),
+ ntohl(mp->oui),
+ ntohl(mp->fib_id),
+ (int)mp->is_add == 0);
+
+ REPLY_MACRO(VL_API_DHCP_PROXY_SET_VSS_REPLY);
+}
+
+
+static void vl_api_dhcp_proxy_config_t_handler
+(vl_api_dhcp_proxy_config_t *mp)
+{
+ if (mp->is_ipv6 == 0)
+ dhcpv4_proxy_config (mp);
+ else
+ dhcpv6_proxy_config (mp);
+}
+
+static void vl_api_dhcp_proxy_config_2_t_handler
+(vl_api_dhcp_proxy_config_2_t *mp)
+{
+ if (mp->is_ipv6 == 0)
+ dhcpv4_proxy_config_2 (mp);
+ else
+ dhcpv6_proxy_config_2 (mp);
+}
+
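+/*
+ * Registered (via dhcp_client_config below) as the DHCP completion
+ * callback: pushes an unsolicited DHCP_COMPL_EVENT to the requesting
+ * client, echoing client_index and pid so the client can demux it.
+ */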
+void dhcp_compl_event_callback (u32 client_index, u32 pid, u8 * hostname,
+ u8 is_ipv6, u8 * host_address, u8 * router_address, u8 * host_mac)
+{
+ unix_shared_memory_queue_t * q;
+ vl_api_dhcp_compl_event_t * mp;
+
+ q = vl_api_client_index_to_input_queue (client_index);
+ if (!q)
+ return;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ mp->client_index = client_index;
+ mp->pid = pid;
+ mp->is_ipv6 = is_ipv6;
+ clib_memcpy (&mp->hostname, hostname, vec_len(hostname));
+  mp->hostname[vec_len(hostname)] = 0;	/* NUL-terminate the copied hostname */
+ clib_memcpy (&mp->host_address[0], host_address, 16);
+ clib_memcpy (&mp->router_address[0], router_address, 16);
+ clib_memcpy (&mp->host_mac[0], host_mac, 6);
+
+ mp->_vl_msg_id = ntohs (VL_API_DHCP_COMPL_EVENT);
+
+ vl_msg_api_send_shmem (q, (u8 *)&mp);
+}
+
+static void vl_api_dhcp_client_config_t_handler
+(vl_api_dhcp_client_config_t *mp)
+{
+ vlib_main_t *vm = vlib_get_main();
+ vl_api_dhcp_client_config_reply_t * rmp;
+ int rv = 0;
+
+ VALIDATE_SW_IF_INDEX(mp);
+
+ rv = dhcp_client_config(vm, ntohl(mp->sw_if_index),
+ mp->hostname, mp->is_add, mp->client_index,
+ mp->want_dhcp_event ? dhcp_compl_event_callback : NULL,
+ mp->pid);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO(VL_API_DHCP_CLIENT_CONFIG_REPLY);
+}
+
+static void
+vl_api_sw_interface_ip6nd_ra_config_t_handler
+(vl_api_sw_interface_ip6nd_ra_config_t *mp, vlib_main_t *vm)
+{
+ vl_api_sw_interface_ip6nd_ra_config_reply_t * rmp;
+ int rv = 0;
+  u8 is_no, suppress, managed, other, ll_option, send_unicast, cease, default_router;
+
+  is_no = mp->is_no == 1;
+  suppress = mp->surpress == 1;	/* "surpress" is the on-the-wire field name */
+  managed = mp->managed == 1;
+  other = mp->other == 1;
+  ll_option = mp->ll_option == 1;
+  send_unicast = mp->send_unicast == 1;
+  cease = mp->cease == 1;
+  default_router = mp->default_router == 1;
+
+  VALIDATE_SW_IF_INDEX(mp);
+
+  rv = ip6_neighbor_ra_config(vm, ntohl(mp->sw_if_index),
+			      suppress, managed, other,
+			      ll_option, send_unicast, cease,
+			      default_router, ntohl(mp->lifetime),
+			      ntohl(mp->initial_count), ntohl(mp->initial_interval),
+			      ntohl(mp->max_interval), ntohl(mp->min_interval),
+			      is_no);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO(VL_API_SW_INTERFACE_IP6ND_RA_CONFIG_REPLY);
+}
+
+static void
+vl_api_sw_interface_ip6nd_ra_prefix_t_handler
+(vl_api_sw_interface_ip6nd_ra_prefix_t *mp, vlib_main_t *vm)
+{
+ vl_api_sw_interface_ip6nd_ra_prefix_reply_t * rmp;
+ int rv = 0;
+ u8 is_no, use_default, no_advertise, off_link, no_autoconfig, no_onlink;
+
+ VALIDATE_SW_IF_INDEX(mp);
+
+ is_no = mp->is_no == 1;
+ use_default = mp->use_default == 1;
+ no_advertise = mp->no_advertise == 1;
+ off_link = mp->off_link == 1;
+ no_autoconfig = mp->no_autoconfig == 1;
+ no_onlink = mp->no_onlink == 1;
+
+ rv = ip6_neighbor_ra_prefix(vm, ntohl(mp->sw_if_index),
+ (ip6_address_t *)mp->address, mp->address_length,
+ use_default, ntohl(mp->val_lifetime), ntohl(mp->pref_lifetime),
+ no_advertise, off_link, no_autoconfig, no_onlink,
+ is_no);
+
+ BAD_SW_IF_INDEX_LABEL;
+ REPLY_MACRO(VL_API_SW_INTERFACE_IP6ND_RA_PREFIX_REPLY);
+}
+
+static void
+vl_api_sw_interface_ip6_enable_disable_t_handler
+(vl_api_sw_interface_ip6_enable_disable_t *mp, vlib_main_t *vm)
+{
+ vl_api_sw_interface_ip6_enable_disable_reply_t * rmp;
+ vnet_main_t * vnm = vnet_get_main();
+ int rv = 0;
+ clib_error_t * error;
+
+ vnm->api_errno = 0;
+
+ VALIDATE_SW_IF_INDEX(mp);
+
+ error = ( mp->enable == 1) ? enable_ip6_interface(vm,ntohl(mp->sw_if_index)) :
+ disable_ip6_interface(vm,ntohl(mp->sw_if_index));
+
+ if (error) {
+ clib_error_report(error);
+ rv = VNET_API_ERROR_UNSPECIFIED;
+ } else {
+ rv = vnm->api_errno;
+ }
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO(VL_API_SW_INTERFACE_IP6_ENABLE_DISABLE_REPLY);
+}
+
+static void
+vl_api_sw_interface_ip6_set_link_local_address_t_handler
+(vl_api_sw_interface_ip6_set_link_local_address_t *mp, vlib_main_t *vm)
+{
+ vl_api_sw_interface_ip6_set_link_local_address_reply_t * rmp;
+ int rv = 0;
+ clib_error_t * error;
+ vnet_main_t * vnm = vnet_get_main();
+
+ vnm->api_errno = 0;
+
+ VALIDATE_SW_IF_INDEX(mp);
+
+ error = set_ip6_link_local_address(vm,
+ ntohl(mp->sw_if_index),
+ (ip6_address_t *)mp->address,
+ mp->address_length);
+ if (error) {
+ clib_error_report(error);
+ rv = VNET_API_ERROR_UNSPECIFIED;
+ } else {
+ rv = vnm->api_errno;
+ }
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO(VL_API_SW_INTERFACE_IP6_SET_LINK_LOCAL_ADDRESS_REPLY);
+}
+
+static void set_ip6_flow_hash (vl_api_set_ip_flow_hash_t *mp)
+{
+ vl_api_set_ip_flow_hash_reply_t *rmp;
+ int rv = VNET_API_ERROR_UNIMPLEMENTED;
+
+ clib_warning ("unimplemented...");
+
+ REPLY_MACRO(VL_API_SET_IP_FLOW_HASH_REPLY);
+}
+
+static void set_ip4_flow_hash (vl_api_set_ip_flow_hash_t *mp)
+{
+ vl_api_set_ip_flow_hash_reply_t *rmp;
+ int rv;
+ u32 table_id;
+ u32 flow_hash_config = 0;
+
+ table_id = ntohl(mp->vrf_id);
+
+#define _(a,b) if (mp->a) flow_hash_config |= b;
+ foreach_flow_hash_bit;
+#undef _
+
+ rv = vnet_set_ip4_flow_hash (table_id, flow_hash_config);
+
+ REPLY_MACRO(VL_API_SET_IP_FLOW_HASH_REPLY);
+}
+
+
+static void vl_api_set_ip_flow_hash_t_handler
+(vl_api_set_ip_flow_hash_t *mp)
+{
+ if (mp->is_ipv6 == 0)
+ set_ip4_flow_hash (mp);
+ else
+ set_ip6_flow_hash (mp);
+}
+
+static void vl_api_sw_interface_set_unnumbered_t_handler
+(vl_api_sw_interface_set_unnumbered_t *mp)
+{
+ vl_api_sw_interface_set_unnumbered_reply_t * rmp;
+ int rv = 0;
+ vnet_sw_interface_t * si;
+ vnet_main_t *vnm = vnet_get_main();
+ u32 sw_if_index, unnumbered_sw_if_index;
+
+ sw_if_index = ntohl(mp->sw_if_index);
+ unnumbered_sw_if_index = ntohl(mp->unnumbered_sw_if_index);
+
+ /*
+ * The API message field names are backwards from
+ * the underlying data structure names.
+ * It's not worth changing them now.
+ */
+ if (pool_is_free_index (vnm->interface_main.sw_interfaces,
+ unnumbered_sw_if_index)) {
+ rv = VNET_API_ERROR_INVALID_SW_IF_INDEX;
+ goto done;
+ }
+
+ /* Only check the "use loop0" field when setting the binding */
+ if (mp->is_add &&
+ pool_is_free_index (vnm->interface_main.sw_interfaces, sw_if_index)) {
+ rv = VNET_API_ERROR_INVALID_SW_IF_INDEX_2;
+ goto done;
+ }
+
+ si = vnet_get_sw_interface (vnm, unnumbered_sw_if_index);
+
+ if (mp->is_add) {
+ si->flags |= VNET_SW_INTERFACE_FLAG_UNNUMBERED;
+ si->unnumbered_sw_if_index = sw_if_index;
+ } else {
+ si->flags &= ~(VNET_SW_INTERFACE_FLAG_UNNUMBERED);
+ si->unnumbered_sw_if_index = (u32)~0;
+ }
+
+ done:
+ REPLY_MACRO(VL_API_SW_INTERFACE_SET_UNNUMBERED_REPLY);
+}
+
+static void vl_api_create_loopback_t_handler
+(vl_api_create_loopback_t *mp)
+{
+ vl_api_create_loopback_reply_t * rmp;
+ u32 sw_if_index;
+ int rv;
+
+ rv = vnet_create_loopback_interface (&sw_if_index, mp->mac_address);
+
+ REPLY_MACRO2(VL_API_CREATE_LOOPBACK_REPLY,
+ ({
+ rmp->sw_if_index = ntohl (sw_if_index);
+ }));
+}
+
+static void vl_api_delete_loopback_t_handler
+(vl_api_delete_loopback_t *mp)
+{
+ vl_api_delete_loopback_reply_t * rmp;
+ u32 sw_if_index;
+ int rv;
+
+ sw_if_index = ntohl (mp->sw_if_index);
+ rv = vnet_delete_loopback_interface (sw_if_index);
+
+ REPLY_MACRO(VL_API_DELETE_LOOPBACK_REPLY);
+}
+
+static void vl_api_control_ping_t_handler
+(vl_api_control_ping_t *mp)
+{
+ vl_api_control_ping_reply_t * rmp;
+ int rv = 0;
+
+ REPLY_MACRO2(VL_API_CONTROL_PING_REPLY,
+ ({
+ rmp->vpe_pid = ntohl (getpid());
+ }));
+}
+
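+/*
+ * CLI output callback for cli_request: appends each output chunk to a
+ * vector kept on the shared-memory API heap (hence the data-heap push/pop
+ * under the region mutex), so the client can read the accumulated reply
+ * directly through reply_in_shmem.
+ */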
+static void shmem_cli_output (uword arg, u8 * buffer, uword buffer_bytes)
+{
+ u8 **shmem_vecp = (u8 **)arg;
+ u8 *shmem_vec;
+ void *oldheap;
+ api_main_t * am = &api_main;
+ u32 offset;
+
+ shmem_vec = *shmem_vecp;
+
+ offset = vec_len (shmem_vec);
+
+ pthread_mutex_lock (&am->vlib_rp->mutex);
+ oldheap = svm_push_data_heap (am->vlib_rp);
+
+ vec_validate (shmem_vec, offset + buffer_bytes - 1);
+
+ clib_memcpy (shmem_vec + offset, buffer, buffer_bytes);
+
+ svm_pop_heap (oldheap);
+ pthread_mutex_unlock (&am->vlib_rp->mutex);
+
+ *shmem_vecp = shmem_vec;
+}
+
+
+static void vl_api_cli_request_t_handler
+(vl_api_cli_request_t *mp)
+{
+ vl_api_cli_reply_t *rp;
+ unix_shared_memory_queue_t *q;
+ vlib_main_t * vm = vlib_get_main();
+ api_main_t * am = &api_main;
+ unformat_input_t input;
+ u8 *shmem_vec=0;
+ void *oldheap;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (!q)
+ return;
+
+ rp = vl_msg_api_alloc (sizeof (*rp));
+ rp->_vl_msg_id = ntohs(VL_API_CLI_REPLY);
+ rp->context = mp->context;
+
+ unformat_init_vector (&input, (u8 *)(uword)mp->cmd_in_shmem);
+
+ vlib_cli_input (vm, &input, shmem_cli_output,
+ (uword)&shmem_vec);
+
+ pthread_mutex_lock (&am->vlib_rp->mutex);
+ oldheap = svm_push_data_heap (am->vlib_rp);
+
+ vec_add1(shmem_vec, 0);
+
+ svm_pop_heap (oldheap);
+ pthread_mutex_unlock (&am->vlib_rp->mutex);
+
+ rp->reply_in_shmem = (uword)shmem_vec;
+
+ vl_msg_api_send_shmem (q, (u8 *)&rp);
+}
+
+static void vl_api_set_arp_neighbor_limit_t_handler (vl_api_set_arp_neighbor_limit_t *mp)
+{
+ int rv;
+ vl_api_set_arp_neighbor_limit_reply_t * rmp;
+ vnet_main_t *vnm = vnet_get_main();
+ clib_error_t * error;
+
+ vnm->api_errno = 0;
+
+ if (mp->is_ipv6)
+ error = ip6_set_neighbor_limit (ntohl(mp->arp_neighbor_limit));
+ else
+ error = ip4_set_arp_limit (ntohl(mp->arp_neighbor_limit));
+
+ if (error) {
+ clib_error_report(error);
+ rv = VNET_API_ERROR_UNSPECIFIED;
+ } else {
+ rv = vnm->api_errno;
+ }
+
+ REPLY_MACRO(VL_API_SET_ARP_NEIGHBOR_LIMIT_REPLY);
+}
+
+static void vl_api_sr_tunnel_add_del_t_handler
+(vl_api_sr_tunnel_add_del_t *mp)
+{
+#if IPV6SR == 0
+ clib_warning ("unimplemented");
+#else
+ ip6_sr_add_del_tunnel_args_t _a, *a=&_a;
+ int rv = 0;
+ vl_api_sr_tunnel_add_del_reply_t * rmp;
+ ip6_address_t * segments = 0, * seg;
+ ip6_address_t * tags = 0, *tag;
+ ip6_address_t * this_address;
+ int i;
+
+ if (mp->n_segments == 0) {
+ rv = -11;
+ goto out;
+ }
+
+ memset (a, 0, sizeof (*a));
+ a->src_address = (ip6_address_t *)&mp->src_address;
+ a->dst_address = (ip6_address_t *)&mp->dst_address;
+ a->dst_mask_width = mp->dst_mask_width;
+ a->flags_net_byte_order = mp->flags_net_byte_order;
+ a->is_del = (mp->is_add == 0);
+ a->rx_table_id = ntohl(mp->outer_vrf_id);
+ a->tx_table_id = ntohl(mp->inner_vrf_id);
+
+ a->name = format(0, "%s", mp->name);
+ if (!(vec_len(a->name)))
+ a->name = 0;
+
+ a->policy_name = format(0, "%s", mp->policy_name);
+ if (!(vec_len(a->policy_name)))
+ a->policy_name = 0;
+
+ /* Yank segments and tags out of the API message */
+ this_address = (ip6_address_t *)mp->segs_and_tags;
+ for (i = 0; i < mp->n_segments; i++) {
+ vec_add2 (segments, seg, 1);
+ clib_memcpy (seg->as_u8, this_address->as_u8, sizeof (*this_address));
+ this_address++;
+ }
+ for (i = 0; i < mp->n_tags; i++) {
+ vec_add2 (tags, tag, 1);
+ clib_memcpy (tag->as_u8, this_address->as_u8, sizeof (*this_address));
+ this_address++;
+ }
+
+ a->segments = segments;
+ a->tags = tags;
+
+ rv = ip6_sr_add_del_tunnel (a);
+
+out:
+
+ REPLY_MACRO(VL_API_SR_TUNNEL_ADD_DEL_REPLY);
+#endif
+}
+
+static void vl_api_sr_policy_add_del_t_handler
+(vl_api_sr_policy_add_del_t *mp)
+{
+#if IPV6SR == 0
+ clib_warning ("unimplemented");
+#else
+ ip6_sr_add_del_policy_args_t _a, *a=&_a;
+ int rv = 0;
+ vl_api_sr_policy_add_del_reply_t * rmp;
+ int i;
+
+ memset (a, 0, sizeof (*a));
+ a->is_del = (mp->is_add == 0);
+
+ a->name = format(0, "%s", mp->name);
+ if (!(vec_len(a->name)))
+ {
+ rv = VNET_API_ERROR_NO_SUCH_NODE2;
+ goto out;
+ }
+
+ if (!(mp->tunnel_names[0]))
+ {
+ rv = VNET_API_ERROR_NO_SUCH_NODE2;
+ goto out;
+ }
+
+  /*
+   * tunnel_names is serialized as: byte 0 = number of tunnels, then for
+   * each tunnel a length byte followed by that many name bytes.
+   */
+  int num_tunnels = mp->tunnel_names[0];
+  u8 * deser_tun_names = mp->tunnel_names;
+  deser_tun_names += 1;		/* step past the count byte */
+
+  u8 * tun_name = 0;
+  int tun_name_len = 0;
+
+ for (i=0; i < num_tunnels; i++)
+ {
+      tun_name_len = *deser_tun_names;
+      deser_tun_names += 1;
+      vec_resize (tun_name, tun_name_len);
+      clib_memcpy (tun_name, deser_tun_names, tun_name_len);
+ vec_add1 (a->tunnel_names, tun_name);
+ deser_tun_names += tun_name_len;
+ tun_name = 0;
+ }
+
+ rv = ip6_sr_add_del_policy (a);
+
+out:
+
+ REPLY_MACRO(VL_API_SR_POLICY_ADD_DEL_REPLY);
+#endif
+}
+
+static void vl_api_sr_multicast_map_add_del_t_handler
+(vl_api_sr_multicast_map_add_del_t *mp)
+{
+#if IPV6SR == 0
+ clib_warning ("unimplemented");
+#else
+ ip6_sr_add_del_multicastmap_args_t _a, *a=&_a;
+ int rv = 0;
+ vl_api_sr_multicast_map_add_del_reply_t * rmp;
+
+ memset (a, 0, sizeof (*a));
+ a->is_del = (mp->is_add == 0);
+
+ a->multicast_address = (ip6_address_t *)&mp->multicast_address;
+ a->policy_name = format(0, "%s", mp->policy_name);
+
+ if (a->multicast_address == 0)
+ {
+ rv = -1 ;
+ goto out;
+ }
+
+ if (!(a->policy_name))
+ {
+ rv = -2 ;
+ goto out;
+ }
+
+#if DPDK > 0 /* Cannot call replicate without DPDK */
+ rv = ip6_sr_add_del_multicastmap (a);
+#else
+ clib_warning ("multicast replication without DPDK not implemented");
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+#endif /* DPDK */
+
+out:
+
+ REPLY_MACRO(VL_API_SR_MULTICAST_MAP_ADD_DEL_REPLY);
+#endif
+}
+
+#define foreach_classify_add_del_table_field \
+_(table_index) \
+_(nbuckets) \
+_(memory_size) \
+_(skip_n_vectors) \
+_(match_n_vectors) \
+_(next_table_index) \
+_(miss_next_index)
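+
+/*
+ * X-macro list: expanded once below to declare a u32 local per field and
+ * once more to byte-swap each field out of the message, keeping the two
+ * expansions in lock step.
+ */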
+
+static void vl_api_classify_add_del_table_t_handler
+(vl_api_classify_add_del_table_t * mp)
+{
+ vl_api_classify_add_del_table_reply_t * rmp;
+ vnet_classify_main_t * cm = &vnet_classify_main;
+ vnet_classify_table_t * t;
+ int rv;
+
+#define _(a) u32 a;
+ foreach_classify_add_del_table_field;
+#undef _
+
+#define _(a) a = ntohl(mp->a);
+ foreach_classify_add_del_table_field;
+#undef _
+
+ /* The underlying API fails silently, on purpose, so check here */
+ if (mp->is_add == 0)
+ if (pool_is_free_index (cm->tables, table_index)) {
+ rv = VNET_API_ERROR_NO_SUCH_TABLE;
+ goto out;
+ }
+
+ rv = vnet_classify_add_del_table
+ (cm, mp->mask, nbuckets, memory_size,
+ skip_n_vectors, match_n_vectors,
+ next_table_index, miss_next_index,
+ &table_index, mp->is_add);
+
+out:
+ REPLY_MACRO2(VL_API_CLASSIFY_ADD_DEL_TABLE_REPLY,
+ ({
+ if (rv == 0 && mp->is_add) {
+ t = pool_elt_at_index (cm->tables, table_index);
+ rmp->skip_n_vectors = ntohl(t->skip_n_vectors);
+ rmp->match_n_vectors = ntohl(t->match_n_vectors);
+ rmp->new_table_index = ntohl(table_index);
+ } else {
+ rmp->skip_n_vectors = ~0;
+ rmp->match_n_vectors = ~0;
+ rmp->new_table_index = ~0;
+ }
+ }));
+}
+
+static void vl_api_classify_add_del_session_t_handler
+(vl_api_classify_add_del_session_t * mp)
+{
+ vnet_classify_main_t * cm = &vnet_classify_main;
+ vl_api_classify_add_del_session_reply_t * rmp;
+ int rv;
+ u32 table_index, hit_next_index, opaque_index;
+ i32 advance;
+
+ table_index = ntohl (mp->table_index);
+ hit_next_index = ntohl (mp->hit_next_index);
+ opaque_index = ntohl (mp->opaque_index);
+ advance = ntohl (mp->advance);
+
+ rv = vnet_classify_add_del_session
+ (cm, table_index, mp->match, hit_next_index, opaque_index,
+ advance, mp->is_add);
+
+ REPLY_MACRO(VL_API_CLASSIFY_ADD_DEL_SESSION_REPLY);
+}
+
+static void vl_api_classify_set_interface_ip_table_t_handler
+(vl_api_classify_set_interface_ip_table_t * mp)
+{
+ vlib_main_t * vm = vlib_get_main();
+ vl_api_classify_set_interface_ip_table_reply_t * rmp;
+ int rv;
+ u32 table_index, sw_if_index;
+
+ table_index = ntohl (mp->table_index);
+ sw_if_index = ntohl (mp->sw_if_index);
+
+ VALIDATE_SW_IF_INDEX(mp);
+
+ if (mp->is_ipv6)
+ rv = vnet_set_ip6_classify_intfc (vm, sw_if_index, table_index);
+ else
+ rv = vnet_set_ip4_classify_intfc (vm, sw_if_index, table_index);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO(VL_API_CLASSIFY_SET_INTERFACE_IP_TABLE_REPLY);
+}
+
+static void vl_api_classify_set_interface_l2_tables_t_handler
+(vl_api_classify_set_interface_l2_tables_t * mp)
+{
+ vl_api_classify_set_interface_l2_tables_reply_t * rmp;
+ int rv;
+ u32 sw_if_index, ip4_table_index, ip6_table_index, other_table_index;
+ int enable;
+
+ ip4_table_index = ntohl(mp->ip4_table_index);
+ ip6_table_index = ntohl(mp->ip6_table_index);
+ other_table_index = ntohl(mp->other_table_index);
+ sw_if_index = ntohl(mp->sw_if_index);
+
+ VALIDATE_SW_IF_INDEX(mp);
+
+ rv = vnet_l2_classify_set_tables (sw_if_index, ip4_table_index,
+ ip6_table_index, other_table_index);
+
+ if (rv == 0) {
+ if (ip4_table_index != ~0 || ip6_table_index != ~0
+ || other_table_index != ~0)
+ enable = 1;
+ else
+ enable = 0;
+
+ vnet_l2_classify_enable_disable (sw_if_index, enable);
+ }
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO(VL_API_CLASSIFY_SET_INTERFACE_L2_TABLES_REPLY);
+}
+
+static void
+vl_api_l2_fib_clear_table_t_handler (vl_api_l2_fib_clear_table_t *mp)
+{
+ int rv = 0;
+ vl_api_l2_fib_clear_table_reply_t * rmp;
+
+ /* DAW-FIXME: This API should only clear non-static l2fib entries, but
+ * that is not currently implemented. When that TODO is fixed
+ * this call should be changed to pass 1 instead of 0.
+ */
+ l2fib_clear_table (0);
+
+ REPLY_MACRO(VL_API_L2_FIB_CLEAR_TABLE_REPLY);
+}
+
+extern void l2_efp_filter_configure(vnet_main_t * vnet_main,
+ u32 sw_if_index,
+ u32 enable);
+
+static void
+vl_api_l2_interface_efp_filter_t_handler (vl_api_l2_interface_efp_filter_t *mp)
+{
+ int rv;
+ vl_api_l2_interface_efp_filter_reply_t * rmp;
+ vnet_main_t *vnm = vnet_get_main();
+
+ // enable/disable the feature
+  l2_efp_filter_configure (vnm, ntohl(mp->sw_if_index), mp->enable_disable);
+ rv = vnm->api_errno;
+
+ REPLY_MACRO(VL_API_L2_INTERFACE_EFP_FILTER_REPLY);
+}
+
+static void
+vl_api_l2_interface_vlan_tag_rewrite_t_handler (vl_api_l2_interface_vlan_tag_rewrite_t *mp)
+{
+ int rv = 0;
+ vl_api_l2_interface_vlan_tag_rewrite_reply_t * rmp;
+ vnet_main_t * vnm = vnet_get_main();
+ vlib_main_t * vm = vlib_get_main();
+ u32 vtr_op;
+
+ VALIDATE_SW_IF_INDEX(mp);
+
+ vtr_op = ntohl(mp->vtr_op);
+
+  /* The L2 code doesn't range-check vtr_op, so validate it here */
+ switch(vtr_op) {
+ case L2_VTR_DISABLED:
+ case L2_VTR_PUSH_1:
+ case L2_VTR_PUSH_2:
+ case L2_VTR_POP_1:
+ case L2_VTR_POP_2:
+ case L2_VTR_TRANSLATE_1_1:
+ case L2_VTR_TRANSLATE_1_2:
+ case L2_VTR_TRANSLATE_2_1:
+ case L2_VTR_TRANSLATE_2_2:
+ break;
+
+ default:
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ goto bad_sw_if_index;
+ }
+
+ rv = l2vtr_configure (vm, vnm, ntohl(mp->sw_if_index), vtr_op,
+ ntohl(mp->push_dot1q), ntohl(mp->tag1),
+ ntohl(mp->tag2));
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO(VL_API_L2_INTERFACE_VLAN_TAG_REWRITE_REPLY);
+}
+
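+/*
+ * The vhost-user handlers below are DPDK-only; note that when built with
+ * DPDK == 0 they currently return without sending any reply, so a client
+ * calling them will wait for an answer that never comes.
+ */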
+static void
+vl_api_create_vhost_user_if_t_handler (vl_api_create_vhost_user_if_t *mp)
+{
+#if DPDK > 0
+ int rv = 0;
+ vl_api_create_vhost_user_if_reply_t * rmp;
+ u32 sw_if_index = (u32)~0;
+
+ vnet_main_t * vnm = vnet_get_main();
+ vlib_main_t * vm = vlib_get_main();
+
+ rv = dpdk_vhost_user_create_if(vnm, vm, (char *)mp->sock_filename,
+ mp->is_server, &sw_if_index, (u64)~0,
+ mp->renumber, ntohl(mp->custom_dev_instance),
+ (mp->use_custom_mac)?mp->mac_address:NULL);
+
+ REPLY_MACRO2(VL_API_CREATE_VHOST_USER_IF_REPLY,
+ ({
+ rmp->sw_if_index = ntohl (sw_if_index);
+ }));
+#endif
+}
+
+static void
+vl_api_modify_vhost_user_if_t_handler (vl_api_modify_vhost_user_if_t *mp)
+{
+#if DPDK > 0
+ int rv = 0;
+ vl_api_modify_vhost_user_if_reply_t * rmp;
+ u32 sw_if_index = ntohl(mp->sw_if_index);
+
+ vnet_main_t * vnm = vnet_get_main();
+ vlib_main_t * vm = vlib_get_main();
+
+ rv = dpdk_vhost_user_modify_if(vnm, vm, (char *)mp->sock_filename,
+ mp->is_server, sw_if_index, (u64)~0,
+ mp->renumber, ntohl(mp->custom_dev_instance));
+
+ REPLY_MACRO(VL_API_MODIFY_VHOST_USER_IF_REPLY);
+#endif
+}
+
+static void
+vl_api_delete_vhost_user_if_t_handler (vl_api_delete_vhost_user_if_t *mp)
+{
+#if DPDK > 0
+ int rv = 0;
+ vpe_api_main_t * vam = &vpe_api_main;
+ vl_api_delete_vhost_user_if_reply_t * rmp;
+ u32 sw_if_index = ntohl(mp->sw_if_index);
+
+ vnet_main_t * vnm = vnet_get_main();
+ vlib_main_t * vm = vlib_get_main();
+
+ rv = dpdk_vhost_user_delete_if(vnm, vm, sw_if_index);
+
+ REPLY_MACRO(VL_API_DELETE_VHOST_USER_IF_REPLY);
+ if (!rv) {
+ unix_shared_memory_queue_t * q =
+ vl_api_client_index_to_input_queue (mp->client_index);
+ if (!q)
+ return;
+
+ send_sw_interface_flags_deleted (vam, q, sw_if_index);
+ }
+#endif
+}
+
+static void vl_api_sw_interface_vhost_user_details_t_handler (
+ vl_api_sw_interface_vhost_user_details_t * mp)
+{
+ clib_warning ("BUG");
+}
+
+#if DPDK > 0
+static void send_sw_interface_vhost_user_details (vpe_api_main_t * am,
+ unix_shared_memory_queue_t *q,
+ vhost_user_intf_details_t * vui,
+ u32 context)
+{
+ vl_api_sw_interface_vhost_user_details_t * mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs(VL_API_SW_INTERFACE_VHOST_USER_DETAILS);
+ mp->sw_if_index = ntohl(vui->sw_if_index);
+ mp->virtio_net_hdr_sz = ntohl (vui->virtio_net_hdr_sz);
+ mp->features = clib_net_to_host_u64 (vui->features);
+ mp->is_server = vui->is_server;
+ mp->num_regions = ntohl(vui->num_regions);
+ mp->sock_errno = ntohl(vui->sock_errno);
+ mp->context = context;
+
+ strncpy ((char *) mp->sock_filename,
+ (char *) vui->sock_filename, ARRAY_LEN(mp->sock_filename)-1);
+ strncpy ((char *) mp->interface_name,
+ (char *) vui->if_name, ARRAY_LEN(mp->interface_name)-1);
+
+ vl_msg_api_send_shmem (q, (u8 *)&mp);
+}
+#endif
+
+static void
+vl_api_sw_interface_vhost_user_dump_t_handler (
+ vl_api_sw_interface_vhost_user_dump_t *mp)
+{
+#if DPDK > 0
+ int rv = 0;
+ vpe_api_main_t * am = &vpe_api_main;
+ vnet_main_t * vnm = vnet_get_main();
+ vlib_main_t * vm = vlib_get_main();
+ vhost_user_intf_details_t *ifaces = NULL;
+ vhost_user_intf_details_t *vuid = NULL;
+ unix_shared_memory_queue_t * q;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ return;
+
+ rv = dpdk_vhost_user_dump_ifs(vnm, vm, &ifaces);
+ if (rv)
+ return;
+
+ vec_foreach (vuid, ifaces) {
+ send_sw_interface_vhost_user_details (am, q, vuid, mp->context);
+ }
+ vec_free(ifaces);
+#endif
+}
+
+static void send_sw_if_l2tpv3_tunnel_details (vpe_api_main_t * am,
+ unix_shared_memory_queue_t *q,
+ l2t_session_t *s,
+ l2t_main_t * lm,
+ u32 context)
+{
+ vl_api_sw_if_l2tpv3_tunnel_details_t * mp;
+ u8 * if_name = NULL;
+ vnet_sw_interface_t * si = NULL;
+
+ si = vnet_get_hw_sw_interface (lm->vnet_main, s->hw_if_index);
+
+ if_name = format(if_name, "%U",
+ format_vnet_sw_interface_name, lm->vnet_main, si);
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs(VL_API_SW_IF_L2TPV3_TUNNEL_DETAILS);
+ strncpy((char *)mp->interface_name,
+ (char *)if_name, ARRAY_LEN(mp->interface_name)-1);
+ mp->sw_if_index = ntohl(si->sw_if_index);
+ mp->local_session_id = s->local_session_id;
+ mp->remote_session_id = s->remote_session_id;
+ mp->local_cookie[0] = s->local_cookie[0];
+ mp->local_cookie[1] = s->local_cookie[1];
+ mp->remote_cookie = s->remote_cookie;
+ clib_memcpy(mp->client_address, &s->client_address, sizeof(s->client_address));
+ clib_memcpy(mp->our_address, &s->our_address, sizeof(s->our_address));
+ mp->l2_sublayer_present = s->l2_sublayer_present;
+ mp->context = context;
+
+ vl_msg_api_send_shmem (q, (u8 *)&mp);
+}
+
+static void send_ip_address_details (vpe_api_main_t * am,
+ unix_shared_memory_queue_t * q,
+ u8 * ip,
+ u16 prefix_length,
+ u8 is_ipv6,
+ u32 context)
+{
+ vl_api_ip_address_details_t * mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs(VL_API_IP_ADDRESS_DETAILS);
+
+ if (is_ipv6) {
+ clib_memcpy(&mp->ip, ip, sizeof(mp->ip));
+ } else {
+ u32 * tp = (u32 *)mp->ip;
+ *tp = ntohl(*(u32*)ip);
+ }
+ mp->prefix_length = prefix_length;
+ mp->context = context;
+
+ vl_msg_api_send_shmem (q, (u8 *)&mp);
+}
+
+static void
+vl_api_ip_address_dump_t_handler (vl_api_ip_address_dump_t *mp)
+{
+ vpe_api_main_t * am = &vpe_api_main;
+ unix_shared_memory_queue_t * q;
+ ip6_address_t * r6;
+ ip4_address_t * r4;
+ ip6_main_t * im6 = &ip6_main;
+ ip4_main_t * im4 = &ip4_main;
+ ip_lookup_main_t * lm6 = &im6->lookup_main;
+ ip_lookup_main_t * lm4 = &im4->lookup_main;
+ ip_interface_address_t * ia = 0;
+ u32 sw_if_index = ~0;
+ int rv __attribute__ ((unused)) = 0;
+
+ VALIDATE_SW_IF_INDEX(mp);
+
+ sw_if_index = ntohl(mp->sw_if_index);
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0) {
+ return;
+ }
+
+ if (mp->is_ipv6) {
+ foreach_ip_interface_address (lm6, ia, sw_if_index,
+ 1 /* honor unnumbered */,
+ ({
+ r6 = ip_interface_address_get_address (lm6, ia);
+ u16 prefix_length = ia->address_length;
+ send_ip_address_details(am, q, (u8*)r6, prefix_length, 1, mp->context);
+ }));
+ } else {
+ foreach_ip_interface_address (lm4, ia, sw_if_index,
+ 1 /* honor unnumbered */,
+ ({
+ r4 = ip_interface_address_get_address (lm4, ia);
+ u16 prefix_length = ia->address_length;
+ send_ip_address_details(am, q, (u8*)r4, prefix_length, 0, mp->context);
+ }));
+ }
+ BAD_SW_IF_INDEX_LABEL;
+}
+
+static void send_ip_details (vpe_api_main_t * am,
+ unix_shared_memory_queue_t *q,
+ u32 sw_if_index,
+ u32 context)
+{
+ vl_api_ip_details_t * mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs(VL_API_IP_DETAILS);
+
+ mp->sw_if_index = ntohl(sw_if_index);
+ mp->context = context;
+
+ vl_msg_api_send_shmem (q, (u8 *)&mp);
+}
+
+static void
+vl_api_sw_if_l2tpv3_tunnel_dump_t_handler (
+ vl_api_sw_if_l2tpv3_tunnel_dump_t *mp)
+{
+ vpe_api_main_t * am = &vpe_api_main;
+ l2t_main_t * lm = &l2t_main;
+ unix_shared_memory_queue_t * q;
+ l2t_session_t *session;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ return;
+
+ pool_foreach (session, lm->sessions,
+ ({
+ send_sw_if_l2tpv3_tunnel_details (am, q, session, lm, mp->context);
+ }));
+}
+
+
+static void send_sw_interface_tap_details (vpe_api_main_t * am,
+ unix_shared_memory_queue_t *q,
+ tapcli_interface_details_t *tap_if,
+ u32 context)
+{
+ vl_api_sw_interface_tap_details_t * mp;
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs(VL_API_SW_INTERFACE_TAP_DETAILS);
+ mp->sw_if_index = ntohl(tap_if->sw_if_index);
+ strncpy((char *)mp->dev_name,
+ (char *)tap_if->dev_name, ARRAY_LEN(mp->dev_name)-1);
+ mp->context = context;
+
+ vl_msg_api_send_shmem (q, (u8 *)&mp);
+}
+
+static void
+vl_api_sw_interface_tap_dump_t_handler (
+ vl_api_sw_interface_tap_dump_t *mp)
+{
+ int rv = 0;
+ vpe_api_main_t * am = &vpe_api_main;
+ unix_shared_memory_queue_t * q;
+ tapcli_interface_details_t *tapifs = NULL;
+ tapcli_interface_details_t *tap_if = NULL;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ return;
+
+ rv = vnet_tap_dump_ifs(&tapifs);
+ if (rv)
+ return;
+
+ vec_foreach(tap_if, tapifs) {
+ send_sw_interface_tap_details(am, q, tap_if, mp->context);
+ }
+
+ vec_free(tapifs);
+}
+
+static void
+vl_api_ip_dump_t_handler (vl_api_ip_dump_t *mp)
+{
+ vpe_api_main_t * am = &vpe_api_main;
+ vnet_main_t * vnm = vnet_get_main();
+ vlib_main_t * vm = vlib_get_main();
+ vnet_interface_main_t * im = &vnm->interface_main;
+ unix_shared_memory_queue_t * q;
+ vnet_sw_interface_t * si, * sorted_sis;
+ u32 sw_if_index = ~0;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0) {
+ return;
+ }
+
+ /* Gather interfaces. */
+ sorted_sis = vec_new (vnet_sw_interface_t, pool_elts (im->sw_interfaces));
+ _vec_len (sorted_sis) = 0;
+ pool_foreach (si, im->sw_interfaces, ({ vec_add1 (sorted_sis, si[0]); }));
+
+ vec_foreach (si, sorted_sis) {
+ if (!(si->flags & VNET_SW_INTERFACE_FLAG_UNNUMBERED)) {
+ if (mp->is_ipv6 && !ip6_interface_enabled(vm, si->sw_if_index)) {
+ continue;
+ }
+ sw_if_index = si->sw_if_index;
+ send_ip_details(am, q, sw_if_index, mp->context);
+ }
+ }
+}
+
+static void vl_api_l2_fib_table_entry_t_handler (
+ vl_api_l2_fib_table_entry_t * mp)
+{
+ clib_warning ("BUG");
+}
+
+static void send_l2fib_table_entry (vpe_api_main_t * am,
+ unix_shared_memory_queue_t *q,
+ l2fib_entry_key_t * l2fe_key,
+ l2fib_entry_result_t * l2fe_res,
+ u32 context)
+{
+ vl_api_l2_fib_table_entry_t * mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs(VL_API_L2_FIB_TABLE_ENTRY);
+
+ mp->bd_id = ntohl(l2input_main.bd_configs[l2fe_key->fields.bd_index].bd_id);
+
+ mp->mac = l2fib_make_key (l2fe_key->fields.mac, 0);
+ mp->sw_if_index = ntohl(l2fe_res->fields.sw_if_index);
+ mp->static_mac = l2fe_res->fields.static_mac;
+ mp->filter_mac = l2fe_res->fields.filter;
+ mp->bvi_mac = l2fe_res->fields.bvi;
+ mp->context = context;
+
+ vl_msg_api_send_shmem (q, (u8 *)&mp);
+}
+
+static void
+vl_api_l2_fib_table_dump_t_handler (vl_api_l2_fib_table_dump_t *mp)
+{
+ vpe_api_main_t * am = &vpe_api_main;
+ bd_main_t * bdm = &bd_main;
+ l2fib_entry_key_t *l2fe_key = NULL;
+ l2fib_entry_result_t *l2fe_res = NULL;
+ u32 ni, bd_id = ntohl (mp->bd_id);
+ u32 bd_index;
+ unix_shared_memory_queue_t * q;
+ uword * p;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ return;
+
+ /* see l2fib_table_dump: ~0 means "any" */
+ if (bd_id == ~0)
+ bd_index = ~0;
+ else {
+ p = hash_get (bdm->bd_index_by_bd_id, bd_id);
+ if (p == 0)
+ return;
+
+ bd_index = p[0];
+ }
+
+ l2fib_table_dump (bd_index, &l2fe_key, &l2fe_res);
+
+ vec_foreach_index (ni, l2fe_key) {
+ send_l2fib_table_entry (am, q, vec_elt_at_index(l2fe_key, ni),
+ vec_elt_at_index(l2fe_res, ni), mp->context);
+ }
+ vec_free(l2fe_key);
+ vec_free(l2fe_res);
+}
+
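+/*
+ * REPLY_MACRO2 allocates the reply, stores rv in retval in network
+ * byte order, runs the body block to fill in any extra fields, and
+ * sends the reply to the client's input queue.
+ */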
+static void
+vl_api_show_version_t_handler (vl_api_show_version_t *mp)
+{
+ vl_api_show_version_reply_t *rmp;
+ int rv = 0;
+ char * vpe_api_get_build_directory(void);
+ char * vpe_api_get_version(void);
+ char * vpe_api_get_build_date(void);
+
+ unix_shared_memory_queue_t * q =
+ vl_api_client_index_to_input_queue (mp->client_index);
+
+ if (!q)
+ return;
+
+ REPLY_MACRO2(VL_API_SHOW_VERSION_REPLY,
+ ({
+ strncpy ((char *) rmp->program, "vpe", ARRAY_LEN(rmp->program)-1);
+ strncpy ((char *) rmp->build_directory, vpe_api_get_build_directory(),
+ ARRAY_LEN(rmp->build_directory)-1);
+ strncpy ((char *) rmp->version, vpe_api_get_version(),
+ ARRAY_LEN(rmp->version)-1);
+ strncpy ((char *) rmp->build_date, vpe_api_get_build_date(),
+ ARRAY_LEN(rmp->build_date)-1);
+ }));
+}
+
+static void vl_api_get_node_index_t_handler
+(vl_api_get_node_index_t * mp)
+{
+ vlib_main_t * vm = vlib_get_main();
+ vl_api_get_node_index_reply_t * rmp;
+ vlib_node_t * n;
+ int rv = 0;
+ u32 node_index = ~0;
+
+ n = vlib_get_node_by_name (vm, mp->node_name);
+
+ if (n == 0)
+ rv = VNET_API_ERROR_NO_SUCH_NODE;
+ else
+ node_index = n->index;
+
+ REPLY_MACRO2(VL_API_GET_NODE_INDEX_REPLY,
+ ({
+ rmp->node_index = ntohl(node_index);
+ }))
+}
+
+static void vl_api_add_node_next_t_handler
+(vl_api_add_node_next_t * mp)
+{
+ vlib_main_t * vm = vlib_get_main();
+ vl_api_add_node_next_reply_t * rmp;
+ vlib_node_t * n, * next;
+ int rv = 0;
+ u32 next_index = ~0;
+
+ n = vlib_get_node_by_name (vm, mp->node_name);
+
+ if (n == 0) {
+ rv = VNET_API_ERROR_NO_SUCH_NODE;
+ goto out;
+ }
+
+ next = vlib_get_node_by_name (vm, mp->next_name);
+
+ if (next == 0)
+ rv = VNET_API_ERROR_NO_SUCH_NODE2;
+ else
+ next_index = vlib_node_add_next (vm, n->index, next->index);
+
+out:
+    REPLY_MACRO2(VL_API_ADD_NODE_NEXT_REPLY,
+ ({
+ rmp->next_index = ntohl(next_index);
+ }))
+}
+
+static void vl_api_l2tpv3_create_tunnel_t_handler
+(vl_api_l2tpv3_create_tunnel_t *mp)
+{
+ vl_api_l2tpv3_create_tunnel_reply_t * rmp;
+ l2t_main_t *lm = &l2t_main;
+ u32 sw_if_index = (u32)~0;
+ int rv;
+
+ if (mp->is_ipv6 != 1) {
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+ goto out;
+ }
+
+ rv = create_l2tpv3_ipv6_tunnel (lm,
+ (ip6_address_t *) mp->client_address,
+ (ip6_address_t *) mp->our_address,
+ ntohl(mp->local_session_id),
+ ntohl(mp->remote_session_id),
+ clib_net_to_host_u64(mp->local_cookie),
+ clib_net_to_host_u64(mp->remote_cookie),
+ mp->l2_sublayer_present,
+ &sw_if_index);
+
+out:
+ REPLY_MACRO2(VL_API_L2TPV3_CREATE_TUNNEL_REPLY,
+ ({
+ rmp->sw_if_index = ntohl (sw_if_index);
+ }))
+}
+
+static void vl_api_l2tpv3_set_tunnel_cookies_t_handler
+(vl_api_l2tpv3_set_tunnel_cookies_t *mp)
+{
+ vl_api_l2tpv3_set_tunnel_cookies_reply_t * rmp;
+ l2t_main_t *lm = &l2t_main;
+ int rv;
+
+ VALIDATE_SW_IF_INDEX(mp);
+
+ rv = l2tpv3_set_tunnel_cookies (lm, ntohl(mp->sw_if_index),
+ clib_net_to_host_u64(mp->new_local_cookie),
+ clib_net_to_host_u64(mp->new_remote_cookie));
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_L2TPV3_SET_TUNNEL_COOKIES_REPLY);
+}
+
+static void vl_api_l2tpv3_interface_enable_disable_t_handler
+(vl_api_l2tpv3_interface_enable_disable_t * mp)
+{
+ int rv;
+ vnet_main_t * vnm = vnet_get_main();
+ vl_api_l2tpv3_interface_enable_disable_reply_t * rmp;
+
+ VALIDATE_SW_IF_INDEX(mp);
+
+ rv = l2tpv3_interface_enable_disable
+ (vnm, ntohl(mp->sw_if_index), mp->enable_disable);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_L2TPV3_INTERFACE_ENABLE_DISABLE_REPLY);
+}
+
+static void vl_api_l2tpv3_set_lookup_key_t_handler
+(vl_api_l2tpv3_set_lookup_key_t * mp)
+{
+ int rv = 0;
+ l2t_main_t *lm = &l2t_main;
+ vl_api_l2tpv3_set_lookup_key_reply_t * rmp;
+
+ if (mp->key > L2T_LOOKUP_SESSION_ID) {
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ goto out;
+ }
+
+ lm->lookup_type = mp->key;
+
+out:
+ REPLY_MACRO (VL_API_L2TPV3_SET_LOOKUP_KEY_REPLY);
+}
+
+static void vl_api_vxlan_add_del_tunnel_t_handler
+(vl_api_vxlan_add_del_tunnel_t * mp)
+{
+ vl_api_vxlan_add_del_tunnel_reply_t * rmp;
+ int rv = 0;
+ vnet_vxlan_add_del_tunnel_args_t _a, *a = &_a;
+ u32 encap_fib_index;
+ uword * p;
+ ip4_main_t * im = &ip4_main;
+ u32 sw_if_index = ~0;
+
+ p = hash_get (im->fib_index_by_table_id, ntohl(mp->encap_vrf_id));
+ if (! p) {
+ rv = VNET_API_ERROR_NO_SUCH_FIB;
+ goto out;
+ }
+ encap_fib_index = p[0];
+
+ /* Check src & dst are different */
+ if ((mp->is_ipv6 && memcmp(mp->src_address, mp->dst_address, 16) == 0) ||
+ (!mp->is_ipv6 && memcmp(mp->src_address, mp->dst_address, 4) == 0)) {
+ rv = VNET_API_ERROR_SAME_SRC_DST;
+ goto out;
+ }
+ memset (a, 0, sizeof (*a));
+
+ a->is_add = mp->is_add;
+ a->is_ip6 = mp->is_ipv6;
+
+ /* ip addresses sent in network byte order */
+ if (a->is_ip6) {
+ memcpy(&(a->src.ip6), mp->src_address, 16);
+ memcpy(&(a->dst.ip6), mp->dst_address, 16);
+ } else {
+ memcpy(&(a->src.ip4), mp->src_address, 4);
+ memcpy(&(a->dst.ip4), mp->dst_address, 4);
+ }
+
+ a->encap_fib_index = encap_fib_index;
+ a->decap_next_index = ntohl(mp->decap_next_index);
+ a->vni = ntohl(mp->vni);
+ rv = vnet_vxlan_add_del_tunnel (a, &sw_if_index);
+
+out:
+ REPLY_MACRO2(VL_API_VXLAN_ADD_DEL_TUNNEL_REPLY,
+ ({
+ rmp->sw_if_index = ntohl (sw_if_index);
+ }));
+}
+
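+/* Format one vxlan_tunnel_details message; the addresses and the encap
+ * table id come from the ip4 or ip6 FIB depending on the tunnel's
+ * address family. */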
+static void send_vxlan_tunnel_details
+(vxlan_tunnel_t * t, unix_shared_memory_queue_t * q, u32 context)
+{
+ vl_api_vxlan_tunnel_details_t * rmp;
+ ip4_main_t * im4 = &ip4_main;
+ ip6_main_t * im6 = &ip6_main;
+ u8 is_ipv6 = !(t->flags & VXLAN_TUNNEL_IS_IPV4);
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ memset (rmp, 0, sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs(VL_API_VXLAN_TUNNEL_DETAILS);
+ if (is_ipv6) {
+ memcpy(rmp->src_address, &(t->src.ip6), 16);
+ memcpy(rmp->dst_address, &(t->dst.ip6), 16);
+ rmp->encap_vrf_id = htonl(im6->fibs[t->encap_fib_index].table_id);
+ } else {
+ memcpy(rmp->src_address, &(t->src.ip4), 4);
+ memcpy(rmp->dst_address, &(t->dst.ip4), 4);
+ rmp->encap_vrf_id = htonl(im4->fibs[t->encap_fib_index].table_id);
+ }
+ rmp->vni = htonl(t->vni);
+ rmp->decap_next_index = htonl(t->decap_next_index);
+ rmp->sw_if_index = htonl(t->sw_if_index);
+ rmp->is_ipv6 = is_ipv6;
+ rmp->context = context;
+
+ vl_msg_api_send_shmem (q, (u8 *)&rmp);
+}
+
+static void vl_api_vxlan_tunnel_dump_t_handler
+(vl_api_vxlan_tunnel_dump_t * mp)
+{
+ unix_shared_memory_queue_t * q;
+ vxlan_main_t * vxm = &vxlan_main;
+ vxlan_tunnel_t * t;
+ u32 sw_if_index;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0) {
+ return;
+ }
+
+ sw_if_index = ntohl(mp->sw_if_index);
+
+ if (~0 == sw_if_index) {
+ pool_foreach (t, vxm->tunnels,
+ ({
+ send_vxlan_tunnel_details(t, q, mp->context);
+ }));
+ } else {
+ if ((sw_if_index >= vec_len(vxm->tunnel_index_by_sw_if_index)) ||
+ (~0 == vxm->tunnel_index_by_sw_if_index[sw_if_index])) {
+ return;
+ }
+ t = &vxm->tunnels[vxm->tunnel_index_by_sw_if_index[sw_if_index]];
+ send_vxlan_tunnel_details(t, q, mp->context);
+ }
+}
+
+static void vl_api_gre_add_del_tunnel_t_handler
+(vl_api_gre_add_del_tunnel_t * mp)
+{
+ vl_api_gre_add_del_tunnel_reply_t * rmp;
+ int rv = 0;
+ vnet_gre_add_del_tunnel_args_t _a, *a = &_a;
+ u32 outer_table_id;
+ uword * p;
+ ip4_main_t * im = &ip4_main;
+ u32 sw_if_index = ~0;
+
+ p = hash_get (im->fib_index_by_table_id, ntohl(mp->outer_table_id));
+ if (! p) {
+ rv = VNET_API_ERROR_NO_SUCH_FIB;
+ goto out;
+ }
+ outer_table_id = p[0];
+
+ /* Check src & dst are different */
+ if (memcmp(&mp->src_address, &mp->dst_address, 4) == 0) {
+ rv = VNET_API_ERROR_SAME_SRC_DST;
+ goto out;
+ }
+
+ memset (a, 0, sizeof (*a));
+
+ a->is_add = mp->is_add;
+
+ /* ip addresses sent in network byte order */
+ a->src.as_u32 = mp->src_address;
+ a->dst.as_u32 = mp->dst_address;
+
+ a->outer_table_id = outer_table_id;
+ rv = vnet_gre_add_del_tunnel (a, &sw_if_index);
+
+out:
+ REPLY_MACRO2(VL_API_GRE_ADD_DEL_TUNNEL_REPLY,
+ ({
+ rmp->sw_if_index = ntohl (sw_if_index);
+ }));
+}
+
+static void send_gre_tunnel_details
+(gre_tunnel_t * t, unix_shared_memory_queue_t * q, u32 context)
+{
+ vl_api_gre_tunnel_details_t * rmp;
+ ip4_main_t * im = &ip4_main;
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ memset (rmp, 0, sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs(VL_API_GRE_TUNNEL_DETAILS);
+ rmp->src_address = t->tunnel_src.data_u32;
+ rmp->dst_address = t->tunnel_dst.data_u32;
+ rmp->outer_table_id = htonl(im->fibs[t->outer_fib_index].table_id);
+ rmp->sw_if_index = htonl(t->sw_if_index);
+ rmp->context = context;
+
+ vl_msg_api_send_shmem (q, (u8 *)&rmp);
+}
+
+static void vl_api_gre_tunnel_dump_t_handler
+(vl_api_gre_tunnel_dump_t * mp)
+{
+ unix_shared_memory_queue_t * q;
+ gre_main_t * gm = &gre_main;
+ gre_tunnel_t * t;
+ u32 sw_if_index;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0) {
+ return;
+ }
+
+ sw_if_index = ntohl(mp->sw_if_index);
+
+ if (~0 == sw_if_index) {
+ pool_foreach (t, gm->tunnels,
+ ({
+ send_gre_tunnel_details(t, q, mp->context);
+ }));
+ } else {
+ if ((sw_if_index >= vec_len(gm->tunnel_index_by_sw_if_index)) ||
+ (~0 == gm->tunnel_index_by_sw_if_index[sw_if_index])) {
+ return;
+ }
+ t = &gm->tunnels[gm->tunnel_index_by_sw_if_index[sw_if_index]];
+ send_gre_tunnel_details(t, q, mp->context);
+ }
+}
+
+static void
+vl_api_l2_patch_add_del_t_handler (vl_api_l2_patch_add_del_t *mp)
+{
+    extern int vnet_l2_patch_add_del (u32 rx_sw_if_index, u32 tx_sw_if_index,
+                                      int is_add);
+    vl_api_l2_patch_add_del_reply_t * rmp;
+    int rv = 0;
+
+ VALIDATE_RX_SW_IF_INDEX(mp);
+ VALIDATE_TX_SW_IF_INDEX(mp);
+
+ rv = vnet_l2_patch_add_del (ntohl(mp->rx_sw_if_index),
+ ntohl(mp->tx_sw_if_index),
+ (int)(mp->is_add != 0));
+
+ BAD_RX_SW_IF_INDEX_LABEL;
+ BAD_TX_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO(VL_API_L2_PATCH_ADD_DEL_REPLY);
+}
+
+static void
+vl_api_vxlan_gpe_add_del_tunnel_t_handler
+(vl_api_vxlan_gpe_add_del_tunnel_t * mp)
+{
+ vl_api_vxlan_gpe_add_del_tunnel_reply_t * rmp;
+ int rv = 0;
+ vnet_vxlan_gpe_add_del_tunnel_args_t _a, *a = &_a;
+ u32 encap_fib_index, decap_fib_index;
+ u8 protocol;
+ uword * p;
+ ip4_main_t * im = &ip4_main;
+ u32 sw_if_index = ~0;
+
+
+ p = hash_get (im->fib_index_by_table_id, ntohl(mp->encap_vrf_id));
+ if (! p) {
+ rv = VNET_API_ERROR_NO_SUCH_FIB;
+ goto out;
+ }
+ encap_fib_index = p[0];
+
+ protocol = mp->protocol;
+
+ /* Interpret decap_vrf_id as an opaque if sending to other-than-ip4-input */
+ if (protocol == VXLAN_GPE_INPUT_NEXT_IP4_INPUT) {
+ p = hash_get (im->fib_index_by_table_id, ntohl(mp->decap_vrf_id));
+ if (! p) {
+ rv = VNET_API_ERROR_NO_SUCH_INNER_FIB;
+ goto out;
+ }
+ decap_fib_index = p[0];
+ } else {
+ decap_fib_index = ntohl(mp->decap_vrf_id);
+ }
+
+ /* Check src & dst are different */
+ if ((mp->is_ipv6 && memcmp(mp->local, mp->remote, 16) == 0) ||
+ (!mp->is_ipv6 && memcmp(mp->local, mp->remote, 4) == 0)) {
+ rv = VNET_API_ERROR_SAME_SRC_DST;
+ goto out;
+ }
+ memset (a, 0, sizeof (*a));
+
+ a->is_add = mp->is_add;
+ a->is_ip6 = mp->is_ipv6;
+ /* ip addresses sent in network byte order */
+ if (a->is_ip6) {
+ clib_memcpy(&(a->local.ip6), mp->local, 16);
+ clib_memcpy(&(a->remote.ip6), mp->remote, 16);
+ } else {
+ clib_memcpy(&(a->local.ip4), mp->local, 4);
+ clib_memcpy(&(a->remote.ip4), mp->remote, 4);
+ }
+ a->encap_fib_index = encap_fib_index;
+ a->decap_fib_index = decap_fib_index;
+ a->protocol = protocol;
+ a->vni = ntohl(mp->vni);
+ rv = vnet_vxlan_gpe_add_del_tunnel (a, &sw_if_index);
+
+out:
+ REPLY_MACRO2(VL_API_VXLAN_GPE_ADD_DEL_TUNNEL_REPLY,
+ ({
+ rmp->sw_if_index = ntohl (sw_if_index);
+ }));
+}
+
+static void send_vxlan_gpe_tunnel_details
+(vxlan_gpe_tunnel_t * t, unix_shared_memory_queue_t * q, u32 context)
+{
+ vl_api_vxlan_gpe_tunnel_details_t * rmp;
+ ip4_main_t * im4 = &ip4_main;
+ ip6_main_t * im6 = &ip6_main;
+ u8 is_ipv6 = !(t->flags & VXLAN_GPE_TUNNEL_IS_IPV4);
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ memset (rmp, 0, sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs(VL_API_VXLAN_GPE_TUNNEL_DETAILS);
+ if (is_ipv6) {
+ memcpy(rmp->local, &(t->local.ip6), 16);
+ memcpy(rmp->remote, &(t->remote.ip6), 16);
+ rmp->encap_vrf_id = htonl(im6->fibs[t->encap_fib_index].table_id);
+ rmp->decap_vrf_id = htonl(im6->fibs[t->decap_fib_index].table_id);
+ } else {
+ memcpy(rmp->local, &(t->local.ip4), 4);
+ memcpy(rmp->remote, &(t->remote.ip4), 4);
+ rmp->encap_vrf_id = htonl(im4->fibs[t->encap_fib_index].table_id);
+ rmp->decap_vrf_id = htonl(im4->fibs[t->decap_fib_index].table_id);
+ }
+ rmp->vni = htonl(t->vni);
+ rmp->protocol = t->protocol;
+ rmp->sw_if_index = htonl(t->sw_if_index);
+ rmp->is_ipv6 = is_ipv6;
+ rmp->context = context;
+
+ vl_msg_api_send_shmem (q, (u8 *)&rmp);
+}
+
+static void vl_api_vxlan_gpe_tunnel_dump_t_handler
+(vl_api_vxlan_gpe_tunnel_dump_t * mp)
+{
+ unix_shared_memory_queue_t * q;
+ vxlan_gpe_main_t * vgm = &vxlan_gpe_main;
+ vxlan_gpe_tunnel_t * t;
+ u32 sw_if_index;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0) {
+ return;
+ }
+
+ sw_if_index = ntohl(mp->sw_if_index);
+
+ if (~0 == sw_if_index) {
+ pool_foreach (t, vgm->tunnels,
+ ({
+ send_vxlan_gpe_tunnel_details(t, q, mp->context);
+ }));
+ } else {
+ if ((sw_if_index >= vec_len(vgm->tunnel_index_by_sw_if_index)) ||
+ (~0 == vgm->tunnel_index_by_sw_if_index[sw_if_index])) {
+ return;
+ }
+ t = &vgm->tunnels[vgm->tunnel_index_by_sw_if_index[sw_if_index]];
+ send_vxlan_gpe_tunnel_details(t, q, mp->context);
+ }
+}
+
+static void
+vl_api_lisp_add_del_locator_set_t_handler(vl_api_lisp_add_del_locator_set_t *mp)
+{
+ vl_api_lisp_add_del_locator_set_reply_t *rmp;
+ int rv = 0;
+ vnet_lisp_add_del_locator_set_args_t _a, *a = &_a;
+ u32 ls_index = ~0;
+ u8 *locator_name = NULL;
+
+ memset(a, 0, sizeof(a[0]));
+
+ locator_name = format(0, "%s", mp->locator_set_name);
+
+ a->name = locator_name;
+ a->locators = NULL;
+ a->is_add = mp->is_add;
+ a->local = 1;
+
+ rv = vnet_lisp_add_del_locator_set(a, &ls_index);
+
+ vec_free(locator_name);
+
+ REPLY_MACRO(VL_API_LISP_ADD_DEL_LOCATOR_SET_REPLY);
+}
+
+static void
+vl_api_lisp_add_del_locator_t_handler(
+ vl_api_lisp_add_del_locator_t *mp)
+{
+ vl_api_lisp_add_del_locator_reply_t *rmp;
+ int rv = 0;
+ locator_t locator, *locators = NULL;
+ vnet_lisp_add_del_locator_set_args_t _a, *a = &_a;
+ u32 ls_index = ~0;
+ u8 *locator_name = NULL;
+
+ memset(&locator, 0, sizeof(locator));
+ memset(a, 0, sizeof(a[0]));
+
+ locator.sw_if_index = ntohl(mp->sw_if_index);
+ locator.priority = mp->priority;
+ locator.weight = mp->weight;
+ locator.local = 1;
+ vec_add1(locators, locator);
+
+ locator_name = format(0, "%s", mp->locator_set_name);
+
+ a->name = locator_name;
+ a->locators = locators;
+ a->is_add = mp->is_add;
+ a->local = 1;
+
+ rv = vnet_lisp_add_del_locator(a, NULL, &ls_index);
+
+ vec_free(locators);
+ vec_free(locator_name);
+
+ REPLY_MACRO(VL_API_LISP_ADD_DEL_LOCATOR_REPLY);
+}
+
+static void
+vl_api_lisp_add_del_local_eid_t_handler(
+ vl_api_lisp_add_del_local_eid_t *mp)
+{
+ vl_api_lisp_add_del_local_eid_reply_t *rmp;
+ lisp_cp_main_t * lcm = vnet_lisp_cp_get_main();
+ int rv = 0;
+ ip_prefix_t *prefp = NULL;
+ ip_address_t *ip_eid = NULL;
+ gid_address_t eid;
+ uword * p = NULL;
+ u32 locator_set_index = ~0, map_index = ~0;
+ vnet_lisp_add_del_mapping_args_t _a, *a = &_a;
+ u8 *name = NULL;
+ memset (a, 0, sizeof (a[0]));
+
+ prefp = &gid_address_ippref(&eid);
+ ip_eid = &ip_prefix_addr(prefp);
+ gid_address_type (&eid) = GID_ADDR_IP_PREFIX;
+
+ if (mp->is_ipv6) {
+ clib_memcpy(&ip_addr_v6(ip_eid), mp->ip_address,
+ sizeof(ip_addr_v6(ip_eid)));
+ ip_addr_version(ip_eid) = IP6;
+ } else {
+ clib_memcpy(&ip_addr_v4(ip_eid), mp->ip_address,
+ sizeof(ip_addr_v4(ip_eid)));
+ ip_addr_version(ip_eid) = IP4;
+ }
+ ip_prefix_len(prefp) = mp->prefix_len;
+
+ name = format(0, "%s", mp->locator_set_name);
+ p = hash_get_mem(lcm->locator_set_index_by_name, name);
+ if (!p) {
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ goto out;
+ }
+ locator_set_index = p[0];
+
+ /* XXX treat batch configuration */
+ a->is_add = mp->is_add;
+ gid_address_vni (&eid) = clib_net_to_host_u32 (mp->vni);
+ a->deid = eid;
+ a->locator_set_index = locator_set_index;
+ a->local = 1;
+ rv = vnet_lisp_add_del_local_mapping(a, &map_index);
+
+out:
+ vec_free(name);
+ gid_address_free (&a->deid);
+
+ REPLY_MACRO(VL_API_LISP_ADD_DEL_LOCAL_EID_REPLY);
+}
+
+static void
+vl_api_lisp_eid_table_add_del_map_t_handler(
+ vl_api_lisp_eid_table_add_del_map_t *mp)
+{
+ vl_api_lisp_eid_table_add_del_map_reply_t *rmp;
+ int rv = 0;
+ rv = vnet_lisp_eid_table_map (clib_net_to_host_u32 (mp->vni),
+ clib_net_to_host_u32 (mp->vrf), mp->is_add);
+ REPLY_MACRO(VL_API_LISP_EID_TABLE_ADD_DEL_MAP_REPLY)
+}
+
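+/* Decode the eid prefix and the source/destination locators from the
+ * request into the internal address representations. */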
+static void
+lisp_gpe_add_del_fwd_entry_set_address(
+ vl_api_lisp_gpe_add_del_fwd_entry_t *mp,
+ ip_address_t *slocator,
+ ip_address_t *dlocator,
+ gid_address_t *eid)
+{
+ ip_address_t *ip_eid = NULL;
+ ip_prefix_t *prefp = NULL;
+
+ prefp = &gid_address_ippref(eid);
+ ip_eid = &ip_prefix_addr(prefp);
+
+ if (mp->eid_is_ipv6) {
+ clib_memcpy(&ip_addr_v6(ip_eid), mp->eid_ip_address,
+ sizeof(ip_addr_v6(ip_eid)));
+ ip_addr_version(ip_eid) = IP6;
+ } else {
+ clib_memcpy(&ip_addr_v4(ip_eid), mp->eid_ip_address,
+ sizeof(ip_addr_v4(ip_eid)));
+ ip_addr_version(ip_eid) = IP4;
+ }
+ ip_prefix_len(prefp) = mp->eid_prefix_len;
+
+ if (mp->address_is_ipv6) {
+ clib_memcpy(&ip_addr_v6(slocator), mp->source_ip_address,
+ sizeof(ip_addr_v6(slocator)));
+ ip_addr_version(slocator) = IP6;
+ clib_memcpy(&ip_addr_v6(dlocator), mp->destination_ip_address,
+ sizeof(ip_addr_v6(dlocator)));
+ ip_addr_version(dlocator) = IP6;
+ } else {
+ clib_memcpy(&ip_addr_v4(slocator), mp->source_ip_address,
+ sizeof(ip_addr_v4(slocator)));
+ ip_addr_version(slocator) = IP4;
+ clib_memcpy(&ip_addr_v4(dlocator), mp->destination_ip_address,
+ sizeof(ip_addr_v4(dlocator)));
+ ip_addr_version(dlocator) = IP4;
+ }
+}
+
+static void
+vl_api_lisp_gpe_add_del_fwd_entry_t_handler(
+ vl_api_lisp_gpe_add_del_fwd_entry_t *mp)
+{
+ vl_api_lisp_gpe_add_del_fwd_entry_reply_t *rmp;
+ int rv = 0;
+ ip_address_t slocator, dlocator;
+ gid_address_t eid;
+ vnet_lisp_gpe_add_del_fwd_entry_args_t a;
+
+ lisp_gpe_add_del_fwd_entry_set_address(mp, &slocator, &dlocator, &eid);
+
+ memset (&a, 0, sizeof(a));
+
+ a.is_add = mp->is_add;
+ a.deid = eid;
+ a.slocator = slocator;
+ a.dlocator = dlocator;
+ rv = vnet_lisp_gpe_add_del_fwd_entry (&a, 0);
+
+ REPLY_MACRO(VL_API_LISP_GPE_ADD_DEL_FWD_ENTRY_REPLY);
+}
+
+static void
+vl_api_lisp_add_del_map_resolver_t_handler(
+ vl_api_lisp_add_del_map_resolver_t *mp)
+{
+ vl_api_lisp_add_del_map_resolver_reply_t *rmp;
+ int rv = 0;
+ ip_address_t *ip_addr = NULL;
+ vnet_lisp_add_del_map_resolver_args_t _a, * a = &_a;
+
+ a->is_add = mp->is_add;
+ ip_addr = &a->address;
+
+ if (mp->is_ipv6) {
+ clib_memcpy(&ip_addr_v6(ip_addr), mp->ip_address,
+ sizeof(ip_addr_v6(ip_addr)));
+ ip_addr_version(ip_addr) = IP6;
+ } else {
+ clib_memcpy(&ip_addr_v4(ip_addr), mp->ip_address,
+ sizeof(ip_addr_v4(ip_addr)));
+ ip_addr_version(ip_addr) = IP4;
+ }
+
+ rv = vnet_lisp_add_del_map_resolver (a);
+
+ REPLY_MACRO(VL_API_LISP_ADD_DEL_MAP_RESOLVER_REPLY);
+}
+
+static void
+vl_api_lisp_gpe_enable_disable_t_handler(
+ vl_api_lisp_gpe_enable_disable_t *mp)
+{
+ vl_api_lisp_gpe_enable_disable_reply_t *rmp;
+ int rv = 0;
+ vnet_lisp_gpe_enable_disable_args_t _a, * a = &_a;
+
+ a->is_en = mp->is_en;
+ vnet_lisp_gpe_enable_disable (a);
+
+ REPLY_MACRO(VL_API_LISP_GPE_ENABLE_DISABLE_REPLY);
+}
+
+static void
+vl_api_lisp_enable_disable_t_handler(
+ vl_api_lisp_enable_disable_t *mp)
+{
+ vl_api_lisp_enable_disable_reply_t *rmp;
+ int rv = 0;
+
+ vnet_lisp_enable_disable (mp->is_en);
+ REPLY_MACRO(VL_API_LISP_ENABLE_DISABLE_REPLY);
+}
+
+static void
+vl_api_lisp_gpe_add_del_iface_t_handler(
+ vl_api_lisp_gpe_add_del_iface_t *mp)
+{
+ vl_api_lisp_gpe_add_del_iface_reply_t *rmp;
+ int rv = 0;
+ vnet_lisp_gpe_add_del_iface_args_t _a, * a = &_a;
+
+ a->is_add = mp->is_add;
+ a->table_id = mp->table_id;
+ a->vni = mp->vni;
+ rv = vnet_lisp_gpe_add_del_iface (a, 0);
+
+ REPLY_MACRO(VL_API_LISP_GPE_ADD_DEL_IFACE_REPLY);
+}
+
+static void
+vl_api_lisp_pitr_set_locator_set_t_handler(
+ vl_api_lisp_pitr_set_locator_set_t *mp)
+{
+ vl_api_lisp_pitr_set_locator_set_reply_t *rmp;
+ int rv = 0;
+ u8 * ls_name = 0;
+
+ ls_name = format (0, "%s", mp->ls_name);
+ rv = vnet_lisp_pitr_set_locator_set (ls_name, mp->is_add);
+ vec_free (ls_name);
+
+ REPLY_MACRO(VL_API_LISP_PITR_SET_LOCATOR_SET_REPLY);
+}
+
+static void
+vl_api_lisp_add_del_map_request_itr_rlocs_t_handler
+(vl_api_lisp_add_del_map_request_itr_rlocs_t *mp)
+{
+ vl_api_lisp_add_del_map_request_itr_rlocs_reply_t *rmp;
+ int rv = 0;
+ u8 * locator_set_name = NULL;
+ vnet_lisp_add_del_mreq_itr_rloc_args_t _a, * a = &_a;
+
+ locator_set_name = format (0, "%s", mp->locator_set_name);
+
+ a->is_add = mp->is_add;
+ a->locator_set_name = locator_set_name;
+
+ rv = vnet_lisp_add_del_mreq_itr_rlocs(a);
+
+ vec_free(locator_set_name);
+
+ REPLY_MACRO(VL_API_LISP_ADD_DEL_MAP_REQUEST_ITR_RLOCS_REPLY);
+}
+
+/** Used for transferring locators via VPP API */
+typedef CLIB_PACKED(struct
+{
+ u8 is_ip4; /**< is locator an IPv4 address */
+ u8 addr[16]; /**< IPv4/IPv6 address */
+}) rloc_t;
+
+static void
+send_lisp_locator_set_details_set_address
+(vl_api_lisp_locator_set_details_t *rmp,
+ gid_address_t *gid_address)
+{
+ ip_prefix_t *ip_addr;
+
+ if (gid_address_type(gid_address) != GID_ADDR_IP_PREFIX) {
+ return;
+ }
+
+ ip_addr = &gid_address_ippref(gid_address);
+ rmp->prefix_len = ip_prefix_len(ip_addr);
+ rmp->is_ipv6 = ip_prefix_version(ip_addr);
+ ip_address_copy_addr(rmp->ip_address, &ip_prefix_addr(ip_addr));
+}
+
+static void
+vl_api_lisp_add_del_remote_mapping_t_handler (
+ vl_api_lisp_add_del_remote_mapping_t *mp)
+{
+ u32 i;
+ ip_address_t rloc, * rlocs = 0;
+ vl_api_lisp_add_del_remote_mapping_reply_t * rmp;
+ int rv = 0;
+ gid_address_t _seid, * seid = &_seid;
+ gid_address_t _deid, * deid = &_deid;
+ ip_prefix_t * seid_pref = &gid_address_ippref(seid);
+ ip_prefix_t * deid_pref = &gid_address_ippref(deid);
+
+ gid_address_type(seid) = GID_ADDR_IP_PREFIX;
+ gid_address_type(deid) = GID_ADDR_IP_PREFIX;
+ ip_address_t * seid_addr = &ip_prefix_addr(seid_pref);
+ ip_address_t * deid_addr = &ip_prefix_addr(deid_pref);
+ ip_prefix_len(seid_pref) = mp->seid_len;
+ ip_prefix_len(deid_pref) = mp->deid_len;
+ gid_address_vni (seid) = ntohl (mp->vni);
+ gid_address_vni (deid) = ntohl (mp->vni);
+
+ if (mp->eid_is_ip4) {
+ ip_prefix_version(seid_pref) = IP4;
+ ip_prefix_version(deid_pref) = IP4;
+ clib_memcpy (&ip_addr_v4(seid_addr),
+ mp->seid, sizeof (ip_addr_v4(seid_addr)));
+ clib_memcpy (&ip_addr_v4(deid_addr),
+ mp->deid, sizeof (ip_addr_v4(deid_addr)));
+ } else {
+ ip_prefix_version(seid_pref) = IP6;
+ ip_prefix_version(deid_pref) = IP6;
+ clib_memcpy (&ip_addr_v6(seid_addr),
+ mp->seid, sizeof (ip_addr_v6(seid_addr)));
+ clib_memcpy (&ip_addr_v6(deid_addr),
+ mp->deid, sizeof (ip_addr_v6(deid_addr)));
+ }
+
+ for (i = 0; i < mp->rloc_num; i++) {
+ rloc_t * r = &((rloc_t *) mp->rlocs)[i];
+ if (r->is_ip4) {
+            clib_memcpy (&ip_addr_v4(&rloc), &r->addr, sizeof (ip_addr_v4(&rloc)));
+ ip_addr_version (&rloc) = IP4;
+ } else {
+            clib_memcpy (&ip_addr_v6(&rloc), &r->addr, sizeof (ip_addr_v6(&rloc)));
+ ip_addr_version (&rloc) = IP6;
+ }
+ vec_add1 (rlocs, rloc);
+ }
+
+ rv = vnet_lisp_add_del_remote_mapping (deid, seid, rlocs, mp->action,
+ mp->is_add, mp->del_all);
+ vec_free (rlocs);
+    REPLY_MACRO(VL_API_LISP_ADD_DEL_REMOTE_MAPPING_REPLY);
+}
+
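+/*
+ * Emit one locator_set_details message per locator in the set. Local
+ * sets report their configured name and sw_if_index; remote sets get a
+ * synthesized "remote-<index>" name and the locator address instead.
+ */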
+static void
+send_lisp_locator_set_details (lisp_cp_main_t *lcm,
+ locator_set_t *lsit,
+ unix_shared_memory_queue_t *q,
+ u32 context,
+ u32 index)
+{
+ vl_api_lisp_locator_set_details_t *rmp;
+ locator_t *loc = NULL;
+ u32 * locit = NULL;
+ u8 * str = NULL;
+
+ vec_foreach (locit, lsit->locator_indices) {
+ loc = pool_elt_at_index (lcm->locator_pool, locit[0]);
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ memset (rmp, 0, sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs(VL_API_LISP_LOCATOR_SET_DETAILS);
+ rmp->local = lsit->local;
+ if (lsit->local) {
+ ASSERT(lsit->name != NULL);
+ strncpy((char *) rmp->locator_set_name,
+ (char *) lsit->name, ARRAY_LEN(rmp->locator_set_name) - 1);
+ rmp->sw_if_index = htonl(loc->sw_if_index);
+ } else {
+ str = format(0, "remote-%d", index);
+ strncpy((char *) rmp->locator_set_name, (char *) str,
+ ARRAY_LEN(rmp->locator_set_name) - 1);
+ send_lisp_locator_set_details_set_address(rmp, &loc->address);
+
+ vec_free(str);
+ }
+ rmp->priority = loc->priority;
+ rmp->weight = loc->weight;
+ rmp->context = context;
+
+ vl_msg_api_send_shmem (q, (u8 *)&rmp);
+ }
+}
+
+static void
+vl_api_lisp_locator_set_dump_t_handler (vl_api_lisp_locator_set_dump_t *mp)
+{
+ unix_shared_memory_queue_t * q = NULL;
+ lisp_cp_main_t * lcm = vnet_lisp_cp_get_main();
+ locator_set_t * lsit = NULL;
+ u32 index;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0) {
+ return;
+ }
+
+ index = 0;
+ pool_foreach (lsit, lcm->locator_set_pool,
+ ({
+ send_lisp_locator_set_details(lcm, lsit, q, mp->context, index++);
+ }));
+}
+
+static void
+send_lisp_local_eid_table_details (mapping_t *mapit,
+ unix_shared_memory_queue_t *q,
+ u32 context)
+{
+ vl_api_lisp_local_eid_table_details_t *rmp = NULL;
+ lisp_cp_main_t * lcm = vnet_lisp_cp_get_main();
+ locator_set_t *ls = NULL;
+ gid_address_t *gid = NULL;
+ ip_prefix_t *ip_prefix = NULL;
+ u8 * str = NULL;
+ u8 type = ~0;
+
+ ls = pool_elt_at_index (lcm->locator_set_pool,
+ mapit->locator_set_index);
+
+ gid = &mapit->eid;
+ type = gid_address_type(gid);
+
+ if (type != GID_ADDR_IP_PREFIX) {
+ return;
+ }
+
+ ip_prefix = &gid_address_ippref(gid);
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ memset (rmp, 0, sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs(VL_API_LISP_LOCAL_EID_TABLE_DETAILS);
+ if (ls->local) {
+ ASSERT(ls->name != NULL);
+ strncpy((char *) rmp->locator_set_name,
+ (char *) ls->name, ARRAY_LEN(rmp->locator_set_name) - 1);
+ } else {
+ str = format(0, "remote-%d", mapit->locator_set_index);
+ strncpy((char *) rmp->locator_set_name, (char *) str,
+ ARRAY_LEN(rmp->locator_set_name) - 1);
+ vec_free(str);
+ }
+
+ switch (ip_prefix_version(ip_prefix)) {
+ case IP4:
+ rmp->eid_is_ipv6 = 0;
+ clib_memcpy(rmp->eid_ip_address, &ip_prefix_v4(ip_prefix),
+ sizeof(ip_prefix_v4(ip_prefix)));
+ break;
+
+ case IP6:
+ rmp->eid_is_ipv6 = 1;
+ clib_memcpy(rmp->eid_ip_address, &ip_prefix_v6(ip_prefix),
+ sizeof(ip_prefix_v6(ip_prefix)));
+ break;
+
+ default:
+ ASSERT(0);
+ }
+ rmp->eid_prefix_len = ip_prefix_len(ip_prefix);
+ rmp->context = context;
+ rmp->vni = clib_host_to_net_u32 (gid_address_vni (gid));
+ vl_msg_api_send_shmem (q, (u8 *)&rmp);
+}
+
+static void
+vl_api_lisp_local_eid_table_dump_t_handler (
+ vl_api_lisp_local_eid_table_dump_t *mp)
+{
+ unix_shared_memory_queue_t * q = NULL;
+ lisp_cp_main_t * lcm = vnet_lisp_cp_get_main();
+ mapping_t * mapit = NULL;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0) {
+ return;
+ }
+
+ pool_foreach (mapit, lcm->mapping_pool,
+ ({
+ send_lisp_local_eid_table_details(mapit, q, mp->context);
+ }));
+}
+
+static void
+send_lisp_gpe_tunnel_details (lisp_gpe_tunnel_t *tunnel,
+ unix_shared_memory_queue_t *q,
+ u32 context)
+{
+ vl_api_lisp_gpe_tunnel_details_t *rmp;
+ lisp_gpe_main_t * lgm = &lisp_gpe_main;
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ memset (rmp, 0, sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs(VL_API_LISP_GPE_TUNNEL_DETAILS);
+
+ rmp->tunnels = tunnel - lgm->tunnels;
+
+ rmp->is_ipv6 = ip_addr_version(&tunnel->src) == IP6 ? 1 : 0;
+ ip_address_copy_addr(rmp->source_ip, &tunnel->src);
+ ip_address_copy_addr(rmp->destination_ip, &tunnel->dst);
+
+ rmp->encap_fib_id = htonl(tunnel->encap_fib_index);
+ rmp->decap_fib_id = htonl(tunnel->decap_fib_index);
+ rmp->dcap_next = htonl(tunnel->decap_next_index);
+ rmp->lisp_ver = tunnel->ver_res;
+ rmp->next_protocol = tunnel->next_protocol;
+ rmp->flags = tunnel->flags;
+ rmp->ver_res = tunnel->ver_res;
+ rmp->res = tunnel->res;
+ rmp->iid = htonl(tunnel->vni);
+ rmp->context = context;
+
+ vl_msg_api_send_shmem (q, (u8 *)&rmp);
+}
+
+static void
+vl_api_lisp_gpe_tunnel_dump_t_handler (
+ vl_api_lisp_gpe_tunnel_dump_t *mp)
+{
+ unix_shared_memory_queue_t * q = NULL;
+ lisp_gpe_main_t * lgm = &lisp_gpe_main;
+ lisp_gpe_tunnel_t * tunnel = NULL;
+
+ if (pool_elts(lgm->tunnels) == 0) {
+ return;
+ }
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0) {
+ return;
+ }
+
+ pool_foreach(tunnel, lgm->tunnels,
+ ({
+ send_lisp_gpe_tunnel_details(tunnel, q, mp->context);
+ }));
+}
+
+static void
+send_lisp_map_resolver_details (ip_address_t *ip,
+ unix_shared_memory_queue_t *q,
+ u32 context)
+{
+ vl_api_lisp_map_resolver_details_t *rmp = NULL;
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ memset (rmp, 0, sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs(VL_API_LISP_MAP_RESOLVER_DETAILS);
+
+ switch (ip_addr_version(ip)) {
+ case IP4:
+ rmp->is_ipv6 = 0;
+ clib_memcpy(rmp->ip_address, &ip_addr_v4(ip), sizeof(ip_addr_v4(ip)));
+ break;
+
+ case IP6:
+ rmp->is_ipv6 = 1;
+ clib_memcpy(rmp->ip_address, &ip_addr_v6(ip), sizeof(ip_addr_v6(ip)));
+ break;
+
+ default:
+ ASSERT(0);
+ }
+ rmp->context = context;
+
+ vl_msg_api_send_shmem (q, (u8 *)&rmp);
+}
+
+static void
+vl_api_lisp_map_resolver_dump_t_handler (
+ vl_api_lisp_map_resolver_dump_t *mp)
+{
+ unix_shared_memory_queue_t * q = NULL;
+ lisp_cp_main_t * lcm = vnet_lisp_cp_get_main();
+ ip_address_t *ip = NULL;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0) {
+ return;
+ }
+
+ vec_foreach(ip, lcm->map_resolvers) {
+ send_lisp_map_resolver_details(ip, q, mp->context);
+ }
+
+}
+
+static void
+vl_api_lisp_eid_table_map_dump_t_handler (
+ vl_api_lisp_eid_table_map_dump_t *mp)
+{
+ unix_shared_memory_queue_t * q = NULL;
+ lisp_cp_main_t * lcm = vnet_lisp_cp_get_main();
+ hash_pair_t * p;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0) {
+ return;
+ }
+    hash_foreach_pair (p, lcm->table_id_by_vni, {
+        vl_api_lisp_eid_table_map_details_t * rmp;
+        rmp = vl_msg_api_alloc (sizeof (*rmp));
+        memset (rmp, 0, sizeof (*rmp));
+        rmp->_vl_msg_id = ntohs(VL_API_LISP_EID_TABLE_MAP_DETAILS);
+        rmp->vni = clib_host_to_net_u32 (p->key);
+        rmp->vrf = clib_host_to_net_u32 (p->value[0]);
+        rmp->context = mp->context;
+        vl_msg_api_send_shmem (q, (u8 *)&rmp);
+    });
+}
+
+static void
+send_lisp_enable_disable_details (unix_shared_memory_queue_t *q,
+ u32 context)
+{
+ vl_api_lisp_enable_disable_status_details_t *rmp = NULL;
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ memset (rmp, 0, sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs(VL_API_LISP_ENABLE_DISABLE_STATUS_DETAILS);
+
+ rmp->gpe_status = vnet_lisp_gpe_enable_disable_status ();
+ rmp->feature_status = vnet_lisp_enable_disable_status ();
+ rmp->context = context;
+
+ vl_msg_api_send_shmem (q, (u8 *)&rmp);
+}
+
+static void
+vl_api_lisp_enable_disable_status_dump_t_handler
+(vl_api_lisp_enable_disable_status_dump_t *mp)
+{
+ unix_shared_memory_queue_t * q = NULL;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0) {
+ return;
+ }
+
+ send_lisp_enable_disable_details(q, mp->context);
+}
+
+static void
+vl_api_lisp_get_map_request_itr_rlocs_t_handler (
+ vl_api_lisp_get_map_request_itr_rlocs_t *mp)
+{
+ unix_shared_memory_queue_t * q = NULL;
+ vl_api_lisp_get_map_request_itr_rlocs_reply_t *rmp = NULL;
+ lisp_cp_main_t * lcm = vnet_lisp_cp_get_main();
+ locator_set_t * loc_set = 0;
+ u8 * tmp_str = 0;
+ int rv = 0;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0) {
+ return;
+ }
+
+ if (~0 == lcm->mreq_itr_rlocs) {
+ tmp_str = format(0, " ");
+ } else {
+ loc_set = pool_elt_at_index (lcm->locator_set_pool, lcm->mreq_itr_rlocs);
+ tmp_str = format(0, "%s", loc_set->name);
+ }
+
+ REPLY_MACRO2(VL_API_LISP_GET_MAP_REQUEST_ITR_RLOCS_REPLY,
+ ({
+ strncpy((char *) rmp->locator_set_name, (char *) tmp_str,
+ ARRAY_LEN(rmp->locator_set_name) - 1);
+ }));
+
+ vec_free(tmp_str);
+}
+
+static void
+vl_api_interface_name_renumber_t_handler (vl_api_interface_name_renumber_t *mp)
+{
+ vl_api_interface_name_renumber_reply_t * rmp;
+ int rv = 0;
+
+ VALIDATE_SW_IF_INDEX(mp);
+
+ rv = vnet_interface_name_renumber
+ (ntohl(mp->sw_if_index), ntohl(mp->new_show_dev_instance));
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO(VL_API_INTERFACE_NAME_RENUMBER_REPLY);
+}
+
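+/*
+ * De-duplicate ARP events: return 1 to drop the event when the pool
+ * index is stale, or when the same MAC is seen on the same interface
+ * and either no IP address is involved or the same address repeats
+ * within a 10 second window; otherwise record the update and return 0
+ * so the event is delivered.
+ */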
+static int arp_change_data_callback (u32 pool_index, u8 * new_mac,
+ u32 sw_if_index, u32 address)
+{
+ vpe_api_main_t * am = &vpe_api_main;
+ vlib_main_t * vm = am->vlib_main;
+ vl_api_ip4_arp_event_t * event;
+ static f64 arp_event_last_time;
+ f64 now = vlib_time_now (vm);
+
+ if (pool_is_free_index (am->arp_events, pool_index))
+ return 1;
+
+ event = pool_elt_at_index (am->arp_events, pool_index);
+ if (memcmp (&event->new_mac, new_mac, sizeof (event->new_mac))) {
+ clib_memcpy (event->new_mac, new_mac, sizeof(event->new_mac));
+ } else { /* same mac */
+ if ((sw_if_index == event->sw_if_index) &&
+ ((address == 0) ||
+ /* for BD case, also check IP address with 10 sec timeout */
+ ((address == event->address) &&
+ ((now - arp_event_last_time) < 10.0))))
+ return 1;
+ }
+
+ arp_event_last_time = now;
+ event->sw_if_index = sw_if_index;
+ if (address) event->address = address;
+ return 0;
+}
+
+static int arp_change_delete_callback (u32 pool_index, u8 * notused)
+{
+ vpe_api_main_t * am = &vpe_api_main;
+
+ if (pool_is_free_index (am->arp_events, pool_index))
+ return 1;
+
+ pool_put_index (am->arp_events, pool_index);
+ return 0;
+}
+
+static void
+vl_api_want_ip4_arp_events_t_handler
+(vl_api_want_ip4_arp_events_t * mp)
+{
+ vpe_api_main_t * am = &vpe_api_main;
+ vnet_main_t * vnm = vnet_get_main();
+ vl_api_want_ip4_arp_events_reply_t *rmp;
+ vl_api_ip4_arp_event_t * event;
+ int rv;
+
+ if (mp->enable_disable) {
+ pool_get (am->arp_events, event);
+ memset (event, 0, sizeof (*event));
+
+ event->_vl_msg_id = ntohs(VL_API_IP4_ARP_EVENT);
+ event->client_index = mp->client_index;
+ event->context = mp->context;
+ event->address = mp->address;
+ event->pid = mp->pid;
+
+ rv = vnet_add_del_ip4_arp_change_event
+ (vnm, arp_change_data_callback,
+ mp->pid,
+ &mp->address /* addr, in net byte order */,
+ vpe_resolver_process_node.index,
+ IP4_ARP_EVENT, event - am->arp_events, 1 /* is_add */);
+ } else {
+ rv = vnet_add_del_ip4_arp_change_event
+ (vnm, arp_change_delete_callback,
+ mp->pid,
+ &mp->address /* addr, in net byte order */,
+ vpe_resolver_process_node.index,
+ IP4_ARP_EVENT, ~0 /* pool index */, 0 /* is_add */);
+ }
+ REPLY_MACRO(VL_API_WANT_IP4_ARP_EVENTS_REPLY);
+}
+
+static void vl_api_input_acl_set_interface_t_handler
+(vl_api_input_acl_set_interface_t * mp)
+{
+ vlib_main_t *vm = vlib_get_main();
+ vl_api_input_acl_set_interface_reply_t * rmp;
+ int rv;
+ u32 sw_if_index, ip4_table_index, ip6_table_index, l2_table_index;
+
+ ip4_table_index = ntohl(mp->ip4_table_index);
+ ip6_table_index = ntohl(mp->ip6_table_index);
+ l2_table_index = ntohl(mp->l2_table_index);
+ sw_if_index = ntohl(mp->sw_if_index);
+
+ VALIDATE_SW_IF_INDEX(mp);
+
+ rv = vnet_set_input_acl_intfc (vm, sw_if_index, ip4_table_index,
+ ip6_table_index, l2_table_index,
+ mp->is_add);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO(VL_API_INPUT_ACL_SET_INTERFACE_REPLY);
+}
+
+static void vl_api_ipsec_spd_add_del_t_handler
+(vl_api_ipsec_spd_add_del_t * mp)
+{
+    vl_api_ipsec_spd_add_del_reply_t * rmp;
+    int rv;
+
+#if IPSEC == 0
+    /* always reply, even when IPsec support is compiled out,
+       so clients do not block waiting for a response */
+    clib_warning ("unimplemented");
+    rv = VNET_API_ERROR_UNIMPLEMENTED;
+#else
+    vlib_main_t *vm __attribute__((unused)) = vlib_get_main();
+
+#if DPDK > 0
+    rv = ipsec_add_del_spd (vm, ntohl(mp->spd_id), mp->is_add);
+#else
+    rv = VNET_API_ERROR_UNIMPLEMENTED;
+#endif
+#endif
+
+    REPLY_MACRO(VL_API_IPSEC_SPD_ADD_DEL_REPLY);
+}
+
+static void vl_api_ipsec_interface_add_del_spd_t_handler
+(vl_api_ipsec_interface_add_del_spd_t * mp)
+{
+ vlib_main_t *vm __attribute__((unused)) = vlib_get_main();
+ vl_api_ipsec_interface_add_del_spd_reply_t * rmp;
+ int rv;
+ u32 sw_if_index __attribute__((unused));
+ u32 spd_id __attribute__((unused));
+
+ sw_if_index = ntohl(mp->sw_if_index);
+ spd_id = ntohl(mp->spd_id);
+
+ VALIDATE_SW_IF_INDEX(mp);
+
+#if IPSEC > 0
+ rv = ipsec_set_interface_spd(vm, sw_if_index, spd_id, mp->is_add);
+#else
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+#endif
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO(VL_API_IPSEC_INTERFACE_ADD_DEL_SPD_REPLY);
+}
+
+static void vl_api_ipsec_spd_add_del_entry_t_handler
+(vl_api_ipsec_spd_add_del_entry_t * mp)
+{
+ vlib_main_t *vm __attribute__((unused)) = vlib_get_main();
+ vl_api_ipsec_spd_add_del_entry_reply_t * rmp;
+ int rv;
+
+#if IPSEC > 0
+ ipsec_policy_t p;
+
+ memset(&p, 0, sizeof(p));
+
+ p.id = ntohl(mp->spd_id);
+ p.priority = ntohl(mp->priority);
+ p.is_outbound = mp->is_outbound;
+ p.is_ipv6 = mp->is_ipv6;
+
+ clib_memcpy(&p.raddr.start, mp->remote_address_start, 16);
+ clib_memcpy(&p.raddr.stop, mp->remote_address_stop, 16);
+ clib_memcpy(&p.laddr.start, mp->local_address_start, 16);
+ clib_memcpy(&p.laddr.stop, mp->local_address_stop, 16);
+
+ p.protocol = mp->protocol;
+ p.rport.start = ntohs(mp->remote_port_start);
+ p.rport.stop = ntohs(mp->remote_port_stop);
+ p.lport.start = ntohs(mp->local_port_start);
+ p.lport.stop = ntohs(mp->local_port_stop);
+ /* policy action resolve unsupported */
+ if (mp->policy == IPSEC_POLICY_ACTION_RESOLVE) {
+ clib_warning("unsupported action: 'resolve'");
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+ goto out;
+ }
+ p.policy = mp->policy;
+ p.sa_id = ntohl(mp->sa_id);
+
+ rv = ipsec_add_del_policy(vm, &p, mp->is_add);
+ if (rv)
+ goto out;
+
+ if (mp->is_ip_any) {
+ p.is_ipv6 = 1;
+ rv = ipsec_add_del_policy(vm, &p, mp->is_add);
+ }
+#else
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+ goto out;
+#endif
+
+out:
+ REPLY_MACRO(VL_API_IPSEC_SPD_ADD_DEL_ENTRY_REPLY);
+}
+
+static void vl_api_ipsec_sad_add_del_entry_t_handler
+(vl_api_ipsec_sad_add_del_entry_t * mp)
+{
+ vlib_main_t *vm __attribute__((unused)) = vlib_get_main();
+ vl_api_ipsec_sad_add_del_entry_reply_t * rmp;
+ int rv;
+#if IPSEC > 0
+ ipsec_sa_t sa;
+
+ memset(&sa, 0, sizeof(sa));
+
+ sa.id = ntohl(mp->sad_id);
+ sa.spi = ntohl(mp->spi);
+ /* security protocol AH unsupported */
+ if (mp->protocol == IPSEC_PROTOCOL_AH) {
+ clib_warning("unsupported security protocol 'AH'");
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+ goto out;
+ }
+ sa.protocol = mp->protocol;
+ /* check for unsupported crypto-alg */
+ if (mp->crypto_algorithm < IPSEC_CRYPTO_ALG_AES_CBC_128 ||
+ mp->crypto_algorithm > IPSEC_CRYPTO_ALG_AES_CBC_256) {
+ clib_warning("unsupported crypto-alg: '%U'", format_ipsec_crypto_alg,
+ mp->crypto_algorithm);
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+ goto out;
+ }
+ sa.crypto_alg = mp->crypto_algorithm;
+ sa.crypto_key_len = mp->crypto_key_length;
+ clib_memcpy(&sa.crypto_key, mp->crypto_key, sizeof(sa.crypto_key));
+ /* check for unsupported integ-alg */
+ if (mp->integrity_algorithm < IPSEC_INTEG_ALG_SHA1_96 ||
+ mp->integrity_algorithm > IPSEC_INTEG_ALG_SHA_512_256) {
+ clib_warning("unsupported integ-alg: '%U'", format_ipsec_integ_alg,
+ mp->integrity_algorithm);
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+ goto out;
+ }
+ sa.integ_alg = mp->integrity_algorithm;
+ sa.integ_key_len = mp->integrity_key_length;
+ clib_memcpy(&sa.integ_key, mp->integrity_key, sizeof(sa.integ_key));
+ sa.use_esn = mp->use_extended_sequence_number;
+ sa.is_tunnel = mp->is_tunnel;
+ sa.is_tunnel_ip6 = mp->is_tunnel_ipv6;
+ clib_memcpy(&sa.tunnel_src_addr, mp->tunnel_src_address, 16);
+ clib_memcpy(&sa.tunnel_dst_addr, mp->tunnel_dst_address, 16);
+
+ rv = ipsec_add_del_sa(vm, &sa, mp->is_add);
+#else
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+ goto out;
+#endif
+
+out:
+ REPLY_MACRO(VL_API_IPSEC_SAD_ADD_DEL_ENTRY_REPLY);
+}
+
+static void
+vl_api_ikev2_profile_add_del_t_handler
+(vl_api_ikev2_profile_add_del_t * mp)
+{
+ vl_api_ikev2_profile_add_del_reply_t * rmp;
+ int rv = 0;
+
+#if IPSEC > 0
+ vlib_main_t * vm = vlib_get_main();
+ clib_error_t * error;
+ u8 * tmp = format(0, "%s", mp->name);
+ error = ikev2_add_del_profile(vm, tmp, mp->is_add);
+ vec_free (tmp);
+ if (error)
+ rv = VNET_API_ERROR_UNSPECIFIED;
+#else
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+#endif
+
+ REPLY_MACRO(VL_API_IKEV2_PROFILE_ADD_DEL_REPLY);
+}
+
+static void
+vl_api_ikev2_profile_set_auth_t_handler
+(vl_api_ikev2_profile_set_auth_t * mp)
+{
+ vl_api_ikev2_profile_set_auth_reply_t * rmp;
+ int rv = 0;
+
+#if IPSEC > 0
+ vlib_main_t * vm = vlib_get_main();
+ clib_error_t * error;
+ u8 * tmp = format(0, "%s", mp->name);
+ u8 * data = vec_new (u8, mp->data_len);
+ clib_memcpy(data, mp->data, mp->data_len);
+ error = ikev2_set_profile_auth(vm, tmp, mp->auth_method, data, mp->is_hex);
+ vec_free (tmp);
+ vec_free (data);
+ if (error)
+ rv = VNET_API_ERROR_UNSPECIFIED;
+#else
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+#endif
+
+ REPLY_MACRO(VL_API_IKEV2_PROFILE_SET_AUTH_REPLY);
+}
+
+static void
+vl_api_ikev2_profile_set_id_t_handler
+(vl_api_ikev2_profile_set_id_t * mp)
+{
+    vl_api_ikev2_profile_set_id_reply_t * rmp;
+ int rv = 0;
+
+#if IPSEC > 0
+ vlib_main_t * vm = vlib_get_main();
+ clib_error_t * error;
+ u8 * tmp = format(0, "%s", mp->name);
+ u8 * data = vec_new (u8, mp->data_len);
+ clib_memcpy(data, mp->data, mp->data_len);
+ error = ikev2_set_profile_id(vm, tmp, mp->id_type, data, mp->is_local);
+ vec_free (tmp);
+ vec_free (data);
+ if (error)
+ rv = VNET_API_ERROR_UNSPECIFIED;
+#else
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+#endif
+
+ REPLY_MACRO(VL_API_IKEV2_PROFILE_SET_ID_REPLY);
+}
+
+static void
+vl_api_ikev2_profile_set_ts_t_handler
+(vl_api_ikev2_profile_set_ts_t * mp)
+{
+ vl_api_ikev2_profile_set_ts_reply_t * rmp;
+ int rv = 0;
+
+#if IPSEC > 0
+ vlib_main_t * vm = vlib_get_main();
+ clib_error_t * error;
+ u8 * tmp = format(0, "%s", mp->name);
+ error = ikev2_set_profile_ts(vm, tmp, mp->proto, mp->start_port,
+ mp->end_port, (ip4_address_t) mp->start_addr,
+ (ip4_address_t) mp->end_addr, mp->is_local);
+ vec_free (tmp);
+ if (error)
+ rv = VNET_API_ERROR_UNSPECIFIED;
+#else
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+#endif
+
+ REPLY_MACRO(VL_API_IKEV2_PROFILE_SET_TS_REPLY);
+}
+
+static void
+vl_api_ikev2_set_local_key_t_handler
+(vl_api_ikev2_set_local_key_t * mp)
+{
+    vl_api_ikev2_set_local_key_reply_t * rmp;
+ int rv = 0;
+
+#if IPSEC > 0
+ vlib_main_t * vm = vlib_get_main();
+ clib_error_t * error;
+
+ error = ikev2_set_local_key(vm, mp->key_file);
+ if (error)
+ rv = VNET_API_ERROR_UNSPECIFIED;
+#else
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+#endif
+
+ REPLY_MACRO(VL_API_IKEV2_SET_LOCAL_KEY_REPLY);
+}
+
+static void
+vl_api_map_add_domain_t_handler
+(vl_api_map_add_domain_t * mp)
+{
+ vl_api_map_add_domain_reply_t * rmp;
+ int rv = 0;
+ u32 index;
+ u8 flags = mp->is_translation ? MAP_DOMAIN_TRANSLATION : 0;
+ rv = map_create_domain((ip4_address_t *)&mp->ip4_prefix, mp->ip4_prefix_len,
+ (ip6_address_t *)&mp->ip6_prefix, mp->ip6_prefix_len,
+ (ip6_address_t *)&mp->ip6_src, mp->ip6_src_prefix_len,
+ mp->ea_bits_len, mp->psid_offset, mp->psid_length, &index, ntohs(mp->mtu), flags);
+
+ REPLY_MACRO2(VL_API_MAP_ADD_DOMAIN_REPLY,
+ ({
+ rmp->index = ntohl(index);
+ }));
+}
+
+static void
+vl_api_map_del_domain_t_handler
+(vl_api_map_del_domain_t * mp)
+{
+ vl_api_map_del_domain_reply_t * rmp;
+ int rv = 0;
+
+ rv = map_delete_domain(ntohl(mp->index));
+
+ REPLY_MACRO(VL_API_MAP_DEL_DOMAIN_REPLY);
+}
+
+static void
+vl_api_map_add_del_rule_t_handler
+(vl_api_map_add_del_rule_t * mp)
+{
+ vl_api_map_del_domain_reply_t * rmp;
+ int rv = 0;
+
+ rv = map_add_del_psid(ntohl(mp->index), ntohs(mp->psid), (ip6_address_t *)mp->ip6_dst, mp->is_add);
+
+ REPLY_MACRO(VL_API_MAP_ADD_DEL_RULE_REPLY);
+}
+
+static void
+vl_api_map_domain_dump_t_handler
+(vl_api_map_domain_dump_t * mp)
+{
+ vl_api_map_domain_details_t * rmp;
+ map_main_t *mm = &map_main;
+ map_domain_t *d;
+ unix_shared_memory_queue_t * q;
+
+ if (pool_elts (mm->domains) == 0)
+ return;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0) {
+ return;
+ }
+
+ pool_foreach(d, mm->domains, ({
+    /* Make sure every field is initialized (or else don't skip the memset()) */
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs(VL_API_MAP_DOMAIN_DETAILS);
+ rmp->domain_index = htonl(d - mm->domains);
+ rmp->ea_bits_len = d->ea_bits_len;
+ rmp->psid_offset = d->psid_offset;
+ rmp->psid_length = d->psid_length;
+ clib_memcpy(rmp->ip4_prefix, &d->ip4_prefix, sizeof(rmp->ip4_prefix));
+ rmp->ip4_prefix_len = d->ip4_prefix_len;
+ clib_memcpy(rmp->ip6_prefix, &d->ip6_prefix, sizeof(rmp->ip6_prefix));
+ rmp->ip6_prefix_len = d->ip6_prefix_len;
+ clib_memcpy(rmp->ip6_src, &d->ip6_src, sizeof(rmp->ip6_src));
+ rmp->ip6_src_len = d->ip6_src_len;
+ rmp->mtu = htons(d->mtu);
+ rmp->is_translation = (d->flags & MAP_DOMAIN_TRANSLATION);
+ rmp->context = mp->context;
+
+ vl_msg_api_send_shmem (q, (u8 *)&rmp);
+ }));
+}
+
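+/* Walk all 2^psid_length rule slots of the domain and send details
+ * only for slots with a non-zero ip6 destination. */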
+static void
+vl_api_map_rule_dump_t_handler
+(vl_api_map_rule_dump_t * mp)
+{
+ unix_shared_memory_queue_t * q;
+ u16 i;
+ ip6_address_t dst;
+ vl_api_map_rule_details_t * rmp;
+ map_main_t *mm = &map_main;
+ u32 domain_index = ntohl(mp->domain_index);
+ map_domain_t *d;
+
+ if (pool_elts (mm->domains) == 0)
+ return;
+
+ d = pool_elt_at_index(mm->domains, domain_index);
+ if (!d || !d->rules) {
+ return;
+ }
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0) {
+ return;
+ }
+
+ for (i = 0; i < (0x1 << d->psid_length); i++) {
+ dst = d->rules[i];
+ if (dst.as_u64[0] == 0 && dst.as_u64[1] == 0) {
+ continue;
+ }
+ rmp = vl_msg_api_alloc(sizeof(*rmp));
+ memset(rmp, 0, sizeof(*rmp));
+ rmp->_vl_msg_id = ntohs(VL_API_MAP_RULE_DETAILS);
+ rmp->psid = htons(i);
+ clib_memcpy(rmp->ip6_dst, &dst, sizeof(rmp->ip6_dst));
+ rmp->context = mp->context;
+ vl_msg_api_send_shmem(q, (u8 *)&rmp);
+ }
+}
+
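+/*
+ * Sum packets and bytes over every MAP domain combined counter, under
+ * the domain counter lock; all totals in the reply are in network byte
+ * order.
+ */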
+static void
+vl_api_map_summary_stats_t_handler (
+ vl_api_map_summary_stats_t *mp)
+{
+ vl_api_map_summary_stats_reply_t *rmp;
+ vlib_combined_counter_main_t *cm;
+ vlib_counter_t v;
+ int i, which;
+ u64 total_pkts[VLIB_N_RX_TX];
+ u64 total_bytes[VLIB_N_RX_TX];
+ map_main_t *mm = &map_main;
+ unix_shared_memory_queue_t *q =
+ vl_api_client_index_to_input_queue(mp->client_index);
+
+ if (!q)
+ return;
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs(VL_API_MAP_SUMMARY_STATS_REPLY);
+ rmp->context = mp->context;
+ rmp->retval = 0;
+
+ memset (total_pkts, 0, sizeof (total_pkts));
+ memset (total_bytes, 0, sizeof (total_bytes));
+
+ map_domain_counter_lock (mm);
+ vec_foreach(cm, mm->domain_counters) {
+ which = cm - mm->domain_counters;
+
+ for (i = 0; i < vec_len(cm->maxi); i++) {
+ vlib_get_combined_counter (cm, i, &v);
+ total_pkts[which] += v.packets;
+ total_bytes[which] += v.bytes;
+ }
+ }
+
+ map_domain_counter_unlock (mm);
+
+ /* Note: in network byte order! */
+ rmp->total_pkts[MAP_DOMAIN_COUNTER_RX] = clib_host_to_net_u64(total_pkts[MAP_DOMAIN_COUNTER_RX]);
+ rmp->total_bytes[MAP_DOMAIN_COUNTER_RX] = clib_host_to_net_u64(total_bytes[MAP_DOMAIN_COUNTER_RX]);
+ rmp->total_pkts[MAP_DOMAIN_COUNTER_TX] = clib_host_to_net_u64(total_pkts[MAP_DOMAIN_COUNTER_TX]);
+ rmp->total_bytes[MAP_DOMAIN_COUNTER_TX] = clib_host_to_net_u64(total_bytes[MAP_DOMAIN_COUNTER_TX]);
+ rmp->total_bindings = clib_host_to_net_u64(pool_elts(mm->domains));
+ rmp->total_ip4_fragments = 0; // Not yet implemented. Should be a simple counter.
+ rmp->total_security_check[MAP_DOMAIN_COUNTER_TX] = clib_host_to_net_u64(map_error_counter_get(ip4_map_node.index, MAP_ERROR_ENCAP_SEC_CHECK));
+ rmp->total_security_check[MAP_DOMAIN_COUNTER_RX] = clib_host_to_net_u64(map_error_counter_get(ip4_map_node.index, MAP_ERROR_DECAP_SEC_CHECK));
+
+ vl_msg_api_send_shmem(q, (u8 *)&rmp);
+}
+
+static void vl_api_ipsec_sa_set_key_t_handler
+(vl_api_ipsec_sa_set_key_t * mp)
+{
+ vlib_main_t *vm __attribute__((unused)) = vlib_get_main();
+ vl_api_ipsec_sa_set_key_reply_t *rmp;
+ int rv;
+#if IPSEC > 0
+ ipsec_sa_t sa;
+ sa.id = ntohl(mp->sa_id);
+ sa.crypto_key_len = mp->crypto_key_length;
+ clib_memcpy(&sa.crypto_key, mp->crypto_key, sizeof(sa.crypto_key));
+ sa.integ_key_len = mp->integrity_key_length;
+ clib_memcpy(&sa.integ_key, mp->integrity_key, sizeof(sa.integ_key));
+
+ rv = ipsec_set_sa_key(vm, &sa);
+#else
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+#endif
+
+ REPLY_MACRO(VL_API_IPSEC_SA_SET_KEY_REPLY);
+}
+
+static void vl_api_cop_interface_enable_disable_t_handler
+(vl_api_cop_interface_enable_disable_t * mp)
+{
+ vl_api_cop_interface_enable_disable_reply_t * rmp;
+ int rv;
+ u32 sw_if_index = ntohl(mp->sw_if_index);
+ int enable_disable;
+
+ VALIDATE_SW_IF_INDEX(mp);
+
+ enable_disable = (int) mp->enable_disable;
+
+ rv = cop_interface_enable_disable (sw_if_index, enable_disable);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO(VL_API_COP_INTERFACE_ENABLE_DISABLE_REPLY);
+}
+
+static void vl_api_cop_whitelist_enable_disable_t_handler
+(vl_api_cop_whitelist_enable_disable_t * mp)
+{
+ vl_api_cop_whitelist_enable_disable_reply_t * rmp;
+ cop_whitelist_enable_disable_args_t _a, *a=&_a;
+ u32 sw_if_index = ntohl(mp->sw_if_index);
+ int rv;
+
+ VALIDATE_SW_IF_INDEX(mp);
+
+ a->sw_if_index = sw_if_index;
+ a->ip4 = mp->ip4;
+ a->ip6 = mp->ip6;
+ a->default_cop = mp->default_cop;
+ a->fib_id = ntohl(mp->fib_id);
+
+ rv = cop_whitelist_enable_disable (a);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO(VL_API_COP_WHITELIST_ENABLE_DISABLE_REPLY);
+}
+
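+/*
+ * Serialize the vlib node graph into a vector allocated on the shared
+ * memory API heap; the reply carries a pointer to that vector in
+ * reply_in_shmem rather than inline data.
+ */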
+static void vl_api_get_node_graph_t_handler
+(vl_api_get_node_graph_t * mp)
+{
+ int rv = 0;
+ u8 * vector = 0;
+ api_main_t * am = &api_main;
+ vlib_main_t * vm = vlib_get_main();
+ void * oldheap;
+ vl_api_get_node_graph_reply_t * rmp;
+
+ pthread_mutex_lock (&am->vlib_rp->mutex);
+ oldheap = svm_push_data_heap (am->vlib_rp);
+
+ /*
+ * Keep the number of memcpy ops to a minimum (e.g. 1).
+ */
+ vec_validate (vector, 16384);
+ vec_reset_length (vector);
+
+ /* $$$$ FIXME */
+ vector = vlib_node_serialize (&vm->node_main, vector,
+ (u32)~0 /* all threads */,
+ 1 /* include nexts */,
+ 1 /* include stats */);
+
+ svm_pop_heap (oldheap);
+ pthread_mutex_unlock (&am->vlib_rp->mutex);
+
+ REPLY_MACRO2(VL_API_GET_NODE_GRAPH_REPLY,
+ rmp->reply_in_shmem = (uword) vector);
+}
+
+static void vl_api_trace_profile_add_t_handler
+(vl_api_trace_profile_add_t *mp)
+{
+ int rv = 0;
+ vl_api_trace_profile_add_reply_t * rmp;
+ clib_error_t *error;
+
+    /* Ignore the profile id: only a single profile is currently
+     * supported */
+ error = ip6_ioam_trace_profile_set(mp->trace_num_elt, mp->trace_type,
+ ntohl(mp->node_id), ntohl(mp->trace_app_data),
+ mp->pow_enable, mp->trace_tsp,
+ mp->trace_ppc);
+ if (error) {
+ clib_error_report(error);
+ rv = clib_error_get_code(error);
+ }
+
+ REPLY_MACRO(VL_API_TRACE_PROFILE_ADD_REPLY);
+}
+
+static void vl_api_trace_profile_apply_t_handler
+(vl_api_trace_profile_apply_t *mp)
+{
+ int rv = 0;
+ vl_api_trace_profile_apply_reply_t * rmp;
+
+ if (mp->enable != 0) {
+ rv = ip6_ioam_set_destination ((ip6_address_t *)(&mp->dest_ipv6),
+ ntohl(mp->prefix_length),
+ ntohl(mp->vrf_id),
+ mp->trace_op == IOAM_HBYH_ADD,
+ mp->trace_op == IOAM_HBYH_POP,
+ mp->trace_op == IOAM_HBYH_MOD);
+ } else {
+ //ip6_ioam_clear_destination(&ip6, mp->prefix_length, mp->vrf_id);
+ }
+ REPLY_MACRO(VL_API_TRACE_PROFILE_APPLY_REPLY);
+}
+
+static void vl_api_trace_profile_del_t_handler
+(vl_api_trace_profile_del_t *mp)
+{
+ int rv = 0;
+ vl_api_trace_profile_del_reply_t * rmp;
+ clib_error_t *error;
+
+ error = clear_ioam_rewrite_fn();
+ if (error) {
+ clib_error_report(error);
+ rv = clib_error_get_code(error);
+ }
+
+ REPLY_MACRO(VL_API_TRACE_PROFILE_DEL_REPLY);
+}
+
+static void
+vl_api_af_packet_create_t_handler
+(vl_api_af_packet_create_t *mp)
+{
+ vlib_main_t *vm = vlib_get_main();
+ vl_api_af_packet_create_reply_t *rmp;
+ int rv = 0;
+ u8 *host_if_name = NULL;
+    u32 sw_if_index = ~0;
+
+ host_if_name = format(0, "%s", mp->host_if_name);
+ vec_add1 (host_if_name, 0);
+
+ rv = af_packet_create_if(vm, host_if_name,
+ mp->use_random_hw_addr ? 0 : mp->hw_addr, &sw_if_index);
+
+ vec_free(host_if_name);
+
+ REPLY_MACRO2(VL_API_AF_PACKET_CREATE_REPLY,
+ rmp->sw_if_index = clib_host_to_net_u32(sw_if_index));
+}
+
+static void
+vl_api_af_packet_delete_t_handler
+(vl_api_af_packet_delete_t *mp)
+{
+ vlib_main_t * vm = vlib_get_main();
+ vl_api_af_packet_delete_reply_t *rmp;
+ int rv = 0;
+ u8 *host_if_name = NULL;
+
+ host_if_name = format(0, "%s", mp->host_if_name);
+ vec_add1 (host_if_name, 0);
+
+ rv = af_packet_delete_if(vm, host_if_name);
+
+ vec_free(host_if_name);
+
+ REPLY_MACRO(VL_API_AF_PACKET_DELETE_REPLY);
+}
+
+static void
+vl_api_policer_add_del_t_handler
+(vl_api_policer_add_del_t *mp)
+{
+ vlib_main_t * vm = vlib_get_main();
+ vl_api_policer_add_del_reply_t *rmp;
+ int rv = 0;
+ u8 *name = NULL;
+ sse2_qos_pol_cfg_params_st cfg;
+ clib_error_t * error;
+
+ name = format(0, "%s", mp->name);
+
+ memset (&cfg, 0, sizeof (cfg));
+ cfg.rfc = mp->type;
+ cfg.rnd_type = mp->round_type;
+ cfg.rate_type = mp->rate_type;
+ cfg.rb.kbps.cir_kbps = mp->cir;
+ cfg.rb.kbps.eir_kbps = mp->eir;
+ cfg.rb.kbps.cb_bytes = mp->cb;
+ cfg.rb.kbps.eb_bytes = mp->eb;
+
+ error = policer_add_del(vm, name, &cfg, mp->is_add);
+
+ if (error)
+ rv = VNET_API_ERROR_UNSPECIFIED;
+
+ REPLY_MACRO(VL_API_POLICER_ADD_DEL_REPLY);
+}
+
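+/* Report both the configured policer parameters and the computed
+ * runtime template (token bucket state) for one policer. */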
+static void
+send_policer_details (u8 *name,
+ sse2_qos_pol_cfg_params_st *config,
+ policer_read_response_type_st *templ,
+ unix_shared_memory_queue_t *q,
+ u32 context)
+{
+ vl_api_policer_details_t * mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_POLICER_DETAILS);
+ mp->context = context;
+ mp->cir = htonl(config->rb.kbps.cir_kbps);
+ mp->eir = htonl(config->rb.kbps.eir_kbps);
+ mp->cb = htonl(config->rb.kbps.cb_bytes);
+ mp->eb = htonl(config->rb.kbps.eb_bytes);
+ mp->rate_type = config->rate_type;
+ mp->round_type = config->rnd_type;
+ mp->type = config->rfc;
+ mp->single_rate = templ->single_rate ? 1 : 0;
+ mp->color_aware = templ->color_aware ? 1 : 0;
+ mp->scale = htonl(templ->scale);
+ mp->cir_tokens_per_period = htonl(templ->cir_tokens_per_period);
+ mp->pir_tokens_per_period = htonl(templ->pir_tokens_per_period);
+ mp->current_limit = htonl(templ->current_limit);
+ mp->current_bucket = htonl(templ->current_bucket);
+ mp->extended_limit = htonl(templ->extended_limit);
+ mp->extended_bucket = htonl(templ->extended_bucket);
+ mp->last_update_time = clib_host_to_net_u64(templ->last_update_time);
+
+ strncpy ((char *) mp->name, (char *) name, ARRAY_LEN(mp->name) - 1);
+
+ vl_msg_api_send_shmem (q, (u8 *)&mp);
+}
+
+static void
+vl_api_policer_dump_t_handler
+(vl_api_policer_dump_t *mp)
+{
+ unix_shared_memory_queue_t * q;
+ vnet_policer_main_t * pm = &vnet_policer_main;
+ hash_pair_t * hp;
+ uword * p;
+ u32 pool_index;
+ u8 * match_name = 0;
+ u8 * name;
+ sse2_qos_pol_cfg_params_st *config;
+ policer_read_response_type_st *templ;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ return;
+
+ if (mp->match_name_valid) {
+ match_name = format(0, "%s%c", mp->match_name, 0);
+ }
+
+ if (mp->match_name_valid) {
+ p = hash_get_mem (pm->policer_config_by_name, match_name);
+ if (p) {
+ pool_index = p[0];
+ config = pool_elt_at_index (pm->configs, pool_index);
+ templ = pool_elt_at_index (pm->policer_templates, pool_index);
+ send_policer_details(match_name, config, templ, q, mp->context);
+ }
+ } else {
+ hash_foreach_pair (hp, pm->policer_config_by_name,
+ ({
+ name = (u8 *) hp->key;
+ pool_index = hp->value[0];
+ config = pool_elt_at_index (pm->configs, pool_index);
+ templ = pool_elt_at_index (pm->policer_templates, pool_index);
+ send_policer_details(name, config, templ, q, mp->context);
+ }));
+ }
+}
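+
+/*
+ * Dump/details convention used throughout this file: the client sends
+ * a single *_dump request; vpp answers with zero or more *_details
+ * messages carrying the request's context. Clients typically follow
+ * the dump with a control_ping and treat the control_ping_reply as the
+ * end-of-stream marker.
+ */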
+
+static void
+vl_api_netmap_create_t_handler
+(vl_api_netmap_create_t *mp)
+{
+ vlib_main_t *vm = vlib_get_main();
+ vl_api_netmap_create_reply_t *rmp;
+ int rv = 0;
+ u8 *if_name = NULL;
+
+ if_name = format(0, "%s", mp->netmap_if_name);
+ vec_add1 (if_name, 0);
+
+ rv = netmap_create_if(vm, if_name, mp->use_random_hw_addr ? 0 : mp->hw_addr,
+ mp->is_pipe, mp->is_master, 0);
+
+ vec_free(if_name);
+
+ REPLY_MACRO(VL_API_NETMAP_CREATE_REPLY);
+}
+
+static void
+vl_api_netmap_delete_t_handler
+(vl_api_netmap_delete_t *mp)
+{
+ vlib_main_t * vm = vlib_get_main();
+ vl_api_netmap_delete_reply_t *rmp;
+ int rv = 0;
+ u8 *if_name = NULL;
+
+ if_name = format(0, "%s", mp->netmap_if_name);
+ vec_add1 (if_name, 0);
+
+ rv = netmap_delete_if(vm, if_name);
+
+ vec_free(if_name);
+
+ REPLY_MACRO(VL_API_NETMAP_DELETE_REPLY);
+}
+
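+/*
+ * *_details messages travel vpp -> client only; a handler slot must
+ * still exist for every message id, so the *_details handlers below
+ * just log "BUG" should one ever arrive here.
+ */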
+static void vl_api_mpls_gre_tunnel_details_t_handler (
+ vl_api_mpls_gre_tunnel_details_t * mp)
+{
+ clib_warning ("BUG");
+}
+
+static void send_mpls_gre_tunnel_entry (vpe_api_main_t * am,
+ unix_shared_memory_queue_t *q,
+ mpls_gre_tunnel_t * gt,
+ u32 index,
+ u32 context)
+{
+ vl_api_mpls_gre_tunnel_details_t * mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs(VL_API_MPLS_GRE_TUNNEL_DETAILS);
+ mp->context = context;
+
+  if (gt != NULL) {
+    mpls_main_t * mm = &mpls_main;
+    mpls_encap_t * e;
+    int i;
+    u32 len;
+
+    mp->tunnel_index = htonl(index);
+    mp->tunnel_src = gt->tunnel_src.as_u32;
+    mp->tunnel_dst = gt->tunnel_dst.as_u32;
+    mp->intfc_address = gt->intfc_address.as_u32;
+    mp->mask_width = htonl(gt->mask_width);
+    mp->inner_fib_index = htonl(gt->inner_fib_index);
+    mp->outer_fib_index = htonl(gt->outer_fib_index);
+    mp->encap_index = htonl(gt->encap_index);
+    mp->hw_if_index = htonl(gt->hw_if_index);
+    mp->l2_only = htonl(gt->l2_only);
+
+    /* label stack: moved inside the NULL check; the original code
+     * dereferenced gt->encap_index even when gt was NULL */
+    e = pool_elt_at_index (mm->encaps, gt->encap_index);
+    len = vec_len (e->labels);
+    mp->nlabels = htonl(len);
+
+    for (i = 0; i < len; i++) {
+      mp->labels[i] = htonl(vnet_mpls_uc_get_label(
+          clib_host_to_net_u32(e->labels[i].label_exp_s_ttl)));
+    }
+  }
+
+ vl_msg_api_send_shmem (q, (u8 *)&mp);
+}
+
+static void
+vl_api_mpls_gre_tunnel_dump_t_handler (vl_api_mpls_gre_tunnel_dump_t *mp)
+{
+ vpe_api_main_t * am = &vpe_api_main;
+ unix_shared_memory_queue_t * q;
+ vlib_main_t * vm = &vlib_global_main;
+ mpls_main_t * mm = &mpls_main;
+ mpls_gre_tunnel_t * gt;
+ u32 index = ntohl(mp->tunnel_index);
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ return;
+
+ if (pool_elts (mm->gre_tunnels)) {
+ if(mp->tunnel_index >= 0) {
+ vlib_cli_output (vm, "MPLS-GRE tunnel %u", index);
+ gt = pool_elt_at_index (mm->gre_tunnels, index);
+ send_mpls_gre_tunnel_entry (am, q, gt, gt - mm->gre_tunnels, mp->context);
+ } else {
+ vlib_cli_output (vm, "MPLS-GRE tunnels");
+ pool_foreach (gt, mm->gre_tunnels,
+ ({
+ send_mpls_gre_tunnel_entry (am, q, gt, gt - mm->gre_tunnels, mp->context);
+ }));
+ }
+ } else {
+ vlib_cli_output (vm, "No MPLS-GRE tunnels");
+ }
+}
+
+static void vl_api_mpls_eth_tunnel_details_t_handler (
+ vl_api_mpls_eth_tunnel_details_t * mp)
+{
+ clib_warning ("BUG");
+}
+
+static void send_mpls_eth_tunnel_entry (vpe_api_main_t * am,
+ unix_shared_memory_queue_t *q,
+ mpls_eth_tunnel_t * et,
+ u32 index,
+ u32 context)
+{
+ vl_api_mpls_eth_tunnel_details_t * mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs(VL_API_MPLS_ETH_TUNNEL_DETAILS);
+ mp->context = context;
+
+  if (et != NULL) {
+    mpls_main_t * mm = &mpls_main;
+    mpls_encap_t * e;
+    int i;
+    u32 len;
+
+    mp->tunnel_index = htonl(index);
+    memcpy(mp->tunnel_dst_mac, et->tunnel_dst, 6);
+    mp->intfc_address = et->intfc_address.as_u32;
+    mp->tx_sw_if_index = htonl(et->tx_sw_if_index);
+    mp->inner_fib_index = htonl(et->inner_fib_index);
+    mp->mask_width = htonl(et->mask_width);
+    mp->encap_index = htonl(et->encap_index);
+    mp->hw_if_index = htonl(et->hw_if_index);
+    mp->l2_only = htonl(et->l2_only);
+
+    /* label stack: moved inside the NULL check; the original code
+     * dereferenced et->encap_index even when et was NULL */
+    e = pool_elt_at_index (mm->encaps, et->encap_index);
+    len = vec_len (e->labels);
+    mp->nlabels = htonl(len);
+
+    for (i = 0; i < len; i++) {
+      mp->labels[i] = htonl(vnet_mpls_uc_get_label(
+          clib_host_to_net_u32(e->labels[i].label_exp_s_ttl)));
+    }
+  }
+
+ vl_msg_api_send_shmem (q, (u8 *)&mp);
+}
+
+static void
+vl_api_mpls_eth_tunnel_dump_t_handler (vl_api_mpls_eth_tunnel_dump_t *mp)
+{
+ vpe_api_main_t * am = &vpe_api_main;
+ unix_shared_memory_queue_t * q;
+ vlib_main_t * vm = &vlib_global_main;
+ mpls_main_t * mm = &mpls_main;
+ mpls_eth_tunnel_t * et;
+ u32 index = ntohl(mp->tunnel_index);
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ return;
+
+ clib_warning("Received mpls_eth_tunnel_dump");
+ clib_warning("Received tunnel index: %u from client %u", index, mp->client_index);
+
+ if (pool_elts (mm->eth_tunnels)) {
+ if(mp->tunnel_index >= 0) {
+ vlib_cli_output (vm, "MPLS-Ethernet tunnel %u", index);
+ et = pool_elt_at_index (mm->eth_tunnels, index);
+ send_mpls_eth_tunnel_entry (am, q, et, et - mm->eth_tunnels, mp->context);
+ } else {
+ clib_warning("MPLS-Ethernet tunnels");
+ pool_foreach (et, mm->eth_tunnels,
+ ({
+ send_mpls_eth_tunnel_entry (am, q, et, et - mm->eth_tunnels, mp->context);
+ }));
+ }
+ } else {
+ clib_warning("No MPLS-Ethernet tunnels");
+ }
+}
+
+static void vl_api_mpls_fib_encap_details_t_handler (
+ vl_api_mpls_fib_encap_details_t * mp)
+{
+ clib_warning ("BUG");
+}
+
+static void send_mpls_fib_encap_details (vpe_api_main_t * am,
+ unix_shared_memory_queue_t *q,
+ show_mpls_fib_t *s,
+ u32 context)
+{
+ vl_api_mpls_fib_encap_details_t * mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs(VL_API_MPLS_FIB_ENCAP_DETAILS);
+ mp->context = context;
+
+ mp->fib_index = htonl(s->fib_index);
+ mp->entry_index = htonl(s->entry_index);
+ mp->dest = s->dest;
+ mp->s_bit = htonl(s->s_bit);
+
+ mpls_main_t * mm = &mpls_main;
+ mpls_encap_t * e;
+ int i;
+ u32 len = 0;
+
+ e = pool_elt_at_index (mm->encaps, s->entry_index);
+ len = vec_len (e->labels);
+ mp->nlabels = htonl(len);
+
+ for (i = 0; i < len; i++) {
+ mp->labels[i] = htonl(vnet_mpls_uc_get_label(
+ clib_host_to_net_u32(e->labels[i].label_exp_s_ttl)));
+ }
+
+ vl_msg_api_send_shmem (q, (u8 *)&mp);
+}
+
+static void
+vl_api_mpls_fib_encap_dump_t_handler (vl_api_mpls_fib_encap_dump_t *mp)
+{
+ vpe_api_main_t * am = &vpe_api_main;
+ unix_shared_memory_queue_t * q;
+ vlib_main_t * vm = &vlib_global_main;
+ u64 key;
+ u32 value;
+ show_mpls_fib_t *records = 0;
+ show_mpls_fib_t *s;
+ mpls_main_t * mm = &mpls_main;
+ ip4_main_t * im = &ip4_main;
+ ip4_fib_t * rx_fib;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ return;
+
+ hash_foreach (key, value, mm->mpls_encap_by_fib_and_dest,
+ ({
+ vec_add2 (records, s, 1);
+ s->fib_index = (u32)(key>>32);
+ s->dest = (u32)(key & 0xFFFFFFFF);
+ s->entry_index = (u32) value;
+ }));
+
+ if (0 == vec_len(records)) {
+ vlib_cli_output(vm, "MPLS encap table empty");
+ goto out;
+ }
+
+ /* sort output by dst address within fib */
+ vec_sort_with_function(records, mpls_dest_cmp);
+ vec_sort_with_function(records, mpls_fib_index_cmp);
+ vlib_cli_output(vm, "MPLS encap table");
+ vlib_cli_output(vm, "%=6s%=16s%=16s", "Table", "Dest address", "Labels");
+ vec_foreach (s, records)
+ {
+ rx_fib = vec_elt_at_index(im->fibs, s->fib_index);
+ vlib_cli_output(vm, "%=6d%=16U%=16U", rx_fib->table_id,
+ format_ip4_address, &s->dest, format_mpls_encap_index, mm,
+ s->entry_index);
+ send_mpls_fib_encap_details (am, q, s, mp->context);
+ }
+
+out:
+ vec_free(records);
+}
+
+static void vl_api_mpls_fib_decap_details_t_handler (
+ vl_api_mpls_fib_decap_details_t * mp)
+{
+ clib_warning ("BUG");
+}
+
+static void send_mpls_fib_decap_details (vpe_api_main_t * am,
+ unix_shared_memory_queue_t *q,
+ show_mpls_fib_t *s,
+ u32 rx_table_id,
+ u32 tx_table_id,
+ char *swif_tag,
+ u32 context)
+{
+ vl_api_mpls_fib_decap_details_t * mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs(VL_API_MPLS_FIB_DECAP_DETAILS);
+ mp->context = context;
+
+ mp->fib_index = htonl(s->fib_index);
+ mp->entry_index = htonl(s->entry_index);
+ mp->dest = s->dest;
+ mp->s_bit = htonl(s->s_bit);
+ mp->label = htonl(s->label);
+ mp->rx_table_id = htonl(rx_table_id);
+ mp->tx_table_id = htonl(tx_table_id);
+ strncpy ((char *) mp->swif_tag,
+ (char *) swif_tag, ARRAY_LEN(mp->swif_tag)-1);
+
+ vl_msg_api_send_shmem (q, (u8 *)&mp);
+}
+
+static void
+vl_api_mpls_fib_decap_dump_t_handler (vl_api_mpls_fib_decap_dump_t *mp)
+{
+ vpe_api_main_t * am = &vpe_api_main;
+ unix_shared_memory_queue_t * q;
+ vlib_main_t * vm = &vlib_global_main;
+ u64 key;
+ u32 value;
+ show_mpls_fib_t *records = 0;
+ show_mpls_fib_t *s;
+ mpls_main_t * mm = &mpls_main;
+ ip4_main_t * im = &ip4_main;
+ ip4_fib_t * rx_fib;
+ ip4_fib_t *tx_fib;
+ u32 tx_table_id;
+ char *swif_tag;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ return;
+
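+  /*
+   * Key layout, inferred from the unpacking below: rx fib index in the
+   * upper 32 bits; the 20-bit label (shifted) and the s-bit packed
+   * into the lower 32.
+   */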
+ hash_foreach (key, value, mm->mpls_decap_by_rx_fib_and_label,
+ ({
+ vec_add2 (records, s, 1);
+ s->fib_index = (u32)(key>>32);
+ s->entry_index = (u32) value;
+ s->label = ((u32) key)>>12;
+ s->s_bit = (key & (1<<8)) != 0;
+ }));
+
+ if (!vec_len(records)) {
+ vlib_cli_output(vm, "MPLS decap table empty");
+ goto out;
+ }
+
+ vec_sort_with_function(records, mpls_label_cmp);
+ vlib_cli_output(vm, "MPLS decap table");
+ vlib_cli_output(vm, "%=10s%=15s%=6s%=6s", "RX Table", "TX Table/Intfc",
+ "Label", "S-bit");
+ vec_foreach (s, records)
+ {
+ mpls_decap_t * d;
+ d = pool_elt_at_index(mm->decaps, s->entry_index);
+ if (d->next_index == MPLS_INPUT_NEXT_IP4_INPUT) {
+ tx_fib = vec_elt_at_index(im->fibs, d->tx_fib_index);
+ tx_table_id = tx_fib->table_id;
+ swif_tag = " ";
+ } else {
+ tx_table_id = d->tx_fib_index;
+ swif_tag = "(i) ";
+ }
+ rx_fib = vec_elt_at_index(im->fibs, s->fib_index);
+
+ vlib_cli_output(vm, "%=10d%=10d%=5s%=6d%=6d", rx_fib->table_id,
+ tx_table_id, swif_tag, s->label, s->s_bit);
+
+ send_mpls_fib_decap_details (am, q, s, rx_fib->table_id,
+ tx_table_id, swif_tag, mp->context);
+ }
+
+out:
+ vec_free(records);
+}
+
+static void vl_api_classify_table_ids_t_handler (vl_api_classify_table_ids_t *mp)
+{
+ unix_shared_memory_queue_t * q;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ return;
+
+ vnet_classify_main_t * cm = &vnet_classify_main;
+ vnet_classify_table_t * t;
+ u32 * table_ids = 0;
+ u32 count;
+
+ pool_foreach (t, cm->tables,
+ ({
+ vec_add1 (table_ids, ntohl(t - cm->tables));
+ }));
+ count = vec_len(table_ids);
+
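+  /* Variable-length reply: one u32 table id per table follows the
+   * fixed-size header */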
+ vl_api_classify_table_ids_reply_t *rmp;
+  rmp = vl_msg_api_alloc_as_if_client(sizeof (*rmp) + count * sizeof(u32));
+ rmp->_vl_msg_id = ntohs (VL_API_CLASSIFY_TABLE_IDS_REPLY);
+ rmp->context = mp->context;
+ rmp->count = ntohl(count);
+ clib_memcpy(rmp->ids, table_ids, count * sizeof(u32));
+ rmp->retval = 0;
+
+ vl_msg_api_send_shmem (q, (u8 *)&rmp);
+
+ vec_free (table_ids);
+}
+
+static void vl_api_classify_table_by_interface_t_handler (vl_api_classify_table_by_interface_t *mp)
+{
+ vl_api_classify_table_by_interface_reply_t *rmp;
+ int rv = 0;
+
+ u32 sw_if_index = ntohl(mp->sw_if_index);
+ u32 * acl = 0;
+
+ vec_validate (acl, INPUT_ACL_N_TABLES - 1);
+ vec_set (acl, ~0);
+
+ VALIDATE_SW_IF_INDEX(mp);
+
+ input_acl_main_t * am = &input_acl_main;
+
+ int if_idx;
+ u32 type;
+
+ for (type = 0; type < INPUT_ACL_N_TABLES; type++)
+ {
+ u32 * vec_tbl = am->classify_table_index_by_sw_if_index[type];
+ if (vec_len(vec_tbl)) {
+ for (if_idx = 0; if_idx < vec_len (vec_tbl); if_idx++)
+ {
+ if (vec_elt(vec_tbl, if_idx) == ~0 || sw_if_index != if_idx) {
+ continue;
+ }
+ acl[type] = vec_elt(vec_tbl, if_idx);
+ }
+ }
+ }
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO2(VL_API_CLASSIFY_TABLE_BY_INTERFACE_REPLY,
+ ({
+ rmp->sw_if_index = ntohl(sw_if_index);
+ rmp->l2_table_id = ntohl(acl[INPUT_ACL_TABLE_L2]);
+ rmp->ip4_table_id = ntohl(acl[INPUT_ACL_TABLE_IP4]);
+ rmp->ip6_table_id = ntohl(acl[INPUT_ACL_TABLE_IP6]);
+ }));
+ vec_free(acl);
+}
+
+static void vl_api_classify_table_info_t_handler (vl_api_classify_table_info_t *mp)
+{
+ unix_shared_memory_queue_t * q;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ return;
+
+ vl_api_classify_table_info_reply_t *rmp = 0;
+
+ vnet_classify_main_t * cm = &vnet_classify_main;
+ u32 table_id = ntohl(mp->table_id);
+ vnet_classify_table_t * t;
+
+ pool_foreach (t, cm->tables,
+ ({
+ if (table_id == t - cm->tables) {
+ rmp = vl_msg_api_alloc_as_if_client(sizeof (*rmp) + t->match_n_vectors * sizeof (u32x4));
+ rmp->_vl_msg_id = ntohs (VL_API_CLASSIFY_TABLE_INFO_REPLY);
+ rmp->context = mp->context;
+ rmp->table_id = ntohl(table_id);
+ rmp->nbuckets = ntohl(t->nbuckets);
+ rmp->match_n_vectors = ntohl(t->match_n_vectors);
+ rmp->skip_n_vectors = ntohl(t->skip_n_vectors);
+ rmp->active_sessions = ntohl(t->active_elements);
+ rmp->next_table_index = ntohl(t->next_table_index);
+ rmp->miss_next_index = ntohl(t->miss_next_index);
+ rmp->mask_length = ntohl(t->match_n_vectors * sizeof (u32x4));
+ clib_memcpy(rmp->mask, t->mask, t->match_n_vectors * sizeof(u32x4));
+ rmp->retval = 0;
+ break;
+ }
+ }));
+
+  if (rmp == 0) {
+    rmp = vl_msg_api_alloc (sizeof (*rmp));
+    memset (rmp, 0, sizeof (*rmp)); /* don't send uninitialized fields */
+    rmp->_vl_msg_id = ntohs (VL_API_CLASSIFY_TABLE_INFO_REPLY);
+    rmp->context = mp->context;
+    rmp->retval = ntohl(VNET_API_ERROR_CLASSIFY_TABLE_NOT_FOUND);
+  }
+
+ vl_msg_api_send_shmem (q, (u8 *)&rmp);
+}
+
+static void vl_api_classify_session_details_t_handler (vl_api_classify_session_details_t * mp)
+{
+ clib_warning ("BUG");
+}
+
+static void send_classify_session_details (unix_shared_memory_queue_t * q,
+ u32 table_id,
+ u32 match_length,
+ vnet_classify_entry_t * e,
+ u32 context)
+{
+ vl_api_classify_session_details_t *rmp;
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ memset (rmp, 0, sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs(VL_API_CLASSIFY_SESSION_DETAILS);
+ rmp->context = context;
+ rmp->table_id = ntohl(table_id);
+ rmp->hit_next_index = ntohl(e->next_index);
+ rmp->advance = ntohl(e->advance);
+ rmp->opaque_index = ntohl(e->opaque_index);
+ rmp->match_length = ntohl(match_length);
+ clib_memcpy(rmp->match, e->key, match_length);
+
+ vl_msg_api_send_shmem (q, (u8 *)&rmp);
+}
+
+static void vl_api_classify_session_dump_t_handler (vl_api_classify_session_dump_t *mp)
+{
+ vnet_classify_main_t * cm = &vnet_classify_main;
+ unix_shared_memory_queue_t * q;
+
+ u32 table_id = ntohl(mp->table_id);
+ vnet_classify_table_t * t;
+
+  q = vl_api_client_index_to_input_queue (mp->client_index);
+  if (q == 0)
+    return; /* client gone; every other dump handler checks this too */
+
+ pool_foreach (t, cm->tables,
+ ({
+ if (table_id == t - cm->tables) {
+ vnet_classify_bucket_t * b;
+ vnet_classify_entry_t * v, * save_v;
+ int i, j, k;
+
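+	/* Walk every bucket; a non-empty bucket holds (1 << log2_pages)
+	 * pages of t->entries_per_page session slots */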
+ for (i = 0; i < t->nbuckets; i++)
+ {
+ b = &t->buckets [i];
+ if (b->offset == 0)
+ continue;
+
+ save_v = vnet_classify_get_entry (t, b->offset);
+ for (j = 0; j < (1<<b->log2_pages); j++)
+ {
+ for (k = 0; k < t->entries_per_page; k++)
+ {
+ v = vnet_classify_entry_at_index (t, save_v, j*t->entries_per_page + k);
+ if (vnet_classify_entry_is_free (v))
+ continue;
+
+ send_classify_session_details(q, table_id,
+ t->match_n_vectors * sizeof (u32x4), v, mp->context);
+ }
+ }
+ }
+ break;
+ }
+ }));
+}
+
+#define BOUNCE_HANDLER(nn) \
+static void vl_api_##nn##_t_handler ( \
+ vl_api_##nn##_t *mp) \
+{ \
+ vpe_client_registration_t *reg; \
+ vpe_api_main_t * vam = &vpe_api_main; \
+ unix_shared_memory_queue_t * q; \
+ \
+ /* One registration only... */ \
+ pool_foreach(reg, vam->nn##_registrations, \
+ ({ \
+ q = vl_api_client_index_to_input_queue (reg->client_index); \
+ if (q) { \
+ /* \
+ * If the queue is stuffed, turf the msg and complain \
+ * It's unlikely that the intended recipient is \
+ * alive; avoid deadlock at all costs. \
+ */ \
+ if (q->cursize == q->maxsize) { \
+ clib_warning ("ERROR: receiver queue full, drop msg"); \
+ vl_msg_api_free (mp); \
+ return; \
+ } \
+ vl_msg_api_send_shmem (q, (u8 *)&mp); \
+ return; \
+ } \
+ })); \
+ vl_msg_api_free (mp); \
+}
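+
+/*
+ * Illustrative (hypothetical here) instantiation of BOUNCE_HANDLER:
+ *
+ *   BOUNCE_HANDLER(to_netconf_server)
+ *
+ * would define vl_api_to_netconf_server_t_handler, relaying the message
+ * to the single registered client queue, or freeing it if no client
+ * (or no queue space) is available.
+ */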
+
+/*
+ * vpe_api_hookup
+ * Add vpe's API message handlers to the table.
+ * vlib has already mapped shared memory and
+ * added the client registration handlers.
+ * See .../open-repo/vlib/memclnt_vlib.c:memclnt_process()
+ */
+
+static clib_error_t *
+vpe_api_hookup (vlib_main_t *vm)
+{
+ api_main_t * am = &api_main;
+
+#define _(N,n) \
+ vl_msg_api_set_handlers(VL_API_##N, #n, \
+ vl_api_##n##_t_handler, \
+ vl_noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_vpe_api_msg;
+#undef _
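+
+/*
+ * For reference, each expansion of the `_' macro above is equivalent
+ * to the following, taking show_version as an example:
+ *
+ *   vl_msg_api_set_handlers (VL_API_SHOW_VERSION, "show_version",
+ *                            vl_api_show_version_t_handler,
+ *                            vl_noop_handler,
+ *                            vl_api_show_version_t_endian,
+ *                            vl_api_show_version_t_print,
+ *                            sizeof (vl_api_show_version_t), 1);
+ */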
+
+ /*
+ * Manually register the sr tunnel add del msg, so we trace
+ * enough bytes to capture a typical segment list
+ */
+ vl_msg_api_set_handlers (VL_API_SR_TUNNEL_ADD_DEL,
+ "sr_tunnel_add_del",
+ vl_api_sr_tunnel_add_del_t_handler,
+ vl_noop_handler,
+ vl_api_sr_tunnel_add_del_t_endian,
+ vl_api_sr_tunnel_add_del_t_print,
+ 256, 1);
+
+
+ /*
+ * Manually register the sr policy add del msg, so we trace
+ * enough bytes to capture a typical tunnel name list
+ */
+ vl_msg_api_set_handlers (VL_API_SR_POLICY_ADD_DEL,
+ "sr_policy_add_del",
+ vl_api_sr_policy_add_del_t_handler,
+ vl_noop_handler,
+ vl_api_sr_policy_add_del_t_endian,
+ vl_api_sr_policy_add_del_t_print,
+ 256, 1);
+
+ /*
+ * Trace space for 8 MPLS encap labels, classifier mask+match
+ */
+ am->api_trace_cfg [VL_API_MPLS_ADD_DEL_ENCAP].size += 8 * sizeof(u32);
+ am->api_trace_cfg [VL_API_CLASSIFY_ADD_DEL_TABLE].size
+ += 5 * sizeof (u32x4);
+ am->api_trace_cfg [VL_API_CLASSIFY_ADD_DEL_SESSION].size
+ += 5 * sizeof (u32x4);
+ am->api_trace_cfg [VL_API_VXLAN_ADD_DEL_TUNNEL].size
+ += 16 * sizeof (u32);
+
+ /*
+ * Thread-safe API messages
+ */
+ am->is_mp_safe [VL_API_IP_ADD_DEL_ROUTE] = 1;
+ am->is_mp_safe [VL_API_GET_NODE_GRAPH] = 1;
+
+ return 0;
+}
+
+VLIB_API_INIT_FUNCTION(vpe_api_hookup);
+
+static clib_error_t *
+vpe_api_init (vlib_main_t *vm)
+{
+ vpe_api_main_t *am = &vpe_api_main;
+
+ am->vlib_main = vm;
+ am->vnet_main = vnet_get_main();
+ am->interface_events_registration_hash = hash_create (0, sizeof (uword));
+ am->to_netconf_server_registration_hash = hash_create (0, sizeof (uword));
+ am->from_netconf_server_registration_hash = hash_create (0, sizeof (uword));
+ am->to_netconf_client_registration_hash = hash_create (0, sizeof (uword));
+ am->from_netconf_client_registration_hash = hash_create (0, sizeof (uword));
+ am->oam_events_registration_hash = hash_create (0, sizeof (uword));
+
+ vl_api_init (vm);
+ vl_set_memory_region_name ("/vpe-api");
+ vl_enable_disable_memory_api (vm, 1 /* enable it */);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION(vpe_api_init);
+
+
+static clib_error_t *
+api_segment_config (vlib_main_t * vm, unformat_input_t * input)
+{
+ u8 * chroot_path;
+ int uid, gid, rv;
+ char *s, buf[128];
+ struct passwd _pw, *pw;
+ struct group _grp, *grp;
+ clib_error_t *e;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "prefix %s", &chroot_path))
+ {
+ vec_add1 (chroot_path, 0);
+ vl_set_memory_root_path ((char *)chroot_path);
+ }
+ else if (unformat (input, "uid %d", &uid))
+ vl_set_memory_uid (uid);
+ else if (unformat (input, "gid %d", &gid))
+ vl_set_memory_gid (gid);
+ else if (unformat (input, "uid %s", &s))
+ {
+ /* lookup the username */
+ pw = NULL;
+ rv = getpwnam_r(s, &_pw, buf, sizeof(buf), &pw);
+          if (rv != 0) /* getpwnam_r returns 0 or an errno value, never < 0 */
+ {
+ e = clib_error_return_code(0, rv,
+ CLIB_ERROR_ERRNO_VALID | CLIB_ERROR_FATAL,
+ "cannot fetch username %s", s);
+ vec_free (s);
+ return e;
+ }
+ if (pw == NULL)
+ {
+ e = clib_error_return_fatal(0, "username %s does not exist", s);
+ vec_free (s);
+ return e;
+ }
+ vec_free (s);
+ vl_set_memory_uid (pw->pw_uid);
+ }
+ else if (unformat (input, "gid %s", &s))
+ {
+ /* lookup the group name */
+ grp = NULL;
+ rv = getgrnam_r(s, &_grp, buf, sizeof(buf), &grp);
+ if (rv != 0)
+ {
+ e = clib_error_return_code(0, rv,
+ CLIB_ERROR_ERRNO_VALID | CLIB_ERROR_FATAL,
+ "cannot fetch group %s", s);
+ vec_free (s);
+ return e;
+ }
+ if (grp == NULL)
+ {
+ e = clib_error_return_fatal(0, "group %s does not exist", s);
+ vec_free (s);
+ return e;
+ }
+ vec_free (s);
+ vl_set_memory_gid (grp->gr_gid);
+ }
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ return 0;
+}
+VLIB_EARLY_CONFIG_FUNCTION (api_segment_config, "api-segment");
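+
+/*
+ * Illustrative startup.conf stanza parsed by api_segment_config
+ * (values are examples, not defaults):
+ *
+ *   api-segment {
+ *     prefix /vpe1
+ *     uid vpp
+ *     gid vpp
+ *   }
+ */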
+
+void * get_unformat_vnet_sw_interface (void)
+{
+ return (void *) &unformat_vnet_sw_interface;
+}
+
+#undef vl_api_version
+#define vl_api_version(n,v) static u32 vpe_api_version = v;
+#include <vpp-api/vpe.api.h>
+#undef vl_api_version
+
+int vl_msg_api_version_check (vl_api_memclnt_create_t * mp)
+{
+ if (clib_host_to_net_u32(mp->api_versions[0]) != vpe_api_version) {
+ clib_warning ("vpe API mismatch: 0x%08x instead of 0x%08x",
+ clib_host_to_net_u32 (mp->api_versions[0]),
+ vpe_api_version);
+ return -1;
+ }
+ return 0;
+}
+
+static u8 * format_arp_event (u8 * s, va_list * args)
+{
+ vl_api_ip4_arp_event_t * event = va_arg (*args, vl_api_ip4_arp_event_t *);
+
+ s = format (s, "pid %d: %U", event->pid,
+ format_ip4_address, &event->address);
+ return s;
+}
+
+static clib_error_t *
+show_ip4_arp_events_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vpe_api_main_t * am = &vpe_api_main;
+ vl_api_ip4_arp_event_t * event;
+
+ if (pool_elts (am->arp_events) == 0) {
+ vlib_cli_output (vm, "No active arp event registrations");
+ return 0;
+ }
+
+ pool_foreach (event, am->arp_events,
+ ({
+ vlib_cli_output (vm, "%U", format_arp_event, event);
+ }));
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (show_ip4_arp_events, static) = {
+ .path = "show arp event registrations",
+ .function = show_ip4_arp_events_fn,
+ .short_help = "Show arp event registrations",
+};
diff --git a/vpp/vpp-api/custom_dump.c b/vpp/vpp-api/custom_dump.c
new file mode 100644
index 00000000..ac90a56f
--- /dev/null
+++ b/vpp/vpp-api/custom_dump.c
@@ -0,0 +1,1951 @@
+/*
+ *------------------------------------------------------------------
+ * custom_dump.c - pretty-print API messages for replay
+ *
+ * Copyright (c) 2014 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/unix/tuntap.h>
+#include <vnet/mpls-gre/mpls.h>
+#include <vnet/dhcp/proxy.h>
+#include <vnet/dhcpv6/proxy.h>
+#include <vnet/l2tp/l2tp.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/sr/sr_packet.h>
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+
+#include <stats/stats.h>
+#include <oam/oam.h>
+
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/l2/l2_vtr.h>
+
+#include <vpp-api/vpe_msg_enum.h>
+
+#define vl_typedefs /* define message structures */
+#include <vpp-api/vpe_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define endian-swap functions */
+#include <vpp-api/vpe_all_api_h.h>
+#undef vl_endianfun
+
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+
+#define FINISH \
+ vec_add1 (s, 0); \
+ vl_print (handle, (char *)s); \
+ vec_free (s); \
+ return handle;
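+
+/*
+ * Each print routine below accumulates a replay line in the vector s;
+ * FINISH NUL-terminates it, prints it through the supplied handle,
+ * frees the vector and returns the handle. A typical emitted line
+ * (index illustrative):
+ *
+ *   SCRIPT: sw_interface_set_flags sw_if_index 5 admin-up link-up
+ */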
+
+
+static void *vl_api_create_loopback_t_print
+(vl_api_create_loopback_t *mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: create_loopback ");
+ s = format (s, "mac %U ", format_ethernet_address, &mp->mac_address);
+
+ FINISH;
+}
+
+static void *vl_api_delete_loopback_t_print
+(vl_api_delete_loopback_t *mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: delete_loopback ");
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_set_flags_t_print
+(vl_api_sw_interface_set_flags_t * mp, void *handle)
+{
+ u8 * s;
+ s = format (0, "SCRIPT: sw_interface_set_flags ");
+
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+
+ if (mp->admin_up_down)
+ s = format (s, "admin-up ");
+ else
+ s = format (s, "admin-down ");
+
+ if (mp->link_up_down)
+ s = format (s, "link-up");
+ else
+ s = format (s, "link-down");
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_add_del_address_t_print
+(vl_api_sw_interface_add_del_address_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: sw_interface_add_del_address ");
+
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+
+ if (mp->is_ipv6)
+ s = format (s, "%U/%d ", format_ip6_address,
+ (ip6_address_t *) mp->address, mp->address_length);
+ else
+ s = format (s, "%U/%d ", format_ip4_address,
+ (ip4_address_t *) mp->address, mp->address_length);
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+ if (mp->del_all)
+ s = format (s, "del-all ");
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_set_table_t_print
+(vl_api_sw_interface_set_table_t * mp, void * handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: sw_interface_set_table ");
+
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+
+ if (mp->vrf_id)
+ s = format (s, "vrf %d ", ntohl(mp->vrf_id));
+
+ if (mp->is_ipv6)
+ s = format (s, "ipv6 ");
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_set_vpath_t_print
+(vl_api_sw_interface_set_vpath_t * mp, void * handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: sw_interface_set_vpath ");
+
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+
+ if (mp->enable)
+ s = format (s, "vPath enable ");
+ else
+ s = format (s, "vPath disable ");
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_set_l2_xconnect_t_print
+(vl_api_sw_interface_set_l2_xconnect_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: sw_interface_set_l2_xconnect ");
+
+ s = format (s, "sw_if_index %d ", ntohl(mp->rx_sw_if_index));
+
+ if (mp->enable) {
+ s = format (s, "tx_sw_if_index %d ", ntohl(mp->tx_sw_if_index));
+ } else s = format (s, "delete ");
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_set_l2_bridge_t_print
+(vl_api_sw_interface_set_l2_bridge_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: sw_interface_set_l2_bridge ");
+
+ s = format (s, "sw_if_index %d ", ntohl(mp->rx_sw_if_index));
+
+ if (mp->enable) {
+ s = format (s, "bd_id %d shg %d %senable ", ntohl(mp->bd_id),
+ mp->shg, ((mp->bvi)?"bvi ":" "));
+ } else s = format (s, "disable ");
+
+ FINISH;
+}
+
+static void * vl_api_bridge_domain_add_del_t_print
+(vl_api_bridge_domain_add_del_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: bridge_domain_add_del ");
+
+ s = format (s, "bd_id %d ", ntohl(mp->bd_id));
+
+ if (mp->is_add) {
+ s = format (s, "flood %d uu-flood %d forward %d learn %d arp-term %d",
+ mp->flood, mp->uu_flood, mp->forward, mp->learn,
+ mp->arp_term);
+ } else s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_bridge_domain_dump_t_print
+(vl_api_bridge_domain_dump_t * mp, void *handle)
+{
+ u8 * s;
+ u32 bd_id = ntohl (mp->bd_id);
+
+ s = format (0, "SCRIPT: bridge_domain_dump ");
+
+ if (bd_id != ~0)
+ s = format (s, "bd_id %d ", bd_id);
+
+ FINISH;
+}
+
+static void *vl_api_l2fib_add_del_t_print
+(vl_api_l2fib_add_del_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: l2fib_add_del ");
+
+ s = format (s, "mac %U ", format_ethernet_address, &mp->mac);
+
+ s = format (s, "bd_id %d ", ntohl(mp->bd_id));
+
+ if (mp->is_add) {
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+ if (mp->static_mac) s = format (s, "%s", "static ");
+ if (mp->filter_mac) s = format (s, "%s", "filter ");
+ } else {
+ s = format (s, "del ");
+ }
+
+ FINISH;
+}
+
+static void *vl_api_l2_flags_t_print
+(vl_api_l2_flags_t * mp, void *handle)
+{
+ u8 * s;
+ u32 flags = ntohl(mp->feature_bitmap);
+
+ s = format (0, "SCRIPT: l2_flags ");
+
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+
+#define _(a,b) \
+ if (flags & L2INPUT_FEAT_ ## a) s = format (s, #a " ");
+ foreach_l2input_feat;
+#undef _
+
+ FINISH;
+}
+
+static void *vl_api_bridge_flags_t_print
+(vl_api_bridge_flags_t * mp, void *handle)
+{
+ u8 * s;
+ u32 flags = ntohl(mp->feature_bitmap);
+
+ s = format (0, "SCRIPT: bridge_flags ");
+
+ s = format (s, "bd_id %d ", ntohl(mp->bd_id));
+
+ if (flags & L2_LEARN) s = format (s, "learn ");
+ if (flags & L2_FWD) s = format (s, "forward ");
+ if (flags & L2_FLOOD) s = format (s, "flood ");
+ if (flags & L2_UU_FLOOD) s = format (s, "uu-flood ");
+ if (flags & L2_ARP_TERM) s = format (s, "arp-term ");
+
+ if (mp->is_set == 0) s = format (s, "clear ");
+
+ FINISH;
+}
+
+static void *vl_api_bd_ip_mac_add_del_t_print
+(vl_api_bd_ip_mac_add_del_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: bd_ip_mac_add_del ");
+ s = format (s, "bd_id %d ", ntohl(mp->bd_id));
+
+ if (mp->is_ipv6)
+ s = format (s, "%U ", format_ip6_address,
+ (ip6_address_t *) mp->ip_address);
+ else s = format (s, "%U ", format_ip4_address,
+ (ip4_address_t *) mp->ip_address);
+
+ s = format (s, "%U ", format_ethernet_address, mp->mac_address);
+ if (mp->is_add == 0) s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_tap_connect_t_print
+(vl_api_tap_connect_t * mp, void *handle)
+{
+ u8 * s;
+ u8 null_mac[6];
+
+ memset(null_mac, 0, sizeof (null_mac));
+
+ s = format (0, "SCRIPT: tap_connect ");
+ s = format (s, "tapname %s ", mp->tap_name);
+ if (mp->use_random_mac)
+ s = format (s, "random-mac ");
+
+ if (memcmp (mp->mac_address, null_mac, 6))
+ s = format (s, "mac %U ", format_ethernet_address, mp->mac_address);
+
+ FINISH;
+}
+
+static void *vl_api_tap_modify_t_print
+(vl_api_tap_modify_t * mp, void *handle)
+{
+ u8 * s;
+ u8 null_mac[6];
+
+ memset(null_mac, 0, sizeof (null_mac));
+
+ s = format (0, "SCRIPT: tap_modify ");
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+ s = format (s, "tapname %s ", mp->tap_name);
+ if (mp->use_random_mac)
+ s = format (s, "random-mac ");
+
+ if (memcmp (mp->mac_address, null_mac, 6))
+ s = format (s, "mac %U ", format_ethernet_address, mp->mac_address);
+
+ FINISH;
+}
+
+static void *vl_api_tap_delete_t_print
+(vl_api_tap_delete_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: tap_delete ");
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_tap_dump_t_print
+(vl_api_sw_interface_tap_dump_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: sw_interface_tap_dump ");
+
+ FINISH;
+}
+
+
+static void *vl_api_ip_add_del_route_t_print
+(vl_api_ip_add_del_route_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: ip_add_del_route ");
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ if (mp->next_hop_sw_if_index)
+ s = format (s, "sw_if_index %d ", ntohl(mp->next_hop_sw_if_index));
+
+ if (mp->is_ipv6)
+ s = format (s, "%U/%d ", format_ip6_address, mp->dst_address,
+ mp->dst_address_length);
+ else
+ s = format (s, "%U/%d ", format_ip4_address, mp->dst_address,
+ mp->dst_address_length);
+ if (mp->is_local)
+ s = format (s, "local ");
+ else if (mp->is_drop)
+ s = format (s, "drop ");
+ else if (mp->is_classify)
+ s = format (s, "classify %d", ntohl (mp->classify_table_index));
+ else {
+ if (mp->is_ipv6)
+ s = format (s, "via %U ", format_ip6_address,
+ mp->next_hop_address);
+ else
+ s = format (s, "via %U ", format_ip4_address,
+ mp->next_hop_address);
+ }
+
+ if (mp->vrf_id != 0)
+ s = format (s, "vrf %d ", ntohl(mp->vrf_id));
+
+ if (mp->create_vrf_if_needed)
+ s = format (s, "create-vrf ");
+
+ if (mp->resolve_attempts != 0)
+ s = format (s, "resolve-attempts %d ", ntohl(mp->resolve_attempts));
+
+ if (mp->next_hop_weight != 1)
+ s = format (s, "weight %d ", mp->next_hop_weight);
+
+ if (mp->not_last)
+ s = format (s, "not-last ");
+
+ if (mp->is_multipath)
+ s = format (s, "multipath ");
+
+ if (mp->lookup_in_vrf)
+ s = format (s, "lookup-in-vrf %d ", ntohl (mp->lookup_in_vrf));
+
+ FINISH;
+}
+
+static void *vl_api_proxy_arp_add_del_t_print
+(vl_api_proxy_arp_add_del_t * mp, void * handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: proxy_arp_add_del ");
+
+ s = format (s, "%U - %U ", format_ip4_address, mp->low_address,
+ format_ip4_address, mp->hi_address);
+
+ if (mp->vrf_id)
+ s = format (s, "vrf %d ", ntohl(mp->vrf_id));
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_proxy_arp_intfc_enable_disable_t_print
+(vl_api_proxy_arp_intfc_enable_disable_t * mp, void * handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: proxy_arp_intfc_enable_disable ");
+
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+
+ s = format (s, "enable %d ", mp->enable_disable);
+
+ FINISH;
+}
+
+static void *vl_api_mpls_add_del_decap_t_print
+(vl_api_mpls_add_del_decap_t * mp, void * handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: mpls_add_del_decap ");
+
+ s = format (s, "rx_vrf_id %d ", ntohl(mp->rx_vrf_id));
+
+ s = format (s, "tx_vrf_id %d ", ntohl(mp->tx_vrf_id));
+
+ s = format (s, "label %d ", ntohl(mp->label));
+
+ s = format (s, "next-index %d ", ntohl(mp->next_index));
+
+ if (mp->s_bit == 0)
+ s = format (s, "s-bit-clear ");
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_mpls_add_del_encap_t_print
+(vl_api_mpls_add_del_encap_t * mp, void * handle)
+{
+ u8 * s;
+ int i;
+
+ s = format (0, "SCRIPT: mpls_add_del_encap ");
+
+ s = format (s, "vrf_id %d ", ntohl(mp->vrf_id));
+
+ s = format (s, "dst %U ", format_ip4_address, mp->dst_address);
+
+ for (i = 0; i < mp->nlabels; i++)
+ s = format (s, "label %d ", ntohl(mp->labels[i]));
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_mpls_gre_add_del_tunnel_t_print
+(vl_api_mpls_gre_add_del_tunnel_t * mp, void * handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: mpls_gre_add_del_tunnel ");
+
+ s = format (s, "src %U ", format_ip4_address, mp->src_address);
+
+ s = format (s, "dst %U ", format_ip4_address, mp->dst_address);
+
+ s = format (s, "adj %U/%d ", format_ip4_address,
+ (ip4_address_t *) mp->intfc_address, mp->intfc_address_length);
+
+ s = format (s, "inner-vrf_id %d ", ntohl(mp->inner_vrf_id));
+
+ s = format (s, "outer-vrf_id %d ", ntohl(mp->outer_vrf_id));
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ if (mp->l2_only)
+ s = format (s, "l2-only ");
+
+ FINISH;
+}
+
+static void *vl_api_mpls_ethernet_add_del_tunnel_t_print
+(vl_api_mpls_ethernet_add_del_tunnel_t * mp, void * handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: mpls_ethernet_add_del_tunnel ");
+
+ s = format (s, "tx_sw_if_index %d ", ntohl(mp->tx_sw_if_index));
+
+ s = format (s, "dst %U", format_ethernet_address, mp->dst_mac_address);
+
+ s = format (s, "adj %U/%d ", format_ip4_address,
+ (ip4_address_t *) mp->adj_address, mp->adj_address_length);
+
+ s = format (s, "vrf_id %d ", ntohl(mp->vrf_id));
+
+ if (mp->l2_only)
+ s = format (s, "l2-only ");
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_mpls_ethernet_add_del_tunnel_2_t_print
+(vl_api_mpls_ethernet_add_del_tunnel_2_t * mp, void * handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: mpls_ethernet_add_del_tunnel_2 ");
+
+ s = format (s, "adj %U/%d ", format_ip4_address,
+ (ip4_address_t *) mp->adj_address, mp->adj_address_length);
+
+ s = format (s, "next-hop %U ", format_ip4_address,
+ (ip4_address_t *) mp->next_hop_ip4_address_in_outer_vrf);
+
+ s = format (s, "inner_vrf_id %d ", ntohl(mp->inner_vrf_id));
+
+ s = format (s, "outer_vrf_id %d ", ntohl(mp->outer_vrf_id));
+
+ s = format (s, "resolve-if-needed %d ", mp->resolve_if_needed);
+
+ s = format (s, "resolve-attempts %d ", ntohl(mp->resolve_attempts));
+
+ if (mp->l2_only)
+ s = format (s, "l2-only ");
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_set_unnumbered_t_print
+(vl_api_sw_interface_set_unnumbered_t * mp, void * handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: sw_interface_set_unnumbered ");
+
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+
+ s = format (s, "unnum_if_index %d ", ntohl(mp->unnumbered_sw_if_index));
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_ip_neighbor_add_del_t_print
+(vl_api_ip_neighbor_add_del_t * mp, void *handle)
+{
+ u8 * s;
+ u8 null_mac[6];
+
+ memset(null_mac, 0, sizeof (null_mac));
+
+ s = format (0, "SCRIPT: ip_neighbor_add_del ");
+
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+
+ if (mp->is_static)
+ s = format (s, "is_static ");
+
+ s = format (s, "vrf_id %d ", ntohl(mp->vrf_id));
+
+ if (memcmp (mp->mac_address, null_mac, 6))
+ s = format (s, "mac %U ", format_ethernet_address, mp->mac_address);
+
+ if (mp->is_ipv6)
+ s = format (s, "dst %U ", format_ip6_address, (ip6_address_t *) mp->dst_address);
+ else
+ s = format (s, "dst %U ", format_ip4_address, (ip4_address_t *) mp->dst_address);
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_reset_vrf_t_print
+(vl_api_reset_vrf_t * mp, void * handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: reset_vrf ");
+
+ if (mp->vrf_id)
+ s = format (s, "vrf %d ", ntohl(mp->vrf_id));
+
+ if (mp->is_ipv6 != 0)
+ s = format (s, "ipv6 ");
+
+ FINISH;
+}
+
+static void *vl_api_create_vlan_subif_t_print
+(vl_api_create_vlan_subif_t * mp, void * handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: create_vlan_subif ");
+
+ if (mp->sw_if_index)
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+
+ if (mp->vlan_id)
+ s = format (s, "vlan_id %d ", ntohl(mp->vlan_id));
+
+ FINISH;
+}
+
+#define foreach_create_subif_bit \
+_(no_tags) \
+_(one_tag) \
+_(two_tags) \
+_(dot1ad) \
+_(exact_match) \
+_(default_sub) \
+_(outer_vlan_id_any) \
+_(inner_vlan_id_any)
+
+static void *vl_api_create_subif_t_print
+(vl_api_create_subif_t * mp, void * handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: create_subif ");
+
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+
+ s = format (s, "sub_id %d ", ntohl(mp->sub_id));
+
+ if (mp->outer_vlan_id)
+ s = format (s, "outer_vlan_id %d ", ntohs (mp->outer_vlan_id));
+
+  if (mp->inner_vlan_id)
+ s = format (s, "inner_vlan_id %d ", ntohs (mp->inner_vlan_id));
+
+#define _(a) if (mp->a) s = format (s, "%s ", #a);
+ foreach_create_subif_bit;
+#undef _
+
+ FINISH;
+}
+
+static void *vl_api_oam_add_del_t_print
+(vl_api_oam_add_del_t * mp, void * handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: oam_add_del ");
+
+ if (mp->vrf_id)
+ s = format (s, "vrf %d ", ntohl(mp->vrf_id));
+
+ s = format (s, "src %U ", format_ip4_address, mp->src_address);
+
+ s = format (s, "dst %U ", format_ip4_address, mp->dst_address);
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_reset_fib_t_print
+(vl_api_reset_fib_t * mp, void * handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: reset_fib ");
+
+ if (mp->vrf_id)
+ s = format (s, "vrf %d ", ntohl(mp->vrf_id));
+
+ if (mp->is_ipv6 != 0)
+ s = format (s, "ipv6 ");
+
+ FINISH;
+}
+
+static void *vl_api_dhcp_proxy_config_t_print
+(vl_api_dhcp_proxy_config_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: dhcp_proxy_config ");
+
+ s = format (s, "vrf_id %d ", ntohl(mp->vrf_id));
+
+ if (mp->is_ipv6) {
+ s = format (s, "svr %U ", format_ip6_address,
+ (ip6_address_t *) mp->dhcp_server);
+ s = format (s, "src %U ", format_ip6_address,
+ (ip6_address_t *) mp->dhcp_src_address);
+ } else {
+ s = format (s, "svr %U ", format_ip4_address,
+ (ip4_address_t *) mp->dhcp_server);
+ s = format (s, "src %U ", format_ip4_address,
+ (ip4_address_t *) mp->dhcp_src_address);
+ }
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ s = format (s, "insert-cid %d ", mp->insert_circuit_id);
+
+ FINISH;
+}
+
+static void *vl_api_dhcp_proxy_config_2_t_print
+(vl_api_dhcp_proxy_config_2_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: dhcp_proxy_config_2 ");
+
+ s = format (s, "rx_vrf_id %d ", ntohl(mp->rx_vrf_id));
+ s = format (s, "server_vrf_id %d ", ntohl(mp->server_vrf_id));
+
+ if (mp->is_ipv6) {
+ s = format (s, "svr %U ", format_ip6_address,
+ (ip6_address_t *) mp->dhcp_server);
+ s = format (s, "src %U ", format_ip6_address,
+ (ip6_address_t *) mp->dhcp_src_address);
+ } else {
+ s = format (s, "svr %U ", format_ip4_address,
+ (ip4_address_t *) mp->dhcp_server);
+ s = format (s, "src %U ", format_ip4_address,
+ (ip4_address_t *) mp->dhcp_src_address);
+ }
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ s = format (s, "insert-cid %d ", mp->insert_circuit_id);
+
+ FINISH;
+}
+
+static void *vl_api_dhcp_proxy_set_vss_t_print
+(vl_api_dhcp_proxy_set_vss_t * mp, void * handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: dhcp_proxy_set_vss ");
+
+ s = format (s, "tbl_id %d ", ntohl(mp->tbl_id));
+
+ s = format (s, "fib_id %d ", ntohl(mp->fib_id));
+
+ s = format (s, "oui %d ", ntohl(mp->oui));
+
+ if (mp->is_ipv6 != 0)
+ s = format (s, "ipv6 ");
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_dhcp_client_config_t_print
+(vl_api_dhcp_client_config_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: dhcp_client_config ");
+
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+
+ s = format (s, "hostname %s ", mp->hostname);
+
+ s = format (s, "want_dhcp_event %d ", mp->want_dhcp_event);
+
+ s = format (s, "pid %d ", mp->pid);
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+
+static void *vl_api_set_ip_flow_hash_t_print
+(vl_api_set_ip_flow_hash_t * mp, void * handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: set_ip_flow_hash ");
+
+ s = format (s, "vrf_id %d ", ntohl(mp->vrf_id));
+
+ if (mp->src)
+ s = format (s, "src ");
+
+ if (mp->dst)
+ s = format (s, "dst ");
+
+ if (mp->sport)
+ s = format (s, "sport ");
+
+ if (mp->dport)
+ s = format (s, "dport ");
+
+ if (mp->proto)
+ s = format (s, "proto ");
+
+ if (mp->reverse)
+ s = format (s, "reverse ");
+
+ if (mp->is_ipv6 != 0)
+ s = format (s, "ipv6 ");
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_ip6_set_link_local_address_t_print
+(vl_api_sw_interface_ip6_set_link_local_address_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: sw_interface_ip6_set_link_local_address ");
+
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+
+ s = format (s, "%U/%d ", format_ip6_address, mp->address,
+ mp->address_length);
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_ip6nd_ra_prefix_t_print
+(vl_api_sw_interface_ip6nd_ra_prefix_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: sw_interface_ip6nd_ra_prefix ");
+
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+
+ s = format (s, "%U/%d ", format_ip6_address, mp->address,
+ mp->address_length);
+
+ s = format (s, "val_life %d ", ntohl(mp->val_lifetime));
+
+ s = format (s, "pref_life %d ", ntohl(mp->pref_lifetime));
+
+ if (mp->use_default)
+ s = format (s, "def ");
+
+ if (mp->no_advertise)
+ s = format (s, "noadv ");
+
+ if (mp->off_link)
+ s = format (s, "offl ");
+
+ if (mp->no_autoconfig)
+ s = format (s, "noauto ");
+
+ if (mp->no_onlink)
+ s = format (s, "nolink ");
+
+ if (mp->is_no)
+ s = format (s, "isno ");
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_ip6nd_ra_config_t_print
+(vl_api_sw_interface_ip6nd_ra_config_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: sw_interface_ip6nd_ra_config ");
+
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+
+ s = format (s, "maxint %d ", ntohl(mp->max_interval));
+
+ s = format (s, "minint %d ", ntohl(mp->min_interval));
+
+ s = format (s, "life %d ", ntohl(mp->lifetime));
+
+ s = format (s, "count %d ", ntohl(mp->initial_count));
+
+ s = format (s, "interval %d ", ntohl(mp->initial_interval));
+
+ if (mp->surpress)
+ s = format (s, "surpress ");
+
+ if (mp->managed)
+ s = format (s, "managed ");
+
+ if (mp->other)
+ s = format (s, "other ");
+
+ if (mp->ll_option)
+ s = format (s, "ll ");
+
+ if (mp->send_unicast)
+ s = format (s, "send ");
+
+ if (mp->cease)
+ s = format (s, "cease ");
+
+ if (mp->is_no)
+ s = format (s, "isno ");
+
+ if (mp->default_router)
+ s = format (s, "def ");
+
+ FINISH;
+}
+
+static void *vl_api_set_arp_neighbor_limit_t_print
+(vl_api_set_arp_neighbor_limit_t * mp, void * handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: set_arp_neighbor_limit ");
+
+ s = format (s, "arp_nbr_limit %d ", ntohl(mp->arp_neighbor_limit));
+
+ if (mp->is_ipv6 != 0)
+ s = format (s, "ipv6 ");
+
+ FINISH;
+}
+
+static void *vl_api_l2_patch_add_del_t_print
+(vl_api_l2_patch_add_del_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: l2_patch_add_del ");
+
+ s = format (s, "rx_sw_if_index %d ", ntohl(mp->rx_sw_if_index));
+
+ s = format (s, "tx_sw_if_index %d ", ntohl(mp->tx_sw_if_index));
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_sr_tunnel_add_del_t_print
+(vl_api_sr_tunnel_add_del_t * mp, void *handle)
+{
+ u8 * s;
+ ip6_address_t * this_address;
+ int i;
+ u16 flags_host_byte_order;
+ u8 pl_flag;
+
+ s = format (0, "SCRIPT: sr_tunnel_add_del ");
+
+ if (mp->name[0])
+ s = format (s, "name %s ", mp->name);
+
+ s = format (s, "src %U dst %U/%d ", format_ip6_address,
+ (ip6_address_t *) mp->src_address,
+ format_ip6_address,
+ (ip6_address_t *) mp->dst_address, mp->dst_mask_width);
+
+ this_address = (ip6_address_t *)mp->segs_and_tags;
+ for (i = 0; i < mp->n_segments; i++) {
+ s = format (s, "next %U ", format_ip6_address, this_address);
+ this_address++;
+ }
+ for (i = 0; i < mp->n_tags; i++) {
+ s = format (s, "tag %U ", format_ip6_address, this_address);
+ this_address++;
+ }
+
+ flags_host_byte_order = clib_net_to_host_u16 (mp->flags_net_byte_order);
+
+ if (flags_host_byte_order & IP6_SR_HEADER_FLAG_CLEANUP)
+ s = format (s, " clean ");
+
+ if (flags_host_byte_order & IP6_SR_HEADER_FLAG_PROTECTED)
+ s = format (s, "protected ");
+
+ for (i = 1; i <= 4; i++) {
+ pl_flag = ip6_sr_policy_list_flags (flags_host_byte_order, i);
+
+ switch (pl_flag) {
+ case IP6_SR_HEADER_FLAG_PL_ELT_NOT_PRESENT:
+ continue;
+
+ case IP6_SR_HEADER_FLAG_PL_ELT_INGRESS_PE:
+ s = format (s, "InPE %d ", i);
+ break;
+
+ case IP6_SR_HEADER_FLAG_PL_ELT_EGRESS_PE:
+ s = format (s, "EgPE %d ", i);
+ break;
+
+ case IP6_SR_HEADER_FLAG_PL_ELT_ORIG_SRC_ADDR:
+ s = format (s, "OrgSrc %d ", i);
+ break;
+
+ default:
+ clib_warning ("BUG: pl elt %d value %d", i, pl_flag);
+ break;
+ }
+ }
+
+ if (mp->policy_name[0])
+ s = format (s, "policy_name %s ", mp->policy_name);
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_sr_policy_add_del_t_print
+(vl_api_sr_policy_add_del_t * mp, void *handle)
+{
+ u8 * s;
+ int i;
+
+ s = format (0, "SCRIPT: sr_policy_add_del ");
+
+ if (mp->name[0])
+ s = format (s, "name %s ", mp->name);
+
+ if (mp->tunnel_names[0])
+ {
+      /*
+       * tunnel_names wire format, as consumed here:
+       *   u8 count; then count x { u8 len; u8 name[len]; }
+       */
+      int num_tunnels = mp->tunnel_names[0];
+      u8 * deser_tun_names = mp->tunnel_names;
+      deser_tun_names += 1; /* skip the count byte */
+
+      u8 * tun_name = 0;
+      int tun_name_len = 0;
+
+      for (i = 0; i < num_tunnels; i++)
+      {
+        tun_name_len = *deser_tun_names;
+        deser_tun_names += 1;
+        vec_resize (tun_name, tun_name_len);
+        memcpy (tun_name, deser_tun_names, tun_name_len);
+        vec_add1 (tun_name, 0); /* NUL-terminate: "%s" expects a C string */
+        s = format (s, "tunnel %s ", tun_name);
+        deser_tun_names += tun_name_len;
+        vec_free (tun_name); /* was `tun_name = 0', which leaked the vector */
+      }
+ }
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_sr_multicast_map_add_del_t_print
+(vl_api_sr_multicast_map_add_del_t * mp, void *handle)
+{
+
+ u8 * s = 0;
+
+ s = format (0, "SCRIPT: sr_multicast_map_add_del ");
+
+ if (mp->multicast_address[0])
+ s = format (s, "address %U ", format_ip6_address, &mp->multicast_address);
+
+ if (mp->policy_name[0])
+ s = format (s, "sr-policy %s ", &mp->policy_name);
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+
+static void *vl_api_classify_add_del_table_t_print
+(vl_api_classify_add_del_table_t * mp, void *handle)
+{
+ u8 * s;
+ int i;
+
+ s = format (0, "SCRIPT: classify_add_del_table ");
+
+ if (mp->is_add == 0) {
+ s = format (s, "table %d ", ntohl(mp->table_index));
+ s = format (s, "del ");
+ } else {
+ s = format (s, "nbuckets %d ", ntohl(mp->nbuckets));
+ s = format (s, "memory_size %d ", ntohl(mp->memory_size));
+ s = format (s, "skip %d ", ntohl(mp->skip_n_vectors));
+ s = format (s, "match %d ", ntohl(mp->match_n_vectors));
+ s = format (s, "next-table %d ", ntohl(mp->next_table_index));
+ s = format (s, "miss-next %d ", ntohl(mp->miss_next_index));
+ s = format (s, "mask hex ");
+ for (i = 0; i < ntohl(mp->match_n_vectors) * sizeof (u32x4); i++)
+ s = format (s, "%02x", mp->mask[i]);
+ vec_add1 (s, ' ');
+ }
+
+ FINISH;
+}
+
+static void *vl_api_classify_add_del_session_t_print
+(vl_api_classify_add_del_session_t * mp, void *handle)
+{
+ u8 * s;
+ int i, limit=0;
+
+ s = format (0, "SCRIPT: classify_add_del_session ");
+
+ s = format (s, "table_index %d ", ntohl (mp->table_index));
+ s = format (s, "hit_next_index %d ", ntohl (mp->hit_next_index));
+ s = format (s, "opaque_index %d ", ntohl (mp->opaque_index));
+ s = format (s, "advance %d ", ntohl (mp->advance));
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ s = format (s, "match hex ");
+  for (i = 5 * sizeof(u32x4)-1; i >= 0; i--) { /* include match[0] */
+ if (mp->match[i] != 0) {
+ limit = i + 1;
+ break;
+ }
+ }
+
+ for (i = 0; i < limit; i++)
+ s = format (s, "%02x", mp->match[i]);
+
+ FINISH;
+}
+
+static void *vl_api_classify_set_interface_ip_table_t_print
+(vl_api_classify_set_interface_ip_table_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: classify_set_interface_ip_table ");
+
+ if (mp->is_ipv6)
+ s = format (s, "ipv6 ");
+
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+ s = format (s, "table %d ", ntohl(mp->table_index));
+
+ FINISH;
+}
+
+static void *vl_api_classify_set_interface_l2_tables_t_print
+(vl_api_classify_set_interface_l2_tables_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: classify_set_interface_l2_tables ");
+
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+ s = format (s, "ip4-table %d ", ntohl(mp->ip4_table_index));
+ s = format (s, "ip6-table %d ", ntohl(mp->ip6_table_index));
+ s = format (s, "other-table %d ", ntohl(mp->other_table_index));
+
+ FINISH;
+}
+
+static void *vl_api_add_node_next_t_print
+(vl_api_add_node_next_t * mp, void *handle)
+{
+ u8 * s;
+
+  s = format (0, "SCRIPT: add_node_next ");
+
+  s = format (s, "node %s next %s ", mp->node_name, mp->next_name);
+
+ FINISH;
+}
+
+static void *vl_api_l2tpv3_create_tunnel_t_print
+(vl_api_l2tpv3_create_tunnel_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: l2tpv3_create_tunnel ");
+
+ s = format (s, "client_address %U our_address %U ",
+ format_ip6_address, (ip6_address_t *)(mp->client_address),
+ format_ip6_address, (ip6_address_t *)(mp->our_address));
+ s = format (s, "local_session_id %d ", ntohl(mp->local_session_id));
+ s = format (s, "remote_session_id %d ", ntohl(mp->remote_session_id));
+ s = format (s, "local_cookie %lld ",
+ clib_net_to_host_u64 (mp->local_cookie));
+ s = format (s, "remote_cookie %lld ",
+ clib_net_to_host_u64 (mp->remote_cookie));
+ if (mp->l2_sublayer_present)
+ s = format (s, "l2-sublayer-present ");
+
+ FINISH;
+}
+
+static void *vl_api_l2tpv3_set_tunnel_cookies_t_print
+(vl_api_l2tpv3_set_tunnel_cookies_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: l2tpv3_set_tunnel_cookies ");
+
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+
+ s = format (s, "new_local_cookie %llu ",
+ clib_net_to_host_u64 (mp->new_local_cookie));
+
+ s = format (s, "new_remote_cookie %llu ",
+ clib_net_to_host_u64 (mp->new_remote_cookie));
+
+ FINISH;
+}
+
+static void *vl_api_l2tpv3_interface_enable_disable_t_print
+(vl_api_l2tpv3_interface_enable_disable_t *mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: l2tpv3_interface_enable_disable ");
+
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+
+ if (mp->enable_disable == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void * vl_api_l2tpv3_set_lookup_key_t_print
+(vl_api_l2tpv3_set_lookup_key_t * mp, void *handle)
+{
+ u8 * s;
+ char * str = "unknown";
+
+ s = format (0, "SCRIPT: l2tpv3_set_lookup_key ");
+
+ switch (mp->key) {
+ case L2T_LOOKUP_SRC_ADDRESS:
+ str = "lookup_v6_src";
+ break;
+ case L2T_LOOKUP_DST_ADDRESS:
+ str = "lookup_v6_dst";
+ break;
+ case L2T_LOOKUP_SESSION_ID:
+ str = "lookup_session_id";
+ break;
+ default:
+ break;
+ }
+
+ s = format (s, "%s ", str);
+
+ FINISH;
+}
+
+static void * vl_api_sw_if_l2tpv3_tunnel_dump_t_print
+(vl_api_sw_if_l2tpv3_tunnel_dump_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: sw_if_l2tpv3_tunnel_dump ");
+
+ FINISH;
+}
+
+static void * vl_api_vxlan_add_del_tunnel_t_print
+(vl_api_vxlan_add_del_tunnel_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: vxlan_add_del_tunnel ");
+
+ s = format (s, "dst %U ", format_ip46_address,
+ (ip46_address_t *)&(mp->dst_address),
+ mp->is_ipv6 ? IP46_TYPE_IP6 : IP46_TYPE_IP4);
+
+ s = format (s, "src %U ", format_ip46_address,
+ (ip46_address_t *)&(mp->src_address),
+ mp->is_ipv6 ? IP46_TYPE_IP6 : IP46_TYPE_IP4);
+
+ if (mp->encap_vrf_id)
+ s = format (s, "encap-vrf-id %d ", ntohl(mp->encap_vrf_id));
+
+ s = format (s, "decap-next %d ", ntohl(mp->decap_next_index));
+
+ s = format (s, "vni %d ", ntohl(mp->vni));
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void * vl_api_vxlan_tunnel_dump_t_print
+(vl_api_vxlan_tunnel_dump_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: vxlan_tunnel_dump ");
+
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+
+ FINISH;
+}
+
+static void * vl_api_gre_add_del_tunnel_t_print
+(vl_api_gre_add_del_tunnel_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: gre_add_del_tunnel ");
+
+ s = format (s, "dst %U ", format_ip4_address,
+ (ip4_address_t *)&(mp->dst_address));
+
+ s = format (s, "src %U ", format_ip4_address,
+ (ip4_address_t *)&(mp->src_address));
+
+ if (mp->outer_table_id)
+ s = format (s, "outer-fib-id %d ", ntohl(mp->outer_table_id));
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void * vl_api_gre_tunnel_dump_t_print
+(vl_api_gre_tunnel_dump_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: gre_tunnel_dump ");
+
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+
+ FINISH;
+}
+
+static void *vl_api_l2_fib_clear_table_t_print
+(vl_api_l2_fib_clear_table_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: l2_fib_clear_table ");
+
+ FINISH;
+}
+
+static void *vl_api_l2_interface_efp_filter_t_print
+(vl_api_l2_interface_efp_filter_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: l2_interface_efp_filter ");
+
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+ if (mp->enable_disable)
+ s = format (s, "enable ");
+ else
+ s = format (s, "disable ");
+
+ FINISH;
+}
+
+static void *vl_api_l2_interface_vlan_tag_rewrite_t_print
+(vl_api_l2_interface_vlan_tag_rewrite_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: l2_interface_vlan_tag_rewrite ");
+
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+ s = format (s, "vtr_op %d ", ntohl(mp->vtr_op));
+ s = format (s, "push_dot1q %d ", ntohl(mp->push_dot1q));
+ s = format (s, "tag1 %d ", ntohl(mp->tag1));
+ s = format (s, "tag2 %d ", ntohl(mp->tag2));
+
+ FINISH;
+}
+
+static void *vl_api_create_vhost_user_if_t_print
+(vl_api_create_vhost_user_if_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: create_vhost_user_if ");
+
+ s = format (s, "socket %s ", mp->sock_filename);
+ if (mp->is_server)
+ s = format (s, "server ");
+ if (mp->renumber)
+ s = format (s, "renumber %d ", ntohl(mp->custom_dev_instance));
+
+ FINISH;
+}
+
+static void *vl_api_modify_vhost_user_if_t_print
+(vl_api_modify_vhost_user_if_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: modify_vhost_user_if ");
+
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+ s = format (s, "socket %s ", mp->sock_filename);
+ if (mp->is_server)
+ s = format (s, "server ");
+ if (mp->renumber)
+ s = format (s, "renumber %d ", ntohl(mp->custom_dev_instance));
+
+ FINISH;
+}
+
+static void *vl_api_delete_vhost_user_if_t_print
+(vl_api_delete_vhost_user_if_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: delete_vhost_user_if ");
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_vhost_user_dump_t_print
+(vl_api_sw_interface_vhost_user_dump_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: sw_interface_vhost_user_dump ");
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_dump_t_print
+(vl_api_sw_interface_dump_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: sw_interface_dump ");
+
+ if (mp->name_filter_valid)
+ s = format (s, "name_filter %s ", mp->name_filter);
+ else
+ s = format (s, "all ");
+
+ FINISH;
+}
+
+static void *vl_api_l2_fib_table_dump_t_print
+(vl_api_l2_fib_table_dump_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: l2_fib_table_dump ");
+
+ s = format (s, "bd_id %d ", ntohl(mp->bd_id));
+
+ FINISH;
+}
+
+static void *vl_api_control_ping_t_print
+(vl_api_control_ping_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: control_ping ");
+
+ FINISH;
+}
+
+static void *vl_api_want_interface_events_t_print
+(vl_api_want_interface_events_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: want_interface_events pid %d enable %d ",
+ ntohl(mp->pid), ntohl(mp->enable_disable));
+
+ FINISH;
+}
+
+static void *vl_api_cli_request_t_print
+(vl_api_cli_request_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: cli_request ");
+
+ FINISH;
+}
+
+static void *vl_api_memclnt_create_t_print
+(vl_api_memclnt_create_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: memclnt_create name %s ", mp->name);
+
+ FINISH;
+}
+
+static void *vl_api_show_version_t_print
+(vl_api_show_version_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: show_version ");
+
+ FINISH;
+}
+
+static void *vl_api_vxlan_gpe_add_del_tunnel_t_print
+(vl_api_vxlan_gpe_add_del_tunnel_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: vxlan_gpe_add_del_tunnel ");
+
+ s = format (s, "local %U ", format_ip46_address, &mp->local, mp->is_ipv6);
+
+ s = format (s, "remote %U ", format_ip46_address, &mp->remote, mp->is_ipv6);
+
+ s = format (s, "protocol %d ", ntohl(mp->protocol));
+
+ s = format (s, "vni %d ", ntohl(mp->vni));
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ if (mp->encap_vrf_id)
+ s = format (s, "encap-vrf-id %d ", ntohl(mp->encap_vrf_id));
+
+ if (mp->decap_vrf_id)
+ s = format (s, "decap-vrf-id %d ", ntohl(mp->decap_vrf_id));
+
+ FINISH;
+}
+
+static void * vl_api_vxlan_gpe_tunnel_dump_t_print
+(vl_api_vxlan_gpe_tunnel_dump_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: vxlan_gpe_tunnel_dump ");
+
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+
+ FINISH;
+}
+
+static void *vl_api_interface_name_renumber_t_print
+(vl_api_interface_name_renumber_t * mp, void * handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: interface_renumber ");
+
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+
+ s = format (s, "new_show_dev_instance %d ",
+ ntohl(mp->new_show_dev_instance));
+
+ FINISH;
+}
+
+static void *vl_api_want_ip4_arp_events_t_print
+(vl_api_want_ip4_arp_events_t * mp, void * handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: want_ip4_arp_events ");
+ s = format (s, "pid %d address %U ", mp->pid,
+ format_ip4_address, &mp->address);
+ if (mp->enable_disable == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_input_acl_set_interface_t_print
+(vl_api_input_acl_set_interface_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: input_acl_set_interface ");
+
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+ s = format (s, "ip4-table %d ", ntohl(mp->ip4_table_index));
+ s = format (s, "ip6-table %d ", ntohl(mp->ip6_table_index));
+ s = format (s, "l2-table %d ", ntohl(mp->l2_table_index));
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void * vl_api_ip_address_dump_t_print
+(vl_api_ip_address_dump_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: ip6_address_dump ");
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+ s = format (s, "is_ipv6 %d ", mp->is_ipv6 != 0);
+
+ FINISH;
+}
+
+static void * vl_api_ip_dump_t_print
+(vl_api_ip_dump_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: ip_dump ");
+ s = format (s, "is_ipv6 %d ", mp->is_ipv6 != 0);
+
+ FINISH;
+}
+
+static void * vl_api_cop_interface_enable_disable_t_print
+(vl_api_cop_interface_enable_disable_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: cop_interface_enable_disable ");
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+ if (mp->enable_disable)
+ s = format (s, "enable ");
+ else
+ s = format (s, "disable ");
+
+ FINISH;
+}
+
+static void * vl_api_cop_whitelist_enable_disable_t_print
+(vl_api_cop_whitelist_enable_disable_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: cop_whitelist_enable_disable ");
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+ s = format (s, "fib-id %d ", ntohl(mp->fib_id));
+ if (mp->ip4)
+ s = format (s, "ip4 ");
+ if (mp->ip6)
+ s = format (s, "ip6 ");
+ if (mp->default_cop)
+ s = format (s, "default ");
+
+ FINISH;
+}
+
+static void * vl_api_af_packet_create_t_print
+(vl_api_af_packet_create_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: af_packet_create ");
+ s = format (s, "host_if_name %s ", mp->host_if_name);
+ if (mp->use_random_hw_addr)
+ s = format (s, "hw_addr random ");
+ else
+ s = format (s, "hw_addr %U ", format_ethernet_address, mp->hw_addr);
+
+ FINISH;
+}
+
+static void * vl_api_af_packet_delete_t_print
+(vl_api_af_packet_delete_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: af_packet_delete ");
+ s = format (s, "host_if_name %s ", mp->host_if_name);
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_clear_stats_t_print
+(vl_api_sw_interface_clear_stats_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: sw_interface_clear_stats ");
+ if (mp->sw_if_index != ~0)
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+
+ FINISH;
+}
+
+static void *vl_api_mpls_gre_tunnel_dump_t_print
+(vl_api_mpls_gre_tunnel_dump_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: mpls_gre_tunnel_dump ");
+
+ s = format (s, "tunnel_index %d ", ntohl(mp->tunnel_index));
+
+ FINISH;
+}
+
+static void *vl_api_mpls_eth_tunnel_dump_t_print
+(vl_api_mpls_eth_tunnel_dump_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: mpls_eth_tunnel_dump ");
+
+ s = format (s, "tunnel_index %d ", ntohl(mp->tunnel_index));
+
+ FINISH;
+}
+
+static void *vl_api_mpls_fib_encap_dump_t_print
+(vl_api_mpls_fib_encap_dump_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: mpls_fib_encap_dump ");
+
+ FINISH;
+}
+
+static void *vl_api_mpls_fib_decap_dump_t_print
+(vl_api_mpls_fib_decap_dump_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: mpls_fib_decap_dump ");
+
+ FINISH;
+}
+
+static void *vl_api_classify_table_ids_t_print
+(vl_api_classify_table_ids_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: classify_table_ids ");
+
+ FINISH;
+}
+
+static void *vl_api_classify_table_by_interface_t_print
+(vl_api_classify_table_by_interface_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: classify_table_by_interface ");
+ if (mp->sw_if_index != ~0)
+ s = format (s, "sw_if_index %d ", ntohl(mp->sw_if_index));
+
+ FINISH;
+}
+
+static void *vl_api_classify_table_info_t_print
+(vl_api_classify_table_info_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: classify_table_info ");
+ if (mp->table_id != ~0)
+ s = format (s, "table_id %d ", ntohl(mp->table_id));
+
+ FINISH;
+}
+
+static void *vl_api_classify_session_dump_t_print
+(vl_api_classify_session_dump_t * mp, void *handle)
+{
+ u8 * s;
+
+ s = format (0, "SCRIPT: classify_session_dump ");
+ if (mp->table_id != ~0)
+ s = format (s, "table_id %d ", ntohl(mp->table_id));
+
+ FINISH;
+}
+
+#define foreach_custom_print_function \
+_(CREATE_LOOPBACK, create_loopback) \
+_(SW_INTERFACE_SET_FLAGS, sw_interface_set_flags) \
+_(SW_INTERFACE_ADD_DEL_ADDRESS, sw_interface_add_del_address) \
+_(SW_INTERFACE_SET_TABLE, sw_interface_set_table) \
+_(SW_INTERFACE_SET_VPATH, sw_interface_set_vpath) \
+_(TAP_CONNECT, tap_connect) \
+_(TAP_MODIFY, tap_modify) \
+_(TAP_DELETE, tap_delete) \
+_(SW_INTERFACE_TAP_DUMP, sw_interface_tap_dump) \
+_(IP_ADD_DEL_ROUTE, ip_add_del_route) \
+_(PROXY_ARP_ADD_DEL, proxy_arp_add_del) \
+_(PROXY_ARP_INTFC_ENABLE_DISABLE, proxy_arp_intfc_enable_disable) \
+_(MPLS_ADD_DEL_DECAP, mpls_add_del_decap) \
+_(MPLS_ADD_DEL_ENCAP, mpls_add_del_encap) \
+_(MPLS_GRE_ADD_DEL_TUNNEL, mpls_gre_add_del_tunnel) \
+_(MPLS_ETHERNET_ADD_DEL_TUNNEL, mpls_ethernet_add_del_tunnel) \
+_(MPLS_ETHERNET_ADD_DEL_TUNNEL_2, mpls_ethernet_add_del_tunnel_2) \
+_(SW_INTERFACE_SET_UNNUMBERED, sw_interface_set_unnumbered) \
+_(IP_NEIGHBOR_ADD_DEL, ip_neighbor_add_del) \
+_(RESET_VRF, reset_vrf) \
+_(CREATE_VLAN_SUBIF, create_vlan_subif) \
+_(CREATE_SUBIF, create_subif) \
+_(OAM_ADD_DEL, oam_add_del) \
+_(RESET_FIB, reset_fib) \
+_(DHCP_PROXY_CONFIG, dhcp_proxy_config) \
+_(DHCP_PROXY_SET_VSS, dhcp_proxy_set_vss) \
+_(SET_IP_FLOW_HASH, set_ip_flow_hash) \
+_(SW_INTERFACE_IP6_SET_LINK_LOCAL_ADDRESS, \
+ sw_interface_ip6_set_link_local_address) \
+_(SW_INTERFACE_IP6ND_RA_PREFIX, sw_interface_ip6nd_ra_prefix) \
+_(SW_INTERFACE_IP6ND_RA_CONFIG, sw_interface_ip6nd_ra_config) \
+_(SET_ARP_NEIGHBOR_LIMIT, set_arp_neighbor_limit) \
+_(L2_PATCH_ADD_DEL, l2_patch_add_del) \
+_(SR_TUNNEL_ADD_DEL, sr_tunnel_add_del) \
+_(SR_POLICY_ADD_DEL, sr_policy_add_del) \
+_(SR_MULTICAST_MAP_ADD_DEL, sr_multicast_map_add_del) \
+_(SW_INTERFACE_SET_L2_XCONNECT, sw_interface_set_l2_xconnect) \
+_(L2FIB_ADD_DEL, l2fib_add_del) \
+_(L2_FLAGS, l2_flags) \
+_(BRIDGE_FLAGS, bridge_flags) \
+_(CLASSIFY_ADD_DEL_TABLE, classify_add_del_table) \
+_(CLASSIFY_ADD_DEL_SESSION, classify_add_del_session) \
+_(SW_INTERFACE_SET_L2_BRIDGE, sw_interface_set_l2_bridge) \
+_(BRIDGE_DOMAIN_ADD_DEL, bridge_domain_add_del) \
+_(BRIDGE_DOMAIN_DUMP, bridge_domain_dump) \
+_(CLASSIFY_SET_INTERFACE_IP_TABLE, classify_set_interface_ip_table) \
+_(CLASSIFY_SET_INTERFACE_L2_TABLES, classify_set_interface_l2_tables) \
+_(ADD_NODE_NEXT, add_node_next) \
+_(DHCP_PROXY_CONFIG_2, dhcp_proxy_config_2) \
+_(DHCP_CLIENT_CONFIG, dhcp_client_config) \
+_(L2TPV3_CREATE_TUNNEL, l2tpv3_create_tunnel) \
+_(L2TPV3_SET_TUNNEL_COOKIES, l2tpv3_set_tunnel_cookies) \
+_(L2TPV3_INTERFACE_ENABLE_DISABLE, l2tpv3_interface_enable_disable) \
+_(L2TPV3_SET_LOOKUP_KEY, l2tpv3_set_lookup_key) \
+_(SW_IF_L2TPV3_TUNNEL_DUMP, sw_if_l2tpv3_tunnel_dump) \
+_(VXLAN_ADD_DEL_TUNNEL, vxlan_add_del_tunnel) \
+_(VXLAN_TUNNEL_DUMP, vxlan_tunnel_dump) \
+_(GRE_ADD_DEL_TUNNEL, gre_add_del_tunnel) \
+_(GRE_TUNNEL_DUMP, gre_tunnel_dump) \
+_(L2_FIB_CLEAR_TABLE, l2_fib_clear_table) \
+_(L2_INTERFACE_EFP_FILTER, l2_interface_efp_filter) \
+_(L2_INTERFACE_VLAN_TAG_REWRITE, l2_interface_vlan_tag_rewrite) \
+_(CREATE_VHOST_USER_IF, create_vhost_user_if) \
+_(MODIFY_VHOST_USER_IF, modify_vhost_user_if) \
+_(DELETE_VHOST_USER_IF, delete_vhost_user_if) \
+_(SW_INTERFACE_DUMP, sw_interface_dump) \
+_(CONTROL_PING, control_ping) \
+_(WANT_INTERFACE_EVENTS, want_interface_events) \
+_(CLI_REQUEST, cli_request) \
+_(MEMCLNT_CREATE, memclnt_create) \
+_(SW_INTERFACE_VHOST_USER_DUMP, sw_interface_vhost_user_dump) \
+_(SHOW_VERSION, show_version) \
+_(L2_FIB_TABLE_DUMP, l2_fib_table_dump) \
+_(VXLAN_GPE_ADD_DEL_TUNNEL, vxlan_gpe_add_del_tunnel) \
+_(VXLAN_GPE_TUNNEL_DUMP, vxlan_gpe_tunnel_dump) \
+_(INTERFACE_NAME_RENUMBER, interface_name_renumber) \
+_(WANT_IP4_ARP_EVENTS, want_ip4_arp_events) \
+_(INPUT_ACL_SET_INTERFACE, input_acl_set_interface) \
+_(IP_ADDRESS_DUMP, ip_address_dump) \
+_(IP_DUMP, ip_dump) \
+_(DELETE_LOOPBACK, delete_loopback) \
+_(BD_IP_MAC_ADD_DEL, bd_ip_mac_add_del) \
+_(COP_INTERFACE_ENABLE_DISABLE, cop_interface_enable_disable) \
+_(COP_WHITELIST_ENABLE_DISABLE, cop_whitelist_enable_disable) \
+_(AF_PACKET_CREATE, af_packet_create) \
+_(AF_PACKET_DELETE, af_packet_delete) \
+_(SW_INTERFACE_CLEAR_STATS, sw_interface_clear_stats) \
+_(MPLS_GRE_TUNNEL_DUMP, mpls_gre_tunnel_dump) \
+_(MPLS_ETH_TUNNEL_DUMP, mpls_eth_tunnel_dump) \
+_(MPLS_FIB_ENCAP_DUMP, mpls_fib_encap_dump) \
+_(MPLS_FIB_DECAP_DUMP, mpls_fib_decap_dump) \
+_(CLASSIFY_TABLE_IDS, classify_table_ids) \
+_(CLASSIFY_TABLE_BY_INTERFACE, classify_table_by_interface) \
+_(CLASSIFY_TABLE_INFO, classify_table_info) \
+_(CLASSIFY_SESSION_DUMP, classify_session_dump)
+
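+/* Install the custom print handlers: for each (MSG_ID, name) pair in
+ * foreach_custom_print_function, point the message print-handler table
+ * at vl_api_<name>_t_print. */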
+void vl_msg_api_custom_dump_configure (api_main_t *am)
+{
+#define _(n,f) am->msg_print_handlers[VL_API_##n] \
+ = (void *) vl_api_##f##_t_print;
+ foreach_custom_print_function;
+#undef _
+}
diff --git a/vpp/vpp-api/gmon.c b/vpp/vpp-api/gmon.c
new file mode 100644
index 00000000..9d37155f
--- /dev/null
+++ b/vpp/vpp-api/gmon.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <netinet/in.h>
+#include <signal.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <time.h>
+#include <fcntl.h>
+#include <string.h>
+#include <vppinfra/clib.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/bitmap.h>
+#include <vppinfra/fifo.h>
+#include <vppinfra/time.h>
+#include <vppinfra/mheap.h>
+#include <vppinfra/heap.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/format.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vnet/api_errno.h>
+
+#include <svmdb.h>
+
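+/* Publisher state: svmdb client plus direct pointers to the shared
+ * value cells (vector rate, input rate, error rate, vpp pid) and the
+ * bookkeeping needed to turn error counters into rates. */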
+typedef struct {
+ svmdb_client_t *svmdb_client;
+ f64 *vector_rate_ptr;
+ f64 *input_rate_ptr;
+ f64 *sig_error_rate_ptr;
+ pid_t *vpef_pid_ptr;
+ u64 last_sig_errors;
+ u64 current_sig_errors;
+ uword * sig_error_bitmap;
+ vlib_main_t *vlib_main;
+ vlib_main_t ** my_vlib_mains;
+
+} gmon_main_t;
+
+#if DPDK == 0
+static inline u64 vnet_get_aggregate_rx_packets (void)
+{ return 0; }
+#else
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/devices/dpdk/dpdk.h>
+#endif
+
+gmon_main_t gmon_main;
+
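+/* Sum the error counters selected in sig_error_bitmap across all
+ * vlib mains, net of any "clear counters" snapshot. */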
+static u64 get_significant_errors(gmon_main_t * gm)
+{
+ vlib_main_t * this_vlib_main;
+ vlib_error_main_t * em;
+ uword code;
+ int vm_index;
+ u64 significant_errors = 0;
+
+ clib_bitmap_foreach (code, gm->sig_error_bitmap,
+ ({
+ for (vm_index = 0; vm_index < vec_len (gm->my_vlib_mains); vm_index++)
+ {
+ this_vlib_main = gm->my_vlib_mains[vm_index];
+ em = &this_vlib_main->error_main;
+ significant_errors += em->counters[code] -
+ ((vec_len(em->counters_last_clear) > code) ?
+ em->counters_last_clear[code] : 0);
+ }
+ }));
+
+ return (significant_errors);
+}
+
+static clib_error_t *
+publish_pid (vlib_main_t *vm)
+{
+ gmon_main_t *gm = &gmon_main;
+
+ *gm->vpef_pid_ptr = getpid();
+
+ return 0;
+}
+VLIB_API_INIT_FUNCTION(publish_pid);
+
+
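+/* Process node: after letting the system settle, wake every 5 seconds,
+ * recompute the vector rate, aggregate rx packet rate, and significant
+ * error rate, and publish them through the svmdb value cells. */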
+static uword
+gmon_process (vlib_main_t * vm,
+ vlib_node_runtime_t * rt,
+ vlib_frame_t * f)
+{
+ f64 vector_rate;
+ u64 input_packets, last_input_packets, new_sig_errors;
+ f64 last_runtime, dt, now;
+ gmon_main_t *gm = &gmon_main;
+ int i;
+
+ last_runtime = 0.0;
+ last_input_packets = 0;
+
+ /* Initial wait for the world to settle down */
+ vlib_process_suspend (vm, 5.0);
+
+ if (vec_len(vlib_mains) == 0)
+ vec_add1(gm->my_vlib_mains, &vlib_global_main);
+ else
+ {
+ for (i = 0; i < vec_len(vlib_mains); i++)
+ vec_add1 (gm->my_vlib_mains, vlib_mains[i]);
+ }
+
+ while (1) {
+ vlib_process_suspend (vm, 5.0);
+ vector_rate = vlib_last_vector_length_per_node (vm);
+ *gm->vector_rate_ptr = vector_rate;
+ now = vlib_time_now(vm);
+ dt = now - last_runtime;
+ input_packets = vnet_get_aggregate_rx_packets();
+ *gm->input_rate_ptr = (f64)(input_packets - last_input_packets) / dt;
+ last_runtime = now;
+ last_input_packets = input_packets;
+
+ new_sig_errors = get_significant_errors(gm);
+ *gm->sig_error_rate_ptr =
+ ((f64)(new_sig_errors - gm->last_sig_errors)) / dt;
+ gm->last_sig_errors = new_sig_errors;
+ }
+
+ return 0; /* not so much */
+}
+
+VLIB_REGISTER_NODE (gmon_process_node,static) = {
+ .function = gmon_process,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "gmon-process",
+};
+
+static clib_error_t *
+gmon_init (vlib_main_t *vm)
+{
+ gmon_main_t *gm = &gmon_main;
+ api_main_t * am = &api_main;
+ pid_t *swp = 0;
+ f64 *v = 0;
+ clib_error_t * error;
+
+ if ((error = vlib_call_init_function(vm, vpe_api_init)))
+ return(error);
+
+ /* Make sure that /global-vm is owned as directed */
+ svm_region_init_chroot_uid_gid (am->root_path, am->api_uid, am->api_gid);
+
+ gm->vlib_main = vm;
+ gm->svmdb_client = svmdb_map_chroot(am->root_path);
+
+ /* Find or create, set to zero */
+ vec_add1 (v, 0.0);
+ svmdb_local_set_vec_variable(gm->svmdb_client,
+ "vpp_vector_rate",
+ (char *)v, sizeof (*v));
+ vec_free(v);
+ vec_add1 (v, 0.0);
+ svmdb_local_set_vec_variable(gm->svmdb_client,
+ "vpp_input_rate",
+ (char *)v, sizeof (*v));
+ vec_free(v);
+ vec_add1 (v, 0.0);
+ svmdb_local_set_vec_variable(gm->svmdb_client,
+ "vpp_sig_error_rate",
+ (char *)v, sizeof (*v));
+ vec_free(v);
+
+ vec_add1 (swp, 0);
+ svmdb_local_set_vec_variable(gm->svmdb_client,
+ "vpp_pid",
+ (char *)swp, sizeof (*swp));
+ vec_free(swp);
+
+ /* the value cells will never move, so acquire references to them */
+ gm->vector_rate_ptr =
+ svmdb_local_get_variable_reference (gm->svmdb_client,
+ SVMDB_NAMESPACE_VEC,
+ "vpp_vector_rate");
+ gm->input_rate_ptr =
+ svmdb_local_get_variable_reference (gm->svmdb_client,
+ SVMDB_NAMESPACE_VEC,
+ "vpp_input_rate");
+ gm->sig_error_rate_ptr =
+ svmdb_local_get_variable_reference (gm->svmdb_client,
+ SVMDB_NAMESPACE_VEC,
+ "vpp_sig_error_rate");
+ gm->vpef_pid_ptr =
+ svmdb_local_get_variable_reference (gm->svmdb_client,
+ SVMDB_NAMESPACE_VEC,
+ "vpp_pid");
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (gmon_init);
+
+static clib_error_t *gmon_exit (vlib_main_t *vm)
+{
+ gmon_main_t *gm = &gmon_main;
+
+ if (gm->vector_rate_ptr) {
+ *gm->vector_rate_ptr = 0.0;
+ *gm->vpef_pid_ptr = 0;
+ *gm->input_rate_ptr = 0.0;
+ *gm->sig_error_rate_ptr = 0.0;
+ svm_region_unmap ((void *) gm->svmdb_client->db_rp);
+ vec_free(gm->svmdb_client);
+ }
+ return 0;
+}
+VLIB_MAIN_LOOP_EXIT_FUNCTION (gmon_exit);
+
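+/* Include (or exclude) one error counter index in the set of
+ * "significant" errors folded into the published error rate. */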
+static int
+significant_error_enable_disable (gmon_main_t * gm,
+ u32 index, int enable)
+{
+ vlib_main_t * vm = gm->vlib_main;
+ vlib_error_main_t * em = &vm->error_main;
+
+ if (index >= vec_len (em->counters))
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+
+ gm->sig_error_bitmap = clib_bitmap_set (gm->sig_error_bitmap, index, enable);
+ return 0;
+}
+
+static clib_error_t *
+set_significant_error_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ u32 index;
+ int enable = 1;
+ int rv;
+ gmon_main_t *gm = &gmon_main;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat (input, "%d", &index))
+ ;
+ else if (unformat (input, "disable"))
+ enable = 0;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+
+ rv = significant_error_enable_disable (gm, index, enable);
+
+ switch (rv)
+ {
+ case 0:
+ break;
+
+ default:
+ return clib_error_return
+ (0, "significant_error_enable_disable returned %d", rv);
+ }
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (set_significant_error_command, static) = {
+ .path = "set significant error",
+ .short_help = "set significant error <counter-index> [disable]",
+ .function = set_significant_error_command_fn,
+};
diff --git a/vpp/vpp-api/summary_stats_client.c b/vpp/vpp-api/summary_stats_client.c
new file mode 100644
index 00000000..97e9f9d1
--- /dev/null
+++ b/vpp/vpp-api/summary_stats_client.c
@@ -0,0 +1,279 @@
+/*
+ *------------------------------------------------------------------
+ * summary_stats_client -
+ *
+ * Copyright (c) 2010 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <netinet/in.h>
+#include <netdb.h>
+#include <signal.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <time.h>
+#include <fcntl.h>
+#include <string.h>
+#include <vppinfra/clib.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/bitmap.h>
+#include <vppinfra/fifo.h>
+#include <vppinfra/time.h>
+#include <vppinfra/mheap.h>
+#include <vppinfra/heap.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/format.h>
+#include <vppinfra/error.h>
+
+#include <vnet/vnet.h>
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+
+#include <vpp-api/vpe_msg_enum.h>
+
+#include <vnet/ip/ip.h>
+
+#define f64_endian(a)
+#define f64_print(a,b)
+
+#define vl_typedefs /* define message structures */
+#include <vpp-api/vpe_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define message structures */
+#include <vpp-api/vpe_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...)
+#define vl_printfun
+#include <vpp-api/vpe_all_api_h.h>
+#undef vl_printfun
+
+vl_shmem_hdr_t *shmem_hdr;
+
+typedef struct {
+ volatile int sigterm_received;
+
+ struct sockaddr_in send_data_addr;
+ int send_data_socket;
+ u8 * display_name;
+
+ /* convenience */
+ unix_shared_memory_queue_t * vl_input_queue;
+ u32 my_client_index;
+} test_main_t;
+
+test_main_t test_main;
+
+/*
+ * Satisfy external references when -lvlib is not available.
+ */
+void vlib_cli_output (struct vlib_main_t * vm, char * fmt, ...)
+{
+ clib_warning ("vlib_cli_output callled...");
+}
+
+
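+/* Print the periodic stats reply, then forward one CSV record
+ * ("display-name,vector-rate,rx-pkts,rx-bytes,tx-pkts,tx-bytes")
+ * to the UDP collector configured on the command line. */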
+static void
+vl_api_vnet_summary_stats_reply_t_handler (
+ vl_api_vnet_summary_stats_reply_t * mp)
+{
+ test_main_t * tm = &test_main;
+ static u8 *sb;
+ int n;
+
+ printf ("total rx pkts %llu, total rx bytes %llu\n",
+ (unsigned long long) mp->total_pkts[0],
+ (unsigned long long) mp->total_bytes[0]);
+ printf ("total tx pkts %llu, total tx bytes %llu\n",
+ (unsigned long long) mp->total_pkts[1],
+ (unsigned long long) mp->total_bytes[1]);
+ printf ("vector rate %.2f\n", mp->vector_rate);
+
+ vec_reset_length (sb);
+ sb = format (sb, "%v,%.0f,%llu,%llu,%llu,%llu\n%c",
+ tm->display_name, mp->vector_rate,
+ (unsigned long long) mp->total_pkts[0],
+ (unsigned long long) mp->total_bytes[0],
+ (unsigned long long) mp->total_pkts[1],
+ (unsigned long long) mp->total_bytes[1], 0);
+
+ n = sendto (tm->send_data_socket, sb, vec_len(sb),
+ 0, (struct sockaddr *)&tm->send_data_addr,
+ sizeof (tm->send_data_addr));
+
+ if (n != vec_len (sb))
+ clib_unix_warning ("sendto");
+
+}
+
+#define foreach_api_msg \
+_(VNET_SUMMARY_STATS_REPLY, vnet_summary_stats_reply)
+
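+/* Attach to the vpp shared-memory API segment and register a
+ * handler for each message in foreach_api_msg. */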
+int connect_to_vpe(char *name)
+{
+ int rv=0;
+
+ rv = vl_client_connect_to_vlib("/vpe-api", name, 32);
+
+#define _(N,n) \
+ vl_msg_api_set_handlers(VL_API_##N, #n, \
+ vl_api_##n##_t_handler, \
+ vl_noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_api_msg;
+#undef _
+
+ shmem_hdr = api_main.shmem_hdr;
+
+ return rv;
+}
+
+int disconnect_from_vpe(void)
+{
+ vl_client_disconnect_from_vlib();
+ return 0;
+}
+
+static void sigterm_handler (int sig)
+{
+ test_main_t *tm = &test_main;
+ tm->sigterm_received = 1;
+}
+
+/* Parse an IP4 address %d.%d.%d.%d. */
+uword unformat_ip4_address (unformat_input_t * input, va_list * args)
+{
+ u8 * result = va_arg (*args, u8 *);
+ unsigned a[4];
+
+ if (! unformat (input, "%d.%d.%d.%d", &a[0], &a[1], &a[2], &a[3]))
+ return 0;
+
+ if (a[0] >= 256 || a[1] >= 256 || a[2] >= 256 || a[3] >= 256)
+ return 0;
+
+ result[0] = a[0];
+ result[1] = a[1];
+ result[2] = a[2];
+ result[3] = a[3];
+
+ return 1;
+}
+
+int main (int argc, char ** argv)
+{
+ api_main_t * am = &api_main;
+ test_main_t * tm = &test_main;
+ vl_api_vnet_get_summary_stats_t * mp;
+ unformat_input_t _input, *input = &_input;
+ clib_error_t * error = 0;
+ ip4_address_t collector_ip;
+ u8 * display_name = 0;
+ u16 collector_port = 7654;
+
+ collector_ip.as_u32 = (u32)~0;
+
+ unformat_init_command_line (input, argv);
+
+ while (unformat_check_input(input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat (input, "collector-ip %U",
+ unformat_ip4_address, &collector_ip))
+ ;
+ else if (unformat (input, "display-name %v", &display_name))
+ ;
+ else if (unformat (input, "collector-port %d", &collector_port))
+ ;
+ else {
+ error =
+ clib_error_return
+ (0, "Usage: %s collector-ip <ip>\n"
+ " [display-name <string>] [collector-port <num>]\n"
+ " port defaults to 7654", argv[0]);
+ break;
+ }
+ }
+
+ if (error == 0 && collector_ip.as_u32 == (u32)~0)
+ error = clib_error_return (0, "collector-ip not set...\n");
+
+
+ if (error) {
+ clib_error_report (error);
+ exit (1);
+ }
+
+ if (display_name == 0) {
+ display_name = format (0, "vpe-to-%d.%d.%d.%d",
+ collector_ip.as_u8[0],
+ collector_ip.as_u8[1],
+ collector_ip.as_u8[2],
+ collector_ip.as_u8[3]);
+ }
+
+
+ connect_to_vpe("test_client");
+
+ tm->vl_input_queue = shmem_hdr->vl_input_queue;
+ tm->my_client_index = am->my_client_index;
+ tm->display_name = display_name;
+
+ signal(SIGTERM, sigterm_handler);
+ signal(SIGINT, sigterm_handler);
+ signal(SIGQUIT, sigterm_handler);
+
+ /* data TX socket: UDP datagrams toward the collector */
+ tm->send_data_socket = socket(PF_INET, SOCK_DGRAM, IPPROTO_UDP);
+ if (tm->send_data_socket < 0) {
+ clib_unix_warning ("send_data_socket");
+ exit (1);
+ }
+
+ memset(&tm->send_data_addr, 0, sizeof(tm->send_data_addr));
+ tm->send_data_addr.sin_family = AF_INET;
+ tm->send_data_addr.sin_addr.s_addr = collector_ip.as_u32;
+ tm->send_data_addr.sin_port = htons(collector_port);
+
+ fformat(stdout, "Send SIGINT or SIGTERM to quit...\n");
+
+ while (1) {
+ sleep (5);
+
+ if (tm->sigterm_received)
+ break;
+ /* Poll for stats */
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_VNET_GET_SUMMARY_STATS);
+ mp->client_index = tm->my_client_index;
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+ }
+
+ fformat(stdout, "Exiting...\n");
+
+ disconnect_from_vpe();
+ exit (0);
+}
diff --git a/vpp/vpp-api/test_client.c b/vpp/vpp-api/test_client.c
new file mode 100644
index 00000000..92cf4531
--- /dev/null
+++ b/vpp/vpp-api/test_client.c
@@ -0,0 +1,1536 @@
+/*
+ *------------------------------------------------------------------
+ * api.c - message handler registration
+ *
+ * Copyright (c) 2010 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <netinet/in.h>
+#include <signal.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <time.h>
+#include <fcntl.h>
+#include <string.h>
+#include <vppinfra/clib.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/bitmap.h>
+#include <vppinfra/fifo.h>
+#include <vppinfra/time.h>
+#include <vppinfra/mheap.h>
+#include <vppinfra/heap.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/format.h>
+#include <vppinfra/error.h>
+
+#include <vnet/vnet.h>
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+
+#include <vpp-api/vpe_msg_enum.h>
+
+#include <vnet/ip/ip.h>
+#include <vnet/interface.h>
+
+#define f64_endian(a)
+#define f64_print(a,b)
+
+#define vl_typedefs /* define message structures */
+#include <vpp-api/vpe_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define message structures */
+#include <vpp-api/vpe_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...)
+#define vl_printfun
+#include <vpp-api/vpe_all_api_h.h>
+#undef vl_printfun
+
+vl_shmem_hdr_t *shmem_hdr;
+
+typedef struct {
+ int link_events_on;
+ int stats_on;
+ int oam_events_on;
+
+ /* convenience */
+ unix_shared_memory_queue_t * vl_input_queue;
+ u32 my_client_index;
+} test_main_t;
+
+test_main_t test_main;
+
+/*
+ * Satisfy external references when -lvlib is not available.
+ */
+void vlib_cli_output (struct vlib_main_t * vm, char * fmt, ...)
+{
+ clib_warning ("vlib_cli_output callled...");
+}
+
+u8 * format_ethernet_address (u8 * s, va_list * args)
+{
+ u8 * a = va_arg (*args, u8 *);
+
+ return format (s, "%02x:%02x:%02x:%02x:%02x:%02x",
+ a[0], a[1], a[2], a[3], a[4], a[5]);
+}
+
+static void vl_api_sw_interface_details_t_handler (
+ vl_api_sw_interface_details_t * mp)
+{
+ char * duplex, * speed;
+
+ switch (mp->link_duplex << VNET_HW_INTERFACE_FLAG_DUPLEX_SHIFT)
+ {
+ case VNET_HW_INTERFACE_FLAG_HALF_DUPLEX:
+ duplex = "half";
+ break;
+ case VNET_HW_INTERFACE_FLAG_FULL_DUPLEX:
+ duplex = "full";
+ break;
+ default:
+ duplex = "bogus";
+ break;
+ }
+ switch (mp->link_speed << VNET_HW_INTERFACE_FLAG_SPEED_SHIFT)
+ {
+ case VNET_HW_INTERFACE_FLAG_SPEED_10M:
+ speed = "10Mbps";
+ break;
+ case VNET_HW_INTERFACE_FLAG_SPEED_100M:
+ speed = "100Mbps";
+ break;
+ case VNET_HW_INTERFACE_FLAG_SPEED_1G:
+ speed = "1Gbps";
+ break;
+ case VNET_HW_INTERFACE_FLAG_SPEED_10G:
+ speed = "10Gbps";
+ break;
+ case VNET_HW_INTERFACE_FLAG_SPEED_40G:
+ speed = "40Gbps";
+ break;
+ case VNET_HW_INTERFACE_FLAG_SPEED_100G:
+ speed = "100Gbps";
+ break;
+ default:
+ speed = "bogus";
+ break;
+ }
+ fformat(stdout, "details: %s sw_if_index %d sup_sw_if_index %d "
+ "link_duplex %s link_speed %s",
+ mp->interface_name, ntohl(mp->sw_if_index),
+ ntohl(mp->sup_sw_if_index), duplex, speed);
+
+ if (mp->l2_address_length)
+ fformat(stdout, " l2 address: %U\n",
+ format_ethernet_address, mp->l2_address);
+ else
+ fformat(stdout, "\n");
+}
+
+static void vl_api_sw_interface_set_flags_t_handler (
+ vl_api_sw_interface_set_flags_t * mp)
+{
+ fformat (stdout, "set flags: sw_if_index %d, admin %s link %s\n",
+ ntohl(mp->sw_if_index),
+ mp->admin_up_down ? "up" : "down",
+ mp->link_up_down ? "up" : "down");
+}
+
+static void vl_api_sw_interface_set_flags_reply_t_handler (
+ vl_api_sw_interface_set_flags_reply_t * mp)
+{
+ fformat (stdout, "set flags reply: reply %d\n", ntohl(mp->retval));
+}
+
+static void vl_api_want_interface_events_reply_t_handler (
+ vl_api_want_interface_events_reply_t *mp)
+{
+}
+
+static void vl_api_want_stats_reply_t_handler (
+ vl_api_want_stats_reply_t *mp)
+{
+ fformat (stdout, "want stats reply %d\n", ntohl(mp->retval));
+}
+
+static void vl_api_want_oam_events_reply_t_handler (
+ vl_api_want_oam_events_reply_t *mp)
+{
+ fformat (stdout, "want oam reply %d\n", ntohl(mp->retval));
+}
+
+static void vl_api_ip_add_del_route_reply_t_handler (
+ vl_api_ip_add_del_route_reply_t *mp)
+{
+ fformat (stdout, "add_route reply %d\n", ntohl(mp->retval));
+}
+
+static void vl_api_sw_interface_add_del_address_reply_t_handler (
+ vl_api_sw_interface_add_del_address_reply_t *mp)
+{
+ fformat (stdout, "add_del_address reply %d\n", ntohl(mp->retval));
+}
+
+static void vl_api_sw_interface_set_table_reply_t_handler (
+ vl_api_sw_interface_set_table_reply_t *mp)
+{
+ fformat (stdout, "set_table reply %d\n", ntohl(mp->retval));
+}
+
+static void vl_api_tap_connect_reply_t_handler (
+ vl_api_tap_connect_reply_t * mp)
+{
+ fformat (stdout, "tap connect reply %d, sw_if_index %d\n",
+ ntohl(mp->retval), ntohl(mp->sw_if_index));
+}
+
+static void vl_api_create_vlan_subif_reply_t_handler (
+ vl_api_create_vlan_subif_reply_t * mp)
+{
+ fformat (stdout, "create vlan subif reply %d, sw_if_index %d\n",
+ ntohl(mp->retval), ntohl(mp->sw_if_index));
+}
+
+static void vl_api_mpls_gre_add_del_tunnel_reply_t_handler (
+ vl_api_mpls_gre_add_del_tunnel_reply_t * mp)
+{
+ fformat (stdout, "add_del mpls gre tunnel reply %d\n", ntohl(mp->retval));
+}
+
+static void vl_api_mpls_add_del_encap_reply_t_handler (
+ vl_api_mpls_add_del_encap_reply_t *mp)
+{
+ fformat (stdout, "add del mpls label reply %d\n", ntohl(mp->retval));
+}
+
+static void vl_api_mpls_add_del_decap_reply_t_handler (
+ vl_api_mpls_add_del_decap_reply_t *mp)
+{
+ fformat (stdout, "add del mpls decap label reply %d\n", ntohl(mp->retval));
+}
+
+static void vl_api_proxy_arp_add_del_reply_t_handler
+(vl_api_proxy_arp_add_del_reply_t *mp)
+{
+ fformat (stdout, "add del proxy arp reply %d\n", ntohl(mp->retval));
+}
+
+static void vl_api_proxy_arp_intfc_enable_disable_reply_t_handler
+(vl_api_proxy_arp_intfc_enable_disable_reply_t *mp)
+{
+ fformat (stdout, "proxy arp intfc ena/dis reply %d\n", ntohl(mp->retval));
+}
+
+static void vl_api_ip_neighbor_add_del_reply_t_handler
+(vl_api_ip_neighbor_add_del_reply_t *mp)
+{
+ fformat (stdout, "ip neighbor add del reply %d\n", ntohl(mp->retval));
+}
+
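+/* Interface counters arrive either as simple u64 counts or as combined
+ * packet/byte pairs; values are in network byte order and may be
+ * unaligned, hence clib_mem_unaligned + clib_net_to_host_u64. */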
+static void vl_api_vnet_interface_counters_t_handler (
+ vl_api_vnet_interface_counters_t *mp)
+{
+ char *counter_name;
+ u32 count, sw_if_index;
+ int i;
+
+ count = ntohl (mp->count);
+ sw_if_index = ntohl (mp->first_sw_if_index);
+ if (mp->is_combined == 0) {
+ u64 * vp, v;
+ vp = (u64 *) mp->data;
+
+ switch (mp->vnet_counter_type) {
+ case VNET_INTERFACE_COUNTER_DROP:
+ counter_name = "drop";
+ break;
+ case VNET_INTERFACE_COUNTER_PUNT:
+ counter_name = "punt";
+ break;
+ case VNET_INTERFACE_COUNTER_IP4:
+ counter_name = "ip4";
+ break;
+ case VNET_INTERFACE_COUNTER_IP6:
+ counter_name = "ip6";
+ break;
+ case VNET_INTERFACE_COUNTER_RX_NO_BUF:
+ counter_name = "rx-no-buf";
+ break;
+ case VNET_INTERFACE_COUNTER_RX_MISS:
+ counter_name = "rx-miss";
+ break;
+ case VNET_INTERFACE_COUNTER_RX_ERROR:
+ counter_name = "rx-error";
+ break;
+ case VNET_INTERFACE_COUNTER_TX_ERROR:
+ counter_name = "tx-error (fifo-full)";
+ break;
+ default:
+ counter_name = "bogus";
+ break;
+ }
+ for (i = 0; i < count; i++) {
+ v = clib_mem_unaligned (vp, u64);
+ v = clib_net_to_host_u64 (v);
+ vp++;
+ fformat (stdout, "%d.%s %lld\n", sw_if_index, counter_name, v);
+ sw_if_index++;
+ }
+ } else {
+ vlib_counter_t *vp;
+ u64 packets, bytes;
+ vp = (vlib_counter_t *) mp->data;
+
+ switch (mp->vnet_counter_type) {
+ case VNET_INTERFACE_COUNTER_RX:
+ counter_name = "rx";
+ break;
+ case VNET_INTERFACE_COUNTER_TX:
+ counter_name = "tx";
+ break;
+ default:
+ counter_name = "bogus";
+ break;
+ }
+ for (i = 0; i < count; i++) {
+ packets = clib_mem_unaligned (&vp->packets, u64);
+ packets = clib_net_to_host_u64 (packets);
+ bytes = clib_mem_unaligned (&vp->bytes, u64);
+ bytes = clib_net_to_host_u64 (bytes);
+ vp++;
+ fformat (stdout, "%d.%s.packets %lld\n",
+ sw_if_index, counter_name, packets);
+ fformat (stdout, "%d.%s.bytes %lld\n",
+ sw_if_index, counter_name, bytes);
+ sw_if_index++;
+ }
+ }
+}
+
+/* Format an IP4 address. */
+u8 * format_ip4_address (u8 * s, va_list * args)
+{
+ u8 * a = va_arg (*args, u8 *);
+ return format (s, "%d.%d.%d.%d", a[0], a[1], a[2], a[3]);
+}
+
+/* Format an IP4 route destination and length. */
+u8 * format_ip4_address_and_length (u8 * s, va_list * args)
+{
+ u8 * a = va_arg (*args, u8 *);
+ u8 l = va_arg (*args, u32);
+ return format (s, "%U/%d", format_ip4_address, a, l);
+}
+
+static void vl_api_vnet_ip4_fib_counters_t_handler (
+ vl_api_vnet_ip4_fib_counters_t *mp)
+{
+ int i;
+ vl_api_ip4_fib_counter_t * ctrp;
+ u32 count;
+
+ count = ntohl(mp->count);
+
+ fformat (stdout, "fib id %d, count this msg %d\n",
+ ntohl(mp->vrf_id), count);
+
+ ctrp = mp->c;
+ for (i = 0; i < count; i++) {
+ fformat(stdout, "%U: %lld packets, %lld bytes\n",
+ format_ip4_address_and_length, &ctrp->address,
+ (u32)ctrp->address_length,
+ clib_net_to_host_u64 (ctrp->packets),
+ clib_net_to_host_u64 (ctrp->bytes));
+ ctrp++;
+ }
+}
+
+/* Format an IP6 address. */
+u8 * format_ip6_address (u8 * s, va_list * args)
+{
+ ip6_address_t * a = va_arg (*args, ip6_address_t *);
+ u32 i, i_max_n_zero, max_n_zeros, i_first_zero, n_zeros, last_double_colon;
+
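+ /* First pass: find the longest run of zero 16-bit groups, to be
+ * compressed to "::" in the output. */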
+ i_max_n_zero = ARRAY_LEN (a->as_u16);
+ max_n_zeros = 0;
+ i_first_zero = i_max_n_zero;
+ n_zeros = 0;
+ for (i = 0; i < ARRAY_LEN (a->as_u16); i++)
+ {
+ u32 is_zero = a->as_u16[i] == 0;
+ if (is_zero && i_first_zero >= ARRAY_LEN (a->as_u16))
+ {
+ i_first_zero = i;
+ n_zeros = 0;
+ }
+ n_zeros += is_zero;
+ if ((! is_zero && n_zeros > max_n_zeros)
+ || (i + 1 >= ARRAY_LEN (a->as_u16) && n_zeros > max_n_zeros))
+ {
+ i_max_n_zero = i_first_zero;
+ max_n_zeros = n_zeros;
+ i_first_zero = ARRAY_LEN (a->as_u16);
+ n_zeros = 0;
+ }
+ }
+
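+ /* Second pass: emit the groups, collapsing the longest zero run. */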
+ last_double_colon = 0;
+ for (i = 0; i < ARRAY_LEN (a->as_u16); i++)
+ {
+ if (i == i_max_n_zero && max_n_zeros > 1)
+ {
+ s = format (s, "::");
+ i += max_n_zeros - 1;
+ last_double_colon = 1;
+ }
+ else
+ {
+ s = format (s, "%s%x",
+ (last_double_colon || i == 0) ? "" : ":",
+ clib_net_to_host_u16 (a->as_u16[i]));
+ last_double_colon = 0;
+ }
+ }
+
+ return s;
+}
+
+/* Format an IP6 route destination and length. */
+u8 * format_ip6_address_and_length (u8 * s, va_list * args)
+{
+ ip6_address_t * a = va_arg (*args, ip6_address_t *);
+ u8 l = va_arg (*args, u32);
+ return format (s, "%U/%d", format_ip6_address, a, l);
+}
+
+static void vl_api_vnet_ip6_fib_counters_t_handler (
+ vl_api_vnet_ip6_fib_counters_t *mp)
+{
+ int i;
+ vl_api_ip6_fib_counter_t * ctrp;
+ u32 count;
+
+ count = ntohl(mp->count);
+
+ fformat (stdout, "fib id %d, count this msg %d\n",
+ ntohl(mp->vrf_id), count);
+
+ ctrp = mp->c;
+ for (i = 0; i < count; i++) {
+ fformat(stdout, "%U: %lld packets, %lld bytes\n",
+ format_ip6_address_and_length, &ctrp->address,
+ (u32)ctrp->address_length,
+ clib_net_to_host_u64 (ctrp->packets),
+ clib_net_to_host_u64 (ctrp->bytes));
+ ctrp++;
+ }
+}
+
+static void vl_api_oam_event_t_handler (
+ vl_api_oam_event_t *mp)
+{
+ fformat(stdout, "OAM: %U now %s\n",
+ format_ip4_address, &mp->dst_address,
+ mp->state == 1 ? "alive" : "dead");
+}
+
+static void vl_api_oam_add_del_reply_t_handler (
+ vl_api_oam_add_del_reply_t *mp)
+{
+ fformat(stdout, "oam add del reply %d\n", ntohl(mp->retval));
+}
+
+static void vl_api_reset_fib_reply_t_handler (
+ vl_api_reset_fib_reply_t *mp)
+{
+ fformat(stdout, "fib reset reply %d\n", ntohl(mp->retval));
+}
+
+static void vl_api_dhcp_proxy_set_vss_reply_t_handler (
+ vl_api_dhcp_proxy_set_vss_reply_t *mp)
+{
+ fformat(stdout, "dhcp proxy set vss reply %d\n", ntohl(mp->retval));
+}
+
+static void vl_api_dhcp_proxy_config_reply_t_handler (
+ vl_api_dhcp_proxy_config_reply_t *mp)
+{
+ fformat(stdout, "dhcp proxy config reply %d\n", ntohl(mp->retval));
+}
+
+static void vl_api_set_ip_flow_hash_reply_t_handler (
+ vl_api_set_ip_flow_hash_reply_t *mp)
+{
+ fformat(stdout, "set ip flow hash reply %d\n", ntohl(mp->retval));
+}
+
+static void vl_api_sw_interface_ip6nd_ra_config_reply_t_handler (
+ vl_api_sw_interface_ip6nd_ra_config_reply_t *mp)
+{
+ fformat (stdout, "ip6 nd ra-config reply %d\n", ntohl(mp->retval));
+}
+
+static void vl_api_sw_interface_ip6nd_ra_prefix_reply_t_handler (
+ vl_api_sw_interface_ip6nd_ra_prefix_reply_t *mp)
+{
+ fformat (stdout, "ip6 nd ra-prefix reply %d\n", ntohl(mp->retval));
+}
+
+static void vl_api_sw_interface_ip6_enable_disable_reply_t_handler (
+ vl_api_sw_interface_ip6_enable_disable_reply_t *mp)
+{
+ fformat (stdout, "ip6 enable/disable reply %d\n", ntohl(mp->retval));
+}
+
+static void vl_api_sw_interface_ip6_set_link_local_address_reply_t_handler (
+ vl_api_sw_interface_ip6_set_link_local_address_reply_t *mp)
+{
+ fformat (stdout, "ip6 set link-local address reply %d\n", ntohl(mp->retval));
+}
+
+static void vl_api_create_loopback_reply_t_handler
+(vl_api_create_loopback_reply_t *mp)
+{
+ fformat (stdout, "create loopback status %d, sw_if_index %d\n",
+ ntohl(mp->retval), ntohl (mp->sw_if_index));
+}
+
+static void vl_api_sr_tunnel_add_del_reply_t_handler (
+ vl_api_sr_tunnel_add_del_reply_t *mp)
+{
+ fformat(stdout, "sr tunnel add/del reply %d\n", ntohl(mp->retval));
+}
+
+static void vl_api_l2_patch_add_del_reply_t_handler
+(vl_api_l2_patch_add_del_reply_t *mp)
+{
+ fformat (stdout, "l2 patch reply %d\n", ntohl(mp->retval));
+}
+
+static void vl_api_sw_interface_set_l2_xconnect_reply_t_handler
+(vl_api_sw_interface_set_l2_xconnect_reply_t *mp)
+{
+ fformat (stdout, "l2_xconnect reply %d\n", ntohl(mp->retval));
+}
+
+static void vl_api_sw_interface_set_l2_bridge_reply_t_handler
+(vl_api_sw_interface_set_l2_bridge_reply_t *mp)
+{
+ fformat (stdout, "l2_bridge reply %d\n", ntohl(mp->retval));
+}
+
+static void noop_handler (void *notused) { }
+
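+/* The fib-counters messages carry variable-length arrays, so skip the
+ * generated endian/print functions; the handlers above do their own
+ * byte-order conversion. */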
+#define vl_api_vnet_ip4_fib_counters_t_endian noop_handler
+#define vl_api_vnet_ip4_fib_counters_t_print noop_handler
+#define vl_api_vnet_ip6_fib_counters_t_endian noop_handler
+#define vl_api_vnet_ip6_fib_counters_t_print noop_handler
+
+#define foreach_api_msg \
+_(SW_INTERFACE_DETAILS, sw_interface_details) \
+_(SW_INTERFACE_SET_FLAGS, sw_interface_set_flags) \
+_(SW_INTERFACE_SET_FLAGS_REPLY, sw_interface_set_flags_reply) \
+_(WANT_INTERFACE_EVENTS_REPLY, want_interface_events_reply) \
+_(WANT_STATS_REPLY, want_stats_reply) \
+_(WANT_OAM_EVENTS_REPLY, want_oam_events_reply) \
+_(OAM_EVENT, oam_event) \
+_(OAM_ADD_DEL_REPLY, oam_add_del_reply) \
+_(VNET_INTERFACE_COUNTERS, vnet_interface_counters) \
+_(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \
+_(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \
+_(IP_ADD_DEL_ROUTE_REPLY, ip_add_del_route_reply) \
+_(SW_INTERFACE_ADD_DEL_ADDRESS_REPLY, sw_interface_add_del_address_reply) \
+_(SW_INTERFACE_SET_TABLE_REPLY, sw_interface_set_table_reply) \
+_(TAP_CONNECT_REPLY, tap_connect_reply) \
+_(CREATE_VLAN_SUBIF_REPLY, create_vlan_subif_reply) \
+_(MPLS_GRE_ADD_DEL_TUNNEL_REPLY, mpls_gre_add_del_tunnel_reply) \
+_(MPLS_ADD_DEL_ENCAP_REPLY, mpls_add_del_encap_reply) \
+_(MPLS_ADD_DEL_DECAP_REPLY, mpls_add_del_decap_reply) \
+_(PROXY_ARP_ADD_DEL_REPLY, proxy_arp_add_del_reply) \
+_(PROXY_ARP_INTFC_ENABLE_DISABLE_REPLY, proxy_arp_intfc_enable_disable_reply) \
+_(IP_NEIGHBOR_ADD_DEL_REPLY, ip_neighbor_add_del_reply) \
+_(RESET_FIB_REPLY, reset_fib_reply) \
+_(DHCP_PROXY_CONFIG_REPLY, dhcp_proxy_config_reply) \
+_(DHCP_PROXY_SET_VSS_REPLY, dhcp_proxy_set_vss_reply) \
+_(SET_IP_FLOW_HASH_REPLY, set_ip_flow_hash_reply) \
+_(SW_INTERFACE_IP6ND_RA_CONFIG_REPLY, sw_interface_ip6nd_ra_config_reply) \
+_(SW_INTERFACE_IP6ND_RA_PREFIX_REPLY, sw_interface_ip6nd_ra_prefix_reply) \
+_(SW_INTERFACE_IP6_ENABLE_DISABLE_REPLY, sw_interface_ip6_enable_disable_reply) \
+_(SW_INTERFACE_IP6_SET_LINK_LOCAL_ADDRESS_REPLY, sw_interface_ip6_set_link_local_address_reply) \
+_(CREATE_LOOPBACK_REPLY, create_loopback_reply) \
+_(L2_PATCH_ADD_DEL_REPLY, l2_patch_add_del_reply) \
+_(SR_TUNNEL_ADD_DEL_REPLY, sr_tunnel_add_del_reply) \
+_(SW_INTERFACE_SET_L2_XCONNECT_REPLY, sw_interface_set_l2_xconnect_reply) \
+_(SW_INTERFACE_SET_L2_BRIDGE_REPLY, sw_interface_set_l2_bridge_reply)
+
+int connect_to_vpe(char *name)
+{
+ int rv=0;
+
+ rv = vl_client_connect_to_vlib("/vpe-api", name, 32);
+
+#define _(N,n) \
+ vl_msg_api_set_handlers(VL_API_##N, #n, \
+ vl_api_##n##_t_handler, \
+ noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_api_msg;
+#undef _
+
+ shmem_hdr = api_main.shmem_hdr;
+
+ return rv;
+}
+
+int disconnect_from_vpe(void)
+{
+ vl_client_disconnect_from_vlib();
+ return 0;
+}
+
+void link_up_down_enable_disable (test_main_t * tm, int enable)
+{
+ vl_api_want_interface_events_t * mp;
+
+ /* Request admin / link up down messages */
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_EVENTS);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->enable_disable = enable;
+ mp->pid = getpid();
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+ tm->link_events_on = enable;
+}
+
+void stats_enable_disable (test_main_t *tm, int enable)
+{
+ vl_api_want_stats_t * mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_WANT_STATS);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->enable_disable = enable;
+ mp->pid = getpid();
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+ tm->stats_on = enable;
+}
+
+void oam_events_enable_disable (test_main_t *tm, int enable)
+{
+ vl_api_want_oam_events_t * mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_WANT_OAM_EVENTS);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->enable_disable = enable;
+ mp->pid = getpid();
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+ tm->oam_events_on = enable;
+}
+
+void oam_add_del (test_main_t *tm, int is_add)
+{
+ vl_api_oam_add_del_t *mp;
+ ip4_address_t tmp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_OAM_ADD_DEL);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->is_add = is_add;
+
+ tmp.as_u32 = ntohl (0xc0a80101); /* 192.168.1.1 */
+ clib_memcpy (mp->src_address, tmp.as_u8, 4);
+
+ tmp.as_u32 = ntohl (0xc0a80103); /* 192.168.1.3 */
+ clib_memcpy (mp->dst_address, tmp.as_u8, 4);
+
+ mp->vrf_id = 0;
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+}
+
+void dump (test_main_t *tm)
+{
+ vl_api_sw_interface_dump_t * mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_DUMP);
+ mp->client_index = tm->my_client_index;
+ mp->name_filter_valid = 1;
+ strncpy ((char *) mp->name_filter, "eth", sizeof (mp->name_filter)-1);
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+}
+
+void add_del_ip4_route (test_main_t *tm, int enable_disable)
+{
+ vl_api_ip_add_del_route_t *mp;
+ u32 tmp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_IP_ADD_DEL_ROUTE);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->vrf_id = ntohl(0);
+ mp->create_vrf_if_needed = 1;
+ /* Arp, please, if needed */
+ mp->resolve_if_needed = 1;
+ mp->resolve_attempts = ntohl(10);
+
+ mp->next_hop_sw_if_index = ntohl(5);
+ mp->is_add = enable_disable;
+ mp->next_hop_weight = 1;
+
+ /* Next hop: 6.0.0.1 */
+ tmp = ntohl(0x06000001);
+ clib_memcpy (mp->next_hop_address, &tmp, sizeof (tmp));
+
+ /* Destination: 0.0.0.0/0, i.e. the default route */
+ tmp = ntohl(0x0);
+ clib_memcpy (mp->dst_address, &tmp, sizeof (tmp));
+ mp->dst_address_length = 0;
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+}
+
+void add_del_ip6_route (test_main_t *tm, int enable_disable)
+{
+ vl_api_ip_add_del_route_t *mp;
+ u64 tmp[2];
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_IP_ADD_DEL_ROUTE);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->next_hop_sw_if_index = ntohl(5);
+ mp->is_add = enable_disable;
+ mp->is_ipv6 = 1;
+ mp->next_hop_weight = 1;
+ mp->dst_address_length = 64;
+
+ /* add/del dabe::/64 via db01::11 */
+
+ tmp[0] = clib_host_to_net_u64 (0xdabe000000000000ULL);
+ tmp[1] = clib_host_to_net_u64 (0x0ULL);
+ clib_memcpy (mp->dst_address, &tmp[0], 8);
+ clib_memcpy (&mp->dst_address[8], &tmp[1], 8);
+
+ tmp[0] = clib_host_to_net_u64(0xdb01000000000000ULL);
+ tmp[1] = clib_host_to_net_u64 (0x11ULL);
+ clib_memcpy (mp->next_hop_address, &tmp[0], 8);
+ clib_memcpy (&mp->next_hop_address[8], &tmp[1], 8);
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+}
+
+void add_del_interface_address (test_main_t *tm, int enable_disable)
+{
+ vl_api_sw_interface_add_del_address_t *mp;
+ u32 tmp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_ADD_DEL_ADDRESS);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->sw_if_index = ntohl(5);
+ mp->is_add = enable_disable;
+ mp->address_length = 8;
+
+ tmp = ntohl (0x01020304);
+ clib_memcpy (mp->address, &tmp, 4);
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+}
+void add_del_v6_interface_address (test_main_t *tm, int enable_disable)
+{
+ vl_api_sw_interface_add_del_address_t *mp;
+ u64 tmp[2];
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_ADD_DEL_ADDRESS);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->is_ipv6 = 1;
+ mp->sw_if_index = ntohl(5);
+ mp->is_add = enable_disable;
+ mp->address_length = 64;
+
+ tmp[0] = clib_host_to_net_u64(0xdb01000000000000ULL);
+ tmp[1] = clib_host_to_net_u64 (0x11ULL);
+
+ clib_memcpy (mp->address, &tmp[0], 8);
+ clib_memcpy (&mp->address[8], &tmp[1], 8);
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+}
+
+void del_all_interface_addresses (test_main_t *tm)
+{
+ vl_api_sw_interface_add_del_address_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_ADD_DEL_ADDRESS);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->sw_if_index = ntohl(5);
+ mp->del_all = 1;
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+}
+
+void set_interface_table (test_main_t *tm, int is_ipv6, u32 vrf_id)
+{
+ vl_api_sw_interface_set_table_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_SET_TABLE);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->sw_if_index = ntohl(5);
+ mp->is_ipv6 = is_ipv6;
+ mp->vrf_id = ntohl(vrf_id);
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+}
+
+void connect_unix_tap (test_main_t *tm, char *name)
+{
+ vl_api_tap_connect_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_TAP_CONNECT);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ clib_memcpy (mp->tap_name, name, strlen(name));
+ mp->use_random_mac = 1;
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+}
+
+void create_vlan_subif (test_main_t *tm, u32 vlan_id)
+{
+ vl_api_create_vlan_subif_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_CREATE_VLAN_SUBIF);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->sw_if_index = ntohl (5);
+ mp->vlan_id = ntohl(vlan_id);
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+}
+
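+/* Build an MPLS-over-GRE tunnel in three messages: add the decap
+ * label, add the encap label stack, then create the tunnel itself. */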
+void create_mpls_gre_tunnel (test_main_t *tm, u32 vrf_id, u32 label,
+ u8 is_add)
+{
+ vl_api_mpls_add_del_encap_t *lp;
+ vl_api_mpls_add_del_decap_t *dlp;
+ vl_api_mpls_gre_add_del_tunnel_t *mp;
+ u32 tmp;
+
+ dlp = vl_msg_api_alloc (sizeof (*dlp));
+ memset(dlp, 0, sizeof (*dlp));
+ dlp->_vl_msg_id = ntohs (VL_API_MPLS_ADD_DEL_DECAP);
+ dlp->client_index = tm->my_client_index;
+ dlp->context = 0xdeadbeef;
+ dlp->tx_vrf_id = ntohl(vrf_id);
+ dlp->label = ntohl(label);
+ dlp->s_bit = 1;
+ dlp->is_add = is_add;
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&dlp);
+
+ lp = vl_msg_api_alloc (sizeof (*lp) + sizeof (u32));
+ memset(lp, 0, sizeof (*lp) + sizeof (u32));
+ lp->_vl_msg_id = ntohs (VL_API_MPLS_ADD_DEL_ENCAP);
+ lp->client_index = tm->my_client_index;
+ lp->context = 0xdeadbeef;
+ lp->vrf_id = ntohl(vrf_id);
+ lp->labels[0] = ntohl(label);
+ lp->nlabels = 1;
+ lp->is_add = is_add;
+ /* dst: 5.0.0.1 */
+ tmp = ntohl (0x05000001);
+ clib_memcpy (lp->dst_address, &tmp, 4);
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&lp);
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_MPLS_GRE_ADD_DEL_TUNNEL);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->inner_vrf_id = ntohl(vrf_id);
+ mp->outer_vrf_id = 0;
+ mp->is_add = is_add;
+
+ /* src: 6.0.0.1 */
+ tmp = ntohl (0x06000001);
+ clib_memcpy (mp->src_address, &tmp, 4);
+ /* dst: 5.0.0.1 */
+ tmp = ntohl (0x05000001);
+ clib_memcpy (mp->dst_address, &tmp, 4);
+ /* intfc: 5.0.0.1/24 */
+ tmp = ntohl (0x05000001);
+ clib_memcpy (mp->intfc_address, &tmp, 4);
+ mp->intfc_address_length = 24;
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+}
+
+void add_del_proxy_arp (test_main_t *tm, int is_add)
+{
+ vl_api_proxy_arp_add_del_t *mp;
+ u32 tmp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_PROXY_ARP_ADD_DEL);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->vrf_id = ntohl(11);
+ mp->is_add = is_add;
+
+ /* proxy fib 11, 1.1.1.1 -> 1.1.1.10 */
+ tmp = ntohl (0x01010101);
+ clib_memcpy (mp->low_address, &tmp, 4);
+
+ tmp = ntohl (0x0101010a);
+ clib_memcpy (mp->hi_address, &tmp, 4);
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+}
+
+void proxy_arp_intfc_enable_disable (test_main_t *tm, int enable_disable)
+{
+ vl_api_proxy_arp_intfc_enable_disable_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_PROXY_ARP_INTFC_ENABLE_DISABLE);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->sw_if_index = ntohl(6);
+ mp->enable_disable = enable_disable;
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+}
+
+void add_ip4_neighbor (test_main_t *tm, int add_del)
+{
+ vl_api_ip_neighbor_add_del_t *mp;
+ u32 tmp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_IP_NEIGHBOR_ADD_DEL);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->vrf_id = ntohl(11);
+ mp->sw_if_index = ntohl(6);
+ mp->is_add = add_del;
+
+ memset (mp->mac_address, 0xbe, sizeof (mp->mac_address));
+
+ tmp = ntohl (0x0101010a);
+ clib_memcpy (mp->dst_address, &tmp, 4);
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+}
+
+void add_ip6_neighbor (test_main_t *tm, int add_del)
+{
+ vl_api_ip_neighbor_add_del_t *mp;
+ u64 tmp[2];
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_IP_NEIGHBOR_ADD_DEL);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->vrf_id = ntohl(11);
+ mp->sw_if_index = ntohl(6);
+ mp->is_add = add_del;
+ mp->is_ipv6 = 1;
+
+ memset (mp->mac_address, 0xbe, sizeof (mp->mac_address));
+
+ tmp[0] = clib_host_to_net_u64(0xdb01000000000000ULL);
+ tmp[1] = clib_host_to_net_u64 (0x11ULL);
+
+ clib_memcpy (mp->dst_address, &tmp[0], 8);
+ clib_memcpy (&mp->dst_address[8], &tmp[1], 8);
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+}
+
+void reset_fib (test_main_t *tm, u8 is_ip6)
+{
+ vl_api_reset_fib_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_RESET_FIB);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->vrf_id = ntohl(11);
+ mp->is_ipv6 = is_ip6;
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+}
+
+void dhcpv6_set_vss (test_main_t *tm)
+{
+ vl_api_dhcp_proxy_set_vss_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_DHCP_PROXY_SET_VSS);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->oui = ntohl(6);
+ mp->fib_id = ntohl(60);
+ mp->is_add = 1;
+ mp->is_ipv6 = 1;
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+}
+void dhcpv4_set_vss (test_main_t *tm)
+{
+ vl_api_dhcp_proxy_set_vss_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_DHCP_PROXY_SET_VSS);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->oui = ntohl(4);
+ mp->fib_id = ntohl(40);
+ mp->is_add = 1;
+ mp->is_ipv6 = 0;
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+}
+
+void dhcp_set_vss(test_main_t *tm)
+{
+ dhcpv4_set_vss(tm);
+ dhcpv6_set_vss(tm);
+}
+
+void dhcp_set_proxy (test_main_t *tm, int ipv6)
+{
+ vl_api_dhcp_proxy_config_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_DHCP_PROXY_CONFIG);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->vrf_id = ntohl(0);
+ mp->is_ipv6 = ipv6;
+ mp->insert_circuit_id = 1;
+ mp->is_add = 1;
+ mp->dhcp_server[0] = 0x20;
+ mp->dhcp_server[1] = 0x01;
+ mp->dhcp_server[2] = 0xab;
+ mp->dhcp_server[3] = 0xcd;
+ mp->dhcp_server[4] = 0x12;
+ mp->dhcp_server[5] = 0x34;
+ mp->dhcp_server[6] = 0xfe;
+ mp->dhcp_server[7] = 0xdc;
+ mp->dhcp_server[14] = 0;
+ mp->dhcp_server[15] = 0x2;
+
+ mp->dhcp_src_address[0] = 0x20;
+ mp->dhcp_src_address[1] = 0x01;
+ mp->dhcp_src_address[2] = 0xab;
+ mp->dhcp_src_address[3] = 0xcd;
+ mp->dhcp_src_address[4] = 0x12;
+ mp->dhcp_src_address[5] = 0x34;
+ mp->dhcp_src_address[6] = 0x56;
+ mp->dhcp_src_address[7] = 0x78;
+ mp->dhcp_src_address[14] = 0;
+ mp->dhcp_src_address[15] = 0x2;
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+}
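When called with ipv6 set, the byte-wise fills above spell out server 2001:abcd:1234:fedc::2 and source 2001:abcd:1234:5678::2. For the ipv4 case, presumably only the first four bytes of each 16-byte array are read, so a clearer (hypothetical) v4 variant would be:

    u32 tmp = ntohl (0x01020304);               /* 1.2.3.4, for example */
    clib_memcpy (mp->dhcp_server, &tmp, 4);     /* v4 uses bytes [0..3] */
    clib_memcpy (mp->dhcp_src_address, &tmp, 4);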
+
+void set_ip_flow_hash (test_main_t *tm, u8 is_ip6)
+{
+ vl_api_set_ip_flow_hash_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_SET_IP_FLOW_HASH);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->vrf_id = 0;
+ mp->is_ipv6 = is_ip6;
+ mp->dst = 1;
+ mp->reverse = 1;
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+}
+
+void ip6nd_ra_config(test_main_t *tm, int is_no)
+{
+ vl_api_sw_interface_ip6nd_ra_config_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->sw_if_index = ntohl(5);
+ mp->is_no = is_no;
+
+ mp->surpress = 1;
+
+
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_IP6ND_RA_CONFIG);
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+}
+
+void ip6nd_ra_prefix(test_main_t *tm, int is_no)
+{
+ vl_api_sw_interface_ip6nd_ra_prefix_t *mp;
+ u64 tmp[2];
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->sw_if_index = ntohl(5);
+ mp->is_no = is_no;
+
+ mp->use_default = 1;
+
+
+ tmp[0] = clib_host_to_net_u64(0xdb01000000000000ULL);
+ tmp[1] = clib_host_to_net_u64 (0x11ULL);
+
+
+ clib_memcpy (mp->address, &tmp[0], 8);
+ clib_memcpy (&mp->address[8], &tmp[1], 8);
+
+ mp->address_length = 64;
+
+
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_IP6ND_RA_PREFIX);
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+}
+
+void ip6_enable_disable(test_main_t *tm, int enable)
+{
+ vl_api_sw_interface_ip6_enable_disable_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->sw_if_index = ntohl(5);
+ mp->enable = (enable == 1);
+
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_IP6_ENABLE_DISABLE);
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+}
+
+void loop_create (test_main_t *tm)
+{
+ vl_api_create_loopback_t * mp;
+
+ mp = vl_msg_api_alloc (sizeof(*mp));
+ memset(mp, 0, sizeof (*mp));
+
+ mp->_vl_msg_id = ntohs (VL_API_CREATE_LOOPBACK);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+}
+
+void ip6_set_link_local_address(test_main_t *tm)
+{
+ vl_api_sw_interface_ip6_set_link_local_address_t *mp;
+ u64 tmp[2];
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->sw_if_index = ntohl(5);
+
+ tmp[0] = clib_host_to_net_u64(0xfe80000000000000ULL);
+ tmp[1] = clib_host_to_net_u64 (0x11ULL);
+
+ clib_memcpy (mp->address, &tmp[0], 8);
+ clib_memcpy (&mp->address[8], &tmp[1], 8);
+
+ mp->address_length = 64;
+
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_IP6_SET_LINK_LOCAL_ADDRESS);
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+}
+
+
+void set_flags (test_main_t *tm, int up_down)
+{
+ vl_api_sw_interface_set_flags_t * mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_SET_FLAGS);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->sw_if_index = ntohl (5);
+ mp->admin_up_down = up_down;
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+
+}
+
+void l2_patch_add_del (test_main_t *tm, int is_add)
+{
+ vl_api_l2_patch_add_del_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_L2_PATCH_ADD_DEL);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->is_add = is_add;
+ mp->rx_sw_if_index = ntohl (1);
+ mp->tx_sw_if_index = ntohl (2);
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+}
+
+void l2_xconnect (test_main_t *tm)
+{
+ vl_api_sw_interface_set_l2_xconnect_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_SET_L2_XCONNECT);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->rx_sw_if_index = ntohl(5);
+ mp->tx_sw_if_index = ntohl(6);
+ mp->enable = 1;
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+}
+
+void l2_bridge (test_main_t *tm)
+{
+ vl_api_sw_interface_set_l2_bridge_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_SET_L2_BRIDGE);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->rx_sw_if_index = ntohl(5);
+ mp->bd_id = ntohl(6);
+ mp->bvi = 1; /* u8 field; ntohl here would zero it on little-endian */
+ mp->shg = 0; /* likewise a single octet, no byte swap */
+ mp->enable = 1;
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+}
+
+int main (int argc, char ** argv)
+{
+ api_main_t * am = &api_main;
+ test_main_t * tm = &test_main;
+ int ch;
+
+ connect_to_vpe("test_client");
+
+ tm->vl_input_queue = shmem_hdr->vl_input_queue;
+ tm->my_client_index = am->my_client_index;
+
+ fformat(stdout, "Type 'h' for help, 'q' to quit...\n");
+
+ while (1) {
+ ch = getchar();
+ switch (ch) {
+ case 'q':
+ goto done;
+ case 'd':
+ dump (tm);
+ break;
+ case 'L':
+ link_up_down_enable_disable (tm, 1 /* enable_disable */);
+ break;
+ case 'l':
+ link_up_down_enable_disable (tm, 0 /* enable_disable */);
+ break;
+ case 'S':
+ stats_enable_disable (tm, 1 /* enable_disable */);
+ break;
+ case 's':
+ stats_enable_disable (tm, 0 /* enable_disable */);
+ break;
+ case '3':
+ add_del_ip4_route (tm, 0 /* add */);
+ break;
+ case '4':
+ add_del_ip4_route (tm, 1 /* add */);
+ break;
+ case '5':
+ add_del_ip6_route (tm, 0 /* add */);
+ break;
+ case '6':
+ add_del_ip6_route (tm, 1 /* add */);
+ break;
+ case 'A':
+ add_del_interface_address (tm, 1 /* add */);
+ break;
+ case 'a':
+ add_del_interface_address (tm, 0 /* add */);
+ break;
+ case 'B':
+ add_del_v6_interface_address (tm, 1 /* add */);
+ break;
+ case 'b':
+ add_del_v6_interface_address (tm, 0 /* add */);
+ break;
+ case 'E':
+ l2_patch_add_del(tm, 1 /* is_add */);
+ break;
+ case 'e':
+ l2_patch_add_del(tm, 0 /* is_add */);
+ break;
+ case 'z':
+ del_all_interface_addresses (tm);
+ break;
+ case 't':
+ set_interface_table (tm, 0 /* is_ipv6 */,
+ 11 /* my amp goes to 11 */);
+ break;
+ case 'T':
+ set_interface_table (tm, 1 /* is_ipv6 */,
+ 12 /* my amp goes to 12 */);
+ break;
+
+ case 'u':
+ create_vlan_subif (tm, 123);
+ break;
+
+ case 'c':
+ connect_unix_tap (tm, "foo");
+ break;
+
+ case 'M':
+ create_mpls_gre_tunnel (tm, 11 /* fib */, 123 /* label */,
+ 1 /* is_add */);
+ break;
+
+ case 'm':
+ create_mpls_gre_tunnel (tm, 11 /* fib */, 123 /* label */,
+ 0 /* is_add */);
+ break;
+
+ case 'n':
+ add_ip4_neighbor (tm, 1 /* is_add */);
+ add_ip6_neighbor (tm, 1 /* is_add */);
+ break;
+
+ case 'N':
+ add_ip4_neighbor (tm, 0 /* is_add */);
+ add_ip6_neighbor (tm, 0 /* is_add */);
+ break;
+
+ case 'p':
+ add_del_proxy_arp (tm, 1 /* add */);
+ break;
+
+ case 'i':
+ proxy_arp_intfc_enable_disable (tm, 1 /* enable */);
+ break;
+
+ case 'O':
+ oam_events_enable_disable (tm, 0 /* enable */);
+ break;
+
+ case 'o':
+ oam_events_enable_disable (tm, 1 /* enable */);
+ break;
+
+ case '0':
+ oam_add_del (tm, 0 /* is_add */);
+ break;
+
+ case '1':
+ oam_add_del (tm, 1 /* is_add */);
+ break;
+
+ case 'r':
+ reset_fib (tm, 0 /* is_ip6 */);
+ break;
+
+ case 'R':
+ reset_fib (tm, 1 /* is_ip6 */);
+ break;
+
+ case 'j':
+ dhcp_set_vss(tm);
+ break;
+
+ case 'k':
+ dhcp_set_proxy(tm, 0);
+ break;
+
+ case 'K':
+ dhcp_set_proxy(tm, 1 /*ipv6*/);
+ break;
+
+ case 'v':
+ set_ip_flow_hash (tm, 0 /* is_ip6 */);
+ break;
+
+ case 'V':
+ ip6_set_link_local_address(tm);
+ break;
+
+ case 'w':
+ ip6_enable_disable(tm, 1 /* enable */);
+ break;
+
+ case 'W':
+ ip6_enable_disable(tm, 0 /* disable */);
+ break;
+
+ case 'x':
+ ip6nd_ra_config(tm, 0 /* is_no */);
+ break;
+ case 'X':
+ ip6nd_ra_config(tm, 1 /* is_no */);
+ break;
+ case 'y':
+ ip6nd_ra_prefix(tm, 0 /* is_no */);
+ break;
+ case 'Y':
+ ip6nd_ra_prefix(tm, 1 /* is_no */);
+ break;
+
+ case '7':
+ loop_create (tm);
+ break;
+
+ case 'F':
+ set_flags (tm, 1 /* up_down */);
+ break;
+
+ case 'f':
+ set_flags (tm, 0 /* up_down */);
+ break;
+
+ case '@':
+ l2_xconnect (tm);
+ break;
+
+ case '#':
+ l2_bridge(tm);
+ break;
+
+ case 'h':
+ fformat (stdout, "q=quit,d=dump,L=link evts on,l=link evts off\n");
+ fformat (stdout, "S=stats on,s=stats off\n");
+ fformat (stdout, "4=add v4 route, 3=del v4 route\n");
+ fformat (stdout, "6=add v6 route, 5=del v6 route\n");
+ fformat (stdout, "A=add v4 intfc route, a=del v4 intfc route\n");
+ fformat (stdout, "B=add v6 intfc route, b=del v6 intfc route\n");
+ fformat (stdout, "z=del all intfc routes\n");
+ fformat (stdout, "t=set v4 intfc table, T=set v6 intfc table\n");
+ fformat (stdout, "c=connect unix tap\n");
+ fformat (stdout, "j=set dhcpv4 and v6 link-address/option-82 params\n");
+ fformat (stdout, "k=set dhcpv4 relay agent params\n");
+ fformat (stdout, "K=set dhcpv6 relay agent params\n");
+ fformat (stdout, "E=add l2 patch, e=del l2 patch\n");
+ fformat (stdout, "V=ip6 set link-local address \n");
+ fformat (stdout, "w=ip6 enable \n");
+ fformat (stdout, "W=ip6 disable \n");
+ fformat (stdout, "x=ip6 nd config \n");
+ fformat (stdout, "X=no ip6 nd config\n");
+ fformat (stdout, "y=ip6 nd prefix \n");
+ fformat (stdout, "Y=no ip6 nd prefix\n");
+ fformat (stdout, "@=l2 xconnect\n");
+ fformat (stdout, "#=l2 bridge\n");
+        break;
+
+ default:
+ break;
+ }
+
+ }
+
+ done:
+
+ if (tm->link_events_on)
+ link_up_down_enable_disable (tm, 0 /* enable */);
+ if (tm->stats_on)
+ stats_enable_disable (tm, 0 /* enable */);
+ if (tm->oam_events_on)
+ oam_events_enable_disable (tm, 0 /* enable */);
+
+ disconnect_from_vpe();
+ exit (0);
+}
+
+#undef vl_api_version
+#define vl_api_version(n,v) static u32 vpe_api_version = v;
+#include <vpp-api/vpe.api.h>
+#undef vl_api_version
+
+void vl_client_add_api_signatures (vl_api_memclnt_create_t *mp)
+{
+ /*
+ * Send the main API signature in slot 0. This bit of code must
+ * match the checks in ../vpe/api/api.c: vl_msg_api_version_check().
+ */
+ mp->api_versions[0] = clib_host_to_net_u32 (vpe_api_version);
+}
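The three-line stanza above works because <vpp-api/vpe.api.h> invokes vl_api_version() once with the version value generated from vpe.api; with the temporary #define in place, the include reduces to a single definition, conceptually (the value shown is a hypothetical placeholder):

    static u32 vpe_api_version = 0x12345678;  /* generated from vpe.api */

Sending it in api_versions[0] lets vpp reject clients built against a stale API definition.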
diff --git a/vpp/vpp-api/test_ha.c b/vpp/vpp-api/test_ha.c
new file mode 100644
index 00000000..b3b39fea
--- /dev/null
+++ b/vpp/vpp-api/test_ha.c
@@ -0,0 +1,219 @@
+/*
+ *------------------------------------------------------------------
+ * api.c - message handler registration
+ *
+ * Copyright (c) 2010 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <netinet/in.h>
+#include <signal.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <time.h>
+#include <fcntl.h>
+#include <string.h>
+#include <vppinfra/clib.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/bitmap.h>
+#include <vppinfra/fifo.h>
+#include <vppinfra/time.h>
+#include <vppinfra/mheap.h>
+#include <vppinfra/heap.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/format.h>
+#include <vppinfra/error.h>
+
+#include <vnet/vnet.h>
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+#include <svm.h>
+#include <svmdb.h>
+
+#include <vpp-api/vpe_msg_enum.h>
+
+#include <vnet/ip/ip.h>
+
+#define f64_endian(a)
+#define f64_print(a,b)
+
+#define vl_typedefs /* define message structures */
+#include <vpp-api/vpe_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define message structures */
+#include <vpp-api/vpe_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...)
+#define vl_printfun
+#include <vpp-api/vpe_all_api_h.h>
+#undef vl_printfun
+
+vl_shmem_hdr_t *shmem_hdr;
+
+typedef struct {
+ u32 pings_sent;
+ u32 pings_replied;
+ volatile u32 signal_received;
+
+ /* convenience */
+ unix_shared_memory_queue_t * vl_input_queue;
+ u32 my_client_index;
+ svmdb_client_t * svmdb_client;
+} test_main_t;
+
+test_main_t test_main;
+
+static void vl_api_control_ping_reply_t_handler
+(vl_api_control_ping_reply_t * mp)
+{
+ test_main_t * tm = &test_main;
+
+ fformat(stdout, "control ping reply from pid %d\n",
+ ntohl (mp->vpe_pid));
+ tm->pings_replied++;
+}
+
+void vlib_cli_output (struct vlib_main_t * vm, char * fmt, ...)
+{
+ clib_warning ("BUG: vlib_cli_output called...");
+}
+
+#define foreach_api_msg \
+_(CONTROL_PING_REPLY,control_ping_reply)
+
+void ping (test_main_t *tm)
+{
+ vl_api_control_ping_t * mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_CONTROL_PING);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *)&mp);
+}
+
+static void noop_handler (void *notused) { }
+
+int connect_to_vpe(char *name)
+{
+ int rv=0;
+ test_main_t * tm = &test_main;
+ api_main_t * am = &api_main;
+
+ rv = vl_client_connect_to_vlib("/vpe-api", name, 32);
+ if (rv < 0)
+ return rv;
+
+#define _(N,n) \
+ vl_msg_api_set_handlers(VL_API_##N, #n, \
+ vl_api_##n##_t_handler, \
+ noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_api_msg;
+#undef _
+
+ shmem_hdr = api_main.shmem_hdr;
+ tm->vl_input_queue = shmem_hdr->vl_input_queue;
+ tm->my_client_index = am->my_client_index;
+ return 0;
+}
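For a single message, the _(N,n) expansion above is equivalent to the following call, wiring in the reply handler defined earlier plus the generated endian and print functions:

    vl_msg_api_set_handlers (VL_API_CONTROL_PING_REPLY, "control_ping_reply",
                             vl_api_control_ping_reply_t_handler,
                             noop_handler,
                             vl_api_control_ping_reply_t_endian,
                             vl_api_control_ping_reply_t_print,
                             sizeof (vl_api_control_ping_reply_t), 1);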
+
+int disconnect_from_vpe(void)
+{
+ vl_client_disconnect_from_vlib();
+
+ return 0;
+}
+
+void signal_handler (int signo)
+{
+ test_main_t * tm = &test_main;
+
+ tm->signal_received = 1;
+}
+
+
+int main (int argc, char **argv)
+{
+ test_main_t * tm = &test_main;
+ api_main_t * am = &api_main;
+ u32 swt_pid = 0;
+ int connected = 0;
+
+ signal (SIGINT, signal_handler);
+
+ while (1) {
+ if (tm->signal_received)
+ break;
+
+ if (am->shmem_hdr)
+ swt_pid = am->shmem_hdr->vl_pid;
+
+ /* If kill returns 0, the vpe-f process is alive */
+ if (kill(swt_pid, 0) == 0) {
+ /* Try to connect */
+ if (connected == 0) {
+ fformat (stdout, "Connect to VPE-f\n");
+ if (connect_to_vpe("test_ha_client") >= 0) {
+ tm->pings_sent = 0;
+ tm->pings_replied = 0;
+ connected = 1;
+ } else {
+ fformat(stdout, "Connect failed, sleep and retry...\n");
+ sleep(1);
+ continue;
+ }
+ }
+ tm->pings_sent ++;
+ ping(tm);
+
+ sleep (1);
+
+ /* haven't heard back in 3 seconds: disconnect / reconnect */
+ if ((tm->pings_replied + 3) <= tm->pings_sent) {
+ fformat (stdout, "VPE-f pid %d not responding\n", swt_pid);
+ swt_pid = 0;
+ disconnect_from_vpe();
+ connected = 0;
+ }
+ } else {
+ if (connected) {
+ fformat (stdout, "VPE-f pid %d died\n", swt_pid);
+ swt_pid = 0;
+ disconnect_from_vpe();
+ connected = 0;
+ }
+ sleep (1);
+ }
+ }
+
+ fformat (stdout, "Signal received, graceful exit\n");
+ disconnect_from_vpe();
+ exit (0);
+}
diff --git a/vpp/vpp-api/vpe.api b/vpp/vpp-api/vpe.api
new file mode 100644
index 00000000..a1ea1b2f
--- /dev/null
+++ b/vpp/vpp-api/vpe.api
@@ -0,0 +1,3954 @@
+/* Hey Emacs use -*- mode: C -*- */
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** \file
+
+ This file defines vpe control-plane API messages which are generally
+ called through a shared memory interface.
+*/
+
+
+/** \brief Register for interface events
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param enable_disable - 1 => register for events, 0 => cancel registration
+ @param pid - sender's pid
+*/
+define want_interface_events {
+ u32 client_index;
+ u32 context;
+ u32 enable_disable;
+ u32 pid;
+};
+
+/** \brief Reply for interface events registration
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define want_interface_events_reply {
+ u32 context;
+ i32 retval;
+};
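Requests and replies pair by naming convention (foo / foo_reply), and the opaque context is the only correlation token. A hedged sketch of the client side, in the handler-registration style used by test_ha.c later in this patch:

    static void vl_api_want_interface_events_reply_t_handler
      (vl_api_want_interface_events_reply_t * mp)
    {
      /* context comes back verbatim; retval is signed, 0 on success */
      if (mp->context == 0xdeadbeef && ntohl (mp->retval) == 0)
        fformat (stdout, "interface event registration accepted\n");
    }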
+
+/** \brief Interface details structure (fix this)
+ @param sw_if_index - index of the interface
+ @param sup_sw_if_index - index of parent interface if any, else same as sw_if_index
+ @param l2_address_length - length of the interface's l2 address
+ @param l2_address - the interface's l2 address
+ @param interface_name - name of the interface
+ @param link_duplex - 1 if half duplex, 2 if full duplex
+ @param link_speed - 1 = 10M, 2 = 100M, 4 = 1G, 8 = 10G, 16 = 40G, 32 = 100G
+ @param link_mtu - max transmission unit
+ @param sub_id - A number 0-N to uniquely identify this subif on super if
+ @param sub_dot1ad - 0 = dot1q, 1=dot1ad
+ @param sub_number_of_tags - Number of tags (0 - 2)
+ @param sub_outer_vlan_id
+ @param sub_inner_vlan_id
+ @param sub_exact_match
+ @param sub_default
+ @param sub_outer_vlan_id_any
+ @param sub_inner_vlan_id_any
+ @param vtr_op - vlan tag rewrite operation
+ @param vtr_push_dot1q
+ @param vtr_tag1
+ @param vtr_tag2
+*/
+manual_java define sw_interface_details {
+ u32 context;
+ u32 sw_if_index;
+
+ /* index of sup interface (e.g. hw interface).
+ equal to sw_if_index for super hw interface. */
+ u32 sup_sw_if_index;
+
+ /* Layer 2 address, if applicable */
+ u32 l2_address_length;
+ u8 l2_address[8];
+
+ /* Interface name */
+ u8 interface_name[64];
+
+ /* 1 = up, 0 = down */
+ u8 admin_up_down;
+ u8 link_up_down;
+
+ /* 1 = half duplex, 2 = full duplex */
+ u8 link_duplex;
+
+ /* 1 = 10M, 2 = 100M, 4 = 1G, 8 = 10G, 16 = 40G, 32 = 100G */
+ u8 link_speed;
+
+ /* MTU */
+ u16 link_mtu;
+
+ /* Subinterface ID. A number 0-N to uniquely identify this subinterface under the super interface */
+ u32 sub_id;
+
+ /* 0 = dot1q, 1=dot1ad */
+ u8 sub_dot1ad;
+
+ /* Number of tags 0-2 */
+ u8 sub_number_of_tags;
+ u16 sub_outer_vlan_id;
+ u16 sub_inner_vlan_id;
+ u8 sub_exact_match;
+ u8 sub_default;
+ u8 sub_outer_vlan_id_any;
+ u8 sub_inner_vlan_id_any;
+
+ /* vlan tag rewrite state */
+ u32 vtr_op;
+ u32 vtr_push_dot1q; // ethertype of first pushed tag is dot1q/dot1ad
+ u32 vtr_tag1; // first pushed tag
+ u32 vtr_tag2; // second pushed tag
+};
+
+/** \brief Set flags on the interface
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - index of the interface to set flags on
+ @param admin_up_down - set the admin state, 1 = up, 0 = down
+ @param link_up_down - Oper state sent on change event, not used in config.
+ @param deleted - interface was deleted
+*/
+define sw_interface_set_flags {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ /* 1 = up, 0 = down */
+ u8 admin_up_down;
+ u8 link_up_down;
+ u8 deleted;
+};
+
+/** \brief Reply to sw_interface_set_flags
+ @param context - sender context which was passed in the request
+ @param retval - return code of the set flags request
+*/
+define sw_interface_set_flags_reply {
+ u32 context;
+ i32 retval;
+};
+
+/* works */
+manual_java define sw_interface_dump {
+ u32 client_index;
+ u32 context;
+ u8 name_filter_valid;
+ u8 name_filter[49];
+};
+
+/** \brief Set or delete one or all ip addresses on a specified interface
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - index of the interface to add/del addresses
+ @param is_add - add address if non-zero, else delete
+ @param is_ipv6 - if non-zero the address is ipv6, else ipv4
+ @param del_all - if non-zero delete all addresses on the interface
+ @param address_length - address length in bytes, 4 for ip4, 16 for ip6
+ @param address - array of address bytes
+*/
+define sw_interface_add_del_address {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 is_add;
+ u8 is_ipv6;
+ u8 del_all;
+ u8 address_length;
+ u8 address[16];
+};
+
+/** \brief Reply for interface events registration
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define sw_interface_add_del_address_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Associate the specified interface with a fib table
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - index of the interface
+ @param is_ipv6 - if non-zero ipv6, else ipv4
+ @param vrf_id - fib table/vrf id to associate the interface with
+*/
+define sw_interface_set_table {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 is_ipv6;
+ u32 vrf_id;
+};
+
+/** \brief Reply for interface events registration
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define sw_interface_set_table_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Initialize a new tap interface with the given parameters
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param use_random_mac - let the system generate a unique mac address
+ @param tap_name - name to associate with the new interface
+ @param mac_address - mac addr to assign to the interface if use_random_mac not set
+*/
+define tap_connect {
+ u32 client_index;
+ u32 context;
+ u8 use_random_mac;
+ u8 tap_name [64];
+ u8 mac_address[6];
+ u8 renumber;
+ u32 custom_dev_instance;
+};
+
+/** \brief Reply for tap connect request
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+ @param sw_if_index - software index allocated for the new tap interface
+*/
+define tap_connect_reply {
+ u32 context;
+ i32 retval;
+ u32 sw_if_index;
+};
+
+/** \brief Modify a tap interface with the given parameters
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface index of existing tap interface
+ @param use_random_mac - let the system generate a unique mac address
+ @param tap_name - name to associate with the new interface
+ @param mac_address - mac addr to assign to the interface if use_random_mac not set
+*/
+define tap_modify {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 use_random_mac;
+ u8 tap_name [64];
+ u8 mac_address[6];
+ u8 renumber;
+ u32 custom_dev_instance;
+};
+
+/** \brief Reply for tap modify request
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+ @param sw_if_index - software index of the modified tap interface
+*/
+define tap_modify_reply {
+ u32 context;
+ i32 retval;
+ u32 sw_if_index;
+};
+
+/** \brief Delete tap interface
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface index of existing tap interface
+*/
+define tap_delete {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+};
+
+/** \brief Reply for tap delete request
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define tap_delete_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Dump tap interfaces request */
+define sw_interface_tap_dump {
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief Reply for tap dump request
+ @param sw_if_index - software index of tap interface
+ @param dev_name - Linux tap device name
+*/
+manual_java define sw_interface_tap_details {
+ u32 context;
+ u32 sw_if_index;
+ u8 dev_name[64];
+};
+
+/** \brief Create a new subinterface with the given vlan id
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - software index of the new vlan's parent interface
+ @param vlan_id - vlan tag of the new interface
+*/
+define create_vlan_subif {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u32 vlan_id;
+};
+
+/** \brief Reply for the vlan subinterface create request
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+ @param sw_if_index - software index allocated for the new subinterface
+*/
+define create_vlan_subif_reply {
+ u32 context;
+ i32 retval;
+ u32 sw_if_index;
+};
+
+/** \brief Add / del route request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param next_hop_sw_if_index - software index of the next-hop interface
+ @param vrf_id - fib table /vrf associated with the route
+ @param lookup_in_vrf -
+ @param resolve_attempts -
+ @param classify_table_index -
+ @param create_vrf_if_needed -
+ @param resolve_if_needed -
+ @param is_add - 1 if adding the route, 0 if deleting
+ @param is_drop -
+ @param is_ipv6 - 0 if an ip4 route, else ip6
+ @param is_local -
+ @param is_classify -
+ @param is_multipath - Set to 1 if this is a multipath route, else 0
+ @param not_last - non-zero if this is not the last message in a group of multiple add/del messages
+ @param next_hop_weight -
+ @param dst_address_length -
+ @param dst_address[16] -
+ @param next_hop_address[16] -
+*/
+define ip_add_del_route {
+ u32 client_index;
+ u32 context;
+ u32 next_hop_sw_if_index;
+ u32 vrf_id;
+ u32 lookup_in_vrf;
+ u32 resolve_attempts;
+ u32 classify_table_index;
+ u8 create_vrf_if_needed;
+ u8 resolve_if_needed;
+ u8 is_add;
+ u8 is_drop;
+ u8 is_ipv6;
+ u8 is_local;
+ u8 is_classify;
+ /* Is last/not-last message in group of multiple add/del messages. */
+ u8 is_multipath;
+ u8 not_last;
+ u8 next_hop_weight;
+ u8 dst_address_length;
+ u8 dst_address[16];
+ u8 next_hop_address[16];
+};
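A hedged sketch of filling this message for an ip4 route (10.0.0.0/8 via 1.2.3.4), following the byte-order conventions the test client uses; for v4, only the first four bytes of the 16-byte address fields are used:

    vl_api_ip_add_del_route_t *mp;
    u32 tmp;

    mp = vl_msg_api_alloc (sizeof (*mp));
    memset (mp, 0, sizeof (*mp));
    mp->_vl_msg_id = ntohs (VL_API_IP_ADD_DEL_ROUTE);
    mp->client_index = tm->my_client_index;
    mp->context = 0xdeadbeef;
    mp->is_add = 1;
    mp->next_hop_weight = 1;
    mp->dst_address_length = 8;
    tmp = ntohl (0x0a000000);                  /* 10.0.0.0 */
    clib_memcpy (mp->dst_address, &tmp, 4);
    tmp = ntohl (0x01020304);                  /* 1.2.3.4 */
    clib_memcpy (mp->next_hop_address, &tmp, 4);
    vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) &mp);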
+
+/** \brief Reply for add / del route request
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define ip_add_del_route_reply {
+ u32 context;
+ i32 retval;
+};
+
+/* works */
+/** \brief Add / del gre tunnel request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param inner_vrf_id -
+ @param outer_vrf_id -
+ @param is_add - 1 if adding the tunnel, 0 if deleting
+ @param l2_only -
+ @param src_address[4] - tunnel source address
+ @param dst_address[4] - tunnel destination address
+ @param intfc_address -
+ @param intfc_address_length -
+*/
+define mpls_gre_add_del_tunnel {
+ u32 client_index;
+ u32 context;
+ u32 inner_vrf_id;
+ u32 outer_vrf_id;
+ u8 is_add;
+ u8 l2_only;
+ u8 src_address[4];
+ u8 dst_address[4];
+ u8 intfc_address[4];
+ u8 intfc_address_length;
+};
+
+/** \brief Reply for add / del tunnel request
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+ @param tunnel_sw_if_index - sw_if_index of the tunnel created
+*/
+define mpls_gre_add_del_tunnel_reply {
+ u32 context;
+ i32 retval;
+ u32 tunnel_sw_if_index;
+};
+
+/** \brief Add / del MPLS encapsulation request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param vrf_id - vrf id
+ @param dst_address[4] -
+ @param is_add - 1 if adding the encap, 0 if deleting
+ @param nlabels - number of labels
+ @param labels - array of labels
+*/
+define mpls_add_del_encap {
+ u32 client_index;
+ u32 context;
+ u32 vrf_id;
+ u8 dst_address[4];
+ /* 1 = add, 0 = delete */
+ u8 is_add;
+ u8 nlabels;
+ u32 labels[0];
+};
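labels[0] is a zero-length trailing array, so the allocation must be sized to cover the label stack. A sketch for a hypothetical two-label encap:

    int nlabels = 2;
    vl_api_mpls_add_del_encap_t *mp;

    mp = vl_msg_api_alloc (sizeof (*mp) + nlabels * sizeof (u32));
    memset (mp, 0, sizeof (*mp) + nlabels * sizeof (u32));
    mp->nlabels = nlabels;
    mp->labels[0] = ntohl (100);   /* outer label */
    mp->labels[1] = ntohl (200);   /* inner label */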
+
+/** \brief Reply for add / del encapsulation request
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define mpls_add_del_encap_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Add / del MPLS decapsulation request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param rx_vrf_id - receive vrf
+ @param tx_vrf_id - transmit vrf
+ @param label -
+ @param next_index -
+ @param s_bit -
+ @param is_add - 1 if adding the decap, 0 if deleting
+*/
+define mpls_add_del_decap {
+ u32 client_index;
+ u32 context;
+ u32 rx_vrf_id;
+ u32 tx_vrf_id;
+ u32 label;
+ u32 next_index;
+ u8 s_bit;
+ u8 is_add;
+};
+
+/** \brief Reply for MPLS decap add / del request
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define mpls_add_del_decap_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Proxy ARP add / del request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param vrf_id - VRF / Fib table ID
+ @param is_add - 1 if adding the Proxy ARP range, 0 if deleting
+ @param low_address[4] - Low address of the Proxy ARP range
+ @param hi_address[4] - High address of the Proxy ARP range
+*/
+define proxy_arp_add_del {
+ u32 client_index;
+ u32 context;
+ u32 vrf_id;
+ u8 is_add;
+ u8 low_address[4];
+ u8 hi_address[4];
+};
+
+/** \brief Reply for proxy arp add / del request
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define proxy_arp_add_del_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Proxy ARP add / del request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - Which interface to enable / disable Proxy Arp on
+ @param enable_disable - 1 to enable Proxy ARP on interface, 0 to disable
+*/
+define proxy_arp_intfc_enable_disable {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ /* 1 = on, 0 = off */
+ u8 enable_disable;
+};
+
+/** \brief Reply for Proxy ARP interface enable / disable request
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define proxy_arp_intfc_enable_disable_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IP neighbor add / del request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param vrf_id - vrf_id, only for IP4
+ @param sw_if_index - interface used to reach neighbor
+ @param is_add - 1 to add neighbor, 0 to delete
+ @param is_ipv6 - 1 for IPv6 neighbor, 0 for IPv4
+ @param is_static -
+ @param mac_address - l2 address of the neighbor
+ @param dst_address - ip4 or ip6 address of the neighbor
+*/
+define ip_neighbor_add_del {
+ u32 client_index;
+ u32 context;
+ u32 vrf_id; /* only makes sense for ip4 */
+ u32 sw_if_index;
+ /* 1 = add, 0 = delete */
+ u8 is_add;
+ u8 is_ipv6;
+ u8 is_static;
+ u8 mac_address[6];
+ u8 dst_address[16];
+};
+
+/** \brief Reply for IP Neighbor add / delete request
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define ip_neighbor_add_del_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Reset VRF (remove all routes etc) request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_ipv6 - 1 for an IPv6 VRF, 0 for IPv4
+ @param vrf_id - ID of the FIB table / VRF to reset
+*/
+define reset_vrf {
+ u32 client_index;
+ u32 context;
+ u8 is_ipv6;
+ u32 vrf_id;
+};
+
+/** \brief Reply for Reset VRF request
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define reset_vrf_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Is Address Reachable request - DISABLED
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param next_hop_sw_if_index - index of interface used to get to next hop
+ @param is_ipv6 - 1 for IPv6, 0 for IPv4
+ @param is_error - address not found or does not match intf
+ @param address[] - Address in question
+*/
+define is_address_reachable {
+ u32 client_index; /* (api_main_t *) am->my_client_index */
+ u32 context;
+ u32 next_hop_sw_if_index;
+ u8 is_known; /* on reply, this is the answer */
+ u8 is_ipv6;
+ u8 is_error; /* address not found or does not match intf */
+ u8 address[16];
+};
+
+/** \brief Want Stats, register for stats updates
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param enable_disable - 1 = enable stats, 0 = disable
+ @param pid - pid of process requesting stats updates
+*/
+define want_stats {
+ u32 client_index;
+ u32 context;
+ u32 enable_disable;
+ u32 pid;
+};
+
+/** \brief Reply for Want Stats request
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+manual_java define want_stats_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Want stats counters structure
+ @param vnet_counter_type - such as ip4, ip6, punts, etc
+ @param is_combined - rx & tx total (all types) counts
+ @param first_sw_if_index - first sw_if_index in this block of counters
+ @param count - number of interfaces this stats block includes counters for
+ @param data - contiguous block of vlib_counter_t structures
+*/
+manual_java define vnet_interface_counters {
+ /* enums - plural - in vnet/interface.h */
+ u8 vnet_counter_type;
+ u8 is_combined;
+ u32 first_sw_if_index;
+ u32 count;
+ u8 data[0];
+};
+
+typeonly manual_print manual_endian define ip4_fib_counter {
+ u32 address;
+ u8 address_length;
+ u64 packets;
+ u64 bytes;
+};
+
+manual_java manual_print manual_endian define vnet_ip4_fib_counters {
+ u32 vrf_id;
+ u32 count;
+ vl_api_ip4_fib_counter_t c[count];
+};
+
+typeonly manual_print manual_endian define ip6_fib_counter {
+ u64 address[2];
+ u8 address_length;
+ u64 packets;
+ u64 bytes;
+};
+
+manual_java manual_print manual_endian define vnet_ip6_fib_counters {
+ u32 vrf_id;
+ u32 count;
+ vl_api_ip6_fib_counter_t c[count];
+};
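The c[count] notation embeds a counted array of the typeonly structure directly in the message. A hedged sketch of walking it in a client handler (format_ip4_address is assumed from vnet; the counters are declared manual_endian, so the u64s arrive in network order):

    static void vl_api_vnet_ip4_fib_counters_t_handler
      (vl_api_vnet_ip4_fib_counters_t * mp)
    {
      u32 i, count = ntohl (mp->count);

      for (i = 0; i < count; i++)
        fformat (stdout, "%U/%d: %llu pkts\n",
                 format_ip4_address, &mp->c[i].address,
                 mp->c[i].address_length,
                 clib_net_to_host_u64 (mp->c[i].packets));
    }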
+
+/** \brief Request for a single block of summary stats
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+define vnet_get_summary_stats {
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief Reply for vnet_get_summary_stats request
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for request
+ @param total_pkts -
+ @param total_bytes -
+ @param vector_rate -
+*/
+define vnet_summary_stats_reply {
+ u32 context;
+ i32 retval;
+ u64 total_pkts[2];
+ u64 total_bytes[2];
+ f64 vector_rate;
+};
+
+/** \brief OAM event structure
+ @param dst_address[] -
+ @param state
+*/
+manual_java define oam_event {
+ u8 dst_address[4];
+ u8 state;
+};
+
+/** \brief Want OAM events request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param enable_disable - enable if non-zero, else disable
+ @param pid - pid of the requesting process
+*/
+define want_oam_events {
+ u32 client_index;
+ u32 context;
+ u32 enable_disable;
+ u32 pid;
+};
+
+/** \brief Want OAM events response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the want oam events request
+*/
+define want_oam_events_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief OAM add / del target request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param vrf_id - vrf_id of the target
+ @param src_address[] - source address to use for the updates
+ @param dst_address[] - destination address of the target
+ @param is_add - add target if non-zero, else delete
+*/
+define oam_add_del {
+ u32 client_index;
+ u32 context;
+ u32 vrf_id;
+ u8 src_address[4];
+ u8 dst_address[4];
+ u8 is_add;
+};
+
+/** \brief OAM add / del target response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code of the request
+*/
+define oam_add_del_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Reset fib table request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param vrf_id - vrf/table id of the fib table to reset
+ @param is_ipv6 - an ipv6 fib to reset if non-zero, else ipv4
+*/
+define reset_fib {
+ u32 client_index;
+ u32 context;
+ u32 vrf_id;
+ u8 is_ipv6;
+};
+
+/** \brief Reset fib response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the reset fib request
+*/
+define reset_fib_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief DHCP Proxy config add / del request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param vrf_id - vrf id
+ @param is_ipv6 - ipv6 if non-zero, else ipv4
+ @param is_add - add the config if non-zero, else delete
+ @param insert_circuit_id - option82 suboption 1 fib number
+ @param dhcp_server[] - server address
+ @param dhcp_src_address[] - <fix this, need details>
+*/
+define dhcp_proxy_config {
+ u32 client_index;
+ u32 context;
+ u32 vrf_id;
+ u8 is_ipv6;
+ u8 is_add;
+ u8 insert_circuit_id;
+ u8 dhcp_server[16];
+ u8 dhcp_src_address[16];
+};
+
+/** \brief DHCP Proxy config response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define dhcp_proxy_config_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief DHCP Proxy set / unset vss request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param tbl_id - table id
+ @param oui - first part of vpn id
+ @param fib_id - second part of vpn id
+ @param is_ipv6 - ip6 if non-zero, else ip4
+ @param is_add - set vss if non-zero, else delete
+*/
+define dhcp_proxy_set_vss {
+ u32 client_index;
+ u32 context;
+ u32 tbl_id;
+ u32 oui;
+ u32 fib_id;
+ u8 is_ipv6;
+ u8 is_add;
+};
+
+/** \brief DHCP proxy set / unset vss response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define dhcp_proxy_set_vss_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Set the ip flow hash config for a fib request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param vrf_id - vrf/fib id
+ @param is_ipv6 - if non-zero the fib is ip6, else ip4
+ @param src - if non-zero include src in flow hash
+ @param dst - if non-zero include dst in flow hash
+ @param sport - if non-zero include sport in flow hash
+ @param dport - if non-zero include dport in flow hash
+ @param proto -if non-zero include proto in flow hash
+ @param reverse - if non-zero include reverse in flow hash
+*/
+define set_ip_flow_hash {
+ u32 client_index;
+ u32 context;
+ u32 vrf_id;
+ u8 is_ipv6;
+ u8 src;
+ u8 dst;
+ u8 sport;
+ u8 dport;
+ u8 proto;
+ u8 reverse;
+};
+
+/** \brief Set the ip flow hash config for a fib response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define set_ip_flow_hash_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IPv6 router advertisement config request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param surpress -
+ @param managed -
+ @param other -
+ @param ll_option -
+ @param send_unicast -
+ @param cease -
+ @param is_no -
+ @param default_router -
+ @param max_interval -
+ @param min_interval -
+ @param lifetime -
+ @param initial_count -
+ @param initial_interval -
+*/
+define sw_interface_ip6nd_ra_config {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 surpress;
+ u8 managed;
+ u8 other;
+ u8 ll_option;
+ u8 send_unicast;
+ u8 cease;
+ u8 is_no;
+ u8 default_router;
+ u32 max_interval;
+ u32 min_interval;
+ u32 lifetime;
+ u32 initial_count;
+ u32 initial_interval;
+};
+
+/** \brief IPv6 router advertisement config response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define sw_interface_ip6nd_ra_config_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IPv6 router advertisement prefix config request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index -
+ @param address[] -
+ @param address_length -
+ @param use_default -
+ @param no_advertise -
+ @param off_link -
+ @param no_autoconfig -
+ @param no_onlink -
+ @param is_no -
+ @param val_lifetime -
+ @param pref_lifetime -
+*/
+define sw_interface_ip6nd_ra_prefix {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 address[16];
+ u8 address_length;
+ u8 use_default;
+ u8 no_advertise;
+ u8 off_link;
+ u8 no_autoconfig;
+ u8 no_onlink;
+ u8 is_no;
+ u32 val_lifetime;
+ u32 pref_lifetime;
+};
+
+/** \brief IPv6 router advertisement prefix config response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define sw_interface_ip6nd_ra_prefix_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IPv6 interface enable / disable request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface used to reach neighbor
+ @param enable - if non-zero enable ip6 on interface, else disable
+*/
+define sw_interface_ip6_enable_disable {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 enable; /* set to true to enable */
+};
+
+/** \brief IPv6 interface enable / disable response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define sw_interface_ip6_enable_disable_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IPv6 set link local address on interface request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface to set link local on
+ @param address[] - the new link local address
+ @param address_length - link local address length
+*/
+define sw_interface_ip6_set_link_local_address {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 address[16];
+ u8 address_length;
+};
+
+/** \brief IPv6 set link local address on interface response
+ @param context - sender context, to match reply w/ request
+ @param retval - error code for the request
+*/
+define sw_interface_ip6_set_link_local_address_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Set unnumbered interface add / del request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface with an IP address
+ @param unnumbered_sw_if_index - interface which will use the address
+ @param is_add - if non-zero set the association, else unset it
+*/
+define sw_interface_set_unnumbered {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index; /* use this intfc address */
+ u32 unnumbered_sw_if_index; /* on this interface */
+ u8 is_add;
+};
+
+/** \brief Set unnumbered interface add / del response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define sw_interface_set_unnumbered_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Create loopback interface request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param mac_address - mac addr to assign to the interface if non-zero
+*/
+define create_loopback {
+ u32 client_index;
+ u32 context;
+ u8 mac_address[6];
+};
+
+/** \brief Create loopback interface response
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - sw index of the interface that was created
+ @param retval - return code for the request
+*/
+define create_loopback_reply {
+ u32 context;
+ i32 retval;
+ u32 sw_if_index;
+};
+
+/** \brief Delete loopback interface request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - sw index of the interface that was created
+*/
+define delete_loopback {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+};
+
+/** \brief Delete loopback interface response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define delete_loopback_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Control ping from client to api server request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+manual_java define control_ping {
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief Control ping from the client to the server response
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+ @param vpe_pid - the pid of the vpe, returned by the server
+*/
+manual_java define control_ping_reply {
+ u32 context;
+ i32 retval;
+ u32 client_index;
+ u32 vpe_pid;
+};
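Beyond liveness checks (as in test_ha.c above), a control_ping is commonly queued right after a ...dump request: dump replies carry no end-of-stream marker, so the ping reply, which arrives after them, serves as the barrier telling the client the dump is complete.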
+
+/** \brief Process a vpe parser cli string request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param cmd_in_shmem - pointer to cli command string
+*/
+define cli_request {
+ u32 client_index;
+ u32 context;
+ u64 cmd_in_shmem;
+};
+
+/** \brief vpe parser cli string response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for request
+ @param reply_in_shmem - Reply string from cli processing if any
+*/
+define cli_reply {
+ u32 context;
+ u32 retval;
+ u64 reply_in_shmem;
+};
+
+/** \brief Set max allowed ARP or ip6 neighbor entries request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_ipv6 - neighbor limit if non-zero, else ARP limit
+ @param arp_neighbor_limit - the new limit, defaults are ~ 50k
+*/
+define set_arp_neighbor_limit {
+ u32 client_index;
+ u32 context;
+ u8 is_ipv6;
+ u32 arp_neighbor_limit;
+};
+
+/** \brief Set max allowed ARP or ip6 neighbor entries response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for request
+*/
+define set_arp_neighbor_limit_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief L2 interface patch add / del request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param rx_sw_if_index - receive side interface
+ @param tx_sw_if_index - transmit side interface
+ @param is_add - if non-zero set up the interface patch, else remove it
+*/
+define l2_patch_add_del {
+ u32 client_index;
+ u32 context;
+ u32 rx_sw_if_index;
+ u32 tx_sw_if_index;
+ u8 is_add;
+};
+
+/** \brief L2 interface patch add / del response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define l2_patch_add_del_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IPv6 segment routing tunnel add / del request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add the tunnel if non-zero, else delete it
+ @param name[] - tunnel name (len. 64)
+ @param src_address[] -
+ @param dst_address[] -
+ @param dst_mask_width -
+ @param inner_vrf_id -
+ @param outer_vrf_id -
+ @param flags_net_byte_order -
+ @param n_segments -
+ @param n_tags -
+ @param segs_and_tags[] -
+ @param policy_name[] - name of policy to associate this tunnel to (len. 64)
+*/
+define sr_tunnel_add_del {
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u8 name[64];
+ u8 src_address[16];
+ u8 dst_address[16];
+ u8 dst_mask_width;
+ u32 inner_vrf_id;
+ u32 outer_vrf_id;
+ u16 flags_net_byte_order;
+ u8 n_segments;
+ u8 n_tags;
+ u8 segs_and_tags[0];
+ u8 policy_name[64];
+};
+
+/** \brief IPv6 segment routing tunnel add / del response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+*/
+define sr_tunnel_add_del_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IPv6 segment routing policy add / del request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add the tunnel if non-zero, else delete it
+ @param name[] - policy name (len. 64)
+ @param tunnel_names[] -
+*/
+define sr_policy_add_del {
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u8 name[64];
+ u8 tunnel_names[0];
+};
+
+/** \brief IPv6 segment routing policy add / del response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+*/
+define sr_policy_add_del_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IPv6 segment routing multicast map to policy add / del request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add the tunnel if non-zero, else delete it
+ @param multicast_address[] - IP6 multicast address
+ @param policy_name[] - policy name (len. 64)
+*/
+define sr_multicast_map_add_del {
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u8 multicast_address[16];
+ u8 policy_name[64];
+};
+
+/** \brief IPv6 segment routing multicast map to policy add / del response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+*/
+define sr_multicast_map_add_del_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Interface set vpath request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface used to reach neighbor
+ @param enable - if non-zero enable, else disable
+*/
+define sw_interface_set_vpath {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 enable;
+};
+
+/** \brief Interface set vpath response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define sw_interface_set_vpath_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief MPLS Ethernet add / del tunnel request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param vrf_id - vrf_id, only for IP4
+ @param tx_sw_if_index - transmit interface
+ @param is_add - add the tunnel if set, delete if 0
+ @param dst_mac_address -
+ @param adj_address -
+ @param adj_address_length -
+*/
+define mpls_ethernet_add_del_tunnel {
+ u32 client_index;
+ u32 context;
+ u32 vrf_id;
+ u32 tx_sw_if_index;
+ u8 is_add;
+ u8 l2_only;
+ u8 dst_mac_address[6];
+ u8 adj_address[4];
+ u8 adj_address_length;
+};
+
+/** \brief Reply for MPLS Ethernet add / delete tunnel request
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+ @param tunnel_sw_if_index - sw_if_index of the tunnel created
+*/
+define mpls_ethernet_add_del_tunnel_reply {
+ u32 context;
+ i32 retval;
+ u32 tunnel_sw_if_index;
+};
+
+/** \brief MPLS Ethernet add / del tunnel 2
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param inner_vrf_id -
+ @param outer_vrf_id -
+ @param resolve_attempts -
+ @param resolve_opaque -
+ @param resolve_if_needed -
+ @param is_add -
+ @param adj_address -
+ @param adj_address_length -
+ @param next_hop_ip4_address_in_outer_vrf -
+*/
+define mpls_ethernet_add_del_tunnel_2 {
+ u32 client_index;
+ u32 context;
+ u32 inner_vrf_id;
+ u32 outer_vrf_id;
+ u32 resolve_attempts;
+ u32 resolve_opaque; /* no need to set this */
+ u8 resolve_if_needed;
+ u8 is_add;
+ u8 l2_only;
+ u8 adj_address[4];
+ u8 adj_address_length;
+ u8 next_hop_ip4_address_in_outer_vrf [4];
+};
+
+/** \brief MPLS Ethernet add/ del tunnel 2
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for add /del request
+*/
+define mpls_ethernet_add_del_tunnel_2_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Set L2 XConnect between two interfaces request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param rx_sw_if_index - Receive interface index
+ @param tx_sw_if_index - Transmit interface index
+ @param enable - enable xconnect if not 0, else set to L3 mode
+*/
+define sw_interface_set_l2_xconnect {
+ u32 client_index;
+ u32 context;
+ u32 rx_sw_if_index;
+ u32 tx_sw_if_index;
+ u8 enable;
+};
+
+/** \brief Set L2 XConnect response
+ @param context - sender context, to match reply w/ request
+ @param retval - L2 XConnect request return code
+*/
+define sw_interface_set_l2_xconnect_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Interface bridge mode request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param rx_sw_if_index - the interface
+ @param bd_id - bridge domain id
+ @param bvi - Setup interface as a bvi, bridge mode only
+ @param shg - Shared horizon group, for bridge mode only
+ @param enable - Enable bridge mode if not 0, else set to L3 mode
+*/
+define sw_interface_set_l2_bridge {
+ u32 client_index;
+ u32 context;
+ u32 rx_sw_if_index;
+ u32 bd_id;
+ u8 shg;
+ u8 bvi;
+ u8 enable;
+};
+
+/** \brief Interface bridge mode response
+ @param context - sender context, to match reply w/ request
+ @param retval - Bridge mode request return code
+*/
+define sw_interface_set_l2_bridge_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief L2 FIB add entry request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param mac - the entry's mac address
+ @param bd_id - the entry's bridge domain id
+ @param sw_if_index - the interface
+ @param is_add - If non zero add the entry, else delete it
+ @param static_mac -
+ @param filter_mac -
+*/
+define l2fib_add_del {
+ u32 client_index;
+ u32 context;
+ u64 mac;
+ u32 bd_id;
+ u32 sw_if_index;
+ u8 is_add;
+ u8 static_mac;
+ u8 filter_mac;
+};
+
+/** \brief L2 FIB add entry response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the add l2fib entry request
+*/
+define l2fib_add_del_reply {
+ u32 context;
+ i32 retval;
+};
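+
+/* Sketch of building the u64 mac field (an assumption to verify against the
+ * l2fib handler: the six address bytes are carried in the u64 in memory
+ * order, the same layout vpp_api_test produces via
+ * unformat_ethernet_address):
+ *
+ *   u8 mac_bytes[6] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 };
+ *   u64 mac = 0;
+ *
+ *   clib_memcpy (&mac, mac_bytes, sizeof (mac_bytes));
+ *   mp->mac = mac;   // remaining fields as in the xconnect sketch above
+ */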
+
+/** \brief Set L2 flags request - set or clear L2 feature bits (defined in l2_input.h)
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface
+ @param is_set - if non-zero, set the bits, else clear them
+ @param feature_bitmap - non-zero bits to set or clear
+*/
+define l2_flags {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 is_set;
+ u32 feature_bitmap;
+};
+
+/** \brief Set L2 flags response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the set l2 flags request
+ @param resulting_feature_bitmap - the feature bitmap value after the request is implemented
+*/
+define l2_flags_reply {
+ u32 context;
+ i32 retval;
+ u32 resulting_feature_bitmap;
+};
+
+/** \brief Set bridge flags (such as L2_LEARN, L2_FWD, L2_FLOOD,
+ L2_UU_FLOOD, or L2_ARP_TERM) request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param bd_id - the bridge domain to set the flags for
+ @param is_set - if non-zero, set the flags, else clear them
+ @param feature_bitmap - non-zero bits to set or clear
+*/
+define bridge_flags {
+ u32 client_index;
+ u32 context;
+ u32 bd_id;
+ u8 is_set;
+ u32 feature_bitmap;
+};
+
+/** \brief Set bridge flags response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the set bridge flags request
+ @param resulting_feature_bitmap - the feature bitmap value after the request is implemented
+*/
+define bridge_flags_reply {
+ u32 context;
+ i32 retval;
+ u32 resulting_feature_bitmap;
+};
+
+/** \brief Set bridge domain ip to mac entry request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param bd_id - the bridge domain the entry is added to / deleted from
+ @param is_add - if non-zero, add the entry, else clear it
+ @param is_ipv6 - if non-zero, ipv6 address, else ipv4 address
+ @param ip_address - IP address
+ @param mac_address - MAC address
+*/
+define bd_ip_mac_add_del {
+ u32 client_index;
+ u32 context;
+ u32 bd_id;
+ u8 is_add;
+ u8 is_ipv6;
+ u8 ip_address[16];
+ u8 mac_address[6];
+};
+
+/** \brief Set bridge domain ip to mac entry response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the add / del request
+*/
+define bd_ip_mac_add_del_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Add/Delete classification table request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - if non-zero add the table, else delete it
+ @param table_index - if add, returns index of the created table, else specifies the table to delete
+ @param nbuckets - number of buckets when adding a table
+ @param memory_size - memory size when adding a table
+ @param skip_n_vectors - number of 16-byte vectors to skip before matching
+ @param match_n_vectors - number of 16-byte vectors to match
+ @param next_table_index - index of next table
+ @param miss_next_index - index of miss table
+ @param mask[] - match mask
+*/
+define classify_add_del_table {
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u32 table_index;
+ u32 nbuckets;
+ u32 memory_size;
+ u32 skip_n_vectors;
+ u32 match_n_vectors;
+ u32 next_table_index;
+ u32 miss_next_index;
+ u8 mask[0];
+};
+
+/** \brief Add/Delete classification table response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the table add/del request
+ @param new_table_index - for add, returned index of the new table
+ @param skip_n_vectors - for add, returned value of skip_n_vectors in table
+ @param match_n_vectors - for add, returned value of match_n_vectors in table
+*/
+define classify_add_del_table_reply {
+ u32 context;
+ i32 retval;
+ u32 new_table_index;
+ u32 skip_n_vectors;
+ u32 match_n_vectors;
+};
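+
+/* Sizing sketch for the variable-length mask[] (the classifier operates on
+ * 16-byte vectors, so the mask is assumed here to supply
+ * match_n_vectors * 16 bytes; the allocation must cover the trailing array):
+ *
+ *   int mask_len = match_n_vectors * 16;
+ *   vl_api_classify_add_del_table_t *mp;
+ *
+ *   mp = vl_msg_api_alloc (sizeof (*mp) + mask_len);
+ *   memset (mp, 0, sizeof (*mp) + mask_len);
+ *   mp->_vl_msg_id = ntohs (VL_API_CLASSIFY_ADD_DEL_TABLE);
+ *   mp->skip_n_vectors = ntohl (skip_n_vectors);
+ *   mp->match_n_vectors = ntohl (match_n_vectors);
+ *   clib_memcpy (mp->mask, mask, mask_len);
+ */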
+
+/** \brief Classify add / del session request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add session if non-zero, else delete
+ @param table_index - index of the table to add/del the session, required
+ @param hit_next_index - for add, hit_next_index of new session, required
+ @param opaque_index - for add, opaque_index of new session
+ @param advance - for add, advance value for session
+ @param match[] - for add, match value for session, required
+*/
+define classify_add_del_session {
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u32 table_index;
+ u32 hit_next_index;
+ u32 opaque_index;
+ i32 advance;
+ u8 match[0];
+};
+
+/** \brief Classify add / del session response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the add/del session request
+*/
+define classify_add_del_session_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Set/unset the classification table for an interface request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_ipv6 - ipv6 if non-zero, else ipv4
+ @param sw_if_index - interface to associate with the table
+ @param table_index - index of the table, if ~0 unset the table
+*/
+define classify_set_interface_ip_table {
+ u32 client_index;
+ u32 context;
+ u8 is_ipv6;
+ u32 sw_if_index;
+ u32 table_index; /* ~0 => off */
+};
+
+/** \brief Set/unset interface classification table response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code
+*/
+define classify_set_interface_ip_table_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Set/unset l2 classification tables for an interface request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface to set/unset tables for
+ @param ip4_table_index - ip4 index, use ~0 for all 3 indexes to unset
+ @param ip6_table_index - ip6 index
+ @param other_table_index - other index
+*/
+define classify_set_interface_l2_tables {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ /* 3 x ~0 => off */
+ u32 ip4_table_index;
+ u32 ip6_table_index;
+ u32 other_table_index;
+};
+
+/** \brief Set/unset l2 classification tables for an interface response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define classify_set_interface_l2_tables_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Get node index using name request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param node_name[] - name of the node
+*/
+define get_node_index {
+ u32 client_index;
+ u32 context;
+ u8 node_name[64];
+};
+
+/** \brief Get node index using name response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+ @param node_index - index of the desired node if found, else ~0
+*/
+define get_node_index_reply {
+ u32 context;
+ i32 retval;
+ u32 node_index;
+};
+
+/** \brief Set the next node for a given node request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param node_name[] - node to add the next node to
+ @param next_name[] - node to add as the next node
+*/
+define add_node_next {
+ u32 client_index;
+ u32 context;
+ u8 node_name[64];
+ u8 next_name[64];
+};
+
+/** \brief Set the next node for a given node response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the add next node request
+ @param next_index - the index of the next node if success, else ~0
+*/
+define add_node_next_reply {
+ u32 context;
+ i32 retval;
+ u32 next_index;
+};
+
+/** \brief DHCP Proxy config 2 add / del request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param rx_vrf_id - receive vrf id
+ @param server_vrf_id - server vrf id
+ @param is_ipv6 - ipv6 if non-zero, else ipv4
+ @param is_add - add the config if non-zero, else delete
+ @param insert_circuit_id - option82 suboption 1 fib number
+ @param dhcp_server[] - server address
+ @param dhcp_src_address[] - <fix this, need details>
+*/
+define dhcp_proxy_config_2 {
+ u32 client_index;
+ u32 context;
+ u32 rx_vrf_id;
+ u32 server_vrf_id;
+ u8 is_ipv6;
+ u8 is_add;
+ u8 insert_circuit_id;
+ u8 dhcp_server[16];
+ u8 dhcp_src_address[16];
+};
+
+/** \brief DHCP Proxy config 2 add / del response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for request
+*/
+define dhcp_proxy_config_2_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief l2tpv3 tunnel interface create request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param client_address - remote client tunnel ip address
+ @param our_address - local tunnel ip address
+ @param is_ipv6 - ipv6 if non-zero, else ipv4
+ @param local_session_id - local tunnel session id
+ @param remote_session_id - remote tunnel session id
+ @param local_cookie - local tunnel cookie
+ @param remote_cookie - remote tunnel cookie
+ @param l2_sublayer_present - l2 sublayer is present in packets if non-zero
+*/
+define l2tpv3_create_tunnel {
+ u32 client_index;
+ u32 context;
+ u8 client_address [16];
+ u8 our_address [16];
+ u8 is_ipv6;
+ u32 local_session_id;
+ u32 remote_session_id;
+ u64 local_cookie;
+ u64 remote_cookie;
+ u8 l2_sublayer_present;
+};
+
+/** \brief l2tpv3 tunnel interface create response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+ @param sw_if_index - index of the new tunnel interface
+*/
+define l2tpv3_create_tunnel_reply {
+ u32 context;
+ i32 retval;
+ u32 sw_if_index;
+};
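+
+/* Reply handling sketch (a hypothetical handler; registering it with the
+ * API message dispatcher is omitted, and tunnel_sw_if_index is an
+ * illustrative variable). retval and sw_if_index arrive in network order
+ * like every other multi-byte field:
+ *
+ *   static void vl_api_l2tpv3_create_tunnel_reply_t_handler
+ *     (vl_api_l2tpv3_create_tunnel_reply_t * mp)
+ *   {
+ *     i32 retval = ntohl (mp->retval);
+ *
+ *     if (retval == 0)
+ *       tunnel_sw_if_index = ntohl (mp->sw_if_index); // save for later calls
+ *   }
+ */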
+
+/** \brief L2TP tunnel set cookies request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - index of the tunnel interface
+ @param new_local_cookie - new local cookie
+ @param new_remote_cookie - new remote cookie
+*/
+define l2tpv3_set_tunnel_cookies {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u64 new_local_cookie;
+ u64 new_remote_cookie;
+};
+
+/** \brief L2TP tunnel set cookies response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define l2tpv3_set_tunnel_cookies_reply {
+ u32 context;
+ i32 retval;
+};
+
+manual_java define sw_if_l2tpv3_tunnel_details {
+ u32 context;
+ u32 sw_if_index;
+ u8 interface_name[64];
+ u8 client_address [16];
+ u8 our_address [16];
+ u32 local_session_id;
+ u32 remote_session_id;
+ u64 local_cookie[2];
+ u64 remote_cookie;
+ u8 l2_sublayer_present;
+};
+
+define sw_if_l2tpv3_tunnel_dump {
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief L2 fib clear table request, clear all mac entries in the l2 fib
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+define l2_fib_clear_table {
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief L2 fib clear table response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define l2_fib_clear_table_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief L2 interface ethernet flow point filtering enable/disable request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface to enable/disable filtering on
+ @param enable_disable - if non-zero enable filtering, else disable
+*/
+define l2_interface_efp_filter {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u32 enable_disable;
+};
+
+/** \brief L2 interface ethernet flow point filtering response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define l2_interface_efp_filter_reply {
+ u32 context;
+ i32 retval;
+};
+
+define l2tpv3_interface_enable_disable {
+ u32 client_index;
+ u32 context;
+ u8 enable_disable;
+ u32 sw_if_index;
+};
+
+define l2tpv3_interface_enable_disable_reply {
+ u32 context;
+ i32 retval;
+};
+
+define l2tpv3_set_lookup_key {
+ u32 client_index;
+ u32 context;
+ /* 0 = ip6 src_address, 1 = ip6 dst_address, 2 = session_id */
+ u8 key;
+};
+
+define l2tpv3_set_lookup_key_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief VXLAN tunnel add / delete request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add the tunnel if non-zero, else delete it
+ @param is_ipv6 - src_address and dst_address are ipv6 if non-zero, else ipv4
+ @param src_address - tunnel source address
+ @param dst_address - tunnel destination address
+ @param encap_vrf_id - fib identifier used for outgoing encapsulated packets
+ @param decap_next_index - index of the decap next graph node
+ @param vni - VXLAN Network Identifier
+*/
+define vxlan_add_del_tunnel {
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u8 is_ipv6;
+ u8 src_address[16];
+ u8 dst_address[16];
+ u32 encap_vrf_id;
+ u32 decap_next_index;
+ u32 vni;
+};
+
+define vxlan_add_del_tunnel_reply {
+ u32 context;
+ i32 retval;
+ u32 sw_if_index;
+};
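+
+/* Address layout sketch for vxlan_add_del_tunnel (an assumption to check
+ * against the handler: when is_ipv6 is zero, only the first 4 of the 16
+ * address bytes are significant; values below are illustrative):
+ *
+ *   mp->is_add = 1;
+ *   mp->is_ipv6 = 0;
+ *   clib_memcpy (mp->src_address, &src_ip4, 4);  // ip4 in bytes [0..3]
+ *   clib_memcpy (mp->dst_address, &dst_ip4, 4);
+ *   mp->encap_vrf_id = ntohl (0);
+ *   mp->decap_next_index = ntohl (~0);  // ~0 selects the default decap node
+ *   mp->vni = ntohl (13);
+ */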
+
+manual_java define vxlan_tunnel_dump {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+};
+
+manual_java define vxlan_tunnel_details {
+ u32 context;
+ u32 sw_if_index;
+ u8 src_address[16];
+ u8 dst_address[16];
+ u32 encap_vrf_id;
+ u32 decap_next_index;
+ u32 vni;
+ u8 is_ipv6;
+};
+
+/** \brief GRE tunnel add / delete request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add the tunnel if non-zero, else delete it
+ @param src_address - tunnel source ip4 address
+ @param dst_address - tunnel destination ip4 address
+ @param outer_table_id - fib table id for the outer header
+*/
+define gre_add_del_tunnel {
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u32 src_address;
+ u32 dst_address;
+ u32 outer_table_id;
+};
+
+define gre_add_del_tunnel_reply {
+ u32 context;
+ i32 retval;
+ u32 sw_if_index;
+};
+
+manual_java define gre_tunnel_dump {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+};
+
+manual_java define gre_tunnel_details {
+ u32 context;
+ u32 sw_if_index;
+ u32 src_address;
+ u32 dst_address;
+ u32 outer_table_id;
+};
+
+/** \brief L2 interface vlan tag rewrite configure request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface the operation is applied to
+ @param vtr_op - Choose from l2_vtr_op_t enum values
+ @param push_dot1q - ethertype of first pushed tag is dot1q if non-zero, else dot1ad
+ @param tag1 - Needed for any push or translate vtr op
+ @param tag2 - Needed for any push 2 or translate x-2 vtr ops
+*/
+define l2_interface_vlan_tag_rewrite {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u32 vtr_op;
+ u32 push_dot1q; // ethertype of first pushed tag is dot1q/dot1ad
+ u32 tag1; // first pushed tag
+ u32 tag2; // second pushed tag
+};
+
+/** \brief L2 interface vlan tag rewrite response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define l2_interface_vlan_tag_rewrite_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief vhost-user interface create request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_server - our side is socket server
+ @param sock_filename - unix socket filename, used to speak with frontend
+ @param renumber - if non-zero, use custom_dev_instance to renumber the interface
+ @param custom_dev_instance - custom device instance number
+ @param use_custom_mac - enable or disable the use of the provided hardware address
+ @param mac_address - hardware address to use if 'use_custom_mac' is set
+*/
+define create_vhost_user_if {
+ u32 client_index;
+ u32 context;
+ u8 is_server;
+ u8 sock_filename[256];
+ u8 renumber;
+ u32 custom_dev_instance;
+ u8 use_custom_mac;
+ u8 mac_address[6];
+};
+
+/** \brief vhost-user interface create response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+ @param sw_if_index - interface the operation is applied to
+*/
+define create_vhost_user_if_reply {
+ u32 context;
+ i32 retval;
+ u32 sw_if_index;
+};
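+
+/* Fixed-size string sketch: sock_filename is a 256-byte field, so a
+ * NUL-terminated copy that cannot overflow might look like this (the path
+ * is a made-up example):
+ *
+ *   mp->is_server = 1;
+ *   strncpy ((char *) mp->sock_filename, "/tmp/vhost-user-1.sock",
+ *            sizeof (mp->sock_filename) - 1);
+ *   mp->use_custom_mac = 0;  // let the driver pick a mac address
+ */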
+
+/** \brief vhost-user interface modify request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface to modify
+ @param is_server - our side is socket server
+ @param sock_filename - unix socket filename, used to speak with frontend
+ @param renumber - if non-zero, use custom_dev_instance to renumber the interface
+ @param custom_dev_instance - custom device instance number
+*/
+define modify_vhost_user_if {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 is_server;
+ u8 sock_filename[256];
+ u8 renumber;
+ u32 custom_dev_instance;
+};
+
+/** \brief vhost-user interface modify response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define modify_vhost_user_if_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief vhost-user interface delete request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface to delete
+*/
+define delete_vhost_user_if {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+};
+
+/** \brief vhost-user interface delete response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define delete_vhost_user_if_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Create a subinterface request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - index of the parent interface
+ @param sub_id - subinterface id
+*/
+define create_subif {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u32 sub_id;
+
+ /* These fields map directly onto the subif template */
+ u8 no_tags;
+ u8 one_tag;
+ u8 two_tags;
+ u8 dot1ad; // 0 = dot1q, 1=dot1ad
+ u8 exact_match;
+ u8 default_sub;
+ u8 outer_vlan_id_any;
+ u8 inner_vlan_id_any;
+ u16 outer_vlan_id;
+ u16 inner_vlan_id;
+};
+
+define create_subif_reply {
+ u32 context;
+ i32 retval;
+ u32 sw_if_index;
+};
+
+/** \brief show version
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+define show_version {
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief show version response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+ @param program - name of the program (vpe)
+ @param version - version of the program
+ @param build_date - date the program was built
+ @param build_directory - root of the workspace where the program was built
+*/
+manual_java define show_version_reply {
+ u32 context;
+ i32 retval;
+ u8 program[32];
+ u8 version[32];
+ u8 build_date[32];
+ u8 build_directory[256];
+};
+
+/** \brief Vhost-user interface details structure (fix this)
+ @param sw_if_index - index of the interface
+ @param interface_name - name of interface
+ @param virtio_net_hdr_sz - net header size
+ @param features - interface features
+ @param is_server - vhost-user server socket
+ @param sock_filename - socket filename
+ @param num_regions - number of used memory regions
+*/
+manual_java define sw_interface_vhost_user_details {
+ u32 context;
+ u32 sw_if_index;
+ u8 interface_name[64];
+ u32 virtio_net_hdr_sz;
+ u64 features;
+ u8 is_server;
+ u8 sock_filename[256];
+ u32 num_regions;
+ i32 sock_errno;
+};
+
+/* works */
+define sw_interface_vhost_user_dump {
+ u32 client_index;
+ u32 context;
+};
+
+manual_java define ip_address_details {
+ u32 client_index;
+ u32 context;
+ u8 ip[16];
+ u8 prefix_length;
+};
+
+manual_java define ip_address_dump {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 is_ipv6;
+};
+
+manual_java define ip_details {
+ u32 sw_if_index;
+ u32 context;
+};
+
+define ip_dump {
+ u32 client_index;
+ u32 context;
+ u8 is_ipv6;
+};
+
+/** \brief l2 fib table entry structure
+ @param bd_id - the l2 fib / bridge domain table id
+ @param mac - the entry's mac address
+ @param sw_if_index - index of the interface
+ @param static_mac - the entry is statically configured.
+ @param filter_mac - the entry is a mac filter entry.
+ @param bvi_mac - the mac address is a bridge virtual interface
+*/
+manual_java define l2_fib_table_entry {
+ u32 context;
+ u32 bd_id;
+ u64 mac;
+ u32 sw_if_index;
+ u8 static_mac;
+ u8 filter_mac;
+ u8 bvi_mac;
+};
+
+/** \brief Dump l2 fib (aka bridge domain) table
+ @param client_index - opaque cookie to identify the sender
+ @param bd_id - the l2 fib / bridge domain table identifier
+*/
+manual_java define l2_fib_table_dump {
+ u32 client_index;
+ u32 context;
+ u32 bd_id;
+};
+
+define vxlan_gpe_add_del_tunnel {
+ u32 client_index;
+ u32 context;
+ u8 is_ipv6;
+ u8 local[16];
+ u8 remote[16];
+ u32 encap_vrf_id;
+ u32 decap_vrf_id;
+ u8 protocol;
+ u32 vni;
+ u8 is_add;
+};
+
+define vxlan_gpe_add_del_tunnel_reply {
+ u32 context;
+ i32 retval;
+ u32 sw_if_index;
+};
+
+manual_java define vxlan_gpe_tunnel_dump {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+};
+
+manual_java define vxlan_gpe_tunnel_details {
+ u32 context;
+ u32 sw_if_index;
+ u8 local[16];
+ u8 remote[16];
+ u32 vni;
+ u8 protocol;
+ u32 encap_vrf_id;
+ u32 decap_vrf_id;
+ u8 is_ipv6;
+};
+
+/** \brief add or delete locator_set
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add the locator_set if non-zero, else delete
+ @param locator_set_name - locator name
+*/
+define lisp_add_del_locator_set {
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u8 locator_set_name[64];
+};
+
+/** \brief Reply for locator_set add/del
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define lisp_add_del_locator_set_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief add or delete locator for locator_set
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add the locator if non-zero, else delete
+ @param locator_set_name - name of locator_set to add/del locator
+ @param sw_if_index - index of the interface
+ @param priority - priority of the lisp locator
+ @param weight - weight of the lisp locator
+*/
+define lisp_add_del_locator {
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u8 locator_set_name[64];
+ u32 sw_if_index;
+ u8 priority;
+ u8 weight;
+};
+
+/** \brief Reply for locator add/del
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define lisp_add_del_locator_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief add or delete lisp eid-table
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add the eid-table entry if non-zero, else delete
+ @param is_ipv6 - if non-zero the address is ipv6, else ipv4
+ @param ip_address - array of address bytes
+ @param prefix_len - prefix len
+ @param locator_set_name - name of locator_set to add/del eid-table
+ @param vni - virtual network instance
+*/
+define lisp_add_del_local_eid {
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u8 is_ipv6;
+ u8 ip_address[16];
+ u8 prefix_len;
+ u8 locator_set_name[64];
+ u32 vni;
+};
+
+/** \brief Reply for local_eid add/del
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define lisp_add_del_local_eid_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief add or delete lisp gpe forwarding entry (map tunnel)
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add the forwarding entry if non-zero, else delete
+ @param eid_is_ipv6 - if non-zero the address is ipv6, else ipv4
+ @param eid_ip_address - array of address bytes
+ @param eid_prefix_len - prefix len
+ @param address_is_ipv6 - if non-zero the address is ipv6, else ipv4
+ @param source_ip_address - array of address bytes
+ @param destination_ip_address - array of address bytes
+*/
+define lisp_gpe_add_del_fwd_entry {
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u8 eid_is_ipv6;
+ u8 eid_ip_address[16];
+ u8 eid_prefix_len;
+ u8 address_is_ipv6;
+ u8 source_ip_address[16];
+ u8 destination_ip_address[16];
+};
+
+/** \brief Reply for gpe_fwd_entry add/del
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define lisp_gpe_add_del_fwd_entry_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief add or delete map-resolver
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add address if non-zero, else delete
+ @param is_ipv6 - if non-zero the address is ipv6, else ipv4
+ @param ip_address - array of address bytes
+*/
+define lisp_add_del_map_resolver {
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u8 is_ipv6;
+ u8 ip_address[16];
+};
+
+/** \brief Reply for map_resolver add/del
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define lisp_add_del_map_resolver_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief enable or disable lisp-gpe protocol
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_en - enable protocol if non-zero, else disable
+*/
+define lisp_gpe_enable_disable {
+ u32 client_index;
+ u32 context;
+ u8 is_en;
+};
+
+/** \brief Reply for gpe enable/disable
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define lisp_gpe_enable_disable_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief enable or disable LISP feature
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_en - enable protocol if non-zero, else disable
+*/
+define lisp_enable_disable {
+ u32 client_index;
+ u32 context;
+ u8 is_en;
+};
+
+/** \brief Reply for lisp enable/disable
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define lisp_enable_disable_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief add or delete gpe_iface
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add the gpe_iface if non-zero, else delete
+*/
+define lisp_gpe_add_del_iface {
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u32 table_id;
+ u32 vni;
+};
+
+/** \brief Reply for gpe_iface add/del
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define lisp_gpe_add_del_iface_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief configure or disable LISP PITR node
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param ls_name - locator set name
+ @param is_add - add locator set if non-zero, else disable pitr
+*/
+define lisp_pitr_set_locator_set {
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u8 ls_name[64];
+};
+
+/** \brief Reply for lisp_pitr_set_locator_set
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define lisp_pitr_set_locator_set_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief add or delete remote static mapping
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add address if non-zero, else delete
+ @param del_all - if set, delete all remote mappings
+ @param vni - virtual network instance
+ @param action - negative map-reply action
+ @param eid_is_ip4 - EIDs are ipv4 if non-zero, else ipv6
+ @param deid - destination EID
+ @param seid - source EID
+ @param deid_len - destination EID prefix length
+ @param seid_len - source EID prefix length
+ @param rloc_num - number of remote locators
+ @param rlocs - remote locator data
+*/
+define lisp_add_del_remote_mapping {
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u8 del_all;
+ u32 vni;
+ u8 action;
+ u8 eid_is_ip4;
+ u8 deid[16];
+ u8 seid[16];
+ u8 deid_len;
+ u8 seid_len;
+ u32 rloc_num;
+ u8 rlocs[0];
+};
+
+/** \brief Reply for lisp_add_del_remote_mapping
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+
+define lisp_add_del_remote_mapping_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief add or delete map request itr rlocs
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add the itr rlocs if non-zero, else delete
+ @param locator_set_name - locator set name
+*/
+define lisp_add_del_map_request_itr_rlocs {
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u8 locator_set_name[64];
+};
+
+/** \brief Reply for lisp_add_del_map_request_itr_rlocs
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+
+define lisp_add_del_map_request_itr_rlocs_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief map/unmap vni to vrf
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add or delete mapping
+ @param vni - virtual network id
+ @param vrf - vrf
+*/
+define lisp_eid_table_add_del_map {
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u32 vni;
+ u32 vrf;
+};
+
+/** \brief Reply for lisp_eid_table_add_del_map
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define lisp_eid_table_add_del_map_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief LISP locator_set status
+ @param locator_set_name - name of the locator_set
+ @param sw_if_index - sw_if_index of the locator
+ @param priority - locator priority
+ @param weight - locator weight
+ */
+manual_java define lisp_locator_set_details {
+ u32 context;
+ u8 local;
+ u8 locator_set_name[64];
+ u32 sw_if_index;
+ u8 is_ipv6;
+ u8 ip_address[16];
+ u8 prefix_len;
+ u8 priority;
+ u8 weight;
+};
+
+/** \brief Request for locator_set summary status
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ */
+define lisp_locator_set_dump {
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief LISP local eid table status
+ @param locator_set_name - name of the locator_set
+ @param eid_is_ipv6 - if non-zero the address is ipv6, else ipv4
+ @param eid_ip_address - array of address bytes
+ @param eid_prefix_len - prefix len
+ */
+manual_java define lisp_local_eid_table_details {
+ u32 context;
+ u8 locator_set_name[64];
+ u8 eid_is_ipv6;
+ u32 vni;
+ u8 eid_ip_address[16];
+ u8 eid_prefix_len;
+};
+
+/** \brief Request for local eid table summary status
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ */
+define lisp_local_eid_table_dump {
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief Shows relationship between vni and vrf
+ @param vrf - VRF index
+ @param vni - virtual network instance
+ */
+manual_java define lisp_eid_table_map_details {
+ u32 context;
+ u32 vni;
+ u32 vrf;
+};
+
+/** \brief Request for lisp_eid_table_map_details
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ */
+define lisp_eid_table_map_dump {
+ u32 client_index;
+ u32 context;
+};
+
+manual_java define lisp_gpe_tunnel_details {
+ u32 context;
+ u32 tunnels;
+ u8 is_ipv6;
+ u8 source_ip[16];
+ u8 destination_ip[16];
+ u32 encap_fib_id;
+ u32 decap_fib_id;
+ u32 dcap_next;
+ u8 lisp_ver;
+ u8 next_protocol;
+ u8 flags;
+ u8 ver_res;
+ u8 res;
+ u32 iid;
+};
+
+/** \brief Request for gpe tunnel summary status
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ */
+define lisp_gpe_tunnel_dump {
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief LISP map resolver status
+ @param locator_set_name - name of the locator_set
+ @param is_ipv6 - if non-zero the address is ipv6, else ipv4
+ @param ip_address - array of address bytes
+ */
+manual_java define lisp_map_resolver_details {
+ u32 context;
+ u8 is_ipv6;
+ u8 ip_address[16];
+};
+
+/** \brief Request for map resolver summary status
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ */
+define lisp_map_resolver_dump {
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief Status of lisp-gpe protocol, enable or disable
+ @param context - sender context, to match reply w/ request
+ @param is_en - enable protocol if non-zero, else disable
+*/
+manual_java define lisp_enable_disable_status_details {
+ u32 context;
+ u8 feature_status;
+ u8 gpe_status;
+};
+
+/** \brief Request for lisp-gpe protocol status
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+define lisp_enable_disable_status_dump {
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief Get LISP map request itr rlocs status
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ */
+define lisp_get_map_request_itr_rlocs {
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief Reply for map request itr rlocs summary status
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+ @param locator_set_name - name of the locator_set
+ */
+define lisp_get_map_request_itr_rlocs_reply {
+ u32 context;
+ i32 retval;
+ u8 locator_set_name[64];
+};
+
+/* Gross kludge, DGMS */
+define interface_name_renumber {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u32 new_show_dev_instance;
+};
+
+define interface_name_renumber_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Register for ip4 arp resolution events
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param enable_disable - 1 => register for events, 0 => cancel registration
+ @param pid - sender's pid
+ @param address - the exact ip4 address of interest
+*/
+define want_ip4_arp_events {
+ u32 client_index;
+ u32 context;
+ u8 enable_disable;
+ u32 pid;
+ u32 address;
+};
+
+/** \brief Reply for interface events registration
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define want_ip4_arp_events_reply {
+ u32 context;
+ i32 retval;
+};
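+
+/* Registration sketch: unlike dump/details pairs, this arms an asynchronous
+ * notification; ip4_arp_event messages then arrive on the client's input
+ * queue with no further request. The address byte order is assumed to be
+ * network order, as produced by inet_pton:
+ *
+ *   mp->enable_disable = 1;     // 0 cancels the registration
+ *   mp->pid = getpid ();
+ *   inet_pton (AF_INET, "6.0.0.1", &mp->address);
+ */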
+
+/** \brief Tell client about an ip4 arp resolution event
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param address - the exact ip4 address of interest
+ @param pid - client pid registered to receive notification
+ @param sw_if_index - interface which received ARP packet
+ @param new_mac - the new mac address
+*/
+define ip4_arp_event {
+ u32 client_index;
+ u32 context;
+ u32 address;
+ u32 pid;
+ u32 sw_if_index;
+ u8 new_mac[6];
+};
+
+/** \brief L2 bridge domain add or delete request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param bd_id - the bridge domain to create
+ @param flood - enable/disable bcast/mcast flooding in the bd
+ @param uu_flood - enable/disable unknown unicast flood in the bd
+ @param forward - enable/disable forwarding on all interfaces in the bd
+ @param learn - enable/disable learning on all interfaces in the bd
+ @param arp_term - enable/disable arp termination in the bd
+ @param is_add - add or delete flag
+*/
+define bridge_domain_add_del {
+ u32 client_index;
+ u32 context;
+ u32 bd_id;
+ u8 flood;
+ u8 uu_flood;
+ u8 forward;
+ u8 learn;
+ u8 arp_term;
+ u8 is_add;
+};
+
+/** \brief L2 bridge domain add or delete response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the add / del request
+*/
+define bridge_domain_add_del_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief L2 bridge domain request operational state details
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param bd_id - the bridge domain id desired or ~0 to request all bds
+*/
+manual_java define bridge_domain_dump {
+ u32 client_index;
+ u32 context;
+ u32 bd_id;
+};
+
+/** \brief L2 bridge domain operational state response
+ @param bd_id - the bridge domain id
+ @param flood - bcast/mcast flooding state on all interfaces in the bd
+ @param uu_flood - unknown unicast flooding state on all interfaces in the bd
+ @param forward - forwarding state on all interfaces in the bd
+ @param learn - learning state on all interfaces in the bd
+ @param arp_term - arp termination state on all interfaces in the bd
+ @param n_sw_ifs - number of sw_if_index's in the domain
+*/
+manual_java define bridge_domain_details {
+ u32 context;
+ u32 bd_id;
+ u8 flood;
+ u8 uu_flood;
+ u8 forward;
+ u8 learn;
+ u8 arp_term;
+ u32 bvi_sw_if_index;
+ u32 n_sw_ifs;
+};
+
+/** \brief L2 bridge domain sw interface operational state response
+ @param bd_id - the bridge domain id
+ @param sw_if_index - sw_if_index in the domain
+ @param shg - split horizon group for the interface
+*/
+manual_java define bridge_domain_sw_if_details {
+ u32 context;
+ u32 bd_id;
+ u32 sw_if_index;
+ u8 shg;
+};
+
+/** \brief DHCP Client config add / del request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - index of the interface for DHCP client
+ @param hostname - hostname
+ @param is_add - add the config if non-zero, else delete
+ @param want_dhcp_event - DHCP event sent to the sender
+ via dhcp_compl_event API message if non-zero
+ @param pid - sender's pid
+*/
+define dhcp_client_config {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 hostname[64];
+ u8 is_add;
+ u8 want_dhcp_event;
+ u32 pid;
+};
+
+/** \brief DHCP Client config response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define dhcp_client_config_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Set/unset input ACL interface
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface to set/unset input ACL
+ @param ip4_table_index - ip4 classify table index (~0 for skip)
+ @param ip6_table_index - ip6 classify table index (~0 for skip)
+ @param l2_table_index - l2 classify table index (~0 for skip)
+ @param is_add - Set input ACL if non-zero, else unset
+ Note: it is recommended to set just one valid table_index per call
+ (ip4_table_index, ip6_table_index, or l2_table_index).
+*/
+define input_acl_set_interface {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u32 ip4_table_index;
+ u32 ip6_table_index;
+ u32 l2_table_index;
+ u8 is_add;
+};
+
+/** \brief Set/unset input ACL interface response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define input_acl_set_interface_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IPsec: Add/delete Security Policy Database
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add SPD if non-zero, else delete
+ @param spd_id - SPD instance id (control plane allocated)
+*/
+
+define ipsec_spd_add_del {
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u32 spd_id;
+};
+
+/** \brief Reply for IPsec: Add/delete Security Policy Database entry
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+
+define ipsec_spd_add_del_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IPsec: Add/delete SPD from interface
+
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add security mode if non-zero, else delete
+ @param sw_if_index - index of the interface
+ @param spd_id - SPD instance id to use for lookups
+*/
+
+
+define ipsec_interface_add_del_spd {
+ u32 client_index;
+ u32 context;
+
+ u8 is_add;
+ u32 sw_if_index;
+ u32 spd_id;
+};
+
+/** \brief Reply for IPsec: Add/delete SPD from interface
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+
+define ipsec_interface_add_del_spd_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IPsec: Add/delete Security Policy Database entry
+
+ See RFC 4301, 4.4.1.1 on how to match packet to selectors
+
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add SPD if non-zero, else delete
+ @param spd_id - SPD instance id (control plane allocated)
+ @param priority - priority of SPD entry (non-unique value). Used to order SPD matching - higher priorities match before lower
+ @param is_outbound - entry applies to outbound traffic if non-zero, otherwise applies to inbound traffic
+ @param is_ipv6 - remote/local address are IPv6 if non-zero, else IPv4
+ @param remote_address_start - start of remote address range to match
+ @param remote_address_stop - end of remote address range to match
+ @param local_address_start - start of local address range to match
+ @param local_address_stop - end of local address range to match
+ @param protocol - protocol type to match [0 means any]
+ @param remote_port_start - start of remote port range to match ...
+ @param remote_port_stop - end of remote port range to match [0 to 65535 means ANY, 65535 to 0 means OPAQUE]
+ @param local_port_start - start of local port range to match ...
+ @param local_port_stop - end of local port range to match [0 to 65535 means ANY, 65535 to 0 means OPAQUE]
+ @param policy - 0 = bypass (no IPsec processing), 1 = discard (discard packet with ICMP processing), 2 = resolve (send request to control plane for SA resolving, and discard without ICMP processing), 3 = protect (apply IPsec policy using following parameters)
+ @param sa_id - SAD instance id (control plane allocated)
+
+*/
+
+define ipsec_spd_add_del_entry {
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+
+ u32 spd_id;
+ i32 priority;
+ u8 is_outbound;
+
+ // Selector
+ u8 is_ipv6;
+ u8 is_ip_any;
+ u8 remote_address_start[16];
+ u8 remote_address_stop[16];
+ u8 local_address_start[16];
+ u8 local_address_stop[16];
+
+ u8 protocol;
+
+ u16 remote_port_start;
+ u16 remote_port_stop;
+ u16 local_port_start;
+ u16 local_port_stop;
+
+ // Policy
+ u8 policy;
+ u32 sa_id;
+};
+
+/** \brief Reply for IPsec: Add/delete Security Policy Database entry
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+
+define ipsec_spd_add_del_entry_reply {
+ u32 context;
+ i32 retval;
+};
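+
+/* Selector sketch for ipsec_spd_add_del_entry, using the ANY port-range
+ * convention documented above (all values here are illustrative):
+ *
+ *   mp->spd_id = ntohl (1);
+ *   mp->priority = ntohl (100);   // higher priorities match first
+ *   mp->is_outbound = 1;
+ *   mp->remote_port_start = mp->local_port_start = ntohs (0);
+ *   mp->remote_port_stop = mp->local_port_stop = ntohs (65535);  // ANY
+ *   mp->policy = 3;               // 3 = protect, using sa_id
+ *   mp->sa_id = ntohl (10);
+ */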
+
+/** \brief IPsec: Add/delete Security Association Database entry
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add SAD entry if non-zero, else delete
+
+ @param sad_id - sad id
+
+ @param spi - security parameter index
+
+ @param protocol - 0 = AH, 1 = ESP
+
+ @param crypto_algorithm - 0 = Null, 1 = AES-CBC-128, 2 = AES-CBC-192, 3 = AES-CBC-256, 4 = 3DES-CBC
+ @param crypto_key_length - length of crypto_key in bytes
+ @param crypto_key - crypto keying material
+
+ @param integrity_algorithm - 0 = None, 1 = MD5-96, 2 = SHA1-96, 3 = SHA-256, 4 = SHA-384, 5=SHA-512
+ @param integrity_key_length - length of integrity_key in bytes
+ @param integrity_key - integrity keying material
+
+ @param use_extended_sequence_number - use ESN when non-zero
+
+ @param is_tunnel - IPsec tunnel mode if non-zero, else transport mode
+ @param is_tunnel_ipv6 - IPsec tunnel is IPv6 if non-zero, else IPv4; only valid if is_tunnel is non-zero
+ @param tunnel_src_address - IPsec tunnel source address IPv6 if is_tunnel_ipv6 is non-zero, else IPv4. Only valid if is_tunnel is non-zero
+ @param tunnel_dst_address - IPsec tunnel destination address IPv6 if is_tunnel_ipv6 is non-zero, else IPv4. Only valid if is_tunnel is non-zero
+
+ To be added:
+ Anti-replay
+ IPsec tunnel address copy mode (to support GDOI)
+ */
+
+define ipsec_sad_add_del_entry {
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+
+ u32 sad_id;
+
+ u32 spi;
+
+ u8 protocol;
+
+ u8 crypto_algorithm;
+ u8 crypto_key_length;
+ u8 crypto_key[128];
+
+ u8 integrity_algorithm;
+ u8 integrity_key_length;
+ u8 integrity_key[128];
+
+ u8 use_extended_sequence_number;
+
+ u8 is_tunnel;
+ u8 is_tunnel_ipv6;
+ u8 tunnel_src_address[16];
+ u8 tunnel_dst_address[16];
+};
+
+/** \brief Reply for IPsec: Add/delete Security Association Database entry
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+
+define ipsec_sad_add_del_entry_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IPsec: Update Security Association keys
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+
+ @param sa_id - sa id
+
+ @param crypto_key_length - length of crypto_key in bytes
+ @param crypto_key - crypto keying material
+
+ @param integrity_key_length - length of integrity_key in bytes
+ @param integrity_key - integrity keying material
+*/
+
+define ipsec_sa_set_key {
+ u32 client_index;
+ u32 context;
+
+ u32 sa_id;
+
+ u8 crypto_key_length;
+ u8 crypto_key[128];
+
+ u8 integrity_key_length;
+ u8 integrity_key[128];
+};
+
+/** \brief Reply for IPsec: Update Security Association keys
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+
+define ipsec_sa_set_key_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IKEv2: Add/delete profile
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+
+ @param name - IKEv2 profile name
+ @param is_add - Add IKEv2 profile if non-zero, else delete
+*/
+define ikev2_profile_add_del {
+ u32 client_index;
+ u32 context;
+
+ u8 name[64];
+ u8 is_add;
+};
+
+/** \brief Reply for IKEv2: Add/delete profile
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define ikev2_profile_add_del_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IKEv2: Set IKEv2 profile authentication method
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+
+ @param name - IKEv2 profile name
+ @param auth_method - IKEv2 authentication method (shared-key-mic/rsa-sig)
+ @param is_hex - Authentication data in hex format if non-zero, else string
+ @param data_len - Authentication data length
+ @param data - Authentication data (for rsa-sig cert file path)
+*/
+define ikev2_profile_set_auth {
+ u32 client_index;
+ u32 context;
+
+ u8 name[64];
+ u8 auth_method;
+ u8 is_hex;
+ u32 data_len;
+ u8 data[0];
+};
+
+/** \brief Reply for IKEv2: Set IKEv2 profile authentication method
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define ikev2_profile_set_auth_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IKEv2: Set IKEv2 profile local/remote identification
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+
+ @param name - IKEv2 profile name
+ @param is_local - Identification is local if non-zero, else remote
+ @param id_type - Identification type
+ @param data_len - Identification data length
+ @param data - Identification data
+*/
+define ikev2_profile_set_id {
+ u32 client_index;
+ u32 context;
+
+ u8 name[64];
+ u8 is_local;
+ u8 id_type;
+ u32 data_len;
+ u8 data[0];
+};
+
+/** \brief Reply for IKEv2: Set IKEv2 profile local/remote identification
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define ikev2_profile_set_id_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IKEv2: Set IKEv2 profile traffic selector parameters
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+
+ @param name - IKEv2 profile name
+ @param is_local - Traffic selector is local if non-zero, else remote
+ @param proto - Traffic selector IP protocol (if zero not relevant)
+ @param start_port - The smallest port number allowed by traffic selector
+ @param end_port - The largest port number allowed by traffic selector
+ @param start_addr - The smallest address included in traffic selector
+ @param end_addr - The largest address included in traffic selector
+*/
+define ikev2_profile_set_ts {
+ u32 client_index;
+ u32 context;
+
+ u8 name[64];
+ u8 is_local;
+ u8 proto;
+ u16 start_port;
+ u16 end_port;
+ u32 start_addr;
+ u32 end_addr;
+};
+
+/** \brief Reply for IKEv2: Set IKEv2 profile traffic selector parameters
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define ikev2_profile_set_ts_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IKEv2: Set IKEv2 local RSA private key
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+
+ @param key_file - Key file absolute path
+*/
+define ikev2_set_local_key {
+ u32 client_index;
+ u32 context;
+
+ u8 key_file[256];
+};
+
+/** \brief Reply for IKEv2: Set IKEv2 local key
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define ikev2_set_local_key_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Tell client about a DHCP completion event
+ @param client_index - opaque cookie to identify the sender
+ @param pid - client pid registered to receive notification
+ @param hostname - hostname of the client
+ @param is_ipv6 - if non-zero the address is ipv6, else ipv4
+ @param host_address - Host IP address
+ @param router_address - Router IP address
+ @param host_mac - Host MAC address
+*/
+manual_java define dhcp_compl_event {
+ u32 client_index;
+ u32 pid;
+ u8 hostname[64];
+ u8 is_ipv6;
+ u8 host_address[16];
+ u8 router_address[16];
+ u8 host_mac[6];
+};
+
+/** \brief Add MAP domains
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param ip6_prefix - Rule IPv6 prefix
+ @param ip4_prefix - Rule IPv4 prefix
+ @param ip6_src - MAP domain IPv6 BR address / Tunnel source
+ @param ip6_prefix_len - Rule IPv6 prefix length
+ @param ip4_prefix_len - Rule IPv4 prefix length
+ @param ip6_src_prefix_len - ip6_src prefix length
+ @param ea_bits_len - Embedded Address bits length
+ @param psid_offset - Port Set Identifier (PSID) offset
+ @param psid_length - PSID length
+ @param is_translation - MAP-E / MAP-T
+ @param mtu - MTU
+*/
+define map_add_domain {
+ u32 client_index;
+ u32 context;
+ u8 ip6_prefix[16];
+ u8 ip4_prefix[4];
+ u8 ip6_src[16];
+ u8 ip6_prefix_len;
+ u8 ip4_prefix_len;
+ u8 ip6_src_prefix_len;
+ u8 ea_bits_len;
+ u8 psid_offset;
+ u8 psid_length;
+ u8 is_translation;
+ u16 mtu;
+};
+
+/** \brief Reply for MAP domain add
+ @param context - returned sender context, to match reply w/ request
+ @param index - MAP domain index
+ @param retval - return code
+*/
+define map_add_domain_reply {
+ u32 context;
+ u32 index;
+ i32 retval;
+};
+
+/** \brief Delete MAP domain
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param index - MAP Domain index
+*/
+define map_del_domain {
+ u32 client_index;
+ u32 context;
+ u32 index;
+};
+
+/** \brief Reply for MAP domain del
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define map_del_domain_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Add or Delete MAP rule from a domain (Only used for shared IPv4 per subscriber)
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param index - MAP Domain index
+ @param is_add - If 1 add rule, if 0 delete rule
+ @param ip6_dst - MAP CE IPv6 address
+ @param psid - Rule PSID
+*/
+define map_add_del_rule {
+ u32 client_index;
+ u32 context;
+ u32 index;
+ u32 is_add;
+ u8 ip6_dst[16];
+ u16 psid;
+};
+
+/** \brief Reply for MAP rule add/del
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define map_add_del_rule_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Get list of map domains
+ @param client_index - opaque cookie to identify the sender
+*/
+define map_domain_dump {
+ u32 client_index;
+ u32 context;
+};
+
+manual_java define map_domain_details {
+ u32 context;
+ u32 domain_index;
+ u8 ip6_prefix[16];
+ u8 ip4_prefix[4];
+ u8 ip6_src[16];
+ u8 ip6_prefix_len;
+ u8 ip4_prefix_len;
+ u8 ip6_src_len;
+ u8 ea_bits_len;
+ u8 psid_offset;
+ u8 psid_length;
+ u8 flags;
+ u16 mtu;
+ u8 is_translation;
+};
+
+define map_rule_dump {
+ u32 client_index;
+ u32 context;
+ u32 domain_index;
+};
+
+manual_java define map_rule_details {
+ u32 context;
+ u8 ip6_dst[16];
+ u16 psid;
+};
+
+/** \brief Request for a single block of summary stats
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+define map_summary_stats {
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief Reply for map_summary_stats request
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for request
+ @param total_bindings - total number of MAP domain bindings
+ @param total_pkts - total packet count
+ @param total_bytes - total byte count
+ @param total_ip4_fragments - total ip4 fragment count
+ @param total_security_check - total security check count
+*/
+define map_summary_stats_reply {
+ u32 context;
+ i32 retval;
+ u64 total_bindings;
+ u64 total_pkts[2];
+ u64 total_bytes[2];
+ u64 total_ip4_fragments;
+ u64 total_security_check[2];
+};
+
+/** \brief cop: enable/disable junk filtration features on an interface
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - desired interface
+ @param enable_disable - 1 => enable, 0 => disable
+*/
+
+define cop_interface_enable_disable {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 enable_disable;
+};
+
+/** \brief cop: interface enable/disable junk filtration reply
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+
+define cop_interface_enable_disable_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief cop: enable/disable whitelist filtration features on an interface
+ Note: the supplied fib_id must match in order to remove the feature!
+
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface handle, physical interfaces only
+ @param fib_id - fib identifier for the whitelist / blacklist fib
+ @param ip4 - 1 => enable ip4 filtration, 0=> disable ip4 filtration
+ @param ip6 - 1 => enable ip6 filtration, 0=> disable ip6 filtration
+ @param default_cop - 1 => enable non-ip4, non-ip6 filtration 0=> disable it
+*/
+
+define cop_whitelist_enable_disable {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u32 fib_id;
+ u8 ip4;
+ u8 ip6;
+ u8 default_cop;
+};
+
+/** \brief cop: interface enable/disable whitelist filtration reply
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+
+define cop_whitelist_enable_disable_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief get_node_graph - get a copy of the vpp node graph
+ including the current set of graph arcs.
+
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+
+define get_node_graph {
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief get_node_graph_reply
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+ @param reply_in_shmem - result from vlib_node_serialize, in shared
+ memory. Process with vlib_node_unserialize, remember to switch
+ heaps and free the result.
+*/
+
+define get_node_graph_reply {
+ u32 context;
+ i32 retval;
+ u64 reply_in_shmem;
+};
+
+/** \brief Clear interface statistics
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - index of the interface to clear statistics
+*/
+define sw_interface_clear_stats {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+};
+
+/** \brief Reply to sw_interface_clear_stats
+ @param context - sender context which was passed in the request
+ @param retval - return code of the set flags request
+*/
+define sw_interface_clear_stats_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IOAM Trace : Set TRACE profile
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param id - profile id
+ @param trace_type - Trace type
+ @param trace_num_elt - Number of nodes in trace path
+ @param trace_ppc - Trace PPC (none/encap/decap)
+ @param trace_tsp - Trace timestamp precision (0-sec,1-ms,2-us,3-ns)
+ @param trace_app_data - Trace application data, can be any 4 bytes
+ @param pow_enable - Proof of Work enabled or not flag
+ @param node_id - Id of this node
+*/
+define trace_profile_add {
+ u32 client_index;
+ u32 context;
+ u16 id;
+ u8 trace_type;
+ u8 trace_num_elt;
+ u8 trace_ppc;
+ u8 trace_tsp;
+ u32 trace_app_data;
+ u8 pow_enable;
+ u32 node_id;
+};
+
+/** \brief Trace profile add / del response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+*/
+define trace_profile_add_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IOAM Trace enable trace profile for a flow
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param id - id of the trace profile to be applied
+ @param dest_ipv6 - Destination IPv6 address
+ @param prefix_length - prefix mask
+ @param vrf_id - VRF ID
+ @param trace_op - Trace operation (add/mod/del)
+ @param enable - apply/remove the trace profile for the flow
+*/
+define trace_profile_apply {
+ u32 client_index;
+ u32 context;
+ u16 id;
+ u8 dest_ipv6[16];
+ u32 prefix_length;
+ u32 vrf_id;
+ u8 trace_op;
+ u8 enable;
+};
+
+/** \brief Trace profile apply response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+*/
+define trace_profile_apply_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Delete Trace Profile
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param id - id of the trace profile to be deleted
+*/
+define trace_profile_del {
+ u32 client_index;
+ u32 context;
+ u16 id;
+};
+
+/** \brief Trace profile add / del response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+*/
+define trace_profile_del_reply {
+ u32 context;
+ i32 retval;
+};
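+/*
+ * Illustrative flow (not part of this patch): a profile is first
+ * installed with trace_profile_add, bound to a flow with
+ * trace_profile_apply, and removed by id with trace_profile_del.
+ * Field values below are examples; the trace_op encoding is hypothetical.
+ *
+ *   vl_api_trace_profile_apply_t *mp = vl_msg_api_alloc (sizeof (*mp));
+ *   memset (mp, 0, sizeof (*mp));
+ *   mp->_vl_msg_id = ntohs (VL_API_TRACE_PROFILE_APPLY);
+ *   mp->client_index = my_client_index;
+ *   mp->id = ntohs (profile_id);            // from trace_profile_add
+ *   memcpy (mp->dest_ipv6, &dst_addr, 16);  // flow destination
+ *   mp->prefix_length = ntohl (64);
+ *   mp->vrf_id = ntohl (0);
+ *   mp->trace_op = 1;                       // "add" (hypothetical value)
+ *   mp->enable = 1;
+ *   vl_msg_api_send_shmem (input_queue, (u8 *) &mp);
+ */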
+
+/** \brief Create host-interface
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param host_if_name - interface name
+ @param hw_addr - interface MAC
+ @param use_random_hw_addr - use a randomly generated MAC address
+*/
+define af_packet_create {
+ u32 client_index;
+ u32 context;
+
+ u8 host_if_name[64];
+ u8 hw_addr[6];
+ u8 use_random_hw_addr;
+};
+
+/** \brief Create host-interface response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+*/
+define af_packet_create_reply {
+ u32 context;
+ i32 retval;
+ u32 sw_if_index;
+};
+
+/** \brief Delete host-interface
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param host_if_name - interface name
+*/
+define af_packet_delete {
+ u32 client_index;
+ u32 context;
+
+ u8 host_if_name[64];
+};
+
+/** \brief Delete host-interface response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+*/
+define af_packet_delete_reply {
+ u32 context;
+ i32 retval;
+};
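+/*
+ * Illustrative request sketch (not part of this patch): create a
+ * host-interface bound to an existing veth endpoint, letting vpp pick
+ * the MAC address. The interface name is an example.
+ *
+ *   vl_api_af_packet_create_t *mp = vl_msg_api_alloc (sizeof (*mp));
+ *   memset (mp, 0, sizeof (*mp));
+ *   mp->_vl_msg_id = ntohs (VL_API_AF_PACKET_CREATE);
+ *   mp->client_index = my_client_index;
+ *   strncpy ((char *) mp->host_if_name, "veth0",
+ *            sizeof (mp->host_if_name) - 1);
+ *   mp->use_random_hw_addr = 1;
+ *   vl_msg_api_send_shmem (input_queue, (u8 *) &mp);
+ *
+ * On success, af_packet_create_reply carries the new sw_if_index;
+ * af_packet_delete later takes the same host_if_name.
+ */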
+
+/** \brief Add/del policer
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add policer if non-zero, else delete
+ @param name - policer name
+ @param cir - CIR
+ @param eir - EIR
+ @param cb - Committed Burst
+ @param eb - Excess or Peak Burst
+ @param rate_type - rate type
+ @param round_type - rounding type
+ @param type - policer algorithm
+*/
+define policer_add_del {
+ u32 client_index;
+ u32 context;
+
+ u8 is_add;
+ u8 name[64];
+ u32 cir;
+ u32 eir;
+ u64 cb;
+ u64 eb;
+ u8 rate_type;
+ u8 round_type;
+ u8 type;
+};
+
+/** \brief Add/del policer response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+*/
+define policer_add_del_reply {
+ u32 context;
+ i32 retval;
+};
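+/*
+ * Illustrative request sketch (not part of this patch): the numeric
+ * values are examples, the units of cir/eir and cb/eb depend on
+ * rate_type, and rate_type/round_type/type are left at their zero
+ * defaults here.
+ *
+ *   vl_api_policer_add_del_t *mp = vl_msg_api_alloc (sizeof (*mp));
+ *   memset (mp, 0, sizeof (*mp));
+ *   mp->_vl_msg_id = ntohs (VL_API_POLICER_ADD_DEL);
+ *   mp->client_index = my_client_index;
+ *   mp->is_add = 1;
+ *   strncpy ((char *) mp->name, "policer-1", sizeof (mp->name) - 1);
+ *   mp->cir = ntohl (10000);                // committed rate
+ *   mp->eir = ntohl (20000);                // excess rate
+ *   mp->cb = clib_host_to_net_u64 (1500);   // committed burst
+ *   mp->eb = clib_host_to_net_u64 (3000);   // excess burst
+ *   vl_msg_api_send_shmem (input_queue, (u8 *) &mp);
+ */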
+
+/** \brief Get list of policers
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param match_name_valid - if 0, request all policers; otherwise use match_name
+ @param match_name - policer name
+*/
+define policer_dump {
+ u32 client_index;
+ u32 context;
+
+ u8 match_name_valid;
+ u8 match_name[64];
+};
+
+/** \brief Policer operational state response.
+ @param context - sender context, to match reply w/ request
+ @param name - policer name
+ @param cir - CIR
+ @param eir - EIR
+ @param cb - Committed Burst
+ @param eb - Excess or Peak Burst
+ @param rate_type - rate type
+ @param round_type - rounding type
+ @param type - policer algorithm
+ @param single_rate - 1 = single rate policer, 0 = two rate policer
+ @param color_aware - for hierarchical policing
+ @param scale - power-of-2 shift amount for lower rates
+ @param cir_tokens_per_period - number of tokens for each period
+ @param pir_tokens_per_period - number of tokens for each period for 2-rate policer
+ @param current_limit - current limit
+ @param current_bucket - current bucket
+ @param extended_limit - extended limit
+ @param extended_bucket - extended bucket
+ @param last_update_time - last update time
+*/
+manual_java define policer_details {
+ u32 context;
+
+ u8 name[64];
+ u32 cir;
+ u32 eir;
+ u64 cb;
+ u64 eb;
+ u8 rate_type;
+ u8 round_type;
+ u8 type;
+ u8 single_rate;
+ u8 color_aware;
+ u32 scale;
+ u32 cir_tokens_per_period;
+ u32 pir_tokens_per_period;
+ u32 current_limit;
+ u32 current_bucket;
+ u32 extended_limit;
+ u32 extended_bucket;
+ u64 last_update_time;
+};
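+/*
+ * Usage note (illustrative, not part of this patch): like the other
+ * *_dump requests in this file, policer_dump elicits a stream of
+ * policer_details messages rather than a single reply. The usual
+ * client pattern is to follow the dump with a control_ping and treat
+ * the control_ping_reply as the end-of-stream marker:
+ *
+ *   send policer_dump (match_name_valid = 0 => all policers)
+ *   send control_ping
+ *   collect policer_details until control_ping_reply arrives
+ */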
+
+/** \brief Create netmap
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param netmap_if_name - interface name
+ @param hw_addr - interface MAC
+ @param use_random_hw_addr - use a randomly generated MAC address
+ @param is_pipe - is pipe
+ @param is_master - 0=slave, 1=master
+*/
+define netmap_create {
+ u32 client_index;
+ u32 context;
+
+ u8 netmap_if_name[64];
+ u8 hw_addr[6];
+ u8 use_random_hw_addr;
+ u8 is_pipe;
+ u8 is_master;
+};
+
+/** \brief Create netmap response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+*/
+define netmap_create_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Delete netmap
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param netmap_if_name - interface name
+*/
+define netmap_delete {
+ u32 client_index;
+ u32 context;
+
+ u8 netmap_if_name[64];
+};
+
+/** \brief Delete netmap response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+*/
+define netmap_delete_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Dump mpls gre tunnel table
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param tunnel_index - gre tunnel identifier or -1 in case of all tunnels
+*/
+define mpls_gre_tunnel_dump {
+ u32 client_index;
+ u32 context;
+ i32 tunnel_index;
+};
+
+/** \brief mpls gre tunnel operational state response
+ @param context - returned sender context, to match reply w/ request
+ @param tunnel_index - gre tunnel identifier
+ @param intfc_address - interface ipv4 addr
+ @param inner_fib_index - inner fib table index
+ @param mask_width - interface ipv4 addr mask
+ @param encap_index - reference to mpls label table
+ @param hw_if_index - interface id
+ @param l2_only - 1 if the tunnel carries l2 traffic only
+ @param tunnel_src - tunnel source ipv4 addr
+ @param tunnel_dst - tunnel destination ipv4 addr
+ @param outer_fib_index - outer fib table index
+ @param nlabels - number of resolved labels
+ @param labels - resolved labels
+*/
+manual_java define mpls_gre_tunnel_details {
+ u32 context;
+ u32 tunnel_index;
+
+ u32 intfc_address;
+ u32 inner_fib_index;
+ u32 mask_width;
+ u32 encap_index;
+ u32 hw_if_index;
+ u8 l2_only;
+ u32 tunnel_src;
+ u32 tunnel_dst;
+ u32 outer_fib_index;
+ u32 nlabels;
+ u32 labels[0];
+};
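+/*
+ * Illustrative handler sketch (not part of this patch): labels[] is a
+ * variable-length trailer; nlabels gives its element count, so the
+ * message must be walked rather than taken at sizeof().
+ *
+ *   static void vl_api_mpls_gre_tunnel_details_t_handler
+ *     (vl_api_mpls_gre_tunnel_details_t * mp)
+ *   {
+ *     u32 i, nlabels = ntohl (mp->nlabels);
+ *
+ *     for (i = 0; i < nlabels; i++)
+ *       fformat (stdout, "label %u\n", ntohl (mp->labels[i]));
+ *   }
+ */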
+
+/** \brief Dump mpls eth tunnel table
+ @param client_index - opaque cookie to identify the sender
+ @param tunnel_index - eth tunnel identifier or -1 in case of all tunnels
+*/
+define mpls_eth_tunnel_dump {
+ u32 client_index;
+ u32 context;
+ i32 tunnel_index;
+};
+
+/** \brief mpls eth tunnel operational state response
+ @param context - returned sender context, to match reply w/ request
+ @param tunnel_index - eth tunnel identifier
+ @param intfc_address - interface ipv4 addr
+ @param inner_fib_index - inner fib table index
+ @param mask_width - interface ipv4 addr mask
+ @param encap_index - reference to mpls label table
+ @param hw_if_index - interface id
+ @param l2_only - 1 if the tunnel carries l2 traffic only
+ @param tunnel_dst_mac - tunnel destination MAC address
+ @param tx_sw_if_index - tx interface index
+ @param nlabels - number of resolved labels
+ @param labels - resolved labels
+*/
+manual_java define mpls_eth_tunnel_details {
+ u32 context;
+ u32 tunnel_index;
+
+ u32 intfc_address;
+ u32 inner_fib_index;
+ u32 mask_width;
+ u32 encap_index;
+ u32 hw_if_index;
+ u8 l2_only;
+ u8 tunnel_dst_mac[6];
+ u32 tx_sw_if_index;
+ u32 nlabels;
+ u32 labels[0];
+};
+
+/** \brief Dump mpls fib encap table
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+define mpls_fib_encap_dump {
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief mpls fib encap table response
+ @param context - returned sender context, to match reply w/ request
+ @param fib_index - fib table id
+ @param entry_index - reference to mpls label table
+ @param dest - destination ipv4 addr
+ @param s_bit - bottom-of-stack bit
+ @param nlabels - number of resolved labels
+ @param labels - resolved labels
+*/
+manual_java define mpls_fib_encap_details {
+ u32 context;
+
+ u32 fib_index;
+ u32 entry_index;
+ u32 dest;
+ u32 s_bit;
+ u32 nlabels;
+ u32 labels[0];
+};
+
+/** \brief Dump mpls fib decap table
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+define mpls_fib_decap_dump {
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief mpls fib decap table response
+ @param context - returned sender context, to match reply w/ request
+ @param fib_index - fib table id
+ @param entry_index - reference to mpls label table
+ @param dest - destination ipv4 addr
+ @param s_bit - bottom-of-stack bit
+ @param label - mpls label
+ @param rx_table_id - rx fib id
+ @param tx_table_id - tx fib id
+ @param swif_tag -
+*/
+manual_java define mpls_fib_decap_details {
+ u32 context;
+
+ u32 fib_index;
+ u32 entry_index;
+ u32 dest;
+ u32 s_bit;
+ u32 label;
+ u32 rx_table_id;
+ u32 tx_table_id;
+ u8 swif_tag[8];
+};
+
+/** \brief Classify get table IDs request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+define classify_table_ids {
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief Reply for classify get table IDs request
+ @param context - sender context which was passed in the request
+ @param retval - return code
+ @param count - number of ids returned in response
+ @param ids - array of classify table ids
+*/
+manual_java define classify_table_ids_reply {
+ u32 context;
+ i32 retval;
+ u32 count;
+ u32 ids[0];
+};
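+/*
+ * Illustrative handler fragment (not part of this patch): ids[] is a
+ * variable-length trailer sized by count.
+ *
+ *   u32 i, count = ntohl (mp->count);
+ *
+ *   for (i = 0; i < count; i++)
+ *     fformat (stdout, "classify table %u\n", ntohl (mp->ids[i]));
+ */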
+
+/** \brief Classify table ids by interface index request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - index of the interface
+*/
+define classify_table_by_interface {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+};
+
+/** \brief Reply for classify table id by interface index request
+ @param context - sender context which was passed in the request
+ @param retval - return code
+ @param sw_if_index - index of the interface
+ @param l2_table_id - l2 classify table index
+ @param ip4_table_id - ip4 classify table index
+ @param ip6_table_id - ip6 classify table index
+*/
+manual_java define classify_table_by_interface_reply {
+ u32 context;
+ i32 retval;
+ u32 sw_if_index;
+ u32 l2_table_id;
+ u32 ip4_table_id;
+ u32 ip6_table_id;
+};
+
+/** \brief Classify table info
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param table_id - classify table index
+*/
+define classify_table_info {
+ u32 client_index;
+ u32 context;
+ u32 table_id;
+};
+
+/** \brief Reply for classify table info request
+ @param context - sender context which was passed in the request
+ @param retval - return code
+ @param table_id - classify table index
+ @param nbuckets - number of buckets when adding a table
+ @param match_n_vectors - number of match vectors
+ @param skip_n_vectors - number of skip vectors
+ @param active_sessions - number of sessions (active entries)
+ @param next_table_index - index of next table
+ @param miss_next_index - index of miss table
+ @param mask_length - length of the match mask, in bytes
+ @param mask - match mask
+*/
+manual_java define classify_table_info_reply {
+ u32 context;
+ i32 retval;
+ u32 table_id;
+ u32 nbuckets;
+ u32 match_n_vectors;
+ u32 skip_n_vectors;
+ u32 active_sessions;
+ u32 next_table_index;
+ u32 miss_next_index;
+ u32 mask_length;
+ u8 mask[0];
+};
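+/*
+ * Illustrative fragment (not part of this patch): mask[] is a raw
+ * byte blob, mask_length octets long.
+ *
+ *   u32 i, n = ntohl (mp->mask_length);
+ *
+ *   for (i = 0; i < n; i++)
+ *     fformat (stdout, "%02x", mp->mask[i]);
+ */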
+
+/** \brief Classify sessions dump request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param table_id - classify table index
+*/
+define classify_session_dump {
+ u32 client_index;
+ u32 context;
+ u32 table_id;
+};
+
+/** \brief Reply for classify table session dump request
+ @param context - sender context which was passed in the request
+ @param retval - return code
+ @param table_id - classify table index
+ @param hit_next_index - hit_next_index of session
+ @param advance - advance value of session
+ @param opaque_index - opaque_index of session
+ @param match_length - length of the match value, in bytes
+ @param match - match value for session
+*/
+manual_java define classify_session_details {
+ u32 context;
+ i32 retval;
+ u32 table_id;
+ u32 hit_next_index;
+ i32 advance;
+ u32 opaque_index;
+ u32 match_length;
+ u8 match[0];
+};
diff --git a/vpp/vpp-api/vpe_all_api_h.h b/vpp/vpp-api/vpe_all_api_h.h
new file mode 100644
index 00000000..ca6776c6
--- /dev/null
+++ b/vpp/vpp-api/vpe_all_api_h.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Add to the bottom of the #include list, or elves will steal your
+ * keyboard in the middle of the night!
+ */
+
+/* Inherited from vlib */
+#include <vlibmemory/vl_memory_api_h.h>
+
+/* Here are the vpe forwarder specific API definitions */
+#include <vpp-api/vpe.api.h>
diff --git a/vpp/vpp-api/vpe_msg_enum.h b/vpp/vpp-api/vpe_msg_enum.h
new file mode 100644
index 00000000..fa194ee0
--- /dev/null
+++ b/vpp/vpp-api/vpe_msg_enum.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef included_vpe_msg_enum_h
+#define included_vpe_msg_enum_h
+
+#include <vppinfra/byte_order.h>
+
+#define vl_msg_id(n,h) n,
+typedef enum {
+ VL_ILLEGAL_MESSAGE_ID=0,
+#include <vpp-api/vpe_all_api_h.h>
+ VL_MSG_FIRST_AVAILABLE,
+} vl_msg_id_t;
+#undef vl_msg_id
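+
+/*
+ * Illustration (not part of this patch): vpe_all_api_h.h ultimately
+ * pulls in the generated vpe.api.h, which emits one vl_msg_id()
+ * invocation per API message. With vl_msg_id defined as above, an
+ * invocation such as (schematically, second argument omitted)
+ *
+ *   vl_msg_id(VL_API_GET_NODE_GRAPH, ...)
+ *
+ * expands to the bare enumerator "VL_API_GET_NODE_GRAPH,", so the
+ * typedef collects a dense message-id enum bracketed by
+ * VL_ILLEGAL_MESSAGE_ID and VL_MSG_FIRST_AVAILABLE.
+ */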
+
+#endif /* included_vpe_msg_enum_h */