Diffstat (limited to 'src/vpp')
-rw-r--r--  src/vpp/api/api.c                        4922
l---------  src/vpp/api/api_format.c                    1
-rw-r--r--  src/vpp/api/api_main.c                    192
-rw-r--r--  src/vpp/api/custom_dump.c                3139
-rw-r--r--  src/vpp/api/gmon.c                        319
-rw-r--r--  src/vpp/api/json_format.c                 304
-rw-r--r--  src/vpp/api/json_format.h                 254
-rw-r--r--  src/vpp/api/summary_stats_client.c        302
-rw-r--r--  src/vpp/api/test_client.c                1531
-rw-r--r--  src/vpp/api/test_ha.c                     249
l---------  src/vpp/api/vat.h                           1
-rw-r--r--  src/vpp/api/vpe.api                      2782
-rw-r--r--  src/vpp/api/vpe_all_api_h.h                37
-rw-r--r--  src/vpp/api/vpe_msg_enum.h                 37
-rw-r--r--  src/vpp/api/vpp_get_metrics.c             253
-rw-r--r--  src/vpp/app/l2t.c                         557
-rw-r--r--  src/vpp/app/l2t_l2.c                      267
-rw-r--r--  src/vpp/app/sticky_hash.c                 581
-rw-r--r--  src/vpp/app/version.c                     102
-rw-r--r--  src/vpp/app/vpe_cli.c                     123
-rw-r--r--  src/vpp/conf/80-vpp.conf                   15
-rw-r--r--  src/vpp/conf/startup.conf                  99
-rw-r--r--  src/vpp/conf/startup.uiopcigeneric.conf    18
-rw-r--r--  src/vpp/oam/oam.c                         648
-rw-r--r--  src/vpp/oam/oam.h                          96
-rw-r--r--  src/vpp/stats/stats.c                     987
-rw-r--r--  src/vpp/stats/stats.h                      76
-rw-r--r--  src/vpp/vnet/main.c                       415
28 files changed, 18307 insertions, 0 deletions
diff --git a/src/vpp/api/api.c b/src/vpp/api/api.c
new file mode 100644
index 00000000..6289249c
--- /dev/null
+++ b/src/vpp/api/api.c
@@ -0,0 +1,4922 @@
+/*
+ *------------------------------------------------------------------
+ * api.c - message handler registration
+ *
+ * Copyright (c) 2010-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <netinet/in.h>
+#include <signal.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <time.h>
+#include <fcntl.h>
+#include <pwd.h>
+#include <grp.h>
+
+#include <vppinfra/clib.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/bitmap.h>
+#include <vppinfra/fifo.h>
+#include <vppinfra/time.h>
+#include <vppinfra/mheap.h>
+#include <vppinfra/heap.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/format.h>
+#include <vppinfra/error.h>
+
+#include <vnet/api_errno.h>
+#include <vnet/vnet.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/l2/l2_bd.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ip/ip6.h>
+#include <vnet/ip/ip6_neighbor.h>
+#include <vnet/mpls/mpls.h>
+#include <vnet/mpls/mpls_tunnel.h>
+#include <vnet/dhcp/proxy.h>
+#include <vnet/dhcp/client.h>
+#if IPV6SR > 0
+#include <vnet/sr/sr.h>
+#endif
+#include <vnet/dhcpv6/proxy.h>
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+#include <vnet/classify/vnet_classify.h>
+#include <vnet/classify/input_acl.h>
+#include <vnet/classify/policer_classify.h>
+#include <vnet/classify/flow_classify.h>
+#include <vnet/l2/l2_classify.h>
+#include <vnet/vxlan/vxlan.h>
+#include <vnet/l2/l2_vtr.h>
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <vnet/map/map.h>
+#include <vnet/cop/cop.h>
+#include <vnet/ip/ip6_hop_by_hop.h>
+#include <vnet/ip/ip_source_and_port_range_check.h>
+#include <vnet/policer/policer.h>
+#include <vnet/flow/flow_report.h>
+#include <vnet/flow/flow_report_classify.h>
+#include <vnet/ip/punt.h>
+#include <vnet/feature/feature.h>
+
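+/*
+ * Reset the bihash template state so that <vnet/l2/l2_fib.h>, included
+ * below, can instantiate its own table type.
+ */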
+#undef BIHASH_TYPE
+#undef __included_bihash_template_h__
+#include <vnet/l2/l2_fib.h>
+
+#if DPDK > 0
+#include <vnet/devices/dpdk/dpdk.h>
+#endif
+
+#include <vpp/stats/stats.h>
+#include <vpp/oam/oam.h>
+
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ethernet/arp_packet.h>
+#include <vnet/interface.h>
+#include <vnet/l2/l2_fib.h>
+#include <vnet/l2/l2_bd.h>
+#include <vpp/api/vpe_msg_enum.h>
+#include <vnet/span/span.h>
+#include <vnet/fib/ip6_fib.h>
+#include <vnet/fib/ip4_fib.h>
+#include <vnet/fib/fib_api.h>
+#include <vnet/dpo/drop_dpo.h>
+#include <vnet/dpo/receive_dpo.h>
+#include <vnet/dpo/lookup_dpo.h>
+#include <vnet/dpo/classify_dpo.h>
+#include <vnet/dpo/ip_null_dpo.h>
+#define vl_typedefs /* define message structures */
+#include <vpp/api/vpe_all_api_h.h>
+#undef vl_typedefs
+#define vl_endianfun /* define endian-swap functions */
+#include <vpp/api/vpe_all_api_h.h>
+#undef vl_endianfun
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <vpp/api/vpe_all_api_h.h>
+#undef vl_printfun
+#include <vlibapi/api_helper_macros.h>
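+
+/*
+ * (Message ID, handler name) pairs.  The API setup code expands this list
+ * to register a vl_api_<name>_t_handler for each message.
+ */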
+#define foreach_vpe_api_msg \
+_(WANT_OAM_EVENTS, want_oam_events) \
+_(OAM_ADD_DEL, oam_add_del) \
+_(MPLS_ROUTE_ADD_DEL, mpls_route_add_del) \
+_(MPLS_IP_BIND_UNBIND, mpls_ip_bind_unbind) \
+_(IS_ADDRESS_REACHABLE, is_address_reachable) \
+_(SW_INTERFACE_SET_MPLS_ENABLE, sw_interface_set_mpls_enable) \
+_(SW_INTERFACE_SET_VPATH, sw_interface_set_vpath) \
+_(SW_INTERFACE_SET_VXLAN_BYPASS, sw_interface_set_vxlan_bypass) \
+_(SW_INTERFACE_SET_L2_XCONNECT, sw_interface_set_l2_xconnect) \
+_(SW_INTERFACE_SET_L2_BRIDGE, sw_interface_set_l2_bridge) \
+_(SW_INTERFACE_SET_DPDK_HQOS_PIPE, sw_interface_set_dpdk_hqos_pipe) \
+_(SW_INTERFACE_SET_DPDK_HQOS_SUBPORT, sw_interface_set_dpdk_hqos_subport) \
+_(SW_INTERFACE_SET_DPDK_HQOS_TCTBL, sw_interface_set_dpdk_hqos_tctbl) \
+_(BRIDGE_DOMAIN_ADD_DEL, bridge_domain_add_del) \
+_(BRIDGE_DOMAIN_DUMP, bridge_domain_dump) \
+_(BRIDGE_DOMAIN_DETAILS, bridge_domain_details) \
+_(BRIDGE_DOMAIN_SW_IF_DETAILS, bridge_domain_sw_if_details) \
+_(L2FIB_ADD_DEL, l2fib_add_del) \
+_(L2_FLAGS, l2_flags) \
+_(BRIDGE_FLAGS, bridge_flags) \
+_(CREATE_VLAN_SUBIF, create_vlan_subif) \
+_(CREATE_SUBIF, create_subif) \
+_(MPLS_TUNNEL_ADD_DEL, mpls_tunnel_add_del) \
+_(PROXY_ARP_ADD_DEL, proxy_arp_add_del) \
+_(PROXY_ARP_INTFC_ENABLE_DISABLE, proxy_arp_intfc_enable_disable) \
+_(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats) \
+_(RESET_FIB, reset_fib) \
+_(DHCP_PROXY_CONFIG,dhcp_proxy_config) \
+_(DHCP_PROXY_CONFIG_2,dhcp_proxy_config_2) \
+_(DHCP_PROXY_SET_VSS,dhcp_proxy_set_vss) \
+_(DHCP_CLIENT_CONFIG, dhcp_client_config) \
+_(CREATE_LOOPBACK, create_loopback) \
+_(CONTROL_PING, control_ping) \
+_(CLI_REQUEST, cli_request) \
+_(CLI_INBAND, cli_inband) \
+_(SET_ARP_NEIGHBOR_LIMIT, set_arp_neighbor_limit) \
+_(L2_PATCH_ADD_DEL, l2_patch_add_del) \
+_(CLASSIFY_ADD_DEL_TABLE, classify_add_del_table) \
+_(CLASSIFY_ADD_DEL_SESSION, classify_add_del_session) \
+_(CLASSIFY_SET_INTERFACE_IP_TABLE, classify_set_interface_ip_table) \
+_(CLASSIFY_SET_INTERFACE_L2_TABLES, classify_set_interface_l2_tables) \
+_(GET_NODE_INDEX, get_node_index) \
+_(ADD_NODE_NEXT, add_node_next) \
+_(VXLAN_ADD_DEL_TUNNEL, vxlan_add_del_tunnel) \
+_(VXLAN_TUNNEL_DUMP, vxlan_tunnel_dump) \
+_(L2_FIB_CLEAR_TABLE, l2_fib_clear_table) \
+_(L2_INTERFACE_EFP_FILTER, l2_interface_efp_filter) \
+_(L2_INTERFACE_VLAN_TAG_REWRITE, l2_interface_vlan_tag_rewrite) \
+_(SHOW_VERSION, show_version) \
+_(L2_FIB_TABLE_DUMP, l2_fib_table_dump) \
+_(L2_FIB_TABLE_ENTRY, l2_fib_table_entry) \
+_(VXLAN_GPE_ADD_DEL_TUNNEL, vxlan_gpe_add_del_tunnel) \
+_(VXLAN_GPE_TUNNEL_DUMP, vxlan_gpe_tunnel_dump) \
+_(INTERFACE_NAME_RENUMBER, interface_name_renumber) \
+_(WANT_IP4_ARP_EVENTS, want_ip4_arp_events) \
+_(WANT_IP6_ND_EVENTS, want_ip6_nd_events) \
+_(INPUT_ACL_SET_INTERFACE, input_acl_set_interface) \
+_(DELETE_LOOPBACK, delete_loopback) \
+_(BD_IP_MAC_ADD_DEL, bd_ip_mac_add_del) \
+_(COP_INTERFACE_ENABLE_DISABLE, cop_interface_enable_disable) \
+_(COP_WHITELIST_ENABLE_DISABLE, cop_whitelist_enable_disable) \
+_(GET_NODE_GRAPH, get_node_graph) \
+_(IOAM_ENABLE, ioam_enable) \
+_(IOAM_DISABLE, ioam_disable) \
+_(SR_MULTICAST_MAP_ADD_DEL, sr_multicast_map_add_del) \
+_(POLICER_ADD_DEL, policer_add_del) \
+_(POLICER_DUMP, policer_dump) \
+_(POLICER_CLASSIFY_SET_INTERFACE, policer_classify_set_interface) \
+_(POLICER_CLASSIFY_DUMP, policer_classify_dump) \
+_(MPLS_TUNNEL_DUMP, mpls_tunnel_dump) \
+_(MPLS_TUNNEL_DETAILS, mpls_tunnel_details) \
+_(MPLS_FIB_DUMP, mpls_fib_dump) \
+_(MPLS_FIB_DETAILS, mpls_fib_details) \
+_(CLASSIFY_TABLE_IDS,classify_table_ids) \
+_(CLASSIFY_TABLE_BY_INTERFACE, classify_table_by_interface) \
+_(CLASSIFY_TABLE_INFO,classify_table_info) \
+_(CLASSIFY_SESSION_DUMP,classify_session_dump) \
+_(CLASSIFY_SESSION_DETAILS,classify_session_details) \
+_(SET_IPFIX_EXPORTER, set_ipfix_exporter) \
+_(IPFIX_EXPORTER_DUMP, ipfix_exporter_dump) \
+_(SET_IPFIX_CLASSIFY_STREAM, set_ipfix_classify_stream) \
+_(IPFIX_CLASSIFY_STREAM_DUMP, ipfix_classify_stream_dump) \
+_(IPFIX_CLASSIFY_TABLE_ADD_DEL, ipfix_classify_table_add_del) \
+_(IPFIX_CLASSIFY_TABLE_DUMP, ipfix_classify_table_dump) \
+_(GET_NEXT_INDEX, get_next_index) \
+_(PG_CREATE_INTERFACE, pg_create_interface) \
+_(PG_CAPTURE, pg_capture) \
+_(PG_ENABLE_DISABLE, pg_enable_disable) \
+_(IP_SOURCE_AND_PORT_RANGE_CHECK_ADD_DEL, \
+ ip_source_and_port_range_check_add_del) \
+_(IP_SOURCE_AND_PORT_RANGE_CHECK_INTERFACE_ADD_DEL, \
+ ip_source_and_port_range_check_interface_add_del) \
+_(DELETE_SUBIF, delete_subif) \
+_(L2_INTERFACE_PBB_TAG_REWRITE, l2_interface_pbb_tag_rewrite) \
+_(PUNT, punt) \
+_(FLOW_CLASSIFY_SET_INTERFACE, flow_classify_set_interface) \
+_(FLOW_CLASSIFY_DUMP, flow_classify_dump) \
+_(FEATURE_ENABLE_DISABLE, feature_enable_disable)
+
+#define QUOTE_(x) #x
+#define QUOTE(x) QUOTE_(x)
+typedef enum
+{
+ RESOLVE_IP4_ADD_DEL_ROUTE = 1,
+ RESOLVE_IP6_ADD_DEL_ROUTE,
+} resolve_t;
+
+static vlib_node_registration_t vpe_resolver_process_node;
+vpe_api_main_t vpe_api_main;
+
+static int arp_change_delete_callback (u32 pool_index, u8 * notused);
+static int nd_change_delete_callback (u32 pool_index, u8 * notused);
+
+/* Clean up all registrations belonging to the indicated client */
+int
+vl_api_memclnt_delete_callback (u32 client_index)
+{
+ vpe_api_main_t *vam = &vpe_api_main;
+ vpe_client_registration_t *rp;
+ uword *p;
+ int stats_memclnt_delete_callback (u32 client_index);
+
+ stats_memclnt_delete_callback (client_index);
+
+#define _(a) \
+ p = hash_get (vam->a##_registration_hash, client_index); \
+ if (p) { \
+ rp = pool_elt_at_index (vam->a##_registrations, p[0]); \
+ pool_put (vam->a##_registrations, rp); \
+ hash_unset (vam->a##_registration_hash, client_index); \
+ }
+ foreach_registration_hash;
+#undef _
+ return 0;
+}
+
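+/* Expands into the want_oam_events (un)subscribe handler */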
+pub_sub_handler (oam_events, OAM_EVENTS);
+
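+/* Event types signalled to the route-resolver process below */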
+#define RESOLUTION_EVENT 1
+#define RESOLUTION_PENDING_EVENT 2
+#define IP4_ARP_EVENT 3
+#define IP6_ND_EVENT 4
+
+int ip4_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp);
+
+int ip6_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp);
+
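+/*
+ * Relay a queued ARP resolution event to the registered client; if the
+ * client's input queue has vanished, delete the registration instead.
+ */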
+void
+handle_ip4_arp_event (u32 pool_index)
+{
+ vpe_api_main_t *vam = &vpe_api_main;
+ vnet_main_t *vnm = vam->vnet_main;
+ vlib_main_t *vm = vam->vlib_main;
+ vl_api_ip4_arp_event_t *event;
+ vl_api_ip4_arp_event_t *mp;
+ unix_shared_memory_queue_t *q;
+
+ /* Client can cancel, die, etc. */
+ if (pool_is_free_index (vam->arp_events, pool_index))
+ return;
+
+ event = pool_elt_at_index (vam->arp_events, pool_index);
+
+ q = vl_api_client_index_to_input_queue (event->client_index);
+ if (!q)
+ {
+ (void) vnet_add_del_ip4_arp_change_event
+ (vnm, arp_change_delete_callback,
+ event->pid, &event->address,
+ vpe_resolver_process_node.index, IP4_ARP_EVENT,
+ ~0 /* pool index, notused */ , 0 /* is_add */ );
+ return;
+ }
+
+ if (q->cursize < q->maxsize)
+ {
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ clib_memcpy (mp, event, sizeof (*mp));
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+ }
+ else
+ {
+ static f64 last_time;
+ /*
+ * Throttle syslog msgs.
+ * It's pretty tempting to just revoke the registration...
+ */
+ if (vlib_time_now (vm) > last_time + 10.0)
+ {
+ clib_warning ("arp event for %U to pid %d: queue stuffed!",
+ format_ip4_address, &event->address, event->pid);
+ last_time = vlib_time_now (vm);
+ }
+ }
+}
+
+void
+handle_ip6_nd_event (u32 pool_index)
+{
+ vpe_api_main_t *vam = &vpe_api_main;
+ vnet_main_t *vnm = vam->vnet_main;
+ vlib_main_t *vm = vam->vlib_main;
+ vl_api_ip6_nd_event_t *event;
+ vl_api_ip6_nd_event_t *mp;
+ unix_shared_memory_queue_t *q;
+
+ /* Client can cancel, die, etc. */
+ if (pool_is_free_index (vam->nd_events, pool_index))
+ return;
+
+ event = pool_elt_at_index (vam->nd_events, pool_index);
+
+ q = vl_api_client_index_to_input_queue (event->client_index);
+ if (!q)
+ {
+ (void) vnet_add_del_ip6_nd_change_event
+ (vnm, nd_change_delete_callback,
+ event->pid, &event->address,
+ vpe_resolver_process_node.index, IP6_ND_EVENT,
+ ~0 /* pool index, notused */ , 0 /* is_add */ );
+ return;
+ }
+
+ if (q->cursize < q->maxsize)
+ {
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ clib_memcpy (mp, event, sizeof (*mp));
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+ }
+ else
+ {
+ static f64 last_time;
+ /*
+ * Throttle syslog msgs.
+ * It's pretty tempting to just revoke the registration...
+ */
+ if (vlib_time_now (vm) > last_time + 10.0)
+ {
+ clib_warning ("ip6 nd event for %U to pid %d: queue stuffed!",
+ format_ip6_address, &event->address, event->pid);
+ last_time = vlib_time_now (vm);
+ }
+ }
+}
+
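+/* Process node: dispatches ARP/ND resolution events to registered clients */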
+static uword
+resolver_process (vlib_main_t * vm,
+ vlib_node_runtime_t * rt, vlib_frame_t * f)
+{
+ uword event_type;
+ uword *event_data = 0;
+ f64 timeout = 100.0;
+ int i;
+
+ while (1)
+ {
+ vlib_process_wait_for_event_or_clock (vm, timeout);
+
+ event_type = vlib_process_get_events (vm, &event_data);
+
+ switch (event_type)
+ {
+ case RESOLUTION_PENDING_EVENT:
+ timeout = 1.0;
+ break;
+
+ case RESOLUTION_EVENT:
+ clib_warning ("resolver: BOGUS TYPE");
+ break;
+
+ case IP4_ARP_EVENT:
+ for (i = 0; i < vec_len (event_data); i++)
+ handle_ip4_arp_event (event_data[i]);
+ break;
+
+ case IP6_ND_EVENT:
+ for (i = 0; i < vec_len (event_data); i++)
+ handle_ip6_nd_event (event_data[i]);
+ break;
+
+ case ~0: /* timeout */
+ break;
+ }
+
+ vec_reset_length (event_data);
+ }
+ return 0; /* or not */
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (vpe_resolver_process_node,static) = {
+ .function = resolver_process,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "vpe-route-resolver-process",
+};
+/* *INDENT-ON* */
+
+static int
+mpls_route_add_del_t_handler (vnet_main_t * vnm,
+ vl_api_mpls_route_add_del_t * mp)
+{
+ u32 fib_index, next_hop_fib_index;
+ mpls_label_t *label_stack = NULL;
+ int rv, ii, n_labels;
+
+ fib_prefix_t pfx = {
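+ /* 21-bit key: the 20-bit MPLS label plus the end-of-stack bit */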
+ .fp_len = 21,
+ .fp_proto = FIB_PROTOCOL_MPLS,
+ .fp_eos = mp->mr_eos,
+ .fp_label = ntohl (mp->mr_label),
+ };
+ if (pfx.fp_eos)
+ {
+ if (mp->mr_next_hop_proto_is_ip4)
+ {
+ pfx.fp_payload_proto = DPO_PROTO_IP4;
+ }
+ else
+ {
+ pfx.fp_payload_proto = DPO_PROTO_IP6;
+ }
+ }
+ else
+ {
+ pfx.fp_payload_proto = DPO_PROTO_MPLS;
+ }
+
+ rv = add_del_route_check (FIB_PROTOCOL_MPLS,
+ mp->mr_table_id,
+ mp->mr_next_hop_sw_if_index,
+ dpo_proto_to_fib (pfx.fp_payload_proto),
+ mp->mr_next_hop_table_id,
+ mp->mr_create_table_if_needed,
+ &fib_index, &next_hop_fib_index);
+
+ if (0 != rv)
+ return (rv);
+
+ ip46_address_t nh;
+ memset (&nh, 0, sizeof (nh));
+
+ if (mp->mr_next_hop_proto_is_ip4)
+ memcpy (&nh.ip4, mp->mr_next_hop, sizeof (nh.ip4));
+ else
+ memcpy (&nh.ip6, mp->mr_next_hop, sizeof (nh.ip6));
+
+ n_labels = mp->mr_next_hop_n_out_labels;
+ if (n_labels == 0)
+ ;
+ else if (1 == n_labels)
+ vec_add1 (label_stack, ntohl (mp->mr_next_hop_out_label_stack[0]));
+ else
+ {
+ vec_validate (label_stack, n_labels - 1);
+ for (ii = 0; ii < n_labels; ii++)
+ label_stack[ii] = ntohl (mp->mr_next_hop_out_label_stack[ii]);
+ }
+
+ return (add_del_route_t_handler (mp->mr_is_multipath, mp->mr_is_add, 0, // mp->is_drop,
+ 0, // mp->is_unreach,
+ 0, // mp->is_prohibit,
+ 0, // mp->is_local,
+ mp->mr_is_classify,
+ mp->mr_classify_table_index,
+ mp->mr_is_resolve_host,
+ mp->mr_is_resolve_attached,
+ fib_index, &pfx,
+ mp->mr_next_hop_proto_is_ip4,
+ &nh, ntohl (mp->mr_next_hop_sw_if_index),
+ next_hop_fib_index,
+ mp->mr_next_hop_weight,
+ ntohl (mp->mr_next_hop_via_label),
+ label_stack));
+}
+
+void
+vl_api_mpls_route_add_del_t_handler (vl_api_mpls_route_add_del_t * mp)
+{
+ vl_api_mpls_route_add_del_reply_t *rmp;
+ vnet_main_t *vnm;
+ int rv;
+
+ vnm = vnet_get_main ();
+ vnm->api_errno = 0;
+
+ rv = mpls_route_add_del_t_handler (vnm, mp);
+
+ rv = (rv == 0) ? vnm->api_errno : rv;
+
+ REPLY_MACRO (VL_API_MPLS_ROUTE_ADD_DEL_REPLY);
+}
+
+static int
+mpls_ip_bind_unbind_handler (vnet_main_t * vnm,
+ vl_api_mpls_ip_bind_unbind_t * mp)
+{
+ u32 mpls_fib_index, ip_fib_index;
+
+ mpls_fib_index =
+ fib_table_find (FIB_PROTOCOL_MPLS, ntohl (mp->mb_mpls_table_id));
+
+ if (~0 == mpls_fib_index)
+ {
+ if (mp->mb_create_table_if_needed)
+ {
+ mpls_fib_index =
+ fib_table_find_or_create_and_lock (FIB_PROTOCOL_MPLS,
+ ntohl (mp->mb_mpls_table_id));
+ }
+ else
+ return VNET_API_ERROR_NO_SUCH_FIB;
+ }
+
+ ip_fib_index = fib_table_find ((mp->mb_is_ip4 ?
+ FIB_PROTOCOL_IP4 :
+ FIB_PROTOCOL_IP6),
+ ntohl (mp->mb_ip_table_id));
+ if (~0 == ip_fib_index)
+ return VNET_API_ERROR_NO_SUCH_FIB;
+
+ fib_prefix_t pfx = {
+ .fp_len = mp->mb_address_length,
+ };
+
+ if (mp->mb_is_ip4)
+ {
+ pfx.fp_proto = FIB_PROTOCOL_IP4;
+ clib_memcpy (&pfx.fp_addr.ip4, mp->mb_address,
+ sizeof (pfx.fp_addr.ip4));
+ }
+ else
+ {
+ pfx.fp_proto = FIB_PROTOCOL_IP6;
+ clib_memcpy (&pfx.fp_addr.ip6, mp->mb_address,
+ sizeof (pfx.fp_addr.ip6));
+ }
+
+ if (mp->mb_is_bind)
+ fib_table_entry_local_label_add (ip_fib_index, &pfx,
+ ntohl (mp->mb_label));
+ else
+ fib_table_entry_local_label_remove (ip_fib_index, &pfx,
+ ntohl (mp->mb_label));
+
+ return (0);
+}
+
+void
+vl_api_mpls_ip_bind_unbind_t_handler (vl_api_mpls_ip_bind_unbind_t * mp)
+{
+ vl_api_mpls_route_add_del_reply_t *rmp;
+ vnet_main_t *vnm;
+ int rv;
+
+ vnm = vnet_get_main ();
+ vnm->api_errno = 0;
+
+ rv = mpls_ip_bind_unbind_handler (vnm, mp);
+
+ rv = (rv == 0) ? vnm->api_errno : rv;
+
+ REPLY_MACRO (VL_API_MPLS_ROUTE_ADD_DEL_REPLY);
+}
+
+static void
+vl_api_sw_interface_set_vpath_t_handler (vl_api_sw_interface_set_vpath_t * mp)
+{
+ vl_api_sw_interface_set_vpath_reply_t *rmp;
+ int rv = 0;
+ u32 sw_if_index = ntohl (mp->sw_if_index);
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_VPATH, mp->enable);
+ vnet_feature_enable_disable ("ip4-unicast", "vpath-input-ip4",
+ sw_if_index, mp->enable, 0, 0);
+ vnet_feature_enable_disable ("ip4-multicast", "vpath-input-ip4",
+ sw_if_index, mp->enable, 0, 0);
+ vnet_feature_enable_disable ("ip6-unicast", "vpath-input-ip6",
+ sw_if_index, mp->enable, 0, 0);
+ vnet_feature_enable_disable ("ip6-multicast", "vpath-input-ip6",
+ sw_if_index, mp->enable, 0, 0);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_SW_INTERFACE_SET_VPATH_REPLY);
+}
+
+static void
+ vl_api_sw_interface_set_vxlan_bypass_t_handler
+ (vl_api_sw_interface_set_vxlan_bypass_t * mp)
+{
+ vl_api_sw_interface_set_vxlan_bypass_reply_t *rmp;
+ int rv = 0;
+ u32 sw_if_index = ntohl (mp->sw_if_index);
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ if (mp->is_ipv6)
+ {
+ /* not yet implemented */
+ }
+ else
+ vnet_feature_enable_disable ("ip4-unicast", "ip4-vxlan-bypass",
+ sw_if_index, mp->enable, 0, 0);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_SW_INTERFACE_SET_VXLAN_BYPASS_REPLY);
+}
+
+static void
+ vl_api_sw_interface_set_l2_xconnect_t_handler
+ (vl_api_sw_interface_set_l2_xconnect_t * mp)
+{
+ vl_api_sw_interface_set_l2_xconnect_reply_t *rmp;
+ int rv = 0;
+ u32 rx_sw_if_index = ntohl (mp->rx_sw_if_index);
+ u32 tx_sw_if_index = ntohl (mp->tx_sw_if_index);
+ vlib_main_t *vm = vlib_get_main ();
+ vnet_main_t *vnm = vnet_get_main ();
+
+ VALIDATE_RX_SW_IF_INDEX (mp);
+
+ if (mp->enable)
+ {
+ VALIDATE_TX_SW_IF_INDEX (mp);
+ rv = set_int_l2_mode (vm, vnm, MODE_L2_XC,
+ rx_sw_if_index, 0, 0, 0, tx_sw_if_index);
+ }
+ else
+ {
+ rv = set_int_l2_mode (vm, vnm, MODE_L3, rx_sw_if_index, 0, 0, 0, 0);
+ }
+
+ BAD_RX_SW_IF_INDEX_LABEL;
+ BAD_TX_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_SW_INTERFACE_SET_L2_XCONNECT_REPLY);
+}
+
+static void
+ vl_api_sw_interface_set_l2_bridge_t_handler
+ (vl_api_sw_interface_set_l2_bridge_t * mp)
+{
+ bd_main_t *bdm = &bd_main;
+ vl_api_sw_interface_set_l2_bridge_reply_t *rmp;
+ int rv = 0;
+ u32 rx_sw_if_index = ntohl (mp->rx_sw_if_index);
+ u32 bd_id = ntohl (mp->bd_id);
+ u32 bd_index;
+ u32 bvi = mp->bvi;
+ u8 shg = mp->shg;
+ vlib_main_t *vm = vlib_get_main ();
+ vnet_main_t *vnm = vnet_get_main ();
+
+ VALIDATE_RX_SW_IF_INDEX (mp);
+
+ bd_index = bd_find_or_add_bd_index (bdm, bd_id);
+
+ if (mp->enable)
+ {
+ //VALIDATE_TX_SW_IF_INDEX(mp);
+ rv = set_int_l2_mode (vm, vnm, MODE_L2_BRIDGE,
+ rx_sw_if_index, bd_index, bvi, shg, 0);
+ }
+ else
+ {
+ rv = set_int_l2_mode (vm, vnm, MODE_L3, rx_sw_if_index, 0, 0, 0, 0);
+ }
+
+ BAD_RX_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_SW_INTERFACE_SET_L2_BRIDGE_REPLY);
+}
+
+static void
+ vl_api_sw_interface_set_dpdk_hqos_pipe_t_handler
+ (vl_api_sw_interface_set_dpdk_hqos_pipe_t * mp)
+{
+ vl_api_sw_interface_set_dpdk_hqos_pipe_reply_t *rmp;
+ int rv = 0;
+
+#if DPDK > 0
+ dpdk_main_t *dm = &dpdk_main;
+ dpdk_device_t *xd;
+
+ u32 sw_if_index = ntohl (mp->sw_if_index);
+ u32 subport = ntohl (mp->subport);
+ u32 pipe = ntohl (mp->pipe);
+ u32 profile = ntohl (mp->profile);
+ vnet_hw_interface_t *hw;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ /* hw_if & dpdk device */
+ hw = vnet_get_sup_hw_interface (dm->vnet_main, sw_if_index);
+
+ xd = vec_elt_at_index (dm->devices, hw->dev_instance);
+
+ rv = rte_sched_pipe_config (xd->hqos_ht->hqos, subport, pipe, profile);
+
+ BAD_SW_IF_INDEX_LABEL;
+#else
+ clib_warning ("setting HQoS pipe parameters without DPDK not implemented");
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+#endif /* DPDK */
+
+ REPLY_MACRO (VL_API_SW_INTERFACE_SET_DPDK_HQOS_PIPE_REPLY);
+}
+
+static void
+ vl_api_sw_interface_set_dpdk_hqos_subport_t_handler
+ (vl_api_sw_interface_set_dpdk_hqos_subport_t * mp)
+{
+ vl_api_sw_interface_set_dpdk_hqos_subport_reply_t *rmp;
+ int rv = 0;
+
+#if DPDK > 0
+ dpdk_main_t *dm = &dpdk_main;
+ dpdk_device_t *xd;
+ struct rte_sched_subport_params p;
+
+ u32 sw_if_index = ntohl (mp->sw_if_index);
+ u32 subport = ntohl (mp->subport);
+ p.tb_rate = ntohl (mp->tb_rate);
+ p.tb_size = ntohl (mp->tb_size);
+ p.tc_rate[0] = ntohl (mp->tc_rate[0]);
+ p.tc_rate[1] = ntohl (mp->tc_rate[1]);
+ p.tc_rate[2] = ntohl (mp->tc_rate[2]);
+ p.tc_rate[3] = ntohl (mp->tc_rate[3]);
+ p.tc_period = ntohl (mp->tc_period);
+
+ vnet_hw_interface_t *hw;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ /* hw_if & dpdk device */
+ hw = vnet_get_sup_hw_interface (dm->vnet_main, sw_if_index);
+
+ xd = vec_elt_at_index (dm->devices, hw->dev_instance);
+
+ rv = rte_sched_subport_config (xd->hqos_ht->hqos, subport, &p);
+
+ BAD_SW_IF_INDEX_LABEL;
+#else
+ clib_warning
+ ("setting HQoS subport parameters without DPDK not implemented");
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+#endif /* DPDK */
+
+ REPLY_MACRO (VL_API_SW_INTERFACE_SET_DPDK_HQOS_SUBPORT_REPLY);
+}
+
+static void
+ vl_api_sw_interface_set_dpdk_hqos_tctbl_t_handler
+ (vl_api_sw_interface_set_dpdk_hqos_tctbl_t * mp)
+{
+ vl_api_sw_interface_set_dpdk_hqos_tctbl_reply_t *rmp;
+ int rv = 0;
+
+#if DPDK > 0
+ dpdk_main_t *dm = &dpdk_main;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ dpdk_device_t *xd;
+
+ u32 sw_if_index = ntohl (mp->sw_if_index);
+ u32 entry = ntohl (mp->entry);
+ u32 tc = ntohl (mp->tc);
+ u32 queue = ntohl (mp->queue);
+ u32 val, i;
+
+ vnet_hw_interface_t *hw;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ /* hw_if & dpdk device */
+ hw = vnet_get_sup_hw_interface (dm->vnet_main, sw_if_index);
+
+ xd = vec_elt_at_index (dm->devices, hw->dev_instance);
+
+ if (tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
+ {
+ clib_warning ("invalid traffic class !!");
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ goto done;
+ }
+ if (queue >= RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS)
+ {
+ clib_warning ("invalid queue !!");
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ goto done;
+ }
+
+ /* Detect the set of worker threads */
+ uword *p = hash_get_mem (tm->thread_registrations_by_name, "workers");
+
+ if (p == 0)
+ {
+ clib_warning ("worker thread registration AWOL !!");
+ rv = VNET_API_ERROR_INVALID_VALUE_2;
+ goto done;
+ }
+
+ vlib_thread_registration_t *tr = (vlib_thread_registration_t *) p[0];
+ int worker_thread_first = tr->first_index;
+ int worker_thread_count = tr->count;
+
+ val = tc * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue;
+ for (i = 0; i < worker_thread_count; i++)
+ xd->hqos_wt[worker_thread_first + i].hqos_tc_table[entry] = val;
+
+ BAD_SW_IF_INDEX_LABEL;
+done:
+#else
+ clib_warning ("setting HQoS DSCP table entry without DPDK not implemented");
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+#endif /* DPDK */
+
+ REPLY_MACRO (VL_API_SW_INTERFACE_SET_DPDK_HQOS_TCTBL_REPLY);
+}
+
+static void
+vl_api_bridge_domain_add_del_t_handler (vl_api_bridge_domain_add_del_t * mp)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ bd_main_t *bdm = &bd_main;
+ vl_api_bridge_domain_add_del_reply_t *rmp;
+ int rv = 0;
+ u32 enable_flags = 0, disable_flags = 0;
+ u32 bd_id = ntohl (mp->bd_id);
+ u32 bd_index;
+
+ if (mp->is_add)
+ {
+ bd_index = bd_find_or_add_bd_index (bdm, bd_id);
+
+ if (mp->flood)
+ enable_flags |= L2_FLOOD;
+ else
+ disable_flags |= L2_FLOOD;
+
+ if (mp->uu_flood)
+ enable_flags |= L2_UU_FLOOD;
+ else
+ disable_flags |= L2_UU_FLOOD;
+
+ if (mp->forward)
+ enable_flags |= L2_FWD;
+ else
+ disable_flags |= L2_FWD;
+
+ if (mp->arp_term)
+ enable_flags |= L2_ARP_TERM;
+ else
+ disable_flags |= L2_ARP_TERM;
+
+ if (mp->learn)
+ enable_flags |= L2_LEARN;
+ else
+ disable_flags |= L2_LEARN;
+
+ if (enable_flags)
+ bd_set_flags (vm, bd_index, enable_flags, 1 /* enable */ );
+
+ if (disable_flags)
+ bd_set_flags (vm, bd_index, disable_flags, 0 /* disable */ );
+
+ bd_set_mac_age (vm, bd_index, mp->mac_age);
+ }
+ else
+ rv = bd_delete_bd_index (bdm, bd_id);
+
+ REPLY_MACRO (VL_API_BRIDGE_DOMAIN_ADD_DEL_REPLY);
+}
+
+static void
+vl_api_bridge_domain_details_t_handler (vl_api_bridge_domain_details_t * mp)
+{
+ clib_warning ("BUG");
+}
+
+static void
+ vl_api_bridge_domain_sw_if_details_t_handler
+ (vl_api_bridge_domain_sw_if_details_t * mp)
+{
+ clib_warning ("BUG");
+}
+
+static void
+send_bridge_domain_details (unix_shared_memory_queue_t * q,
+ l2_bridge_domain_t * bd_config,
+ u32 n_sw_ifs, u32 context)
+{
+ vl_api_bridge_domain_details_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_BRIDGE_DOMAIN_DETAILS);
+ mp->bd_id = ntohl (bd_config->bd_id);
+ mp->flood = bd_feature_flood (bd_config);
+ mp->uu_flood = bd_feature_uu_flood (bd_config);
+ mp->forward = bd_feature_forward (bd_config);
+ mp->learn = bd_feature_learn (bd_config);
+ mp->arp_term = bd_feature_arp_term (bd_config);
+ mp->bvi_sw_if_index = ntohl (bd_config->bvi_sw_if_index);
+ mp->mac_age = bd_config->mac_age;
+ mp->n_sw_ifs = ntohl (n_sw_ifs);
+ mp->context = context;
+
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+}
+
+static void
+send_bd_sw_if_details (l2input_main_t * l2im,
+ unix_shared_memory_queue_t * q,
+ l2_flood_member_t * member, u32 bd_id, u32 context)
+{
+ vl_api_bridge_domain_sw_if_details_t *mp;
+ l2_input_config_t *input_cfg;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_BRIDGE_DOMAIN_SW_IF_DETAILS);
+ mp->bd_id = ntohl (bd_id);
+ mp->sw_if_index = ntohl (member->sw_if_index);
+ input_cfg = vec_elt_at_index (l2im->configs, member->sw_if_index);
+ mp->shg = input_cfg->shg;
+ mp->context = context;
+
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+}
+
+static void
+vl_api_bridge_domain_dump_t_handler (vl_api_bridge_domain_dump_t * mp)
+{
+ bd_main_t *bdm = &bd_main;
+ l2input_main_t *l2im = &l2input_main;
+ unix_shared_memory_queue_t *q;
+ l2_bridge_domain_t *bd_config;
+ u32 bd_id, bd_index;
+ u32 end;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+
+ if (q == 0)
+ return;
+
+ bd_id = ntohl (mp->bd_id);
+
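+ /* bd_id of ~0 means dump all bridge domains, otherwise just the one */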
+ bd_index = (bd_id == ~0) ? 0 : bd_find_or_add_bd_index (bdm, bd_id);
+ end = (bd_id == ~0) ? vec_len (l2im->bd_configs) : bd_index + 1;
+ for (; bd_index < end; bd_index++)
+ {
+ bd_config = l2input_bd_config_from_index (l2im, bd_index);
+ /* skip dummy bd_id 0 */
+ if (bd_config && (bd_config->bd_id > 0))
+ {
+ u32 n_sw_ifs;
+ l2_flood_member_t *m;
+
+ n_sw_ifs = vec_len (bd_config->members);
+ send_bridge_domain_details (q, bd_config, n_sw_ifs, mp->context);
+
+ vec_foreach (m, bd_config->members)
+ {
+ send_bd_sw_if_details (l2im, q, m, bd_config->bd_id, mp->context);
+ }
+ }
+ }
+}
+
+static void
+vl_api_l2fib_add_del_t_handler (vl_api_l2fib_add_del_t * mp)
+{
+ bd_main_t *bdm = &bd_main;
+ l2input_main_t *l2im = &l2input_main;
+ vl_api_l2fib_add_del_reply_t *rmp;
+ int rv = 0;
+ u64 mac = 0;
+ u32 sw_if_index = ntohl (mp->sw_if_index);
+ u32 bd_id = ntohl (mp->bd_id);
+ u32 bd_index;
+ u32 static_mac;
+ u32 filter_mac;
+ u32 bvi_mac;
+ uword *p;
+
+ mac = mp->mac;
+
+ p = hash_get (bdm->bd_index_by_bd_id, bd_id);
+ if (!p)
+ {
+ rv = VNET_API_ERROR_NO_SUCH_ENTRY;
+ goto bad_sw_if_index;
+ }
+ bd_index = p[0];
+
+ if (mp->is_add)
+ {
+ filter_mac = mp->filter_mac ? 1 : 0;
+ if (filter_mac == 0)
+ {
+ VALIDATE_SW_IF_INDEX (mp);
+ if (vec_len (l2im->configs) <= sw_if_index)
+ {
+ rv = VNET_API_ERROR_INVALID_SW_IF_INDEX;
+ goto bad_sw_if_index;
+ }
+ else
+ {
+ l2_input_config_t *config;
+ config = vec_elt_at_index (l2im->configs, sw_if_index);
+ if (config->bridge == 0)
+ {
+ rv = VNET_API_ERROR_INVALID_SW_IF_INDEX;
+ goto bad_sw_if_index;
+ }
+ }
+ }
+ static_mac = mp->static_mac ? 1 : 0;
+ bvi_mac = mp->bvi_mac ? 1 : 0;
+ l2fib_add_entry (mac, bd_index, sw_if_index, static_mac, filter_mac,
+ bvi_mac);
+ }
+ else
+ {
+ l2fib_del_entry (mac, bd_index);
+ }
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_L2FIB_ADD_DEL_REPLY);
+}
+
+static void
+vl_api_l2_flags_t_handler (vl_api_l2_flags_t * mp)
+{
+ vl_api_l2_flags_reply_t *rmp;
+ int rv = 0;
+ u32 sw_if_index = ntohl (mp->sw_if_index);
+ u32 flags = ntohl (mp->feature_bitmap);
+ u32 rbm = 0;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+#define _(a,b) \
+ if (flags & L2INPUT_FEAT_ ## a) \
+ rbm = l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_ ## a, mp->is_set);
+ foreach_l2input_feat;
+#undef _
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_L2_FLAGS_REPLY,
+ ({
+ rmp->resulting_feature_bitmap = ntohl(rbm);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+vl_api_bridge_flags_t_handler (vl_api_bridge_flags_t * mp)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ bd_main_t *bdm = &bd_main;
+ vl_api_bridge_flags_reply_t *rmp;
+ int rv = 0;
+ u32 bd_id = ntohl (mp->bd_id);
+ u32 bd_index;
+ u32 flags = ntohl (mp->feature_bitmap);
+ uword *p;
+
+ p = hash_get (bdm->bd_index_by_bd_id, bd_id);
+ if (p == 0)
+ {
+ rv = VNET_API_ERROR_NO_SUCH_ENTRY;
+ goto out;
+ }
+
+ bd_index = p[0];
+
+ bd_set_flags (vm, bd_index, flags, mp->is_set);
+
+out:
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_BRIDGE_FLAGS_REPLY,
+ ({
+ rmp->resulting_feature_bitmap = ntohl(flags);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+vl_api_bd_ip_mac_add_del_t_handler (vl_api_bd_ip_mac_add_del_t * mp)
+{
+ bd_main_t *bdm = &bd_main;
+ vl_api_bd_ip_mac_add_del_reply_t *rmp;
+ int rv = 0;
+ u32 bd_id = ntohl (mp->bd_id);
+ u32 bd_index;
+ uword *p;
+
+ p = hash_get (bdm->bd_index_by_bd_id, bd_id);
+ if (p == 0)
+ {
+ rv = VNET_API_ERROR_NO_SUCH_ENTRY;
+ goto out;
+ }
+
+ bd_index = p[0];
+ if (bd_add_del_ip_mac (bd_index, mp->ip_address,
+ mp->mac_address, mp->is_ipv6, mp->is_add))
+ rv = VNET_API_ERROR_UNSPECIFIED;
+
+out:
+ REPLY_MACRO (VL_API_BD_IP_MAC_ADD_DEL_REPLY);
+}
+
+static void
+vl_api_create_vlan_subif_t_handler (vl_api_create_vlan_subif_t * mp)
+{
+ vl_api_create_vlan_subif_reply_t *rmp;
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 hw_if_index, sw_if_index = (u32) ~ 0;
+ vnet_hw_interface_t *hi;
+ int rv = 0;
+ u32 id;
+ vnet_sw_interface_t template;
+ uword *p;
+ vnet_interface_main_t *im = &vnm->interface_main;
+ u64 sup_and_sub_key;
+ u64 *kp;
+ unix_shared_memory_queue_t *q;
+ clib_error_t *error;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ hw_if_index = ntohl (mp->sw_if_index);
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+
+ id = ntohl (mp->vlan_id);
+ if (id == 0 || id > 4095)
+ {
+ rv = VNET_API_ERROR_INVALID_VLAN;
+ goto out;
+ }
+
+ sup_and_sub_key = ((u64) (hi->sw_if_index) << 32) | (u64) id;
+
+ p = hash_get_mem (im->sw_if_index_by_sup_and_sub, &sup_and_sub_key);
+ if (p)
+ {
+ rv = VNET_API_ERROR_VLAN_ALREADY_EXISTS;
+ goto out;
+ }
+
+ kp = clib_mem_alloc (sizeof (*kp));
+ *kp = sup_and_sub_key;
+
+ memset (&template, 0, sizeof (template));
+ template.type = VNET_SW_INTERFACE_TYPE_SUB;
+ template.sup_sw_if_index = hi->sw_if_index;
+ template.sub.id = id;
+ template.sub.eth.raw_flags = 0;
+ template.sub.eth.flags.one_tag = 1;
+ template.sub.eth.outer_vlan_id = id;
+ template.sub.eth.flags.exact_match = 1;
+
+ error = vnet_create_sw_interface (vnm, &template, &sw_if_index);
+ if (error)
+ {
+ clib_error_report (error);
+ rv = VNET_API_ERROR_INVALID_REGISTRATION;
+ goto out;
+ }
+ hash_set (hi->sub_interface_sw_if_index_by_id, id, sw_if_index);
+ hash_set_mem (im->sw_if_index_by_sup_and_sub, kp, sw_if_index);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+out:
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (!q)
+ return;
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs (VL_API_CREATE_VLAN_SUBIF_REPLY);
+ rmp->context = mp->context;
+ rmp->retval = ntohl (rv);
+ rmp->sw_if_index = ntohl (sw_if_index);
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+}
+
+static void
+vl_api_create_subif_t_handler (vl_api_create_subif_t * mp)
+{
+ vl_api_create_subif_reply_t *rmp;
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 sw_if_index = ~0;
+ int rv = 0;
+ u32 sub_id;
+ vnet_sw_interface_t *si;
+ vnet_hw_interface_t *hi;
+ vnet_sw_interface_t template;
+ uword *p;
+ vnet_interface_main_t *im = &vnm->interface_main;
+ u64 sup_and_sub_key;
+ u64 *kp;
+ clib_error_t *error;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ si = vnet_get_sup_sw_interface (vnm, ntohl (mp->sw_if_index));
+ hi = vnet_get_sup_hw_interface (vnm, ntohl (mp->sw_if_index));
+
+ if (hi->bond_info == VNET_HW_INTERFACE_BOND_INFO_SLAVE)
+ {
+ rv = VNET_API_ERROR_BOND_SLAVE_NOT_ALLOWED;
+ goto out;
+ }
+
+ sw_if_index = si->sw_if_index;
+ sub_id = ntohl (mp->sub_id);
+
+ sup_and_sub_key = ((u64) (sw_if_index) << 32) | (u64) sub_id;
+
+ p = hash_get_mem (im->sw_if_index_by_sup_and_sub, &sup_and_sub_key);
+ if (p)
+ {
+ if (CLIB_DEBUG > 0)
+ clib_warning ("sup sw_if_index %d, sub id %d already exists\n",
+ sw_if_index, sub_id);
+ rv = VNET_API_ERROR_SUBIF_ALREADY_EXISTS;
+ goto out;
+ }
+
+ kp = clib_mem_alloc (sizeof (*kp));
+ *kp = sup_and_sub_key;
+
+ memset (&template, 0, sizeof (template));
+ template.type = VNET_SW_INTERFACE_TYPE_SUB;
+ template.sup_sw_if_index = sw_if_index;
+ template.sub.id = sub_id;
+ template.sub.eth.flags.no_tags = mp->no_tags;
+ template.sub.eth.flags.one_tag = mp->one_tag;
+ template.sub.eth.flags.two_tags = mp->two_tags;
+ template.sub.eth.flags.dot1ad = mp->dot1ad;
+ template.sub.eth.flags.exact_match = mp->exact_match;
+ template.sub.eth.flags.default_sub = mp->default_sub;
+ template.sub.eth.flags.outer_vlan_id_any = mp->outer_vlan_id_any;
+ template.sub.eth.flags.inner_vlan_id_any = mp->inner_vlan_id_any;
+ template.sub.eth.outer_vlan_id = ntohs (mp->outer_vlan_id);
+ template.sub.eth.inner_vlan_id = ntohs (mp->inner_vlan_id);
+
+ error = vnet_create_sw_interface (vnm, &template, &sw_if_index);
+ if (error)
+ {
+ clib_error_report (error);
+ rv = VNET_API_ERROR_SUBIF_CREATE_FAILED;
+ goto out;
+ }
+
+ hash_set (hi->sub_interface_sw_if_index_by_id, sub_id, sw_if_index);
+ hash_set_mem (im->sw_if_index_by_sup_and_sub, kp, sw_if_index);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+out:
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_CREATE_SUBIF_REPLY,
+ ({
+ rmp->sw_if_index = ntohl(sw_if_index);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+vl_api_mpls_tunnel_add_del_t_handler (vl_api_mpls_tunnel_add_del_t * mp)
+{
+ vl_api_mpls_tunnel_add_del_reply_t *rmp;
+ int rv = 0;
+ stats_main_t *sm = &stats_main;
+ u32 tunnel_sw_if_index;
+ int ii;
+
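+ /* Serialize with the stats scraper thread while forwarding state changes */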
+ dslock (sm, 1 /* release hint */ , 5 /* tag */ );
+
+ if (mp->mt_is_add)
+ {
+ fib_route_path_t rpath, *rpaths = NULL;
+ mpls_label_t *label_stack = NULL;
+
+ memset (&rpath, 0, sizeof (rpath));
+
+ if (mp->mt_next_hop_proto_is_ip4)
+ {
+ rpath.frp_proto = FIB_PROTOCOL_IP4;
+ clib_memcpy (&rpath.frp_addr.ip4,
+ mp->mt_next_hop, sizeof (rpath.frp_addr.ip4));
+ }
+ else
+ {
+ rpath.frp_proto = FIB_PROTOCOL_IP6;
+ clib_memcpy (&rpath.frp_addr.ip6,
+ mp->mt_next_hop, sizeof (rpath.frp_addr.ip6));
+ }
+ rpath.frp_sw_if_index = ntohl (mp->mt_next_hop_sw_if_index);
+
+ for (ii = 0; ii < mp->mt_next_hop_n_out_labels; ii++)
+ vec_add1 (label_stack, ntohl (mp->mt_next_hop_out_label_stack[ii]));
+
+ vec_add1 (rpaths, rpath);
+
+ vnet_mpls_tunnel_add (rpaths, label_stack,
+ mp->mt_l2_only, &tunnel_sw_if_index);
+ vec_free (rpaths);
+ vec_free (label_stack);
+ }
+ else
+ {
+ tunnel_sw_if_index = ntohl (mp->mt_sw_if_index);
+ vnet_mpls_tunnel_del (tunnel_sw_if_index);
+ }
+
+ dsunlock (sm);
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_MPLS_TUNNEL_ADD_DEL_REPLY,
+ ({
+ rmp->sw_if_index = ntohl(tunnel_sw_if_index);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+vl_api_proxy_arp_add_del_t_handler (vl_api_proxy_arp_add_del_t * mp)
+{
+ vl_api_proxy_arp_add_del_reply_t *rmp;
+ u32 fib_index;
+ int rv;
+ ip4_main_t *im = &ip4_main;
+ stats_main_t *sm = &stats_main;
+ int vnet_proxy_arp_add_del (ip4_address_t * lo_addr,
+ ip4_address_t * hi_addr,
+ u32 fib_index, int is_del);
+ uword *p;
+
+ dslock (sm, 1 /* release hint */ , 6 /* tag */ );
+
+ p = hash_get (im->fib_index_by_table_id, ntohl (mp->vrf_id));
+
+ if (!p)
+ {
+ rv = VNET_API_ERROR_NO_SUCH_FIB;
+ goto out;
+ }
+
+ fib_index = p[0];
+
+ rv = vnet_proxy_arp_add_del ((ip4_address_t *) mp->low_address,
+ (ip4_address_t *) mp->hi_address,
+ fib_index, mp->is_add == 0);
+
+out:
+ dsunlock (sm);
+ REPLY_MACRO (VL_API_PROXY_ARP_ADD_DEL_REPLY);
+}
+
+static void
+ vl_api_proxy_arp_intfc_enable_disable_t_handler
+ (vl_api_proxy_arp_intfc_enable_disable_t * mp)
+{
+ int rv = 0;
+ vnet_main_t *vnm = vnet_get_main ();
+ vl_api_proxy_arp_intfc_enable_disable_reply_t *rmp;
+ vnet_sw_interface_t *si;
+ u32 sw_if_index;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ sw_if_index = ntohl (mp->sw_if_index);
+
+ if (pool_is_free_index (vnm->interface_main.sw_interfaces, sw_if_index))
+ {
+ rv = VNET_API_ERROR_INVALID_SW_IF_INDEX;
+ goto out;
+ }
+
+ si = vnet_get_sw_interface (vnm, sw_if_index);
+
+ ASSERT (si);
+
+ if (mp->enable_disable)
+ si->flags |= VNET_SW_INTERFACE_FLAG_PROXY_ARP;
+ else
+ si->flags &= ~VNET_SW_INTERFACE_FLAG_PROXY_ARP;
+
+ BAD_SW_IF_INDEX_LABEL;
+
+out:
+ REPLY_MACRO (VL_API_PROXY_ARP_INTFC_ENABLE_DISABLE_REPLY);
+}
+
+static void
+vl_api_is_address_reachable_t_handler (vl_api_is_address_reachable_t * mp)
+{
+#if 0
+ vpe_main_t *rm = &vpe_main;
+ ip4_main_t *im4 = &ip4_main;
+ ip6_main_t *im6 = &ip6_main;
+ ip_lookup_main_t *lm;
+ union
+ {
+ ip4_address_t ip4;
+ ip6_address_t ip6;
+ } addr;
+ u32 adj_index, sw_if_index;
+ vl_api_is_address_reachable_t *rmp;
+ ip_adjacency_t *adj;
+ unix_shared_memory_queue_t *q;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (!q)
+ {
+ increment_missing_api_client_counter (rm->vlib_main);
+ return;
+ }
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ clib_memcpy (rmp, mp, sizeof (*rmp));
+
+ sw_if_index = mp->next_hop_sw_if_index;
+ clib_memcpy (&addr, mp->address, sizeof (addr));
+ if (mp->is_ipv6)
+ {
+ lm = &im6->lookup_main;
+ adj_index = ip6_fib_lookup (im6, sw_if_index, &addr.ip6);
+ }
+ else
+ {
+ lm = &im4->lookup_main;
+ // FIXME NOT an ADJ
+ adj_index = ip4_fib_lookup (im4, sw_if_index, &addr.ip4);
+ }
+ if (adj_index == ~0)
+ {
+ rmp->is_error = 1;
+ goto send;
+ }
+ adj = ip_get_adjacency (lm, adj_index);
+
+ if (adj->lookup_next_index == IP_LOOKUP_NEXT_REWRITE
+ && adj->rewrite_header.sw_if_index == sw_if_index)
+ {
+ rmp->is_known = 1;
+ }
+ else
+ {
+ if (adj->lookup_next_index == IP_LOOKUP_NEXT_ARP
+ && adj->rewrite_header.sw_if_index == sw_if_index)
+ {
+ if (mp->is_ipv6)
+ ip6_probe_neighbor (rm->vlib_main, &addr.ip6, sw_if_index);
+ else
+ ip4_probe_neighbor (rm->vlib_main, &addr.ip4, sw_if_index);
+ }
+ else if (adj->lookup_next_index == IP_LOOKUP_NEXT_DROP)
+ {
+ rmp->is_known = 1;
+ goto send;
+ }
+ rmp->is_known = 0;
+ }
+
+send:
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+#endif
+}
+
+static void
+ vl_api_sw_interface_set_mpls_enable_t_handler
+ (vl_api_sw_interface_set_mpls_enable_t * mp)
+{
+ vl_api_sw_interface_set_mpls_enable_reply_t *rmp;
+ int rv = 0;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ mpls_sw_interface_enable_disable (&mpls_main,
+ ntohl (mp->sw_if_index), mp->enable);
+
+ BAD_SW_IF_INDEX_LABEL;
+ REPLY_MACRO (VL_API_SW_INTERFACE_SET_MPLS_ENABLE_REPLY);
+}
+
+void
+send_oam_event (oam_target_t * t)
+{
+ vpe_api_main_t *vam = &vpe_api_main;
+ unix_shared_memory_queue_t *q;
+ vpe_client_registration_t *reg;
+ vl_api_oam_event_t *mp;
+
+ /* *INDENT-OFF* */
+ pool_foreach(reg, vam->oam_events_registrations,
+ ({
+ q = vl_api_client_index_to_input_queue (reg->client_index);
+ if (q)
+ {
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_OAM_EVENT);
+ clib_memcpy (mp->dst_address, &t->dst_address,
+ sizeof (mp->dst_address));
+ mp->state = t->state;
+ vl_msg_api_send_shmem (q, (u8 *)&mp);
+ }
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+vl_api_oam_add_del_t_handler (vl_api_oam_add_del_t * mp)
+{
+ vl_api_oam_add_del_reply_t *rmp;
+ int rv;
+
+ rv = vpe_oam_add_del_target ((ip4_address_t *) mp->src_address,
+ (ip4_address_t *) mp->dst_address,
+ ntohl (mp->vrf_id), (int) (mp->is_add));
+
+ REPLY_MACRO (VL_API_OAM_ADD_DEL_REPLY);
+}
+
+static void
+vl_api_vnet_get_summary_stats_t_handler (vl_api_vnet_get_summary_stats_t * mp)
+{
+ stats_main_t *sm = &stats_main;
+ vnet_interface_main_t *im = sm->interface_main;
+ vl_api_vnet_summary_stats_reply_t *rmp;
+ vlib_combined_counter_main_t *cm;
+ vlib_counter_t v;
+ int i, which;
+ u64 total_pkts[VLIB_N_RX_TX];
+ u64 total_bytes[VLIB_N_RX_TX];
+
+ unix_shared_memory_queue_t *q =
+ vl_api_client_index_to_input_queue (mp->client_index);
+
+ if (!q)
+ return;
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs (VL_API_VNET_SUMMARY_STATS_REPLY);
+ rmp->context = mp->context;
+ rmp->retval = 0;
+
+ memset (total_pkts, 0, sizeof (total_pkts));
+ memset (total_bytes, 0, sizeof (total_bytes));
+
+ vnet_interface_counter_lock (im);
+
+ vec_foreach (cm, im->combined_sw_if_counters)
+ {
+ which = cm - im->combined_sw_if_counters;
+
+ for (i = 0; i < vec_len (cm->maxi); i++)
+ {
+ vlib_get_combined_counter (cm, i, &v);
+ total_pkts[which] += v.packets;
+ total_bytes[which] += v.bytes;
+ }
+ }
+ vnet_interface_counter_unlock (im);
+
+ rmp->total_pkts[VLIB_RX] = clib_host_to_net_u64 (total_pkts[VLIB_RX]);
+ rmp->total_bytes[VLIB_RX] = clib_host_to_net_u64 (total_bytes[VLIB_RX]);
+ rmp->total_pkts[VLIB_TX] = clib_host_to_net_u64 (total_pkts[VLIB_TX]);
+ rmp->total_bytes[VLIB_TX] = clib_host_to_net_u64 (total_bytes[VLIB_TX]);
+ rmp->vector_rate =
+ clib_host_to_net_u64 (vlib_last_vector_length_per_node (sm->vlib_main));
+
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+}
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ ip4_address_t address;
+ u32 address_length: 6;
+ u32 index:26;
+}) ip4_route_t;
+/* *INDENT-ON* */
+
+static int
+ip4_reset_fib_t_handler (vl_api_reset_fib_t * mp)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_interface_main_t *im = &vnm->interface_main;
+ ip4_main_t *im4 = &ip4_main;
+ static u32 *sw_if_indices_to_shut;
+ stats_main_t *sm = &stats_main;
+ fib_table_t *fib_table;
+ ip4_fib_t *fib;
+ u32 sw_if_index;
+ int i;
+ int rv = VNET_API_ERROR_NO_SUCH_FIB;
+ u32 target_fib_id = ntohl (mp->vrf_id);
+
+ dslock (sm, 1 /* release hint */ , 8 /* tag */ );
+
+ /* *INDENT-OFF* */
+ pool_foreach (fib_table, im4->fibs,
+ ({
+ fib = &fib_table->v4;
+ vnet_sw_interface_t * si;
+
+ if (fib->table_id != target_fib_id)
+ continue;
+
+ /* remove any mpls encap/decap labels */
+ mpls_fib_reset_labels (fib->table_id);
+
+ /* remove any proxy arps in this fib */
+ vnet_proxy_arp_fib_reset (fib->table_id);
+
+ /* Set the flow hash for this fib to the default */
+ vnet_set_ip4_flow_hash (fib->table_id, IP_FLOW_HASH_DEFAULT);
+
+ vec_reset_length (sw_if_indices_to_shut);
+
+ /* Shut down interfaces in this FIB / clean out intfc routes */
+ pool_foreach (si, im->sw_interfaces,
+ ({
+ u32 sw_if_index = si->sw_if_index;
+
+ if (sw_if_index < vec_len (im4->fib_index_by_sw_if_index)
+ && (im4->fib_index_by_sw_if_index[si->sw_if_index] ==
+ fib->index))
+ vec_add1 (sw_if_indices_to_shut, si->sw_if_index);
+ }));
+
+ for (i = 0; i < vec_len (sw_if_indices_to_shut); i++) {
+ sw_if_index = sw_if_indices_to_shut[i];
+ // vec_foreach (sw_if_index, sw_if_indices_to_shut) {
+
+ u32 flags = vnet_sw_interface_get_flags (vnm, sw_if_index);
+ flags &= ~(VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+ vnet_sw_interface_set_flags (vnm, sw_if_index, flags);
+ }
+
+ fib_table_flush(fib->index, FIB_PROTOCOL_IP4, FIB_SOURCE_API);
+ fib_table_flush(fib->index, FIB_PROTOCOL_IP4, FIB_SOURCE_INTERFACE);
+
+ rv = 0;
+ break;
+ })); /* pool_foreach (fib) */
+ /* *INDENT-ON* */
+
+ dsunlock (sm);
+ return rv;
+}
+
+static int
+ip6_reset_fib_t_handler (vl_api_reset_fib_t * mp)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_interface_main_t *im = &vnm->interface_main;
+ ip6_main_t *im6 = &ip6_main;
+ stats_main_t *sm = &stats_main;
+ static u32 *sw_if_indices_to_shut;
+ fib_table_t *fib_table;
+ ip6_fib_t *fib;
+ u32 sw_if_index;
+ int i;
+ int rv = VNET_API_ERROR_NO_SUCH_FIB;
+ u32 target_fib_id = ntohl (mp->vrf_id);
+
+ dslock (sm, 1 /* release hint */ , 9 /* tag */ );
+
+ /* *INDENT-OFF* */
+ pool_foreach (fib_table, im6->fibs,
+ ({
+ vnet_sw_interface_t * si;
+ fib = &(fib_table->v6);
+
+ if (fib->table_id != target_fib_id)
+ continue;
+
+ vec_reset_length (sw_if_indices_to_shut);
+
+ /* Shut down interfaces in this FIB / clean out intfc routes */
+ pool_foreach (si, im->sw_interfaces,
+ ({
+ if (im6->fib_index_by_sw_if_index[si->sw_if_index] ==
+ fib->index)
+ vec_add1 (sw_if_indices_to_shut, si->sw_if_index);
+ }));
+
+ for (i = 0; i < vec_len (sw_if_indices_to_shut); i++) {
+ sw_if_index = sw_if_indices_to_shut[i];
+ // vec_foreach (sw_if_index, sw_if_indices_to_shut) {
+
+ u32 flags = vnet_sw_interface_get_flags (vnm, sw_if_index);
+ flags &= ~(VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+ vnet_sw_interface_set_flags (vnm, sw_if_index, flags);
+ }
+
+ fib_table_flush(fib->index, FIB_PROTOCOL_IP6, FIB_SOURCE_API);
+ fib_table_flush(fib->index, FIB_PROTOCOL_IP6, FIB_SOURCE_INTERFACE);
+
+ rv = 0;
+ break;
+ })); /* pool_foreach (fib) */
+ /* *INDENT-ON* */
+
+ dsunlock (sm);
+ return rv;
+}
+
+static void
+vl_api_reset_fib_t_handler (vl_api_reset_fib_t * mp)
+{
+ int rv;
+ vl_api_reset_fib_reply_t *rmp;
+
+ if (mp->is_ipv6)
+ rv = ip6_reset_fib_t_handler (mp);
+ else
+ rv = ip4_reset_fib_t_handler (mp);
+
+ REPLY_MACRO (VL_API_RESET_FIB_REPLY);
+}
+
+
+static void
+dhcpv4_proxy_config (vl_api_dhcp_proxy_config_t * mp)
+{
+ vl_api_dhcp_proxy_config_reply_t *rmp;
+ int rv;
+
+ rv = dhcp_proxy_set_server ((ip4_address_t *) (&mp->dhcp_server),
+ (ip4_address_t *) (&mp->dhcp_src_address),
+ (u32) ntohl (mp->vrf_id),
+ (int) mp->insert_circuit_id,
+ (int) (mp->is_add == 0));
+
+ REPLY_MACRO (VL_API_DHCP_PROXY_CONFIG_REPLY);
+}
+
+
+static void
+dhcpv6_proxy_config (vl_api_dhcp_proxy_config_t * mp)
+{
+ vl_api_dhcp_proxy_config_reply_t *rmp;
+ int rv = -1;
+
+ rv = dhcpv6_proxy_set_server ((ip6_address_t *) (&mp->dhcp_server),
+ (ip6_address_t *) (&mp->dhcp_src_address),
+ (u32) ntohl (mp->vrf_id),
+ (int) mp->insert_circuit_id,
+ (int) (mp->is_add == 0));
+
+ REPLY_MACRO (VL_API_DHCP_PROXY_CONFIG_REPLY);
+}
+
+static void
+dhcpv4_proxy_config_2 (vl_api_dhcp_proxy_config_2_t * mp)
+{
+ vl_api_dhcp_proxy_config_reply_t *rmp;
+ int rv;
+
+ rv = dhcp_proxy_set_server_2 ((ip4_address_t *) (&mp->dhcp_server),
+ (ip4_address_t *) (&mp->dhcp_src_address),
+ (u32) ntohl (mp->rx_vrf_id),
+ (u32) ntohl (mp->server_vrf_id),
+ (int) mp->insert_circuit_id,
+ (int) (mp->is_add == 0));
+
+ REPLY_MACRO (VL_API_DHCP_PROXY_CONFIG_2_REPLY);
+}
+
+
+static void
+dhcpv6_proxy_config_2 (vl_api_dhcp_proxy_config_2_t * mp)
+{
+ vl_api_dhcp_proxy_config_reply_t *rmp;
+ int rv = -1;
+
+ rv = dhcpv6_proxy_set_server_2 ((ip6_address_t *) (&mp->dhcp_server),
+ (ip6_address_t *) (&mp->dhcp_src_address),
+ (u32) ntohl (mp->rx_vrf_id),
+ (u32) ntohl (mp->server_vrf_id),
+ (int) mp->insert_circuit_id,
+ (int) (mp->is_add == 0));
+
+ REPLY_MACRO (VL_API_DHCP_PROXY_CONFIG_2_REPLY);
+}
+
+
+static void
+vl_api_dhcp_proxy_set_vss_t_handler (vl_api_dhcp_proxy_set_vss_t * mp)
+{
+ vl_api_dhcp_proxy_set_vss_reply_t *rmp;
+ int rv;
+ if (!mp->is_ipv6)
+ rv = dhcp_proxy_set_option82_vss (ntohl (mp->tbl_id),
+ ntohl (mp->oui),
+ ntohl (mp->fib_id),
+ (int) mp->is_add == 0);
+ else
+ rv = dhcpv6_proxy_set_vss (ntohl (mp->tbl_id),
+ ntohl (mp->oui),
+ ntohl (mp->fib_id), (int) mp->is_add == 0);
+
+ REPLY_MACRO (VL_API_DHCP_PROXY_SET_VSS_REPLY);
+}
+
+
+static void vl_api_dhcp_proxy_config_t_handler
+ (vl_api_dhcp_proxy_config_t * mp)
+{
+ if (mp->is_ipv6 == 0)
+ dhcpv4_proxy_config (mp);
+ else
+ dhcpv6_proxy_config (mp);
+}
+
+static void vl_api_dhcp_proxy_config_2_t_handler
+ (vl_api_dhcp_proxy_config_2_t * mp)
+{
+ if (mp->is_ipv6 == 0)
+ dhcpv4_proxy_config_2 (mp);
+ else
+ dhcpv6_proxy_config_2 (mp);
+}
+
+void
+dhcp_compl_event_callback (u32 client_index, u32 pid, u8 * hostname,
+ u8 is_ipv6, u8 * host_address, u8 * router_address,
+ u8 * host_mac)
+{
+ unix_shared_memory_queue_t *q;
+ vl_api_dhcp_compl_event_t *mp;
+
+ q = vl_api_client_index_to_input_queue (client_index);
+ if (!q)
+ return;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ mp->client_index = client_index;
+ mp->pid = pid;
+ mp->is_ipv6 = is_ipv6;
+ clib_memcpy (&mp->hostname, hostname, vec_len (hostname));
+ mp->hostname[vec_len (hostname)] = '\0';
+ clib_memcpy (&mp->host_address[0], host_address, 16);
+ clib_memcpy (&mp->router_address[0], router_address, 16);
+
+ if (NULL != host_mac)
+ clib_memcpy (&mp->host_mac[0], host_mac, 6);
+
+ mp->_vl_msg_id = ntohs (VL_API_DHCP_COMPL_EVENT);
+
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+}
+
+static void vl_api_dhcp_client_config_t_handler
+ (vl_api_dhcp_client_config_t * mp)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vl_api_dhcp_client_config_reply_t *rmp;
+ int rv = 0;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ rv = dhcp_client_config (vm, ntohl (mp->sw_if_index),
+ mp->hostname, mp->is_add, mp->client_index,
+ mp->want_dhcp_event ? dhcp_compl_event_callback :
+ NULL, mp->pid);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_DHCP_CLIENT_CONFIG_REPLY);
+}
+
+static void
+vl_api_create_loopback_t_handler (vl_api_create_loopback_t * mp)
+{
+ vl_api_create_loopback_reply_t *rmp;
+ u32 sw_if_index;
+ int rv;
+
+ rv = vnet_create_loopback_interface (&sw_if_index, mp->mac_address);
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_CREATE_LOOPBACK_REPLY,
+ ({
+ rmp->sw_if_index = ntohl (sw_if_index);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+vl_api_delete_loopback_t_handler (vl_api_delete_loopback_t * mp)
+{
+ vl_api_delete_loopback_reply_t *rmp;
+ u32 sw_if_index;
+ int rv;
+
+ sw_if_index = ntohl (mp->sw_if_index);
+ rv = vnet_delete_loopback_interface (sw_if_index);
+
+ REPLY_MACRO (VL_API_DELETE_LOOPBACK_REPLY);
+}
+
+static void
+vl_api_control_ping_t_handler (vl_api_control_ping_t * mp)
+{
+ vl_api_control_ping_reply_t *rmp;
+ int rv = 0;
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_CONTROL_PING_REPLY,
+ ({
+ rmp->vpe_pid = ntohl (getpid());
+ }));
+ /* *INDENT-ON* */
+}
+
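+/*
+ * CLI output callback: accumulate output in a vector allocated on the
+ * shared-memory API heap so it can be returned to the client.
+ */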
+static void
+shmem_cli_output (uword arg, u8 * buffer, uword buffer_bytes)
+{
+ u8 **shmem_vecp = (u8 **) arg;
+ u8 *shmem_vec;
+ void *oldheap;
+ api_main_t *am = &api_main;
+ u32 offset;
+
+ shmem_vec = *shmem_vecp;
+
+ offset = vec_len (shmem_vec);
+
+ pthread_mutex_lock (&am->vlib_rp->mutex);
+ oldheap = svm_push_data_heap (am->vlib_rp);
+
+ vec_validate (shmem_vec, offset + buffer_bytes - 1);
+
+ clib_memcpy (shmem_vec + offset, buffer, buffer_bytes);
+
+ svm_pop_heap (oldheap);
+ pthread_mutex_unlock (&am->vlib_rp->mutex);
+
+ *shmem_vecp = shmem_vec;
+}
+
+
+static void
+vl_api_cli_request_t_handler (vl_api_cli_request_t * mp)
+{
+ vl_api_cli_reply_t *rp;
+ unix_shared_memory_queue_t *q;
+ vlib_main_t *vm = vlib_get_main ();
+ api_main_t *am = &api_main;
+ unformat_input_t input;
+ u8 *shmem_vec = 0;
+ void *oldheap;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (!q)
+ return;
+
+ rp = vl_msg_api_alloc (sizeof (*rp));
+ rp->_vl_msg_id = ntohs (VL_API_CLI_REPLY);
+ rp->context = mp->context;
+
+ unformat_init_vector (&input, (u8 *) (uword) mp->cmd_in_shmem);
+
+ vlib_cli_input (vm, &input, shmem_cli_output, (uword) & shmem_vec);
+
+ pthread_mutex_lock (&am->vlib_rp->mutex);
+ oldheap = svm_push_data_heap (am->vlib_rp);
+
+ vec_add1 (shmem_vec, 0);
+
+ svm_pop_heap (oldheap);
+ pthread_mutex_unlock (&am->vlib_rp->mutex);
+
+ rp->reply_in_shmem = (uword) shmem_vec;
+
+ vl_msg_api_send_shmem (q, (u8 *) & rp);
+}
+
+static void
+inband_cli_output (uword arg, u8 * buffer, uword buffer_bytes)
+{
+ u8 **mem_vecp = (u8 **) arg;
+ u8 *mem_vec = *mem_vecp;
+ u32 offset = vec_len (mem_vec);
+
+ vec_validate (mem_vec, offset + buffer_bytes - 1);
+ clib_memcpy (mem_vec + offset, buffer, buffer_bytes);
+ *mem_vecp = mem_vec;
+}
+
+static void
+vl_api_cli_inband_t_handler (vl_api_cli_inband_t * mp)
+{
+ vl_api_cli_inband_reply_t *rmp;
+ int rv = 0;
+ unix_shared_memory_queue_t *q;
+ vlib_main_t *vm = vlib_get_main ();
+ unformat_input_t input;
+ u8 *out_vec = 0;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (!q)
+ return;
+
+ unformat_init_string (&input, (char *) mp->cmd, ntohl (mp->length));
+ vlib_cli_input (vm, &input, inband_cli_output, (uword) & out_vec);
+
+ u32 len = vec_len (out_vec);
+ /* *INDENT-OFF* */
+ REPLY_MACRO3(VL_API_CLI_INBAND_REPLY, len,
+ ({
+ rmp->length = htonl (len);
+ clib_memcpy (rmp->reply, out_vec, len);
+ }));
+ /* *INDENT-ON* */
+ vec_free (out_vec);
+}
+
+static void
+vl_api_set_arp_neighbor_limit_t_handler (vl_api_set_arp_neighbor_limit_t * mp)
+{
+ int rv;
+ vl_api_set_arp_neighbor_limit_reply_t *rmp;
+ vnet_main_t *vnm = vnet_get_main ();
+ clib_error_t *error;
+
+ vnm->api_errno = 0;
+
+ if (mp->is_ipv6)
+ error = ip6_set_neighbor_limit (ntohl (mp->arp_neighbor_limit));
+ else
+ error = ip4_set_arp_limit (ntohl (mp->arp_neighbor_limit));
+
+ if (error)
+ {
+ clib_error_report (error);
+ rv = VNET_API_ERROR_UNSPECIFIED;
+ }
+ else
+ {
+ rv = vnm->api_errno;
+ }
+
+ REPLY_MACRO (VL_API_SET_ARP_NEIGHBOR_LIMIT_REPLY);
+}
+
+static void vl_api_sr_tunnel_add_del_t_handler
+ (vl_api_sr_tunnel_add_del_t * mp)
+{
+#if IP6SR == 0
+ clib_warning ("unimplemented");
+#else
+ ip6_sr_add_del_tunnel_args_t _a, *a = &_a;
+ int rv = 0;
+ vl_api_sr_tunnel_add_del_reply_t *rmp;
+ ip6_address_t *segments = 0, *seg;
+ ip6_address_t *tags = 0, *tag;
+ ip6_address_t *this_address;
+ int i;
+
+ if (mp->n_segments == 0)
+ {
+ rv = -11;
+ goto out;
+ }
+
+ memset (a, 0, sizeof (*a));
+ a->src_address = (ip6_address_t *) & mp->src_address;
+ a->dst_address = (ip6_address_t *) & mp->dst_address;
+ a->dst_mask_width = mp->dst_mask_width;
+ a->flags_net_byte_order = mp->flags_net_byte_order;
+ a->is_del = (mp->is_add == 0);
+ a->rx_table_id = ntohl (mp->outer_vrf_id);
+ a->tx_table_id = ntohl (mp->inner_vrf_id);
+
+ a->name = format (0, "%s", mp->name);
+ if (!(vec_len (a->name)))
+ a->name = 0;
+
+ a->policy_name = format (0, "%s", mp->policy_name);
+ if (!(vec_len (a->policy_name)))
+ a->policy_name = 0;
+
+ /* Yank segments and tags out of the API message */
+ this_address = (ip6_address_t *) mp->segs_and_tags;
+ for (i = 0; i < mp->n_segments; i++)
+ {
+ vec_add2 (segments, seg, 1);
+ clib_memcpy (seg->as_u8, this_address->as_u8, sizeof (*this_address));
+ this_address++;
+ }
+ for (i = 0; i < mp->n_tags; i++)
+ {
+ vec_add2 (tags, tag, 1);
+ clib_memcpy (tag->as_u8, this_address->as_u8, sizeof (*this_address));
+ this_address++;
+ }
+
+ a->segments = segments;
+ a->tags = tags;
+
+ rv = ip6_sr_add_del_tunnel (a);
+
+out:
+
+ REPLY_MACRO (VL_API_SR_TUNNEL_ADD_DEL_REPLY);
+#endif
+}
+
+static void vl_api_sr_policy_add_del_t_handler
+ (vl_api_sr_policy_add_del_t * mp)
+{
+#if IP6SR == 0
+ clib_warning ("unimplemented");
+#else
+ ip6_sr_add_del_policy_args_t _a, *a = &_a;
+ int rv = 0;
+ vl_api_sr_policy_add_del_reply_t *rmp;
+ int i;
+
+ memset (a, 0, sizeof (*a));
+ a->is_del = (mp->is_add == 0);
+
+ a->name = format (0, "%s", mp->name);
+ if (!(vec_len (a->name)))
+ {
+ rv = VNET_API_ERROR_NO_SUCH_NODE2;
+ goto out;
+ }
+
+ if (!(mp->tunnel_names[0]))
+ {
+ rv = VNET_API_ERROR_NO_SUCH_NODE2;
+ goto out;
+ }
+
+  /* Deserialize tunnel_names, which is encoded as a leading count byte
+   * followed by a (length, name) pair for each tunnel. */
+  int num_tunnels = mp->tunnel_names[0];
+  u8 *deser_tun_names = mp->tunnel_names;
+  deser_tun_names += 1;   /* skip the count byte */
+
+ u8 *tun_name = 0;
+ int tun_name_len = 0;
+
+ for (i = 0; i < num_tunnels; i++)
+ {
+ tun_name_len = *deser_tun_names;
+ deser_tun_names += 1;
+ vec_resize (tun_name, tun_name_len);
+ memcpy (tun_name, deser_tun_names, tun_name_len);
+ vec_add1 (a->tunnel_names, tun_name);
+ deser_tun_names += tun_name_len;
+ tun_name = 0;
+ }
+
+ rv = ip6_sr_add_del_policy (a);
+
+out:
+
+ REPLY_MACRO (VL_API_SR_POLICY_ADD_DEL_REPLY);
+#endif
+}
+
+static void vl_api_sr_multicast_map_add_del_t_handler
+ (vl_api_sr_multicast_map_add_del_t * mp)
+{
+#if IP6SR == 0
+ clib_warning ("unimplemented");
+#else
+ ip6_sr_add_del_multicastmap_args_t _a, *a = &_a;
+ int rv = 0;
+ vl_api_sr_multicast_map_add_del_reply_t *rmp;
+
+ memset (a, 0, sizeof (*a));
+ a->is_del = (mp->is_add == 0);
+
+ a->multicast_address = (ip6_address_t *) & mp->multicast_address;
+ a->policy_name = format (0, "%s", mp->policy_name);
+
+ if (a->multicast_address == 0)
+ {
+ rv = -1;
+ goto out;
+ }
+
+ if (!(a->policy_name))
+ {
+ rv = -2;
+ goto out;
+ }
+
+#if DPDK > 0 /* Cannot call replicate without DPDK */
+ rv = ip6_sr_add_del_multicastmap (a);
+#else
+ clib_warning ("multicast replication without DPDK not implemented");
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+#endif /* DPDK */
+
+out:
+
+ REPLY_MACRO (VL_API_SR_MULTICAST_MAP_ADD_DEL_REPLY);
+#endif
+}
+
+#define foreach_classify_add_del_table_field \
+_(table_index) \
+_(nbuckets) \
+_(memory_size) \
+_(skip_n_vectors) \
+_(match_n_vectors) \
+_(next_table_index) \
+_(miss_next_index) \
+_(current_data_flag) \
+_(current_data_offset)
+
+static void vl_api_classify_add_del_table_t_handler
+ (vl_api_classify_add_del_table_t * mp)
+{
+ vl_api_classify_add_del_table_reply_t *rmp;
+ vnet_classify_main_t *cm = &vnet_classify_main;
+ vnet_classify_table_t *t;
+ int rv;
+
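+  /* expand the field list twice: once to declare a u32 local per
+   * field, once to fetch each field from the message in host byte
+   * order */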
+#define _(a) u32 a;
+ foreach_classify_add_del_table_field;
+#undef _
+
+#define _(a) a = ntohl(mp->a);
+ foreach_classify_add_del_table_field;
+#undef _
+
+ /* The underlying API fails silently, on purpose, so check here */
+ if (mp->is_add == 0) /* delete */
+ {
+ if (pool_is_free_index (cm->tables, table_index))
+ {
+ rv = VNET_API_ERROR_NO_SUCH_TABLE;
+ goto out;
+ }
+ }
+ else /* add or update */
+ {
+ if (table_index != ~0 && pool_is_free_index (cm->tables, table_index))
+ table_index = ~0;
+ }
+
+ rv = vnet_classify_add_del_table
+ (cm, mp->mask, nbuckets, memory_size,
+ skip_n_vectors, match_n_vectors,
+ next_table_index, miss_next_index, &table_index,
+ current_data_flag, current_data_offset, mp->is_add, mp->del_chain);
+
+out:
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_CLASSIFY_ADD_DEL_TABLE_REPLY,
+ ({
+ if (rv == 0 && mp->is_add)
+ {
+ t = pool_elt_at_index (cm->tables, table_index);
+ rmp->skip_n_vectors = ntohl(t->skip_n_vectors);
+ rmp->match_n_vectors = ntohl(t->match_n_vectors);
+ rmp->new_table_index = ntohl(table_index);
+ }
+ else
+ {
+ rmp->skip_n_vectors = ~0;
+ rmp->match_n_vectors = ~0;
+ rmp->new_table_index = ~0;
+ }
+ }));
+ /* *INDENT-ON* */
+}
+
+static void vl_api_classify_add_del_session_t_handler
+ (vl_api_classify_add_del_session_t * mp)
+{
+ vnet_classify_main_t *cm = &vnet_classify_main;
+ vl_api_classify_add_del_session_reply_t *rmp;
+ int rv;
+ u32 table_index, hit_next_index, opaque_index, metadata;
+ i32 advance;
+ u8 action;
+
+ table_index = ntohl (mp->table_index);
+ hit_next_index = ntohl (mp->hit_next_index);
+ opaque_index = ntohl (mp->opaque_index);
+ advance = ntohl (mp->advance);
+ action = mp->action;
+ metadata = ntohl (mp->metadata);
+
+ rv = vnet_classify_add_del_session
+ (cm, table_index, mp->match, hit_next_index, opaque_index,
+ advance, action, metadata, mp->is_add);
+
+ REPLY_MACRO (VL_API_CLASSIFY_ADD_DEL_SESSION_REPLY);
+}
+
+static void vl_api_classify_set_interface_ip_table_t_handler
+ (vl_api_classify_set_interface_ip_table_t * mp)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vl_api_classify_set_interface_ip_table_reply_t *rmp;
+ int rv;
+ u32 table_index, sw_if_index;
+
+ table_index = ntohl (mp->table_index);
+ sw_if_index = ntohl (mp->sw_if_index);
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ if (mp->is_ipv6)
+ rv = vnet_set_ip6_classify_intfc (vm, sw_if_index, table_index);
+ else
+ rv = vnet_set_ip4_classify_intfc (vm, sw_if_index, table_index);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_CLASSIFY_SET_INTERFACE_IP_TABLE_REPLY);
+}
+
+static void vl_api_classify_set_interface_l2_tables_t_handler
+ (vl_api_classify_set_interface_l2_tables_t * mp)
+{
+ vl_api_classify_set_interface_l2_tables_reply_t *rmp;
+ int rv;
+ u32 sw_if_index, ip4_table_index, ip6_table_index, other_table_index;
+ int enable;
+
+ ip4_table_index = ntohl (mp->ip4_table_index);
+ ip6_table_index = ntohl (mp->ip6_table_index);
+ other_table_index = ntohl (mp->other_table_index);
+ sw_if_index = ntohl (mp->sw_if_index);
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ if (mp->is_input)
+ rv = vnet_l2_input_classify_set_tables (sw_if_index, ip4_table_index,
+ ip6_table_index,
+ other_table_index);
+ else
+ rv = vnet_l2_output_classify_set_tables (sw_if_index, ip4_table_index,
+ ip6_table_index,
+ other_table_index);
+
+ if (rv == 0)
+ {
+ if (ip4_table_index != ~0 || ip6_table_index != ~0
+ || other_table_index != ~0)
+ enable = 1;
+ else
+ enable = 0;
+
+ if (mp->is_input)
+ vnet_l2_input_classify_enable_disable (sw_if_index, enable);
+ else
+ vnet_l2_output_classify_enable_disable (sw_if_index, enable);
+ }
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_CLASSIFY_SET_INTERFACE_L2_TABLES_REPLY);
+}
+
+static void
+vl_api_l2_fib_clear_table_t_handler (vl_api_l2_fib_clear_table_t * mp)
+{
+ int rv = 0;
+ vl_api_l2_fib_clear_table_reply_t *rmp;
+
+ /* DAW-FIXME: This API should only clear non-static l2fib entries, but
+ * that is not currently implemented. When that TODO is fixed
+ * this call should be changed to pass 1 instead of 0.
+ */
+ l2fib_clear_table (0);
+
+ REPLY_MACRO (VL_API_L2_FIB_CLEAR_TABLE_REPLY);
+}
+
+extern void l2_efp_filter_configure (vnet_main_t * vnet_main,
+ u32 sw_if_index, u32 enable);
+
+static void
+vl_api_l2_interface_efp_filter_t_handler (vl_api_l2_interface_efp_filter_t *
+ mp)
+{
+ int rv;
+ vl_api_l2_interface_efp_filter_reply_t *rmp;
+ vnet_main_t *vnm = vnet_get_main ();
+
+ // enable/disable the feature
+ l2_efp_filter_configure (vnm, mp->sw_if_index, mp->enable_disable);
+ rv = vnm->api_errno;
+
+ REPLY_MACRO (VL_API_L2_INTERFACE_EFP_FILTER_REPLY);
+}
+
+static void
+ vl_api_l2_interface_vlan_tag_rewrite_t_handler
+ (vl_api_l2_interface_vlan_tag_rewrite_t * mp)
+{
+ int rv = 0;
+ vl_api_l2_interface_vlan_tag_rewrite_reply_t *rmp;
+ vnet_main_t *vnm = vnet_get_main ();
+ vlib_main_t *vm = vlib_get_main ();
+ u32 vtr_op;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ vtr_op = ntohl (mp->vtr_op);
+
+  /* The L2 code does not validate vtr_op, so sanity-check it here */
+ switch (vtr_op)
+ {
+ case L2_VTR_DISABLED:
+ case L2_VTR_PUSH_1:
+ case L2_VTR_PUSH_2:
+ case L2_VTR_POP_1:
+ case L2_VTR_POP_2:
+ case L2_VTR_TRANSLATE_1_1:
+ case L2_VTR_TRANSLATE_1_2:
+ case L2_VTR_TRANSLATE_2_1:
+ case L2_VTR_TRANSLATE_2_2:
+ break;
+
+ default:
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ goto bad_sw_if_index;
+ }
+
+ rv = l2vtr_configure (vm, vnm, ntohl (mp->sw_if_index), vtr_op,
+ ntohl (mp->push_dot1q), ntohl (mp->tag1),
+ ntohl (mp->tag2));
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_L2_INTERFACE_VLAN_TAG_REWRITE_REPLY);
+}
+
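+/* l2_fib_table_entry is a dump detail message that vpp sends to
+ * clients and should never receive; the handler exists only to flag
+ * a misdirected message, hence the "BUG" warning. */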
+static void
+vl_api_l2_fib_table_entry_t_handler (vl_api_l2_fib_table_entry_t * mp)
+{
+ clib_warning ("BUG");
+}
+
+static void
+send_l2fib_table_entry (vpe_api_main_t * am,
+ unix_shared_memory_queue_t * q,
+ l2fib_entry_key_t * l2fe_key,
+ l2fib_entry_result_t * l2fe_res, u32 context)
+{
+ vl_api_l2_fib_table_entry_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_L2_FIB_TABLE_ENTRY);
+
+ mp->bd_id =
+ ntohl (l2input_main.bd_configs[l2fe_key->fields.bd_index].bd_id);
+
+ mp->mac = l2fib_make_key (l2fe_key->fields.mac, 0);
+ mp->sw_if_index = ntohl (l2fe_res->fields.sw_if_index);
+ mp->static_mac = l2fe_res->fields.static_mac;
+ mp->filter_mac = l2fe_res->fields.filter;
+ mp->bvi_mac = l2fe_res->fields.bvi;
+ mp->context = context;
+
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+}
+
+static void
+vl_api_l2_fib_table_dump_t_handler (vl_api_l2_fib_table_dump_t * mp)
+{
+ vpe_api_main_t *am = &vpe_api_main;
+ bd_main_t *bdm = &bd_main;
+ l2fib_entry_key_t *l2fe_key = NULL;
+ l2fib_entry_result_t *l2fe_res = NULL;
+ u32 ni, bd_id = ntohl (mp->bd_id);
+ u32 bd_index;
+ unix_shared_memory_queue_t *q;
+ uword *p;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ return;
+
+ /* see l2fib_table_dump: ~0 means "any" */
+ if (bd_id == ~0)
+ bd_index = ~0;
+ else
+ {
+ p = hash_get (bdm->bd_index_by_bd_id, bd_id);
+ if (p == 0)
+ return;
+
+ bd_index = p[0];
+ }
+
+ l2fib_table_dump (bd_index, &l2fe_key, &l2fe_res);
+
+ vec_foreach_index (ni, l2fe_key)
+ {
+ send_l2fib_table_entry (am, q, vec_elt_at_index (l2fe_key, ni),
+ vec_elt_at_index (l2fe_res, ni), mp->context);
+ }
+ vec_free (l2fe_key);
+ vec_free (l2fe_res);
+}
+
+static void
+vl_api_show_version_t_handler (vl_api_show_version_t * mp)
+{
+ vl_api_show_version_reply_t *rmp;
+ int rv = 0;
+ char *vpe_api_get_build_directory (void);
+ char *vpe_api_get_version (void);
+ char *vpe_api_get_build_date (void);
+
+ unix_shared_memory_queue_t *q =
+ vl_api_client_index_to_input_queue (mp->client_index);
+
+ if (!q)
+ return;
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_SHOW_VERSION_REPLY,
+ ({
+ strncpy ((char *) rmp->program, "vpe", ARRAY_LEN(rmp->program)-1);
+ strncpy ((char *) rmp->build_directory, vpe_api_get_build_directory(),
+ ARRAY_LEN(rmp->build_directory)-1);
+ strncpy ((char *) rmp->version, vpe_api_get_version(),
+ ARRAY_LEN(rmp->version)-1);
+ strncpy ((char *) rmp->build_date, vpe_api_get_build_date(),
+ ARRAY_LEN(rmp->build_date)-1);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+vl_api_get_node_index_t_handler (vl_api_get_node_index_t * mp)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vl_api_get_node_index_reply_t *rmp;
+ vlib_node_t *n;
+ int rv = 0;
+ u32 node_index = ~0;
+
+ n = vlib_get_node_by_name (vm, mp->node_name);
+
+ if (n == 0)
+ rv = VNET_API_ERROR_NO_SUCH_NODE;
+ else
+ node_index = n->index;
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_GET_NODE_INDEX_REPLY,
+ ({
+ rmp->node_index = ntohl(node_index);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+vl_api_get_next_index_t_handler (vl_api_get_next_index_t * mp)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vl_api_get_next_index_reply_t *rmp;
+ vlib_node_t *node, *next_node;
+ int rv = 0;
+ u32 next_node_index = ~0, next_index = ~0;
+ uword *p;
+
+ node = vlib_get_node_by_name (vm, mp->node_name);
+
+ if (node == 0)
+ {
+ rv = VNET_API_ERROR_NO_SUCH_NODE;
+ goto out;
+ }
+
+ next_node = vlib_get_node_by_name (vm, mp->next_name);
+
+ if (next_node == 0)
+ {
+ rv = VNET_API_ERROR_NO_SUCH_NODE2;
+ goto out;
+ }
+ else
+ next_node_index = next_node->index;
+
+ p = hash_get (node->next_slot_by_node, next_node_index);
+
+ if (p == 0)
+ {
+ rv = VNET_API_ERROR_NO_SUCH_ENTRY;
+ goto out;
+ }
+ else
+ next_index = p[0];
+
+out:
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_GET_NEXT_INDEX_REPLY,
+ ({
+ rmp->next_index = ntohl(next_index);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+vl_api_add_node_next_t_handler (vl_api_add_node_next_t * mp)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vl_api_add_node_next_reply_t *rmp;
+ vlib_node_t *n, *next;
+ int rv = 0;
+ u32 next_index = ~0;
+
+ n = vlib_get_node_by_name (vm, mp->node_name);
+
+ if (n == 0)
+ {
+ rv = VNET_API_ERROR_NO_SUCH_NODE;
+ goto out;
+ }
+
+ next = vlib_get_node_by_name (vm, mp->next_name);
+
+ if (next == 0)
+ rv = VNET_API_ERROR_NO_SUCH_NODE2;
+ else
+ next_index = vlib_node_add_next (vm, n->index, next->index);
+
+out:
+ /* *INDENT-OFF* */
+  REPLY_MACRO2(VL_API_ADD_NODE_NEXT_REPLY,
+ ({
+ rmp->next_index = ntohl(next_index);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void vl_api_vxlan_add_del_tunnel_t_handler
+ (vl_api_vxlan_add_del_tunnel_t * mp)
+{
+ vl_api_vxlan_add_del_tunnel_reply_t *rmp;
+ int rv = 0;
+ vnet_vxlan_add_del_tunnel_args_t _a, *a = &_a;
+ u32 encap_fib_index;
+ uword *p;
+ ip4_main_t *im = &ip4_main;
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 sw_if_index = ~0;
+
+ p = hash_get (im->fib_index_by_table_id, ntohl (mp->encap_vrf_id));
+ if (!p)
+ {
+ rv = VNET_API_ERROR_NO_SUCH_FIB;
+ goto out;
+ }
+ encap_fib_index = p[0];
+ memset (a, 0, sizeof (*a));
+
+ a->is_add = mp->is_add;
+ a->is_ip6 = mp->is_ipv6;
+
+ /* ip addresses sent in network byte order */
+ ip46_from_addr_buf (mp->is_ipv6, mp->dst_address, &a->dst);
+ ip46_from_addr_buf (mp->is_ipv6, mp->src_address, &a->src);
+
+ /* Check src & dst are different */
+ if (ip46_address_cmp (&a->dst, &a->src) == 0)
+ {
+ rv = VNET_API_ERROR_SAME_SRC_DST;
+ goto out;
+ }
+ a->mcast_sw_if_index = ntohl (mp->mcast_sw_if_index);
+ if (ip46_address_is_multicast (&a->dst) &&
+ pool_is_free_index (vnm->interface_main.sw_interfaces,
+ a->mcast_sw_if_index))
+ {
+ rv = VNET_API_ERROR_INVALID_SW_IF_INDEX;
+ goto out;
+ }
+ a->encap_fib_index = encap_fib_index;
+ a->decap_next_index = ntohl (mp->decap_next_index);
+ a->vni = ntohl (mp->vni);
+ rv = vnet_vxlan_add_del_tunnel (a, &sw_if_index);
+
+out:
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_VXLAN_ADD_DEL_TUNNEL_REPLY,
+ ({
+ rmp->sw_if_index = ntohl (sw_if_index);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void send_vxlan_tunnel_details
+ (vxlan_tunnel_t * t, unix_shared_memory_queue_t * q, u32 context)
+{
+ vl_api_vxlan_tunnel_details_t *rmp;
+ ip4_main_t *im4 = &ip4_main;
+ ip6_main_t *im6 = &ip6_main;
+ u8 is_ipv6 = !ip46_address_is_ip4 (&t->dst);
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ memset (rmp, 0, sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs (VL_API_VXLAN_TUNNEL_DETAILS);
+ if (is_ipv6)
+ {
+ memcpy (rmp->src_address, t->src.ip6.as_u8, 16);
+ memcpy (rmp->dst_address, t->dst.ip6.as_u8, 16);
+ rmp->encap_vrf_id = htonl (im6->fibs[t->encap_fib_index].ft_table_id);
+ }
+ else
+ {
+ memcpy (rmp->src_address, t->src.ip4.as_u8, 4);
+ memcpy (rmp->dst_address, t->dst.ip4.as_u8, 4);
+ rmp->encap_vrf_id = htonl (im4->fibs[t->encap_fib_index].ft_table_id);
+ }
+ rmp->mcast_sw_if_index = htonl (t->mcast_sw_if_index);
+ rmp->vni = htonl (t->vni);
+ rmp->decap_next_index = htonl (t->decap_next_index);
+ rmp->sw_if_index = htonl (t->sw_if_index);
+ rmp->is_ipv6 = is_ipv6;
+ rmp->context = context;
+
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+}
+
+static void vl_api_vxlan_tunnel_dump_t_handler
+ (vl_api_vxlan_tunnel_dump_t * mp)
+{
+ unix_shared_memory_queue_t *q;
+ vxlan_main_t *vxm = &vxlan_main;
+ vxlan_tunnel_t *t;
+ u32 sw_if_index;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ {
+ return;
+ }
+
+ sw_if_index = ntohl (mp->sw_if_index);
+
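+  /* an sw_if_index of ~0 requests all tunnels; otherwise dump only
+   * the tunnel bound to that interface */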
+ if (~0 == sw_if_index)
+ {
+ /* *INDENT-OFF* */
+ pool_foreach (t, vxm->tunnels,
+ ({
+ send_vxlan_tunnel_details(t, q, mp->context);
+ }));
+ /* *INDENT-ON* */
+ }
+ else
+ {
+ if ((sw_if_index >= vec_len (vxm->tunnel_index_by_sw_if_index)) ||
+ (~0 == vxm->tunnel_index_by_sw_if_index[sw_if_index]))
+ {
+ return;
+ }
+ t = &vxm->tunnels[vxm->tunnel_index_by_sw_if_index[sw_if_index]];
+ send_vxlan_tunnel_details (t, q, mp->context);
+ }
+}
+
+static void
+vl_api_l2_patch_add_del_t_handler (vl_api_l2_patch_add_del_t * mp)
+{
+ extern int vnet_l2_patch_add_del (u32 rx_sw_if_index, u32 tx_sw_if_index,
+ int is_add);
+ vl_api_l2_patch_add_del_reply_t *rmp;
+ int rv = 0;
+
+ VALIDATE_RX_SW_IF_INDEX (mp);
+ VALIDATE_TX_SW_IF_INDEX (mp);
+
+ rv = vnet_l2_patch_add_del (ntohl (mp->rx_sw_if_index),
+ ntohl (mp->tx_sw_if_index),
+ (int) (mp->is_add != 0));
+
+ BAD_RX_SW_IF_INDEX_LABEL;
+ BAD_TX_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_L2_PATCH_ADD_DEL_REPLY);
+}
+
+static void
+ vl_api_vxlan_gpe_add_del_tunnel_t_handler
+ (vl_api_vxlan_gpe_add_del_tunnel_t * mp)
+{
+ vl_api_vxlan_gpe_add_del_tunnel_reply_t *rmp;
+ int rv = 0;
+ vnet_vxlan_gpe_add_del_tunnel_args_t _a, *a = &_a;
+ u32 encap_fib_index, decap_fib_index;
+ u8 protocol;
+ uword *p;
+ ip4_main_t *im = &ip4_main;
+ u32 sw_if_index = ~0;
+
+
+ p = hash_get (im->fib_index_by_table_id, ntohl (mp->encap_vrf_id));
+ if (!p)
+ {
+ rv = VNET_API_ERROR_NO_SUCH_FIB;
+ goto out;
+ }
+ encap_fib_index = p[0];
+
+ protocol = mp->protocol;
+
+ /* Interpret decap_vrf_id as an opaque if sending to other-than-ip4-input */
+ if (protocol == VXLAN_GPE_INPUT_NEXT_IP4_INPUT)
+ {
+ p = hash_get (im->fib_index_by_table_id, ntohl (mp->decap_vrf_id));
+ if (!p)
+ {
+ rv = VNET_API_ERROR_NO_SUCH_INNER_FIB;
+ goto out;
+ }
+ decap_fib_index = p[0];
+ }
+ else
+ {
+ decap_fib_index = ntohl (mp->decap_vrf_id);
+ }
+
+ /* Check src & dst are different */
+ if ((mp->is_ipv6 && memcmp (mp->local, mp->remote, 16) == 0) ||
+ (!mp->is_ipv6 && memcmp (mp->local, mp->remote, 4) == 0))
+ {
+ rv = VNET_API_ERROR_SAME_SRC_DST;
+ goto out;
+ }
+ memset (a, 0, sizeof (*a));
+
+ a->is_add = mp->is_add;
+ a->is_ip6 = mp->is_ipv6;
+ /* ip addresses sent in network byte order */
+ if (a->is_ip6)
+ {
+ clib_memcpy (&(a->local.ip6), mp->local, 16);
+ clib_memcpy (&(a->remote.ip6), mp->remote, 16);
+ }
+ else
+ {
+ clib_memcpy (&(a->local.ip4), mp->local, 4);
+ clib_memcpy (&(a->remote.ip4), mp->remote, 4);
+ }
+ a->encap_fib_index = encap_fib_index;
+ a->decap_fib_index = decap_fib_index;
+ a->protocol = protocol;
+ a->vni = ntohl (mp->vni);
+ rv = vnet_vxlan_gpe_add_del_tunnel (a, &sw_if_index);
+
+out:
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_VXLAN_GPE_ADD_DEL_TUNNEL_REPLY,
+ ({
+ rmp->sw_if_index = ntohl (sw_if_index);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void send_vxlan_gpe_tunnel_details
+ (vxlan_gpe_tunnel_t * t, unix_shared_memory_queue_t * q, u32 context)
+{
+ vl_api_vxlan_gpe_tunnel_details_t *rmp;
+ ip4_main_t *im4 = &ip4_main;
+ ip6_main_t *im6 = &ip6_main;
+ u8 is_ipv6 = !(t->flags & VXLAN_GPE_TUNNEL_IS_IPV4);
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ memset (rmp, 0, sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs (VL_API_VXLAN_GPE_TUNNEL_DETAILS);
+ if (is_ipv6)
+ {
+ memcpy (rmp->local, &(t->local.ip6), 16);
+ memcpy (rmp->remote, &(t->remote.ip6), 16);
+ rmp->encap_vrf_id = htonl (im6->fibs[t->encap_fib_index].ft_table_id);
+ rmp->decap_vrf_id = htonl (im6->fibs[t->decap_fib_index].ft_table_id);
+ }
+ else
+ {
+ memcpy (rmp->local, &(t->local.ip4), 4);
+ memcpy (rmp->remote, &(t->remote.ip4), 4);
+ rmp->encap_vrf_id = htonl (im4->fibs[t->encap_fib_index].ft_table_id);
+ rmp->decap_vrf_id = htonl (im4->fibs[t->decap_fib_index].ft_table_id);
+ }
+ rmp->vni = htonl (t->vni);
+ rmp->protocol = t->protocol;
+ rmp->sw_if_index = htonl (t->sw_if_index);
+ rmp->is_ipv6 = is_ipv6;
+ rmp->context = context;
+
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+}
+
+static void vl_api_vxlan_gpe_tunnel_dump_t_handler
+ (vl_api_vxlan_gpe_tunnel_dump_t * mp)
+{
+ unix_shared_memory_queue_t *q;
+ vxlan_gpe_main_t *vgm = &vxlan_gpe_main;
+ vxlan_gpe_tunnel_t *t;
+ u32 sw_if_index;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ {
+ return;
+ }
+
+ sw_if_index = ntohl (mp->sw_if_index);
+
+ if (~0 == sw_if_index)
+ {
+ /* *INDENT-OFF* */
+ pool_foreach (t, vgm->tunnels,
+ ({
+ send_vxlan_gpe_tunnel_details(t, q, mp->context);
+ }));
+ /* *INDENT-ON* */
+ }
+ else
+ {
+ if ((sw_if_index >= vec_len (vgm->tunnel_index_by_sw_if_index)) ||
+ (~0 == vgm->tunnel_index_by_sw_if_index[sw_if_index]))
+ {
+ return;
+ }
+ t = &vgm->tunnels[vgm->tunnel_index_by_sw_if_index[sw_if_index]];
+ send_vxlan_gpe_tunnel_details (t, q, mp->context);
+ }
+}
+
+static void
+vl_api_interface_name_renumber_t_handler (vl_api_interface_name_renumber_t *
+ mp)
+{
+ vl_api_interface_name_renumber_reply_t *rmp;
+ int rv = 0;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ rv = vnet_interface_name_renumber
+ (ntohl (mp->sw_if_index), ntohl (mp->new_show_dev_instance));
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_INTERFACE_NAME_RENUMBER_REPLY);
+}
+
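+/* Update the stored ARP event for this registration.  Returns 1 when
+ * the event should be suppressed (stale pool index, or a repeat of
+ * the previous MAC/interface and, for the MAC/IP case, the same
+ * address within 10 seconds); returns 0 after recording the new
+ * data. */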
+static int
+arp_change_data_callback (u32 pool_index, u8 * new_mac,
+ u32 sw_if_index, u32 address)
+{
+ vpe_api_main_t *am = &vpe_api_main;
+ vlib_main_t *vm = am->vlib_main;
+ vl_api_ip4_arp_event_t *event;
+ static f64 arp_event_last_time;
+ f64 now = vlib_time_now (vm);
+
+ if (pool_is_free_index (am->arp_events, pool_index))
+ return 1;
+
+ event = pool_elt_at_index (am->arp_events, pool_index);
+ /* *INDENT-OFF* */
+ if (memcmp (&event->new_mac, new_mac, sizeof (event->new_mac)))
+ {
+ clib_memcpy (event->new_mac, new_mac, sizeof (event->new_mac));
+ }
+ else
+ { /* same mac */
+ if (sw_if_index == event->sw_if_index &&
+ (!event->mac_ip ||
+ /* for BD case, also check IP address with 10 sec timeout */
+ (address == event->address &&
+ (now - arp_event_last_time) < 10.0)))
+ return 1;
+ }
+ /* *INDENT-ON* */
+
+ arp_event_last_time = now;
+ event->sw_if_index = sw_if_index;
+ if (event->mac_ip)
+ event->address = address;
+ return 0;
+}
+
+static int
+nd_change_data_callback (u32 pool_index, u8 * new_mac,
+ u32 sw_if_index, ip6_address_t * address)
+{
+ vpe_api_main_t *am = &vpe_api_main;
+ vlib_main_t *vm = am->vlib_main;
+ vl_api_ip6_nd_event_t *event;
+ static f64 nd_event_last_time;
+ f64 now = vlib_time_now (vm);
+
+ if (pool_is_free_index (am->nd_events, pool_index))
+ return 1;
+
+ event = pool_elt_at_index (am->nd_events, pool_index);
+
+ /* *INDENT-OFF* */
+ if (memcmp (&event->new_mac, new_mac, sizeof (event->new_mac)))
+ {
+ clib_memcpy (event->new_mac, new_mac, sizeof (event->new_mac));
+ }
+ else
+ { /* same mac */
+ if (sw_if_index == event->sw_if_index &&
+ (!event->mac_ip ||
+ /* for BD case, also check IP address with 10 sec timeout */
+ (ip6_address_is_equal (address,
+ (ip6_address_t *) event->address) &&
+ (now - nd_event_last_time) < 10.0)))
+ return 1;
+ }
+ /* *INDENT-ON* */
+
+ nd_event_last_time = now;
+ event->sw_if_index = sw_if_index;
+ if (event->mac_ip)
+ clib_memcpy (event->address, address, sizeof (event->address));
+ return 0;
+}
+
+static int
+arp_change_delete_callback (u32 pool_index, u8 * notused)
+{
+ vpe_api_main_t *am = &vpe_api_main;
+
+ if (pool_is_free_index (am->arp_events, pool_index))
+ return 1;
+
+ pool_put_index (am->arp_events, pool_index);
+ return 0;
+}
+
+static int
+nd_change_delete_callback (u32 pool_index, u8 * notused)
+{
+ vpe_api_main_t *am = &vpe_api_main;
+
+ if (pool_is_free_index (am->nd_events, pool_index))
+ return 1;
+
+ pool_put_index (am->nd_events, pool_index);
+ return 0;
+}
+
+static void
+vl_api_want_ip4_arp_events_t_handler (vl_api_want_ip4_arp_events_t * mp)
+{
+ vpe_api_main_t *am = &vpe_api_main;
+ vnet_main_t *vnm = vnet_get_main ();
+ vl_api_want_ip4_arp_events_reply_t *rmp;
+ vl_api_ip4_arp_event_t *event;
+ int rv;
+
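+  /* A zero address registers for MAC/IP binding events (the "BD case"
+   * tested in arp_change_data_callback) instead of events for a
+   * single IP address. */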
+ if (mp->enable_disable)
+ {
+ pool_get (am->arp_events, event);
+ memset (event, 0, sizeof (*event));
+
+ event->_vl_msg_id = ntohs (VL_API_IP4_ARP_EVENT);
+ event->client_index = mp->client_index;
+ event->context = mp->context;
+ event->address = mp->address;
+ event->pid = mp->pid;
+ if (mp->address == 0)
+ event->mac_ip = 1;
+
+ rv = vnet_add_del_ip4_arp_change_event
+ (vnm, arp_change_data_callback,
+ mp->pid, &mp->address /* addr, in net byte order */ ,
+ vpe_resolver_process_node.index,
+ IP4_ARP_EVENT, event - am->arp_events, 1 /* is_add */ );
+ }
+ else
+ {
+ rv = vnet_add_del_ip4_arp_change_event
+ (vnm, arp_change_delete_callback,
+ mp->pid, &mp->address /* addr, in net byte order */ ,
+ vpe_resolver_process_node.index,
+ IP4_ARP_EVENT, ~0 /* pool index */ , 0 /* is_add */ );
+ }
+ REPLY_MACRO (VL_API_WANT_IP4_ARP_EVENTS_REPLY);
+}
+
+static void
+vl_api_want_ip6_nd_events_t_handler (vl_api_want_ip6_nd_events_t * mp)
+{
+ vpe_api_main_t *am = &vpe_api_main;
+ vnet_main_t *vnm = vnet_get_main ();
+ vl_api_want_ip6_nd_events_reply_t *rmp;
+ vl_api_ip6_nd_event_t *event;
+ int rv;
+
+ if (mp->enable_disable)
+ {
+ pool_get (am->nd_events, event);
+ memset (event, 0, sizeof (*event));
+
+ event->_vl_msg_id = ntohs (VL_API_IP6_ND_EVENT);
+ event->client_index = mp->client_index;
+ event->context = mp->context;
+ clib_memcpy (event->address, mp->address, 16);
+ event->pid = mp->pid;
+ if (ip6_address_is_zero ((ip6_address_t *) mp->address))
+ event->mac_ip = 1;
+
+ rv = vnet_add_del_ip6_nd_change_event
+ (vnm, nd_change_data_callback,
+ mp->pid, mp->address /* addr, in net byte order */ ,
+ vpe_resolver_process_node.index,
+ IP6_ND_EVENT, event - am->nd_events, 1 /* is_add */ );
+ }
+ else
+ {
+ rv = vnet_add_del_ip6_nd_change_event
+ (vnm, nd_change_delete_callback,
+ mp->pid, mp->address /* addr, in net byte order */ ,
+ vpe_resolver_process_node.index,
+ IP6_ND_EVENT, ~0 /* pool index */ , 0 /* is_add */ );
+ }
+ REPLY_MACRO (VL_API_WANT_IP6_ND_EVENTS_REPLY);
+}
+
+static void vl_api_input_acl_set_interface_t_handler
+ (vl_api_input_acl_set_interface_t * mp)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vl_api_input_acl_set_interface_reply_t *rmp;
+ int rv;
+ u32 sw_if_index, ip4_table_index, ip6_table_index, l2_table_index;
+
+ ip4_table_index = ntohl (mp->ip4_table_index);
+ ip6_table_index = ntohl (mp->ip6_table_index);
+ l2_table_index = ntohl (mp->l2_table_index);
+ sw_if_index = ntohl (mp->sw_if_index);
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ rv = vnet_set_input_acl_intfc (vm, sw_if_index, ip4_table_index,
+ ip6_table_index, l2_table_index, mp->is_add);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_INPUT_ACL_SET_INTERFACE_REPLY);
+}
+
+static void vl_api_cop_interface_enable_disable_t_handler
+ (vl_api_cop_interface_enable_disable_t * mp)
+{
+ vl_api_cop_interface_enable_disable_reply_t *rmp;
+ int rv;
+ u32 sw_if_index = ntohl (mp->sw_if_index);
+ int enable_disable;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ enable_disable = (int) mp->enable_disable;
+
+ rv = cop_interface_enable_disable (sw_if_index, enable_disable);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_COP_INTERFACE_ENABLE_DISABLE_REPLY);
+}
+
+static void vl_api_cop_whitelist_enable_disable_t_handler
+ (vl_api_cop_whitelist_enable_disable_t * mp)
+{
+ vl_api_cop_whitelist_enable_disable_reply_t *rmp;
+ cop_whitelist_enable_disable_args_t _a, *a = &_a;
+ u32 sw_if_index = ntohl (mp->sw_if_index);
+ int rv;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ a->sw_if_index = sw_if_index;
+ a->ip4 = mp->ip4;
+ a->ip6 = mp->ip6;
+ a->default_cop = mp->default_cop;
+ a->fib_id = ntohl (mp->fib_id);
+
+ rv = cop_whitelist_enable_disable (a);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_COP_WHITELIST_ENABLE_DISABLE_REPLY);
+}
+
+static void
+vl_api_get_node_graph_t_handler (vl_api_get_node_graph_t * mp)
+{
+ int rv = 0;
+ u8 *vector = 0;
+ api_main_t *am = &api_main;
+ vlib_main_t *vm = vlib_get_main ();
+ void *oldheap;
+ vl_api_get_node_graph_reply_t *rmp;
+
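+  /* Serialize the node graph into a vector allocated on the API
+   * shared-memory heap so the client can read it directly via
+   * reply_in_shmem. */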
+ pthread_mutex_lock (&am->vlib_rp->mutex);
+ oldheap = svm_push_data_heap (am->vlib_rp);
+
+ /*
+ * Keep the number of memcpy ops to a minimum (e.g. 1).
+ */
+ vec_validate (vector, 16384);
+ vec_reset_length (vector);
+
+ /* $$$$ FIXME */
+ vector = vlib_node_serialize (&vm->node_main, vector,
+ (u32) ~ 0 /* all threads */ ,
+ 1 /* include nexts */ ,
+ 1 /* include stats */ );
+
+ svm_pop_heap (oldheap);
+ pthread_mutex_unlock (&am->vlib_rp->mutex);
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_GET_NODE_GRAPH_REPLY,
+ ({
+ rmp->reply_in_shmem = (uword) vector;
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+vl_api_ioam_enable_t_handler (vl_api_ioam_enable_t * mp)
+{
+ int rv = 0;
+ vl_api_ioam_enable_reply_t *rmp;
+ clib_error_t *error;
+
+  /* Only a single profile is currently supported, so the profile id
+   * is ignored */
+ error = ip6_ioam_enable (mp->trace_enable, mp->pot_enable,
+ mp->seqno, mp->analyse);
+ if (error)
+ {
+ clib_error_report (error);
+ rv = clib_error_get_code (error);
+ }
+
+ REPLY_MACRO (VL_API_IOAM_ENABLE_REPLY);
+}
+
+static void
+vl_api_ioam_disable_t_handler (vl_api_ioam_disable_t * mp)
+{
+ int rv = 0;
+ vl_api_ioam_disable_reply_t *rmp;
+ clib_error_t *error;
+
+ error = clear_ioam_rewrite_fn ();
+ if (error)
+ {
+ clib_error_report (error);
+ rv = clib_error_get_code (error);
+ }
+
+ REPLY_MACRO (VL_API_IOAM_DISABLE_REPLY);
+}
+
+static void
+vl_api_policer_add_del_t_handler (vl_api_policer_add_del_t * mp)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vl_api_policer_add_del_reply_t *rmp;
+ int rv = 0;
+ u8 *name = NULL;
+ sse2_qos_pol_cfg_params_st cfg;
+ clib_error_t *error;
+ u32 policer_index;
+
+ name = format (0, "%s", mp->name);
+
+ memset (&cfg, 0, sizeof (cfg));
+ cfg.rfc = mp->type;
+ cfg.rnd_type = mp->round_type;
+ cfg.rate_type = mp->rate_type;
+ cfg.rb.kbps.cir_kbps = mp->cir;
+ cfg.rb.kbps.eir_kbps = mp->eir;
+ cfg.rb.kbps.cb_bytes = mp->cb;
+ cfg.rb.kbps.eb_bytes = mp->eb;
+ cfg.conform_action.action_type = mp->conform_action_type;
+ cfg.conform_action.dscp = mp->conform_dscp;
+ cfg.exceed_action.action_type = mp->exceed_action_type;
+ cfg.exceed_action.dscp = mp->exceed_dscp;
+ cfg.violate_action.action_type = mp->violate_action_type;
+ cfg.violate_action.dscp = mp->violate_dscp;
+ cfg.color_aware = mp->color_aware;
+
+ error = policer_add_del (vm, name, &cfg, &policer_index, mp->is_add);
+
+ if (error)
+ rv = VNET_API_ERROR_UNSPECIFIED;
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_POLICER_ADD_DEL_REPLY,
+ ({
+ if (rv == 0 && mp->is_add)
+ rmp->policer_index = ntohl(policer_index);
+ else
+ rmp->policer_index = ~0;
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+send_policer_details (u8 * name,
+ sse2_qos_pol_cfg_params_st * config,
+ policer_read_response_type_st * templ,
+ unix_shared_memory_queue_t * q, u32 context)
+{
+ vl_api_policer_details_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_POLICER_DETAILS);
+ mp->context = context;
+ mp->cir = htonl (config->rb.kbps.cir_kbps);
+ mp->eir = htonl (config->rb.kbps.eir_kbps);
+ mp->cb = htonl (config->rb.kbps.cb_bytes);
+ mp->eb = htonl (config->rb.kbps.eb_bytes);
+ mp->rate_type = config->rate_type;
+ mp->round_type = config->rnd_type;
+ mp->type = config->rfc;
+ mp->conform_action_type = config->conform_action.action_type;
+ mp->conform_dscp = config->conform_action.dscp;
+ mp->exceed_action_type = config->exceed_action.action_type;
+ mp->exceed_dscp = config->exceed_action.dscp;
+ mp->violate_action_type = config->violate_action.action_type;
+ mp->violate_dscp = config->violate_action.dscp;
+ mp->single_rate = templ->single_rate ? 1 : 0;
+ mp->color_aware = templ->color_aware ? 1 : 0;
+ mp->scale = htonl (templ->scale);
+ mp->cir_tokens_per_period = htonl (templ->cir_tokens_per_period);
+ mp->pir_tokens_per_period = htonl (templ->pir_tokens_per_period);
+ mp->current_limit = htonl (templ->current_limit);
+ mp->current_bucket = htonl (templ->current_bucket);
+ mp->extended_limit = htonl (templ->extended_limit);
+ mp->extended_bucket = htonl (templ->extended_bucket);
+ mp->last_update_time = clib_host_to_net_u64 (templ->last_update_time);
+
+ strncpy ((char *) mp->name, (char *) name, ARRAY_LEN (mp->name) - 1);
+
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+}
+
+static void
+vl_api_policer_dump_t_handler (vl_api_policer_dump_t * mp)
+{
+ unix_shared_memory_queue_t *q;
+ vnet_policer_main_t *pm = &vnet_policer_main;
+ hash_pair_t *hp;
+ uword *p;
+ u32 pool_index;
+ u8 *match_name = 0;
+ u8 *name;
+ sse2_qos_pol_cfg_params_st *config;
+ policer_read_response_type_st *templ;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ return;
+
+ if (mp->match_name_valid)
+ {
+ match_name = format (0, "%s%c", mp->match_name, 0);
+ }
+
+ if (mp->match_name_valid)
+ {
+ p = hash_get_mem (pm->policer_config_by_name, match_name);
+ if (p)
+ {
+ pool_index = p[0];
+ config = pool_elt_at_index (pm->configs, pool_index);
+ templ = pool_elt_at_index (pm->policer_templates, pool_index);
+ send_policer_details (match_name, config, templ, q, mp->context);
+ }
+ }
+ else
+ {
+ /* *INDENT-OFF* */
+ hash_foreach_pair (hp, pm->policer_config_by_name,
+ ({
+ name = (u8 *) hp->key;
+ pool_index = hp->value[0];
+ config = pool_elt_at_index (pm->configs, pool_index);
+ templ = pool_elt_at_index (pm->policer_templates, pool_index);
+ send_policer_details(name, config, templ, q, mp->context);
+ }));
+ /* *INDENT-ON* */
+ }
+}
+
+static void
+ vl_api_policer_classify_set_interface_t_handler
+ (vl_api_policer_classify_set_interface_t * mp)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vl_api_policer_classify_set_interface_reply_t *rmp;
+ int rv;
+ u32 sw_if_index, ip4_table_index, ip6_table_index, l2_table_index;
+
+ ip4_table_index = ntohl (mp->ip4_table_index);
+ ip6_table_index = ntohl (mp->ip6_table_index);
+ l2_table_index = ntohl (mp->l2_table_index);
+ sw_if_index = ntohl (mp->sw_if_index);
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ rv = vnet_set_policer_classify_intfc (vm, sw_if_index, ip4_table_index,
+ ip6_table_index, l2_table_index,
+ mp->is_add);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_POLICER_CLASSIFY_SET_INTERFACE_REPLY);
+}
+
+static void
+send_policer_classify_details (u32 sw_if_index,
+ u32 table_index,
+ unix_shared_memory_queue_t * q, u32 context)
+{
+ vl_api_policer_classify_details_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_POLICER_CLASSIFY_DETAILS);
+ mp->context = context;
+ mp->sw_if_index = htonl (sw_if_index);
+ mp->table_index = htonl (table_index);
+
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+}
+
+static void
+vl_api_policer_classify_dump_t_handler (vl_api_policer_classify_dump_t * mp)
+{
+ unix_shared_memory_queue_t *q;
+ policer_classify_main_t *pcm = &policer_classify_main;
+ u32 *vec_tbl;
+ int i;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ return;
+
+ vec_tbl = pcm->classify_table_index_by_sw_if_index[mp->type];
+
+ if (vec_len (vec_tbl))
+ {
+ for (i = 0; i < vec_len (vec_tbl); i++)
+ {
+ if (vec_elt (vec_tbl, i) == ~0)
+ continue;
+
+ send_policer_classify_details (i, vec_elt (vec_tbl, i), q,
+ mp->context);
+ }
+ }
+}
+
+static void
+vl_api_mpls_tunnel_details_t_handler (vl_api_mpls_tunnel_details_t * mp)
+{
+ clib_warning ("BUG");
+}
+
+typedef struct mpls_tunnel_send_walk_ctx_t_
+{
+ unix_shared_memory_queue_t *q;
+ u32 index;
+ u32 context;
+} mpls_tunnel_send_walk_ctx_t;
+
+static void
+send_mpls_tunnel_entry (u32 mti, void *arg)
+{
+ mpls_tunnel_send_walk_ctx_t *ctx;
+ vl_api_mpls_tunnel_details_t *mp;
+ const mpls_tunnel_t *mt;
+ u32 nlabels;
+
+ ctx = arg;
+
+ if (~0 != ctx->index && mti != ctx->index)
+ return;
+
+ mt = mpls_tunnel_get (mti);
+ nlabels = vec_len (mt->mt_label_stack);
+
+ mp = vl_msg_api_alloc (sizeof (*mp) + nlabels * sizeof (u32));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_MPLS_TUNNEL_DETAILS);
+ mp->context = ctx->context;
+
+ mp->tunnel_index = ntohl (mti);
+ memcpy (mp->mt_next_hop_out_labels,
+ mt->mt_label_stack, nlabels * sizeof (u32));
+
+ // FIXME
+
+ vl_msg_api_send_shmem (ctx->q, (u8 *) & mp);
+}
+
+static void
+vl_api_mpls_tunnel_dump_t_handler (vl_api_mpls_tunnel_dump_t * mp)
+{
+ unix_shared_memory_queue_t *q;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ return;
+
+ mpls_tunnel_send_walk_ctx_t ctx = {
+ .q = q,
+ .index = ntohl (mp->tunnel_index),
+ .context = mp->context,
+ };
+ mpls_tunnel_walk (send_mpls_tunnel_entry, &ctx);
+}
+
+static void
+vl_api_mpls_fib_details_t_handler (vl_api_mpls_fib_details_t * mp)
+{
+ clib_warning ("BUG");
+}
+
+static void
+vl_api_mpls_fib_details_t_endian (vl_api_mpls_fib_details_t * mp)
+{
+ clib_warning ("BUG");
+}
+
+static void
+vl_api_mpls_fib_details_t_print (vl_api_mpls_fib_details_t * mp)
+{
+ clib_warning ("BUG");
+}
+
+static void
+send_mpls_fib_details (vpe_api_main_t * am,
+ unix_shared_memory_queue_t * q,
+ u32 table_id, u32 label, u32 eos,
+ fib_route_path_encode_t * api_rpaths, u32 context)
+{
+ vl_api_mpls_fib_details_t *mp;
+ fib_route_path_encode_t *api_rpath;
+ vl_api_fib_path2_t *fp;
+ int path_count;
+
+ path_count = vec_len (api_rpaths);
+ mp = vl_msg_api_alloc (sizeof (*mp) + path_count * sizeof (*fp));
+ if (!mp)
+ return;
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_MPLS_FIB_DETAILS);
+ mp->context = context;
+
+ mp->table_id = htonl (table_id);
+ mp->eos_bit = eos;
+ mp->label = htonl (label);
+
+ mp->count = htonl (path_count);
+ fp = mp->path;
+ vec_foreach (api_rpath, api_rpaths)
+ {
+ memset (fp, 0, sizeof (*fp));
+ fp->weight = htonl (api_rpath->rpath.frp_weight);
+ fp->sw_if_index = htonl (api_rpath->rpath.frp_sw_if_index);
+ copy_fib_next_hop (api_rpath, fp);
+ fp++;
+ }
+
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+}
+
+static void
+vl_api_mpls_fib_dump_t_handler (vl_api_mpls_fib_dump_t * mp)
+{
+ vpe_api_main_t *am = &vpe_api_main;
+ unix_shared_memory_queue_t *q;
+ mpls_main_t *mm = &mpls_main;
+ fib_table_t *fib_table;
+ fib_node_index_t lfei, *lfeip, *lfeis = NULL;
+ mpls_label_t key;
+ fib_prefix_t pfx;
+ u32 fib_index;
+ fib_route_path_encode_t *api_rpaths;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ return;
+
+ /* *INDENT-OFF* */
+ pool_foreach (fib_table, mm->fibs,
+ ({
+ hash_foreach(key, lfei, fib_table->mpls.mf_entries,
+ ({
+ vec_add1(lfeis, lfei);
+ }));
+ }));
+ vec_sort_with_function(lfeis, fib_entry_cmp_for_sort);
+
+ vec_foreach(lfeip, lfeis)
+ {
+ fib_entry_get_prefix(*lfeip, &pfx);
+ fib_index = fib_entry_get_fib_index(*lfeip);
+ fib_table = fib_table_get(fib_index, pfx.fp_proto);
+ api_rpaths = NULL;
+ fib_entry_encode(*lfeip, &api_rpaths);
+ send_mpls_fib_details (am, q,
+ fib_table->ft_table_id,
+ pfx.fp_label,
+ pfx.fp_eos,
+ api_rpaths,
+ mp->context);
+ vec_free(api_rpaths);
+ }
+
+ vec_free (lfeis);
+}
+
+static void
+vl_api_classify_table_ids_t_handler (vl_api_classify_table_ids_t * mp)
+{
+ unix_shared_memory_queue_t *q;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ return;
+
+ vnet_classify_main_t *cm = &vnet_classify_main;
+ vnet_classify_table_t *t;
+ u32 *table_ids = 0;
+ u32 count;
+
+ /* *INDENT-OFF* */
+ pool_foreach (t, cm->tables,
+ ({
+ vec_add1 (table_ids, ntohl(t - cm->tables));
+ }));
+ /* *INDENT-ON* */
+ count = vec_len (table_ids);
+
+ vl_api_classify_table_ids_reply_t *rmp;
+ rmp = vl_msg_api_alloc_as_if_client (sizeof (*rmp) + count * sizeof (u32));
+ rmp->_vl_msg_id = ntohs (VL_API_CLASSIFY_TABLE_IDS_REPLY);
+ rmp->context = mp->context;
+ rmp->count = ntohl (count);
+ clib_memcpy (rmp->ids, table_ids, count * sizeof (u32));
+ rmp->retval = 0;
+
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+
+ vec_free (table_ids);
+}
+
+static void
+ vl_api_classify_table_by_interface_t_handler
+ (vl_api_classify_table_by_interface_t * mp)
+{
+ vl_api_classify_table_by_interface_reply_t *rmp;
+ int rv = 0;
+
+ u32 sw_if_index = ntohl (mp->sw_if_index);
+ u32 *acl = 0;
+
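+  /* one slot per input-ACL table type (L2/IP4/IP6); ~0 means no
+   * classify table is attached for that type */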
+ vec_validate (acl, INPUT_ACL_N_TABLES - 1);
+ vec_set (acl, ~0);
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ input_acl_main_t *am = &input_acl_main;
+
+ int if_idx;
+ u32 type;
+
+ for (type = 0; type < INPUT_ACL_N_TABLES; type++)
+ {
+ u32 *vec_tbl = am->classify_table_index_by_sw_if_index[type];
+ if (vec_len (vec_tbl))
+ {
+ for (if_idx = 0; if_idx < vec_len (vec_tbl); if_idx++)
+ {
+ if (vec_elt (vec_tbl, if_idx) == ~0 || sw_if_index != if_idx)
+ {
+ continue;
+ }
+ acl[type] = vec_elt (vec_tbl, if_idx);
+ }
+ }
+ }
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_CLASSIFY_TABLE_BY_INTERFACE_REPLY,
+ ({
+ rmp->sw_if_index = ntohl(sw_if_index);
+ rmp->l2_table_id = ntohl(acl[INPUT_ACL_TABLE_L2]);
+ rmp->ip4_table_id = ntohl(acl[INPUT_ACL_TABLE_IP4]);
+ rmp->ip6_table_id = ntohl(acl[INPUT_ACL_TABLE_IP6]);
+ }));
+ /* *INDENT-ON* */
+ vec_free (acl);
+}
+
+static void
+vl_api_classify_table_info_t_handler (vl_api_classify_table_info_t * mp)
+{
+ unix_shared_memory_queue_t *q;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ return;
+
+ vl_api_classify_table_info_reply_t *rmp = 0;
+
+ vnet_classify_main_t *cm = &vnet_classify_main;
+ u32 table_id = ntohl (mp->table_id);
+ vnet_classify_table_t *t;
+
+ /* *INDENT-OFF* */
+ pool_foreach (t, cm->tables,
+ ({
+ if (table_id == t - cm->tables)
+ {
+ rmp = vl_msg_api_alloc_as_if_client
+ (sizeof (*rmp) + t->match_n_vectors * sizeof (u32x4));
+ rmp->_vl_msg_id = ntohs (VL_API_CLASSIFY_TABLE_INFO_REPLY);
+ rmp->context = mp->context;
+ rmp->table_id = ntohl(table_id);
+ rmp->nbuckets = ntohl(t->nbuckets);
+ rmp->match_n_vectors = ntohl(t->match_n_vectors);
+ rmp->skip_n_vectors = ntohl(t->skip_n_vectors);
+ rmp->active_sessions = ntohl(t->active_elements);
+ rmp->next_table_index = ntohl(t->next_table_index);
+ rmp->miss_next_index = ntohl(t->miss_next_index);
+ rmp->mask_length = ntohl(t->match_n_vectors * sizeof (u32x4));
+ clib_memcpy(rmp->mask, t->mask, t->match_n_vectors * sizeof(u32x4));
+ rmp->retval = 0;
+ break;
+ }
+ }));
+ /* *INDENT-ON* */
+
+ if (rmp == 0)
+ {
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs ((VL_API_CLASSIFY_TABLE_INFO_REPLY));
+ rmp->context = mp->context;
+ rmp->retval = ntohl (VNET_API_ERROR_CLASSIFY_TABLE_NOT_FOUND);
+ }
+
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+}
+
+static void
+vl_api_classify_session_details_t_handler (vl_api_classify_session_details_t *
+ mp)
+{
+ clib_warning ("BUG");
+}
+
+static void
+send_classify_session_details (unix_shared_memory_queue_t * q,
+ u32 table_id,
+ u32 match_length,
+ vnet_classify_entry_t * e, u32 context)
+{
+ vl_api_classify_session_details_t *rmp;
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ memset (rmp, 0, sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs (VL_API_CLASSIFY_SESSION_DETAILS);
+ rmp->context = context;
+ rmp->table_id = ntohl (table_id);
+ rmp->hit_next_index = ntohl (e->next_index);
+ rmp->advance = ntohl (e->advance);
+ rmp->opaque_index = ntohl (e->opaque_index);
+ rmp->match_length = ntohl (match_length);
+ clib_memcpy (rmp->match, e->key, match_length);
+
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+}
+
+static void
+vl_api_classify_session_dump_t_handler (vl_api_classify_session_dump_t * mp)
+{
+ vnet_classify_main_t *cm = &vnet_classify_main;
+ unix_shared_memory_queue_t *q;
+
+ u32 table_id = ntohl (mp->table_id);
+ vnet_classify_table_t *t;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (!q)
+ return;
+
+ /* *INDENT-OFF* */
+ pool_foreach (t, cm->tables,
+ ({
+ if (table_id == t - cm->tables)
+ {
+ vnet_classify_bucket_t * b;
+ vnet_classify_entry_t * v, * save_v;
+ int i, j, k;
+
+ for (i = 0; i < t->nbuckets; i++)
+ {
+ b = &t->buckets [i];
+ if (b->offset == 0)
+ continue;
+
+ save_v = vnet_classify_get_entry (t, b->offset);
+ for (j = 0; j < (1<<b->log2_pages); j++)
+ {
+ for (k = 0; k < t->entries_per_page; k++)
+ {
+ v = vnet_classify_entry_at_index
+ (t, save_v, j*t->entries_per_page + k);
+ if (vnet_classify_entry_is_free (v))
+ continue;
+
+ send_classify_session_details
+ (q, table_id, t->match_n_vectors * sizeof (u32x4),
+ v, mp->context);
+ }
+ }
+ }
+ break;
+ }
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+vl_api_set_ipfix_exporter_t_handler (vl_api_set_ipfix_exporter_t * mp)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ flow_report_main_t *frm = &flow_report_main;
+ vl_api_set_ipfix_exporter_reply_t *rmp;
+ ip4_address_t collector, src;
+ u16 collector_port = UDP_DST_PORT_ipfix;
+ u32 path_mtu;
+ u32 template_interval;
+ u8 udp_checksum;
+ u32 fib_id;
+ u32 fib_index = ~0;
+ int rv = 0;
+
+ memcpy (collector.data, mp->collector_address, sizeof (collector.data));
+ collector_port = ntohs (mp->collector_port);
+ if (collector_port == (u16) ~ 0)
+ collector_port = UDP_DST_PORT_ipfix;
+ memcpy (src.data, mp->src_address, sizeof (src.data));
+ fib_id = ntohl (mp->vrf_id);
+
+ ip4_main_t *im = &ip4_main;
+ if (fib_id == ~0)
+ {
+ fib_index = ~0;
+ }
+ else
+ {
+ uword *p = hash_get (im->fib_index_by_table_id, fib_id);
+ if (!p)
+ {
+ rv = VNET_API_ERROR_NO_SUCH_FIB;
+ goto out;
+ }
+ fib_index = p[0];
+ }
+
+ path_mtu = ntohl (mp->path_mtu);
+ if (path_mtu == ~0)
+ path_mtu = 512; // RFC 7011 section 10.3.3.
+ template_interval = ntohl (mp->template_interval);
+ if (template_interval == ~0)
+ template_interval = 20;
+ udp_checksum = mp->udp_checksum;
+
+ if (collector.as_u32 == 0)
+ {
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ goto out;
+ }
+
+ if (src.as_u32 == 0)
+ {
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ goto out;
+ }
+
+ if (path_mtu > 1450 /* vpp does not support fragmentation */ )
+ {
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ goto out;
+ }
+
+ if (path_mtu < 68)
+ {
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ goto out;
+ }
+
+ /* Reset report streams if we are reconfiguring IP addresses */
+ if (frm->ipfix_collector.as_u32 != collector.as_u32 ||
+ frm->src_address.as_u32 != src.as_u32 ||
+ frm->collector_port != collector_port)
+ vnet_flow_reports_reset (frm);
+
+ frm->ipfix_collector.as_u32 = collector.as_u32;
+ frm->collector_port = collector_port;
+ frm->src_address.as_u32 = src.as_u32;
+ frm->fib_index = fib_index;
+ frm->path_mtu = path_mtu;
+ frm->template_interval = template_interval;
+ frm->udp_checksum = udp_checksum;
+
+ /* Turn on the flow reporting process */
+ vlib_process_signal_event (vm, flow_report_process_node.index, 1, 0);
+
+out:
+ REPLY_MACRO (VL_API_SET_IPFIX_EXPORTER_REPLY);
+}
+
+static void
+vl_api_ipfix_exporter_dump_t_handler (vl_api_ipfix_exporter_dump_t * mp)
+{
+ flow_report_main_t *frm = &flow_report_main;
+ unix_shared_memory_queue_t *q;
+ vl_api_ipfix_exporter_details_t *rmp;
+ ip4_main_t *im = &ip4_main;
+ u32 vrf_id;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (!q)
+ return;
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ memset (rmp, 0, sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs (VL_API_IPFIX_EXPORTER_DETAILS);
+ rmp->context = mp->context;
+ memcpy (rmp->collector_address, frm->ipfix_collector.data,
+ sizeof (frm->ipfix_collector.data));
+ rmp->collector_port = htons (frm->collector_port);
+ memcpy (rmp->src_address, frm->src_address.data,
+ sizeof (frm->src_address.data));
+ if (frm->fib_index == ~0)
+ vrf_id = ~0;
+ else
+ vrf_id = im->fibs[frm->fib_index].ft_table_id;
+ rmp->vrf_id = htonl (vrf_id);
+ rmp->path_mtu = htonl (frm->path_mtu);
+ rmp->template_interval = htonl (frm->template_interval);
+ rmp->udp_checksum = (frm->udp_checksum != 0);
+
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+}
+
+static void
+ vl_api_set_ipfix_classify_stream_t_handler
+ (vl_api_set_ipfix_classify_stream_t * mp)
+{
+ vl_api_set_ipfix_classify_stream_reply_t *rmp;
+ flow_report_classify_main_t *fcm = &flow_report_classify_main;
+ flow_report_main_t *frm = &flow_report_main;
+ u32 domain_id = 0;
+ u32 src_port = UDP_DST_PORT_ipfix;
+ int rv = 0;
+
+ domain_id = ntohl (mp->domain_id);
+ src_port = ntohs (mp->src_port);
+
+ if (fcm->src_port != 0 &&
+ (fcm->domain_id != domain_id || fcm->src_port != (u16) src_port))
+ {
+ int rv = vnet_stream_change (frm, fcm->domain_id, fcm->src_port,
+ domain_id, (u16) src_port);
+ ASSERT (rv == 0);
+ }
+
+ fcm->domain_id = domain_id;
+ fcm->src_port = (u16) src_port;
+
+ REPLY_MACRO (VL_API_SET_IPFIX_CLASSIFY_STREAM_REPLY);
+}
+
+static void
+ vl_api_ipfix_classify_stream_dump_t_handler
+ (vl_api_ipfix_classify_stream_dump_t * mp)
+{
+ flow_report_classify_main_t *fcm = &flow_report_classify_main;
+ unix_shared_memory_queue_t *q;
+ vl_api_ipfix_classify_stream_details_t *rmp;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (!q)
+ return;
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ memset (rmp, 0, sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs (VL_API_IPFIX_CLASSIFY_STREAM_DETAILS);
+ rmp->context = mp->context;
+ rmp->domain_id = htonl (fcm->domain_id);
+ rmp->src_port = htons (fcm->src_port);
+
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+}
+
+static void
+ vl_api_ipfix_classify_table_add_del_t_handler
+ (vl_api_ipfix_classify_table_add_del_t * mp)
+{
+ vl_api_ipfix_classify_table_add_del_reply_t *rmp;
+ flow_report_classify_main_t *fcm = &flow_report_classify_main;
+ flow_report_main_t *frm = &flow_report_main;
+ vnet_flow_report_add_del_args_t args;
+ ipfix_classify_table_t *table;
+ int is_add;
+ u32 classify_table_index;
+ u8 ip_version;
+ u8 transport_protocol;
+ int rv = 0;
+
+ classify_table_index = ntohl (mp->table_id);
+ ip_version = mp->ip_version;
+ transport_protocol = mp->transport_protocol;
+ is_add = mp->is_add;
+
+ if (fcm->src_port == 0)
+ {
+ /* call set_ipfix_classify_stream first */
+ rv = VNET_API_ERROR_UNSPECIFIED;
+ goto out;
+ }
+
+ memset (&args, 0, sizeof (args));
+
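+  /* look for an existing ipfix classify table bound to this classify
+   * table index */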
+ table = 0;
+ int i;
+ for (i = 0; i < vec_len (fcm->tables); i++)
+ if (ipfix_classify_table_index_valid (i))
+ if (fcm->tables[i].classify_table_index == classify_table_index)
+ {
+ table = &fcm->tables[i];
+ break;
+ }
+
+ if (is_add)
+ {
+ if (table)
+ {
+ rv = VNET_API_ERROR_VALUE_EXIST;
+ goto out;
+ }
+ table = ipfix_classify_add_table ();
+ table->classify_table_index = classify_table_index;
+ }
+ else
+ {
+ if (!table)
+ {
+ rv = VNET_API_ERROR_NO_SUCH_ENTRY;
+ goto out;
+ }
+ }
+
+ table->ip_version = ip_version;
+ table->transport_protocol = transport_protocol;
+
+ args.opaque.as_uword = table - fcm->tables;
+ args.rewrite_callback = ipfix_classify_template_rewrite;
+ args.flow_data_callback = ipfix_classify_send_flows;
+ args.is_add = is_add;
+ args.domain_id = fcm->domain_id;
+ args.src_port = fcm->src_port;
+
+ rv = vnet_flow_report_add_del (frm, &args);
+
+ /* If deleting, or add failed */
+ if (is_add == 0 || (rv && is_add))
+ ipfix_classify_delete_table (table - fcm->tables);
+
+out:
+  REPLY_MACRO (VL_API_IPFIX_CLASSIFY_TABLE_ADD_DEL_REPLY);
+}
+
+static void
+send_ipfix_classify_table_details (u32 table_index,
+ unix_shared_memory_queue_t * q,
+ u32 context)
+{
+ flow_report_classify_main_t *fcm = &flow_report_classify_main;
+ vl_api_ipfix_classify_table_details_t *mp;
+
+ ipfix_classify_table_t *table = &fcm->tables[table_index];
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_IPFIX_CLASSIFY_TABLE_DETAILS);
+ mp->context = context;
+ mp->table_id = htonl (table->classify_table_index);
+ mp->ip_version = table->ip_version;
+ mp->transport_protocol = table->transport_protocol;
+
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+}
+
+static void
+ vl_api_ipfix_classify_table_dump_t_handler
+ (vl_api_ipfix_classify_table_dump_t * mp)
+{
+ flow_report_classify_main_t *fcm = &flow_report_classify_main;
+ unix_shared_memory_queue_t *q;
+ u32 i;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (!q)
+ return;
+
+ for (i = 0; i < vec_len (fcm->tables); i++)
+ if (ipfix_classify_table_index_valid (i))
+ send_ipfix_classify_table_details (i, q, mp->context);
+}
+
+static void
+vl_api_pg_create_interface_t_handler (vl_api_pg_create_interface_t * mp)
+{
+ vl_api_pg_create_interface_reply_t *rmp;
+ int rv = 0;
+
+ pg_main_t *pg = &pg_main;
+ u32 pg_if_id = pg_interface_add_or_get (pg, ntohl (mp->interface_id));
+ pg_interface_t *pi = pool_elt_at_index (pg->interfaces, pg_if_id);
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_PG_CREATE_INTERFACE_REPLY,
+ ({
+ rmp->sw_if_index = ntohl(pi->sw_if_index);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+vl_api_pg_capture_t_handler (vl_api_pg_capture_t * mp)
+{
+ vl_api_pg_capture_reply_t *rmp;
+ int rv = 0;
+
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_interface_main_t *im = &vnm->interface_main;
+ vnet_hw_interface_t *hi = 0;
+
+ u8 *intf_name = format (0, "pg%d", ntohl (mp->interface_id), 0);
+ u32 hw_if_index = ~0;
+ uword *p = hash_get_mem (im->hw_interface_by_name, intf_name);
+ if (p)
+ hw_if_index = *p;
+ vec_free (intf_name);
+
+ if (hw_if_index != ~0)
+ {
+ pg_capture_args_t _a, *a = &_a;
+
+ u32 len = ntohl (mp->pcap_name_length);
+ u8 *pcap_file_name = vec_new (u8, len);
+ clib_memcpy (pcap_file_name, mp->pcap_file_name, len);
+
+ hi = vnet_get_sup_hw_interface (vnm, hw_if_index);
+ a->hw_if_index = hw_if_index;
+ a->dev_instance = hi->dev_instance;
+ a->is_enabled = mp->is_enabled;
+ a->pcap_file_name = pcap_file_name;
+ a->count = ntohl (mp->count);
+
+ clib_error_t *e = pg_capture (a);
+ if (e)
+ {
+ clib_error_report (e);
+ rv = VNET_API_ERROR_CANNOT_CREATE_PCAP_FILE;
+ }
+
+ vec_free (pcap_file_name);
+ }
+ REPLY_MACRO (VL_API_PG_CAPTURE_REPLY);
+}
+
+static void
+vl_api_pg_enable_disable_t_handler (vl_api_pg_enable_disable_t * mp)
+{
+ vl_api_pg_enable_disable_reply_t *rmp;
+ int rv = 0;
+
+ pg_main_t *pg = &pg_main;
+ u32 stream_index = ~0;
+
+ int is_enable = mp->is_enabled != 0;
+ u32 len = ntohl (mp->stream_name_length) - 1;
+
+ if (len > 0)
+ {
+ u8 *stream_name = vec_new (u8, len);
+ clib_memcpy (stream_name, mp->stream_name, len);
+ uword *p = hash_get_mem (pg->stream_index_by_name, stream_name);
+ if (p)
+ stream_index = *p;
+ vec_free (stream_name);
+ }
+
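+  /* a stream_index of ~0 (no or unknown stream name) applies the
+   * enable/disable to all packet-generator streams */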
+ pg_enable_disable (stream_index, is_enable);
+
+ REPLY_MACRO (VL_API_PG_ENABLE_DISABLE_REPLY);
+}
+
+static void
+ vl_api_ip_source_and_port_range_check_add_del_t_handler
+ (vl_api_ip_source_and_port_range_check_add_del_t * mp)
+{
+ vl_api_ip_source_and_port_range_check_add_del_reply_t *rmp;
+ int rv = 0;
+
+ u8 is_ipv6 = mp->is_ipv6;
+ u8 is_add = mp->is_add;
+ u8 mask_length = mp->mask_length;
+ ip4_address_t ip4_addr;
+ ip6_address_t ip6_addr;
+ u16 *low_ports = 0;
+ u16 *high_ports = 0;
+ u32 vrf_id;
+ u16 tmp_low, tmp_high;
+ u8 num_ranges;
+ int i;
+
+ // Validate port range
+ num_ranges = mp->number_of_ranges;
+ if (num_ranges > 32)
+ { // This is size of array in VPE.API
+ rv = VNET_API_ERROR_EXCEEDED_NUMBER_OF_RANGES_CAPACITY;
+ goto reply;
+ }
+
+ vec_reset_length (low_ports);
+ vec_reset_length (high_ports);
+
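+  /* validate each (low, high) pair; the upper bound is stored
+   * exclusive (+1), matching the [low, high) convention presumably
+   * used by the range-check code */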
+ for (i = 0; i < num_ranges; i++)
+ {
+ tmp_low = mp->low_ports[i];
+ tmp_high = mp->high_ports[i];
+      /* once low <= high is established, it is enough to check
+       * low != 0 and high <= 65535 */
+ if (tmp_low > tmp_high || tmp_low == 0 || tmp_high > 65535)
+ {
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ goto reply;
+ }
+ vec_add1 (low_ports, tmp_low);
+ vec_add1 (high_ports, tmp_high + 1);
+ }
+
+ // Validate mask_length
+ if ((is_ipv6 && mask_length > 128) || (!is_ipv6 && mask_length > 32))
+ {
+ rv = VNET_API_ERROR_ADDRESS_LENGTH_MISMATCH;
+ goto reply;
+ }
+
+ vrf_id = ntohl (mp->vrf_id);
+
+ if (vrf_id < 1)
+ {
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ goto reply;
+ }
+
+
+ if (is_ipv6)
+ {
+ clib_memcpy (ip6_addr.as_u8, mp->address, sizeof (ip6_addr.as_u8));
+ rv = ip6_source_and_port_range_check_add_del (&ip6_addr,
+ mask_length,
+ vrf_id,
+ low_ports,
+ high_ports, is_add);
+ }
+ else
+ {
+ clib_memcpy (ip4_addr.data, mp->address, sizeof (ip4_addr));
+ rv = ip4_source_and_port_range_check_add_del (&ip4_addr,
+ mask_length,
+ vrf_id,
+ low_ports,
+ high_ports, is_add);
+ }
+
+reply:
+ vec_free (low_ports);
+ vec_free (high_ports);
+ REPLY_MACRO (VL_API_IP_SOURCE_AND_PORT_RANGE_CHECK_ADD_DEL_REPLY);
+}
+
+static void
+ vl_api_ip_source_and_port_range_check_interface_add_del_t_handler
+ (vl_api_ip_source_and_port_range_check_interface_add_del_t * mp)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vl_api_ip_source_and_port_range_check_interface_add_del_reply_t *rmp;
+ ip4_main_t *im = &ip4_main;
+ int rv;
+ u32 sw_if_index;
+ u32 fib_index[IP_SOURCE_AND_PORT_RANGE_CHECK_N_PROTOCOLS];
+ u32 vrf_id[IP_SOURCE_AND_PORT_RANGE_CHECK_N_PROTOCOLS];
+ uword *p = 0;
+ int i;
+
+ vrf_id[IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_TCP_OUT] =
+ ntohl (mp->tcp_out_vrf_id);
+ vrf_id[IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_UDP_OUT] =
+ ntohl (mp->udp_out_vrf_id);
+ vrf_id[IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_TCP_IN] =
+ ntohl (mp->tcp_in_vrf_id);
+ vrf_id[IP_SOURCE_AND_PORT_RANGE_CHECK_PROTOCOL_UDP_IN] =
+ ntohl (mp->udp_in_vrf_id);
+
+
+ for (i = 0; i < IP_SOURCE_AND_PORT_RANGE_CHECK_N_PROTOCOLS; i++)
+ {
+ if (vrf_id[i] != 0 && vrf_id[i] != ~0)
+ {
+ p = hash_get (im->fib_index_by_table_id, vrf_id[i]);
+
+ if (p == 0)
+ {
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ goto reply;
+ }
+
+ fib_index[i] = p[0];
+ }
+ else
+ fib_index[i] = ~0;
+ }
+ sw_if_index = ntohl (mp->sw_if_index);
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ rv =
+ set_ip_source_and_port_range_check (vm, fib_index, sw_if_index,
+ mp->is_add);
+
+ BAD_SW_IF_INDEX_LABEL;
+reply:
+
+ REPLY_MACRO (VL_API_IP_SOURCE_AND_PORT_RANGE_CHECK_INTERFACE_ADD_DEL_REPLY);
+}
+
+static void
+vl_api_delete_subif_t_handler (vl_api_delete_subif_t * mp)
+{
+ vl_api_delete_subif_reply_t *rmp;
+ int rv;
+
+ rv = vnet_delete_sub_interface (ntohl (mp->sw_if_index));
+
+ REPLY_MACRO (VL_API_DELETE_SUBIF_REPLY);
+}
+
+static void
+ vl_api_l2_interface_pbb_tag_rewrite_t_handler
+ (vl_api_l2_interface_pbb_tag_rewrite_t * mp)
+{
+ vl_api_l2_interface_pbb_tag_rewrite_reply_t *rmp;
+ vnet_main_t *vnm = vnet_get_main ();
+ vlib_main_t *vm = vlib_get_main ();
+ u32 vtr_op;
+ int rv = 0;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ vtr_op = ntohl (mp->vtr_op);
+
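+  /* Only the tag-rewrite operations meaningful for PBB are accepted */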
+ switch (vtr_op)
+ {
+ case L2_VTR_DISABLED:
+ case L2_VTR_PUSH_2:
+ case L2_VTR_POP_2:
+ case L2_VTR_TRANSLATE_2_1:
+ break;
+
+ default:
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ goto bad_sw_if_index;
+ }
+
+ rv = l2pbb_configure (vm, vnm, ntohl (mp->sw_if_index), vtr_op,
+ mp->b_dmac, mp->b_smac, ntohs (mp->b_vlanid),
+ ntohl (mp->i_sid), ntohs (mp->outer_tag));
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_L2_INTERFACE_PBB_TAG_REWRITE_REPLY);
+
+}
+
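+/*
+ * Add or remove a punt registration for the given L4 port. Any error
+ * returned by vnet_punt_add_del is reported on the console and mapped
+ * to a generic failure code in the reply.
+ */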
+static void
+vl_api_punt_t_handler (vl_api_punt_t * mp)
+{
+ vl_api_punt_reply_t *rmp;
+ vlib_main_t *vm = vlib_get_main ();
+ int rv = 0;
+ clib_error_t *error;
+
+ error = vnet_punt_add_del (vm, mp->ipv, mp->l4_protocol,
+ ntohs (mp->l4_port), mp->is_add);
+ if (error)
+ {
+ rv = -1;
+ clib_error_report (error);
+ }
+
+ REPLY_MACRO (VL_API_PUNT_REPLY);
+}
+
+static void
+ vl_api_flow_classify_set_interface_t_handler
+ (vl_api_flow_classify_set_interface_t * mp)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vl_api_flow_classify_set_interface_reply_t *rmp;
+ int rv;
+ u32 sw_if_index, ip4_table_index, ip6_table_index;
+
+ ip4_table_index = ntohl (mp->ip4_table_index);
+ ip6_table_index = ntohl (mp->ip6_table_index);
+ sw_if_index = ntohl (mp->sw_if_index);
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ rv = vnet_set_flow_classify_intfc (vm, sw_if_index, ip4_table_index,
+ ip6_table_index, mp->is_add);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_FLOW_CLASSIFY_SET_INTERFACE_REPLY);
+}
+
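+/* Send one flow_classify_details record to the client's shared-memory
+ * queue as part of a flow_classify_dump */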
+static void
+send_flow_classify_details (u32 sw_if_index,
+ u32 table_index,
+ unix_shared_memory_queue_t * q, u32 context)
+{
+ vl_api_flow_classify_details_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_FLOW_CLASSIFY_DETAILS);
+ mp->context = context;
+ mp->sw_if_index = htonl (sw_if_index);
+ mp->table_index = htonl (table_index);
+
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+}
+
+static void
+vl_api_flow_classify_dump_t_handler (vl_api_flow_classify_dump_t * mp)
+{
+ unix_shared_memory_queue_t *q;
+ flow_classify_main_t *pcm = &flow_classify_main;
+ u32 *vec_tbl;
+ int i;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ return;
+
+ vec_tbl = pcm->classify_table_index_by_sw_if_index[mp->type];
+
+ if (vec_len (vec_tbl))
+ {
+ for (i = 0; i < vec_len (vec_tbl); i++)
+ {
+ if (vec_elt (vec_tbl, i) == ~0)
+ continue;
+
+ send_flow_classify_details (i, vec_elt (vec_tbl, i), q,
+ mp->context);
+ }
+ }
+}
+
+static void
+vl_api_feature_enable_disable_t_handler (vl_api_feature_enable_disable_t * mp)
+{
+ vl_api_feature_enable_disable_reply_t *rmp;
+ int rv = 0;
+ u8 *arc_name, *feature_name;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ arc_name = format (0, "%s%c", mp->arc_name, 0);
+ feature_name = format (0, "%s%c", mp->feature_name, 0);
+
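+  /* Look up the feature registration so that any per-feature
+   * enable/disable callback can run before the arc is updated */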
+ vnet_feature_registration_t *reg;
+ reg =
+ vnet_get_feature_reg ((const char *) arc_name,
+ (const char *) feature_name);
+ if (reg == 0)
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ else
+ {
+ u32 sw_if_index;
+ clib_error_t *error = 0;
+
+ sw_if_index = ntohl (mp->sw_if_index);
+ if (reg->enable_disable_cb)
+ error = reg->enable_disable_cb (sw_if_index, mp->enable);
+ if (!error)
+ vnet_feature_enable_disable ((const char *) arc_name,
+ (const char *) feature_name,
+ sw_if_index, mp->enable, 0, 0);
+ else
+ {
+ clib_error_report (error);
+ rv = VNET_API_ERROR_CANNOT_ENABLE_DISABLE_FEATURE;
+ }
+ }
+
+ vec_free (feature_name);
+ vec_free (arc_name);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_FEATURE_ENABLE_DISABLE_REPLY);
+}
+
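+/*
+ * Generate a pass-through handler which forwards the message to the
+ * single registered client queue for the given registration pool,
+ * dropping the message if that queue is already full.
+ */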
+#define BOUNCE_HANDLER(nn) \
+static void vl_api_##nn##_t_handler ( \
+ vl_api_##nn##_t *mp) \
+{ \
+ vpe_client_registration_t *reg; \
+ vpe_api_main_t * vam = &vpe_api_main; \
+ unix_shared_memory_queue_t * q; \
+ \
+ /* One registration only... */ \
+ pool_foreach(reg, vam->nn##_registrations, \
+ ({ \
+ q = vl_api_client_index_to_input_queue (reg->client_index); \
+ if (q) { \
+ /* \
+ * If the queue is stuffed, turf the msg and complain \
+ * It's unlikely that the intended recipient is \
+ * alive; avoid deadlock at all costs. \
+ */ \
+ if (q->cursize == q->maxsize) { \
+ clib_warning ("ERROR: receiver queue full, drop msg"); \
+ vl_msg_api_free (mp); \
+ return; \
+ } \
+ vl_msg_api_send_shmem (q, (u8 *)&mp); \
+ return; \
+ } \
+ })); \
+ vl_msg_api_free (mp); \
+}
+
+static void setup_message_id_table (api_main_t * am);
+
+/*
+ * vpe_api_hookup
+ * Add vpe's API message handlers to the table.
+ * vlib has already mapped shared memory and
+ * added the client registration handlers.
+ * See .../open-repo/vlib/memclnt_vlib.c:memclnt_process()
+ */
+static clib_error_t *
+vpe_api_hookup (vlib_main_t * vm)
+{
+ api_main_t *am = &api_main;
+
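+  /* Register a handler, endian converter and printer for every message
+   * listed in foreach_vpe_api_msg; the trailing 1 enables API tracing
+   * for the message */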
+#define _(N,n) \
+ vl_msg_api_set_handlers(VL_API_##N, #n, \
+ vl_api_##n##_t_handler, \
+ vl_noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_vpe_api_msg;
+#undef _
+
+ /*
+ * Manually register the sr tunnel add del msg, so we trace
+ * enough bytes to capture a typical segment list
+ */
+ vl_msg_api_set_handlers (VL_API_SR_TUNNEL_ADD_DEL,
+ "sr_tunnel_add_del",
+ vl_api_sr_tunnel_add_del_t_handler,
+ vl_noop_handler,
+ vl_api_sr_tunnel_add_del_t_endian,
+ vl_api_sr_tunnel_add_del_t_print, 256, 1);
+
+
+ /*
+ * Manually register the sr policy add del msg, so we trace
+ * enough bytes to capture a typical tunnel name list
+ */
+ vl_msg_api_set_handlers (VL_API_SR_POLICY_ADD_DEL,
+ "sr_policy_add_del",
+ vl_api_sr_policy_add_del_t_handler,
+ vl_noop_handler,
+ vl_api_sr_policy_add_del_t_endian,
+ vl_api_sr_policy_add_del_t_print, 256, 1);
+
+ /*
+ * Trace space for 8 MPLS encap labels, classifier mask+match
+ */
+ am->api_trace_cfg[VL_API_MPLS_TUNNEL_ADD_DEL].size += 8 * sizeof (u32);
+ am->api_trace_cfg[VL_API_CLASSIFY_ADD_DEL_TABLE].size += 5 * sizeof (u32x4);
+ am->api_trace_cfg[VL_API_CLASSIFY_ADD_DEL_SESSION].size
+ += 5 * sizeof (u32x4);
+ am->api_trace_cfg[VL_API_VXLAN_ADD_DEL_TUNNEL].size += 16 * sizeof (u32);
+
+ /*
+ * Thread-safe API messages
+ */
+ am->is_mp_safe[VL_API_IP_ADD_DEL_ROUTE] = 1;
+ am->is_mp_safe[VL_API_GET_NODE_GRAPH] = 1;
+
+ /*
+ * Set up the (msg_name, crc, message-id) table
+ */
+ setup_message_id_table (am);
+
+ return 0;
+}
+
+VLIB_API_INIT_FUNCTION (vpe_api_hookup);
+
+static clib_error_t *
+vpe_api_init (vlib_main_t * vm)
+{
+ vpe_api_main_t *am = &vpe_api_main;
+
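+  /* Each per-event registration hash maps a client index to its entry
+   * in the corresponding registration pool */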
+ am->vlib_main = vm;
+ am->vnet_main = vnet_get_main ();
+ am->interface_events_registration_hash = hash_create (0, sizeof (uword));
+ am->to_netconf_server_registration_hash = hash_create (0, sizeof (uword));
+ am->from_netconf_server_registration_hash = hash_create (0, sizeof (uword));
+ am->to_netconf_client_registration_hash = hash_create (0, sizeof (uword));
+ am->from_netconf_client_registration_hash = hash_create (0, sizeof (uword));
+ am->oam_events_registration_hash = hash_create (0, sizeof (uword));
+ am->bfd_events_registration_hash = hash_create (0, sizeof (uword));
+
+ vl_api_init (vm);
+ vl_set_memory_region_name ("/vpe-api");
+ vl_enable_disable_memory_api (vm, 1 /* enable it */ );
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (vpe_api_init);
+
+
+static clib_error_t *
+api_segment_config (vlib_main_t * vm, unformat_input_t * input)
+{
+ u8 *chroot_path;
+ u64 baseva, size, pvt_heap_size;
+ int uid, gid, rv;
+ const int max_buf_size = 4096;
+ char *s, *buf;
+ struct passwd _pw, *pw;
+ struct group _grp, *grp;
+ clib_error_t *e;
+ buf = vec_new (char, 128);
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "prefix %s", &chroot_path))
+ {
+ vec_add1 (chroot_path, 0);
+ vl_set_memory_root_path ((char *) chroot_path);
+ }
+ else if (unformat (input, "uid %d", &uid))
+ vl_set_memory_uid (uid);
+ else if (unformat (input, "gid %d", &gid))
+ vl_set_memory_gid (gid);
+ else if (unformat (input, "baseva %llx", &baseva))
+ vl_set_global_memory_baseva (baseva);
+ else if (unformat (input, "global-size %lldM", &size))
+ vl_set_global_memory_size (size * (1ULL << 20));
+ else if (unformat (input, "global-size %lldG", &size))
+ vl_set_global_memory_size (size * (1ULL << 30));
+ else if (unformat (input, "global-size %lld", &size))
+ vl_set_global_memory_size (size);
+ else if (unformat (input, "global-pvt-heap-size %lldM", &pvt_heap_size))
+ vl_set_global_pvt_heap_size (pvt_heap_size * (1ULL << 20));
+ else if (unformat (input, "global-pvt-heap-size size %lld",
+ &pvt_heap_size))
+ vl_set_global_pvt_heap_size (pvt_heap_size);
+ else if (unformat (input, "api-pvt-heap-size %lldM", &pvt_heap_size))
+ vl_set_api_pvt_heap_size (pvt_heap_size * (1ULL << 20));
+ else if (unformat (input, "api-pvt-heap-size size %lld",
+ &pvt_heap_size))
+ vl_set_api_pvt_heap_size (pvt_heap_size);
+ else if (unformat (input, "api-size %lldM", &size))
+ vl_set_api_memory_size (size * (1ULL << 20));
+ else if (unformat (input, "api-size %lldG", &size))
+ vl_set_api_memory_size (size * (1ULL << 30));
+ else if (unformat (input, "api-size %lld", &size))
+ vl_set_api_memory_size (size);
+ else if (unformat (input, "uid %s", &s))
+ {
+ /* lookup the username */
+ pw = NULL;
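+	  /* getpwnam_r reports ERANGE when the scratch buffer is too
+	   * small; double it and retry, up to max_buf_size */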
+ while (((rv =
+ getpwnam_r (s, &_pw, buf, vec_len (buf), &pw)) == ERANGE)
+ && (vec_len (buf) <= max_buf_size))
+ {
+ vec_resize (buf, vec_len (buf) * 2);
+ }
+	  if (rv != 0)
+ {
+ e = clib_error_return_code (0, rv,
+ CLIB_ERROR_ERRNO_VALID |
+ CLIB_ERROR_FATAL,
+ "cannot fetch username %s", s);
+ vec_free (s);
+ vec_free (buf);
+ return e;
+ }
+ if (pw == NULL)
+ {
+ e =
+ clib_error_return_fatal (0, "username %s does not exist", s);
+ vec_free (s);
+ vec_free (buf);
+ return e;
+ }
+ vec_free (s);
+ vl_set_memory_uid (pw->pw_uid);
+ }
+ else if (unformat (input, "gid %s", &s))
+ {
+ /* lookup the group name */
+ grp = NULL;
+ while (((rv =
+ getgrnam_r (s, &_grp, buf, vec_len (buf), &grp)) == ERANGE)
+ && (vec_len (buf) <= max_buf_size))
+ {
+ vec_resize (buf, vec_len (buf) * 2);
+ }
+ if (rv != 0)
+ {
+ e = clib_error_return_code (0, rv,
+ CLIB_ERROR_ERRNO_VALID |
+ CLIB_ERROR_FATAL,
+ "cannot fetch group %s", s);
+ vec_free (s);
+ vec_free (buf);
+ return e;
+ }
+ if (grp == NULL)
+ {
+ e = clib_error_return_fatal (0, "group %s does not exist", s);
+ vec_free (s);
+ vec_free (buf);
+ return e;
+ }
+ vec_free (s);
+ vec_free (buf);
+ vl_set_memory_gid (grp->gr_gid);
+ }
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ return 0;
+}
+
+VLIB_EARLY_CONFIG_FUNCTION (api_segment_config, "api-segment");
+
+void *
+get_unformat_vnet_sw_interface (void)
+{
+ return (void *) &unformat_vnet_sw_interface;
+}
+
+static u8 *
+format_arp_event (u8 * s, va_list * args)
+{
+ vl_api_ip4_arp_event_t *event = va_arg (*args, vl_api_ip4_arp_event_t *);
+
+ s = format (s, "pid %d: ", event->pid);
+ if (event->mac_ip)
+ s = format (s, "bd mac/ip4 binding events");
+ else
+ s = format (s, "resolution for %U", format_ip4_address, &event->address);
+ return s;
+}
+
+static u8 *
+format_nd_event (u8 * s, va_list * args)
+{
+ vl_api_ip6_nd_event_t *event = va_arg (*args, vl_api_ip6_nd_event_t *);
+
+ s = format (s, "pid %d: ", event->pid);
+ if (event->mac_ip)
+ s = format (s, "bd mac/ip6 binding events");
+ else
+ s = format (s, "resolution for %U", format_ip6_address, event->address);
+ return s;
+}
+
+static clib_error_t *
+show_ip_arp_nd_events_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vpe_api_main_t *am = &vpe_api_main;
+ vl_api_ip4_arp_event_t *arp_event;
+ vl_api_ip6_nd_event_t *nd_event;
+
+ if ((pool_elts (am->arp_events) == 0) && (pool_elts (am->nd_events) == 0))
+ {
+ vlib_cli_output (vm, "No active arp or nd event registrations");
+ return 0;
+ }
+
+ /* *INDENT-OFF* */
+ pool_foreach (arp_event, am->arp_events,
+ ({
+ vlib_cli_output (vm, "%U", format_arp_event, arp_event);
+ }));
+
+ pool_foreach (nd_event, am->nd_events,
+ ({
+ vlib_cli_output (vm, "%U", format_nd_event, nd_event);
+ }));
+ /* *INDENT-ON* */
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_ip_arp_nd_events, static) = {
+ .path = "show arp-nd-event registrations",
+ .function = show_ip_arp_nd_events_fn,
+ .short_help = "Show ip4 arp and ip6 nd event registrations",
+};
+/* *INDENT-ON* */
+
+#define vl_msg_name_crc_list
+#include <vpp/api/vpe_all_api_h.h>
+#undef vl_msg_name_crc_list
+
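+/* Populate the (message name + CRC) -> message-id table which clients
+ * use to resolve message ids at run time */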
+static void
+setup_message_id_table (api_main_t * am)
+{
+#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
+ foreach_vl_msg_name_crc_memclnt;
+ foreach_vl_msg_name_crc_vpe;
+#undef _
+}
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vpp/api/api_format.c b/src/vpp/api/api_format.c
new file mode 120000
index 00000000..cec72cc0
--- /dev/null
+++ b/src/vpp/api/api_format.c
@@ -0,0 +1 @@
+../../vat/api_format.c
\ No newline at end of file
diff --git a/src/vpp/api/api_main.c b/src/vpp/api/api_main.c
new file mode 100644
index 00000000..db532061
--- /dev/null
+++ b/src/vpp/api/api_main.c
@@ -0,0 +1,192 @@
+#include "vat.h"
+
+vat_main_t vat_main;
+
+void
+vat_suspend (vlib_main_t * vm, f64 interval)
+{
+ vlib_process_suspend (vm, interval);
+}
+
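+/* Map a vnet API error code to its descriptive string, falling back to
+ * the raw numeric value for unknown codes */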
+static u8 *
+format_api_error (u8 * s, va_list * args)
+{
+ vat_main_t *vam = va_arg (*args, vat_main_t *);
+ i32 error = va_arg (*args, u32);
+ uword *p;
+
+ p = hash_get (vam->error_string_by_error_number, -error);
+
+ if (p)
+ s = format (s, "%s", p[0]);
+ else
+ s = format (s, "%d", error);
+ return s;
+}
+
+
+static void
+init_error_string_table (vat_main_t * vam)
+{
+
+ vam->error_string_by_error_number = hash_create (0, sizeof (uword));
+
+#define _(n,v,s) hash_set (vam->error_string_by_error_number, -v, s);
+ foreach_vnet_api_error;
+#undef _
+
+ hash_set (vam->error_string_by_error_number, 99, "Misc");
+}
+
+static clib_error_t *
+api_main_init (vlib_main_t * vm)
+{
+ vat_main_t *vam = &vat_main;
+
+ vam->vlib_main = vm;
+ vam->my_client_index = (u32) ~ 0;
+ init_error_string_table (vam);
+ vat_api_hookup (vam);
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (api_main_init);
+
+static clib_error_t *
+api_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vat_main_t *vam = &vat_main;
+ unformat_input_t _input;
+ uword c;
+ u8 *cmdp, *argsp, *this_cmd;
+ uword *p;
+ u32 arg_len;
+ int rv;
+ int (*fp) (vat_main_t *);
+ api_main_t *am = &api_main;
+
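+  /* This VAT code is linked into the vpp image, so commands are sent
+   * straight to vpp's own shared-memory API input queue */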
+ vam->vl_input_queue = am->shmem_hdr->vl_input_queue;
+
+ vec_reset_length (vam->inbuf);
+ vam->input = &_input;
+
+ while (((c = unformat_get_input (input)) != '\n') &&
+ (c != UNFORMAT_END_OF_INPUT))
+ vec_add1 (vam->inbuf, c);
+
+ /* Add 1 octet's worth of extra space in case there are no args... */
+ vec_add1 (vam->inbuf, 0);
+
+ /*$$$$ reinstall macro evaluator */
+
+ /* Split input into cmd + args */
+ this_cmd = cmdp = vam->inbuf;
+
+ while (cmdp < (this_cmd + vec_len (this_cmd)))
+ {
+ if (*cmdp == ' ' || *cmdp == '\t' || *cmdp == '\n')
+ {
+ cmdp++;
+ }
+ else
+ break;
+ }
+
+ argsp = cmdp;
+ while (argsp < (this_cmd + vec_len (this_cmd)))
+ {
+ if (*argsp != ' ' && *argsp != '\t' && *argsp != '\n')
+ {
+ argsp++;
+ }
+ else
+ break;
+ }
+ *argsp++ = 0;
+
+ while (argsp < (this_cmd + vec_len (this_cmd)))
+ {
+ if (*argsp == ' ' || *argsp == '\t' || *argsp == '\n')
+ {
+ argsp++;
+ }
+ else
+ break;
+ }
+
+ /* Blank input line? */
+ if (*cmdp == 0)
+ return 0;
+
+ p = hash_get_mem (vam->function_by_name, cmdp);
+ if (p == 0)
+ {
+ return clib_error_return (0, "'%s': function not found\n", cmdp);
+ }
+
+ arg_len = strlen ((char *) argsp);
+
+ unformat_init_string (vam->input, (char *) argsp, arg_len);
+ fp = (void *) p[0];
+
+ rv = (*fp) (vam);
+
+  if (rv < 0)
+    {
+      unformat_free (vam->input);
+      return clib_error_return (0,
+				"%s error: %U\n", cmdp,
+				format_api_error, vam, rv);
+    }
+
+  if (vam->regenerate_interface_table)
+    {
+      vam->regenerate_interface_table = 0;
+      api_sw_interface_dump (vam);
+    }
+ unformat_free (vam->input);
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (api_command, static) =
+{
+ .path = "binary-api",
+ .short_help = "binary-api <name> [<args>]",
+ .function = api_command_fn,
+};
+/* *INDENT-ON* */
+
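+/* Output callback for VAT handlers: write to the current CLI process
+ * if one is running, otherwise to stdout */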
+void
+api_cli_output (void *notused, const char *fmt, ...)
+{
+ va_list va;
+ vat_main_t *vam = &vat_main;
+ vlib_main_t *vm = vam->vlib_main;
+ vlib_process_t *cp = vlib_get_current_process (vm);
+ u8 *s;
+
+ va_start (va, fmt);
+ s = va_format (0, fmt, &va);
+ va_end (va);
+
+ /* Terminate with \n if not present. */
+ if (vec_len (s) > 0 && s[vec_len (s) - 1] != '\n')
+ vec_add1 (s, '\n');
+
+ if ((!cp) || (!cp->output_function))
+ fformat (stdout, "%v", s);
+ else
+ cp->output_function (cp->output_function_arg, s, vec_len (s));
+
+ vec_free (s);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vpp/api/custom_dump.c b/src/vpp/api/custom_dump.c
new file mode 100644
index 00000000..1964533e
--- /dev/null
+++ b/src/vpp/api/custom_dump.c
@@ -0,0 +1,3139 @@
+/*
+ *------------------------------------------------------------------
+ * custom_dump.c - pretty-print API messages for replay
+ *
+ * Copyright (c) 2014-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/unix/tuntap.h>
+#include <vnet/mpls/mpls.h>
+#include <vnet/dhcp/proxy.h>
+#include <vnet/dhcpv6/proxy.h>
+#include <vnet/l2tp/l2tp.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/sr/sr_packet.h>
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <vnet/classify/policer_classify.h>
+#include <vnet/policer/xlate.h>
+#include <vnet/policer/policer.h>
+#include <vnet/classify/flow_classify.h>
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+#include <vnet/lisp-cp/lisp_types.h>
+
+#include <vpp/stats/stats.h>
+#include <vpp/oam/oam.h>
+
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/l2/l2_vtr.h>
+
+#include <vpp/api/vpe_msg_enum.h>
+
+#define vl_typedefs /* define message structures */
+#include <vpp/api/vpe_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define message structures */
+#include <vpp/api/vpe_all_api_h.h>
+#undef vl_endianfun
+
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+
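+/* Each print routine below builds its command text into the vector s;
+ * FINISH NUL-terminates it, prints it via the supplied handle, frees
+ * the vector and returns the handle */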
+#define FINISH \
+ vec_add1 (s, 0); \
+ vl_print (handle, (char *)s); \
+ vec_free (s); \
+ return handle;
+
+
+static void *vl_api_create_loopback_t_print
+ (vl_api_create_loopback_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: create_loopback ");
+ s = format (s, "mac %U ", format_ethernet_address, &mp->mac_address);
+
+ FINISH;
+}
+
+static void *vl_api_delete_loopback_t_print
+ (vl_api_delete_loopback_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: delete_loopback ");
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_set_flags_t_print
+ (vl_api_sw_interface_set_flags_t * mp, void *handle)
+{
+ u8 *s;
+ s = format (0, "SCRIPT: sw_interface_set_flags ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ if (mp->admin_up_down)
+ s = format (s, "admin-up ");
+ else
+ s = format (s, "admin-down ");
+
+ if (mp->link_up_down)
+ s = format (s, "link-up");
+ else
+ s = format (s, "link-down");
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_add_del_address_t_print
+ (vl_api_sw_interface_add_del_address_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: sw_interface_add_del_address ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ if (mp->is_ipv6)
+ s = format (s, "%U/%d ", format_ip6_address,
+ (ip6_address_t *) mp->address, mp->address_length);
+ else
+ s = format (s, "%U/%d ", format_ip4_address,
+ (ip4_address_t *) mp->address, mp->address_length);
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+ if (mp->del_all)
+ s = format (s, "del-all ");
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_set_table_t_print
+ (vl_api_sw_interface_set_table_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: sw_interface_set_table ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ if (mp->vrf_id)
+ s = format (s, "vrf %d ", ntohl (mp->vrf_id));
+
+ if (mp->is_ipv6)
+ s = format (s, "ipv6 ");
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_set_mpls_enable_t_print
+ (vl_api_sw_interface_set_mpls_enable_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: sw_interface_set_mpls_enable ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ if (mp->enable == 0)
+ s = format (s, "disable");
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_set_vpath_t_print
+ (vl_api_sw_interface_set_vpath_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: sw_interface_set_vpath ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ if (mp->enable)
+ s = format (s, "enable ");
+ else
+ s = format (s, "disable ");
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_set_vxlan_bypass_t_print
+ (vl_api_sw_interface_set_vxlan_bypass_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: sw_interface_set_vxlan_bypass ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ if (mp->is_ipv6)
+ s = format (s, "ip6");
+
+ if (mp->enable)
+ s = format (s, "enable ");
+ else
+ s = format (s, "disable ");
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_set_l2_xconnect_t_print
+ (vl_api_sw_interface_set_l2_xconnect_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: sw_interface_set_l2_xconnect ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->rx_sw_if_index));
+
+ if (mp->enable)
+ {
+ s = format (s, "tx_sw_if_index %d ", ntohl (mp->tx_sw_if_index));
+ }
+ else
+ s = format (s, "delete ");
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_set_l2_bridge_t_print
+ (vl_api_sw_interface_set_l2_bridge_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: sw_interface_set_l2_bridge ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->rx_sw_if_index));
+
+ if (mp->enable)
+ {
+ s = format (s, "bd_id %d shg %d %senable ", ntohl (mp->bd_id),
+ mp->shg, ((mp->bvi) ? "bvi " : " "));
+ }
+ else
+ s = format (s, "disable ");
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_set_dpdk_hqos_pipe_t_print
+ (vl_api_sw_interface_set_dpdk_hqos_pipe_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: sw_interface_set_dpdk_hqos_pipe ");
+
+ s = format (s, "sw_if_index %u ", ntohl (mp->sw_if_index));
+
+ s = format (s, "subport %u pipe %u profile %u ",
+ ntohl (mp->subport), ntohl (mp->pipe), ntohl (mp->profile));
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_set_dpdk_hqos_subport_t_print
+ (vl_api_sw_interface_set_dpdk_hqos_subport_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: sw_interface_set_dpdk_hqos_subport ");
+
+ s = format (s, "sw_if_index %u ", ntohl (mp->sw_if_index));
+
+ s =
+ format (s,
+ "subport %u rate %u bkt_size %u tc0 %u tc1 %u tc2 %u tc3 %u period %u",
+ ntohl (mp->subport), ntohl (mp->tb_rate), ntohl (mp->tb_size),
+ ntohl (mp->tc_rate[0]), ntohl (mp->tc_rate[1]),
+ ntohl (mp->tc_rate[2]), ntohl (mp->tc_rate[3]),
+ ntohl (mp->tc_period));
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_set_dpdk_hqos_tctbl_t_print
+ (vl_api_sw_interface_set_dpdk_hqos_tctbl_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: sw_interface_set_dpdk_hqos_tctbl ");
+
+ s = format (s, "sw_if_index %u ", ntohl (mp->sw_if_index));
+
+ s = format (s, "entry %u tc %u queue %u",
+ ntohl (mp->entry), ntohl (mp->tc), ntohl (mp->queue));
+
+ FINISH;
+}
+
+static void *vl_api_bridge_domain_add_del_t_print
+ (vl_api_bridge_domain_add_del_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: bridge_domain_add_del ");
+
+ s = format (s, "bd_id %d ", ntohl (mp->bd_id));
+
+ if (mp->is_add)
+ {
+ s = format (s, "flood %d uu-flood %d forward %d learn %d arp-term %d",
+ mp->flood, mp->uu_flood, mp->forward, mp->learn,
+ mp->arp_term);
+ }
+ else
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_bridge_domain_dump_t_print
+ (vl_api_bridge_domain_dump_t * mp, void *handle)
+{
+ u8 *s;
+ u32 bd_id = ntohl (mp->bd_id);
+
+ s = format (0, "SCRIPT: bridge_domain_dump ");
+
+ if (bd_id != ~0)
+ s = format (s, "bd_id %d ", bd_id);
+
+ FINISH;
+}
+
+static void *vl_api_l2fib_add_del_t_print
+ (vl_api_l2fib_add_del_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: l2fib_add_del ");
+
+ s = format (s, "mac %U ", format_ethernet_address, &mp->mac);
+
+ s = format (s, "bd_id %d ", ntohl (mp->bd_id));
+
+
+ if (mp->is_add)
+ {
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+ if (mp->static_mac)
+ s = format (s, "%s", "static ");
+ if (mp->filter_mac)
+ s = format (s, "%s", "filter ");
+ if (mp->bvi_mac)
+ s = format (s, "%s", "bvi ");
+ }
+ else
+ {
+ s = format (s, "del ");
+ }
+
+ FINISH;
+}
+
+static void *
+vl_api_l2_flags_t_print (vl_api_l2_flags_t * mp, void *handle)
+{
+ u8 *s;
+ u32 flags = ntohl (mp->feature_bitmap);
+
+ s = format (0, "SCRIPT: l2_flags ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+#define _(a,b) \
+ if (flags & L2INPUT_FEAT_ ## a) s = format (s, #a " ");
+ foreach_l2input_feat;
+#undef _
+
+ FINISH;
+}
+
+static void *vl_api_bridge_flags_t_print
+ (vl_api_bridge_flags_t * mp, void *handle)
+{
+ u8 *s;
+ u32 flags = ntohl (mp->feature_bitmap);
+
+ s = format (0, "SCRIPT: bridge_flags ");
+
+ s = format (s, "bd_id %d ", ntohl (mp->bd_id));
+
+ if (flags & L2_LEARN)
+ s = format (s, "learn ");
+ if (flags & L2_FWD)
+ s = format (s, "forward ");
+ if (flags & L2_FLOOD)
+ s = format (s, "flood ");
+ if (flags & L2_UU_FLOOD)
+ s = format (s, "uu-flood ");
+ if (flags & L2_ARP_TERM)
+ s = format (s, "arp-term ");
+
+ if (mp->is_set == 0)
+ s = format (s, "clear ");
+
+ FINISH;
+}
+
+static void *vl_api_bd_ip_mac_add_del_t_print
+ (vl_api_bd_ip_mac_add_del_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: bd_ip_mac_add_del ");
+ s = format (s, "bd_id %d ", ntohl (mp->bd_id));
+
+ if (mp->is_ipv6)
+ s = format (s, "%U ", format_ip6_address,
+ (ip6_address_t *) mp->ip_address);
+ else
+ s = format (s, "%U ", format_ip4_address,
+ (ip4_address_t *) mp->ip_address);
+
+ s = format (s, "%U ", format_ethernet_address, mp->mac_address);
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_tap_connect_t_print
+ (vl_api_tap_connect_t * mp, void *handle)
+{
+ u8 *s;
+ u8 null_mac[6];
+
+ memset (null_mac, 0, sizeof (null_mac));
+
+ s = format (0, "SCRIPT: tap_connect ");
+ s = format (s, "tapname %s ", mp->tap_name);
+ if (mp->use_random_mac)
+ s = format (s, "random-mac ");
+ if (mp->tag[0])
+ s = format (s, "tag %s ", mp->tag);
+ if (memcmp (mp->mac_address, null_mac, 6))
+ s = format (s, "mac %U ", format_ethernet_address, mp->mac_address);
+
+ FINISH;
+}
+
+static void *vl_api_tap_modify_t_print
+ (vl_api_tap_modify_t * mp, void *handle)
+{
+ u8 *s;
+ u8 null_mac[6];
+
+ memset (null_mac, 0, sizeof (null_mac));
+
+ s = format (0, "SCRIPT: tap_modify ");
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+ s = format (s, "tapname %s ", mp->tap_name);
+ if (mp->use_random_mac)
+ s = format (s, "random-mac ");
+
+ if (memcmp (mp->mac_address, null_mac, 6))
+ s = format (s, "mac %U ", format_ethernet_address, mp->mac_address);
+
+ FINISH;
+}
+
+static void *vl_api_tap_delete_t_print
+ (vl_api_tap_delete_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: tap_delete ");
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_tap_dump_t_print
+ (vl_api_sw_interface_tap_dump_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: sw_interface_tap_dump ");
+
+ FINISH;
+}
+
+
+static void *vl_api_ip_add_del_route_t_print
+ (vl_api_ip_add_del_route_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: ip_add_del_route ");
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ if (mp->next_hop_sw_if_index)
+ s = format (s, "sw_if_index %d ", ntohl (mp->next_hop_sw_if_index));
+
+ if (mp->is_ipv6)
+ s = format (s, "%U/%d ", format_ip6_address, mp->dst_address,
+ mp->dst_address_length);
+ else
+ s = format (s, "%U/%d ", format_ip4_address, mp->dst_address,
+ mp->dst_address_length);
+ if (mp->is_local)
+ s = format (s, "local ");
+ else if (mp->is_drop)
+ s = format (s, "drop ");
+ else if (mp->is_classify)
+ s = format (s, "classify %d", ntohl (mp->classify_table_index));
+ else
+ {
+ if (mp->is_ipv6)
+ s = format (s, "via %U ", format_ip6_address, mp->next_hop_address);
+ else
+ s = format (s, "via %U ", format_ip4_address, mp->next_hop_address);
+ }
+
+ if (mp->table_id != 0)
+ s = format (s, "vrf %d ", ntohl (mp->table_id));
+
+ if (mp->create_vrf_if_needed)
+ s = format (s, "create-vrf ");
+
+ if (mp->next_hop_weight != 1)
+ s = format (s, "weight %d ", mp->next_hop_weight);
+
+ if (mp->not_last)
+ s = format (s, "not-last ");
+
+  if (mp->is_multipath)
+    s = format (s, "multipath ");
+
+ if (mp->next_hop_table_id)
+ s = format (s, "lookup-in-vrf %d ", ntohl (mp->next_hop_table_id));
+
+ FINISH;
+}
+
+static void *vl_api_proxy_arp_add_del_t_print
+ (vl_api_proxy_arp_add_del_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: proxy_arp_add_del ");
+
+ s = format (s, "%U - %U ", format_ip4_address, mp->low_address,
+ format_ip4_address, mp->hi_address);
+
+ if (mp->vrf_id)
+ s = format (s, "vrf %d ", ntohl (mp->vrf_id));
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_proxy_arp_intfc_enable_disable_t_print
+ (vl_api_proxy_arp_intfc_enable_disable_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: proxy_arp_intfc_enable_disable ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ s = format (s, "enable %d ", mp->enable_disable);
+
+ FINISH;
+}
+
+static void *vl_api_mpls_tunnel_add_del_t_print
+ (vl_api_mpls_tunnel_add_del_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: mpls_tunnel_add_del ");
+
+ if (mp->mt_next_hop_sw_if_index)
+ s = format (s, "sw_if_index %d ", ntohl (mp->mt_next_hop_sw_if_index));
+
+ if (mp->mt_next_hop_proto_is_ip4)
+ s = format (s, "%U ", format_ip4_address, mp->mt_next_hop);
+ else
+ s = format (s, "%U ", format_ip6_address, mp->mt_next_hop);
+
+ if (mp->mt_l2_only)
+ s = format (s, "l2-only ");
+
+ if (mp->mt_is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_set_unnumbered_t_print
+ (vl_api_sw_interface_set_unnumbered_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: sw_interface_set_unnumbered ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ s = format (s, "unnum_if_index %d ", ntohl (mp->unnumbered_sw_if_index));
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_ip_neighbor_add_del_t_print
+ (vl_api_ip_neighbor_add_del_t * mp, void *handle)
+{
+ u8 *s;
+ u8 null_mac[6];
+
+ memset (null_mac, 0, sizeof (null_mac));
+
+ s = format (0, "SCRIPT: ip_neighbor_add_del ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ if (mp->is_static)
+ s = format (s, "is_static ");
+
+ s = format (s, "vrf_id %d ", ntohl (mp->vrf_id));
+
+ if (memcmp (mp->mac_address, null_mac, 6))
+ s = format (s, "mac %U ", format_ethernet_address, mp->mac_address);
+
+ if (mp->is_ipv6)
+ s =
+ format (s, "dst %U ", format_ip6_address,
+ (ip6_address_t *) mp->dst_address);
+ else
+ s =
+ format (s, "dst %U ", format_ip4_address,
+ (ip4_address_t *) mp->dst_address);
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *
+vl_api_reset_vrf_t_print (vl_api_reset_vrf_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: reset_vrf ");
+
+ if (mp->vrf_id)
+ s = format (s, "vrf %d ", ntohl (mp->vrf_id));
+
+ if (mp->is_ipv6 != 0)
+ s = format (s, "ipv6 ");
+
+ FINISH;
+}
+
+static void *vl_api_create_vlan_subif_t_print
+ (vl_api_create_vlan_subif_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: create_vlan_subif ");
+
+ if (mp->sw_if_index)
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ if (mp->vlan_id)
+ s = format (s, "vlan_id %d ", ntohl (mp->vlan_id));
+
+ FINISH;
+}
+
+#define foreach_create_subif_bit \
+_(no_tags) \
+_(one_tag) \
+_(two_tags) \
+_(dot1ad) \
+_(exact_match) \
+_(default_sub) \
+_(outer_vlan_id_any) \
+_(inner_vlan_id_any)
+
+static void *vl_api_create_subif_t_print
+ (vl_api_create_subif_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: create_subif ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ s = format (s, "sub_id %d ", ntohl (mp->sub_id));
+
+ if (mp->outer_vlan_id)
+ s = format (s, "outer_vlan_id %d ", ntohs (mp->outer_vlan_id));
+
+ if (mp->inner_vlan_id)
+ s = format (s, "inner_vlan_id %d ", ntohs (mp->inner_vlan_id));
+
+#define _(a) if (mp->a) s = format (s, "%s ", #a);
+ foreach_create_subif_bit;
+#undef _
+
+ FINISH;
+}
+
+static void *vl_api_delete_subif_t_print
+ (vl_api_delete_subif_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: delete_subif ");
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ FINISH;
+}
+
+static void *vl_api_oam_add_del_t_print
+ (vl_api_oam_add_del_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: oam_add_del ");
+
+ if (mp->vrf_id)
+ s = format (s, "vrf %d ", ntohl (mp->vrf_id));
+
+ s = format (s, "src %U ", format_ip4_address, mp->src_address);
+
+ s = format (s, "dst %U ", format_ip4_address, mp->dst_address);
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *
+vl_api_reset_fib_t_print (vl_api_reset_fib_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: reset_fib ");
+
+ if (mp->vrf_id)
+ s = format (s, "vrf %d ", ntohl (mp->vrf_id));
+
+ if (mp->is_ipv6 != 0)
+ s = format (s, "ipv6 ");
+
+ FINISH;
+}
+
+static void *vl_api_dhcp_proxy_config_t_print
+ (vl_api_dhcp_proxy_config_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: dhcp_proxy_config ");
+
+ s = format (s, "vrf_id %d ", ntohl (mp->vrf_id));
+
+ if (mp->is_ipv6)
+ {
+ s = format (s, "svr %U ", format_ip6_address,
+ (ip6_address_t *) mp->dhcp_server);
+ s = format (s, "src %U ", format_ip6_address,
+ (ip6_address_t *) mp->dhcp_src_address);
+ }
+ else
+ {
+ s = format (s, "svr %U ", format_ip4_address,
+ (ip4_address_t *) mp->dhcp_server);
+ s = format (s, "src %U ", format_ip4_address,
+ (ip4_address_t *) mp->dhcp_src_address);
+ }
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ s = format (s, "insert-cid %d ", mp->insert_circuit_id);
+
+ FINISH;
+}
+
+static void *vl_api_dhcp_proxy_config_2_t_print
+ (vl_api_dhcp_proxy_config_2_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: dhcp_proxy_config_2 ");
+
+ s = format (s, "rx_vrf_id %d ", ntohl (mp->rx_vrf_id));
+ s = format (s, "server_vrf_id %d ", ntohl (mp->server_vrf_id));
+
+ if (mp->is_ipv6)
+ {
+ s = format (s, "svr %U ", format_ip6_address,
+ (ip6_address_t *) mp->dhcp_server);
+ s = format (s, "src %U ", format_ip6_address,
+ (ip6_address_t *) mp->dhcp_src_address);
+ }
+ else
+ {
+ s = format (s, "svr %U ", format_ip4_address,
+ (ip4_address_t *) mp->dhcp_server);
+ s = format (s, "src %U ", format_ip4_address,
+ (ip4_address_t *) mp->dhcp_src_address);
+ }
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ s = format (s, "insert-cid %d ", mp->insert_circuit_id);
+
+ FINISH;
+}
+
+static void *vl_api_dhcp_proxy_set_vss_t_print
+ (vl_api_dhcp_proxy_set_vss_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: dhcp_proxy_set_vss ");
+
+ s = format (s, "tbl_id %d ", ntohl (mp->tbl_id));
+
+ s = format (s, "fib_id %d ", ntohl (mp->fib_id));
+
+ s = format (s, "oui %d ", ntohl (mp->oui));
+
+ if (mp->is_ipv6 != 0)
+ s = format (s, "ipv6 ");
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_dhcp_client_config_t_print
+ (vl_api_dhcp_client_config_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: dhcp_client_config ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ s = format (s, "hostname %s ", mp->hostname);
+
+ s = format (s, "want_dhcp_event %d ", mp->want_dhcp_event);
+
+ s = format (s, "pid %d ", mp->pid);
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+
+static void *vl_api_set_ip_flow_hash_t_print
+ (vl_api_set_ip_flow_hash_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: set_ip_flow_hash ");
+
+ s = format (s, "vrf_id %d ", ntohl (mp->vrf_id));
+
+ if (mp->src)
+ s = format (s, "src ");
+
+ if (mp->dst)
+ s = format (s, "dst ");
+
+ if (mp->sport)
+ s = format (s, "sport ");
+
+ if (mp->dport)
+ s = format (s, "dport ");
+
+ if (mp->proto)
+ s = format (s, "proto ");
+
+ if (mp->reverse)
+ s = format (s, "reverse ");
+
+ if (mp->is_ipv6 != 0)
+ s = format (s, "ipv6 ");
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_ip6_set_link_local_address_t_print
+ (vl_api_sw_interface_ip6_set_link_local_address_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: sw_interface_ip6_set_link_local_address ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ s = format (s, "%U/%d ", format_ip6_address, mp->address,
+ mp->address_length);
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_ip6nd_ra_prefix_t_print
+ (vl_api_sw_interface_ip6nd_ra_prefix_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: sw_interface_ip6nd_ra_prefix ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ s = format (s, "%U/%d ", format_ip6_address, mp->address,
+ mp->address_length);
+
+ s = format (s, "val_life %d ", ntohl (mp->val_lifetime));
+
+ s = format (s, "pref_life %d ", ntohl (mp->pref_lifetime));
+
+ if (mp->use_default)
+ s = format (s, "def ");
+
+ if (mp->no_advertise)
+ s = format (s, "noadv ");
+
+ if (mp->off_link)
+ s = format (s, "offl ");
+
+ if (mp->no_autoconfig)
+ s = format (s, "noauto ");
+
+ if (mp->no_onlink)
+ s = format (s, "nolink ");
+
+ if (mp->is_no)
+ s = format (s, "isno ");
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_ip6nd_ra_config_t_print
+ (vl_api_sw_interface_ip6nd_ra_config_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: sw_interface_ip6nd_ra_config ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ s = format (s, "maxint %d ", ntohl (mp->max_interval));
+
+ s = format (s, "minint %d ", ntohl (mp->min_interval));
+
+ s = format (s, "life %d ", ntohl (mp->lifetime));
+
+ s = format (s, "count %d ", ntohl (mp->initial_count));
+
+ s = format (s, "interval %d ", ntohl (mp->initial_interval));
+
+ if (mp->suppress)
+ s = format (s, "suppress ");
+
+ if (mp->managed)
+ s = format (s, "managed ");
+
+ if (mp->other)
+ s = format (s, "other ");
+
+ if (mp->ll_option)
+ s = format (s, "ll ");
+
+ if (mp->send_unicast)
+ s = format (s, "send ");
+
+ if (mp->cease)
+ s = format (s, "cease ");
+
+ if (mp->is_no)
+ s = format (s, "isno ");
+
+ if (mp->default_router)
+ s = format (s, "def ");
+
+ FINISH;
+}
+
+static void *vl_api_set_arp_neighbor_limit_t_print
+ (vl_api_set_arp_neighbor_limit_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: set_arp_neighbor_limit ");
+
+ s = format (s, "arp_nbr_limit %d ", ntohl (mp->arp_neighbor_limit));
+
+ if (mp->is_ipv6 != 0)
+ s = format (s, "ipv6 ");
+
+ FINISH;
+}
+
+static void *vl_api_l2_patch_add_del_t_print
+ (vl_api_l2_patch_add_del_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: l2_patch_add_del ");
+
+ s = format (s, "rx_sw_if_index %d ", ntohl (mp->rx_sw_if_index));
+
+ s = format (s, "tx_sw_if_index %d ", ntohl (mp->tx_sw_if_index));
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_sr_tunnel_add_del_t_print
+ (vl_api_sr_tunnel_add_del_t * mp, void *handle)
+{
+ u8 *s;
+ ip6_address_t *this_address;
+ int i;
+ u16 flags_host_byte_order;
+ u8 pl_flag;
+
+ s = format (0, "SCRIPT: sr_tunnel_add_del ");
+
+ if (mp->name[0])
+ s = format (s, "name %s ", mp->name);
+
+ s = format (s, "src %U dst %U/%d ", format_ip6_address,
+ (ip6_address_t *) mp->src_address,
+ format_ip6_address,
+ (ip6_address_t *) mp->dst_address, mp->dst_mask_width);
+
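+  /* segs_and_tags carries n_segments segment addresses followed by
+   * n_tags tag addresses, all ip6 */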
+ this_address = (ip6_address_t *) mp->segs_and_tags;
+ for (i = 0; i < mp->n_segments; i++)
+ {
+ s = format (s, "next %U ", format_ip6_address, this_address);
+ this_address++;
+ }
+ for (i = 0; i < mp->n_tags; i++)
+ {
+ s = format (s, "tag %U ", format_ip6_address, this_address);
+ this_address++;
+ }
+
+ flags_host_byte_order = clib_net_to_host_u16 (mp->flags_net_byte_order);
+
+ if (flags_host_byte_order & IP6_SR_HEADER_FLAG_CLEANUP)
+ s = format (s, " clean ");
+
+ if (flags_host_byte_order & IP6_SR_HEADER_FLAG_PROTECTED)
+ s = format (s, "protected ");
+
+ for (i = 1; i <= 4; i++)
+ {
+ pl_flag = ip6_sr_policy_list_flags (flags_host_byte_order, i);
+
+ switch (pl_flag)
+ {
+ case IP6_SR_HEADER_FLAG_PL_ELT_NOT_PRESENT:
+ continue;
+
+ case IP6_SR_HEADER_FLAG_PL_ELT_INGRESS_PE:
+ s = format (s, "InPE %d ", i);
+ break;
+
+ case IP6_SR_HEADER_FLAG_PL_ELT_EGRESS_PE:
+ s = format (s, "EgPE %d ", i);
+ break;
+
+ case IP6_SR_HEADER_FLAG_PL_ELT_ORIG_SRC_ADDR:
+ s = format (s, "OrgSrc %d ", i);
+ break;
+
+ default:
+ clib_warning ("BUG: pl elt %d value %d", i, pl_flag);
+ break;
+ }
+ }
+
+ if (mp->policy_name[0])
+ s = format (s, "policy_name %s ", mp->policy_name);
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_sr_policy_add_del_t_print
+ (vl_api_sr_policy_add_del_t * mp, void *handle)
+{
+ u8 *s;
+ int i;
+
+ s = format (0, "SCRIPT: sr_policy_add_del ");
+
+ if (mp->name[0])
+ s = format (s, "name %s ", mp->name);
+
+
+ if (mp->tunnel_names[0])
+ {
+      // Deserialize tunnel_names: the first octet is the number of
+      // tunnels; each name follows as a length octet plus the name bytes
+      int num_tunnels = mp->tunnel_names[0];
+      u8 *deser_tun_names = mp->tunnel_names;
+      deser_tun_names += 1;	// skip the count octet
+
+ u8 *tun_name = 0;
+ int tun_name_len = 0;
+
+ for (i = 0; i < num_tunnels; i++)
+ {
+ tun_name_len = *deser_tun_names;
+ deser_tun_names += 1;
+	  vec_resize (tun_name, tun_name_len);
+	  memcpy (tun_name, deser_tun_names, tun_name_len);
+	  vec_add1 (tun_name, 0);	/* NUL-terminate for %s */
+	  s = format (s, "tunnel %s ", tun_name);
+	  deser_tun_names += tun_name_len;
+	  vec_free (tun_name);
+ }
+ }
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_sr_multicast_map_add_del_t_print
+ (vl_api_sr_multicast_map_add_del_t * mp, void *handle)
+{
+
+ u8 *s = 0;
+ /* int i; */
+
+ s = format (0, "SCRIPT: sr_multicast_map_add_del ");
+
+ if (mp->multicast_address[0])
+ s = format (s, "address %U ", format_ip6_address, &mp->multicast_address);
+
+ if (mp->policy_name[0])
+ s = format (s, "sr-policy %s ", &mp->policy_name);
+
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+
+static void *vl_api_classify_add_del_table_t_print
+ (vl_api_classify_add_del_table_t * mp, void *handle)
+{
+ u8 *s;
+ int i;
+
+ s = format (0, "SCRIPT: classify_add_del_table ");
+
+ if (mp->is_add == 0)
+ {
+ s = format (s, "table %d ", ntohl (mp->table_index));
+ s = format (s, "%s ", mp->del_chain ? "del-chain" : "del");
+ }
+ else
+ {
+ s = format (s, "nbuckets %d ", ntohl (mp->nbuckets));
+ s = format (s, "memory_size %d ", ntohl (mp->memory_size));
+ s = format (s, "skip %d ", ntohl (mp->skip_n_vectors));
+ s = format (s, "match %d ", ntohl (mp->match_n_vectors));
+ s = format (s, "next-table %d ", ntohl (mp->next_table_index));
+ s = format (s, "miss-next %d ", ntohl (mp->miss_next_index));
+ s = format (s, "current-data-flag %d ", ntohl (mp->current_data_flag));
+ if (mp->current_data_flag)
+ s = format (s, "current-data-offset %d ",
+ ntohl (mp->current_data_offset));
+ s = format (s, "mask hex ");
+ for (i = 0; i < ntohl (mp->match_n_vectors) * sizeof (u32x4); i++)
+ s = format (s, "%02x", mp->mask[i]);
+ vec_add1 (s, ' ');
+ }
+
+ FINISH;
+}
+
+static void *vl_api_classify_add_del_session_t_print
+ (vl_api_classify_add_del_session_t * mp, void *handle)
+{
+ u8 *s;
+ int i, limit = 0;
+
+ s = format (0, "SCRIPT: classify_add_del_session ");
+
+ s = format (s, "table_index %d ", ntohl (mp->table_index));
+ s = format (s, "hit_next_index %d ", ntohl (mp->hit_next_index));
+ s = format (s, "opaque_index %d ", ntohl (mp->opaque_index));
+ s = format (s, "advance %d ", ntohl (mp->advance));
+ s = format (s, "action %d ", mp->action);
+ if (mp->action)
+ s = format (s, "metadata %d ", ntohl (mp->metadata));
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ s = format (s, "match hex ");
+  for (i = 5 * sizeof (u32x4) - 1; i >= 0; i--)
+ {
+ if (mp->match[i] != 0)
+ {
+ limit = i + 1;
+ break;
+ }
+ }
+
+ for (i = 0; i < limit; i++)
+ s = format (s, "%02x", mp->match[i]);
+
+ FINISH;
+}
+
+static void *vl_api_classify_set_interface_ip_table_t_print
+ (vl_api_classify_set_interface_ip_table_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: classify_set_interface_ip_table ");
+
+ if (mp->is_ipv6)
+ s = format (s, "ipv6 ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+ s = format (s, "table %d ", ntohl (mp->table_index));
+
+ FINISH;
+}
+
+static void *vl_api_classify_set_interface_l2_tables_t_print
+ (vl_api_classify_set_interface_l2_tables_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: classify_set_interface_l2_tables ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+ s = format (s, "ip4-table %d ", ntohl (mp->ip4_table_index));
+ s = format (s, "ip6-table %d ", ntohl (mp->ip6_table_index));
+ s = format (s, "other-table %d ", ntohl (mp->other_table_index));
+ s = format (s, "is-input %d ", mp->is_input);
+
+ FINISH;
+}
+
+static void *vl_api_add_node_next_t_print
+ (vl_api_add_node_next_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: add_node_next ");
+
+  s = format (s, "node %s next %s ", mp->node_name, mp->next_name);
+
+ FINISH;
+}
+
+static void *vl_api_l2tpv3_create_tunnel_t_print
+ (vl_api_l2tpv3_create_tunnel_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: l2tpv3_create_tunnel ");
+
+ s = format (s, "client_address %U our_address %U ",
+ format_ip6_address, (ip6_address_t *) (mp->client_address),
+ format_ip6_address, (ip6_address_t *) (mp->our_address));
+ s = format (s, "local_session_id %d ", ntohl (mp->local_session_id));
+ s = format (s, "remote_session_id %d ", ntohl (mp->remote_session_id));
+ s = format (s, "local_cookie %lld ",
+ clib_net_to_host_u64 (mp->local_cookie));
+ s = format (s, "remote_cookie %lld ",
+ clib_net_to_host_u64 (mp->remote_cookie));
+ if (mp->l2_sublayer_present)
+ s = format (s, "l2-sublayer-present ");
+
+ FINISH;
+}
+
+static void *vl_api_l2tpv3_set_tunnel_cookies_t_print
+ (vl_api_l2tpv3_set_tunnel_cookies_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: l2tpv3_set_tunnel_cookies ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ s = format (s, "new_local_cookie %llu ",
+ clib_net_to_host_u64 (mp->new_local_cookie));
+
+ s = format (s, "new_remote_cookie %llu ",
+ clib_net_to_host_u64 (mp->new_remote_cookie));
+
+ FINISH;
+}
+
+static void *vl_api_l2tpv3_interface_enable_disable_t_print
+ (vl_api_l2tpv3_interface_enable_disable_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: l2tpv3_interface_enable_disable ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ if (mp->enable_disable == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_l2tpv3_set_lookup_key_t_print
+ (vl_api_l2tpv3_set_lookup_key_t * mp, void *handle)
+{
+ u8 *s;
+ char *str = "unknown";
+
+ s = format (0, "SCRIPT: l2tpv3_set_lookup_key ");
+
+ switch (mp->key)
+ {
+ case L2T_LOOKUP_SRC_ADDRESS:
+ str = "lookup_v6_src";
+ break;
+ case L2T_LOOKUP_DST_ADDRESS:
+ str = "lookup_v6_dst";
+ break;
+ case L2T_LOOKUP_SESSION_ID:
+ str = "lookup_session_id";
+ break;
+ default:
+ break;
+ }
+
+ s = format (s, "%s ", str);
+
+ FINISH;
+}
+
+static void *vl_api_sw_if_l2tpv3_tunnel_dump_t_print
+ (vl_api_sw_if_l2tpv3_tunnel_dump_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: sw_if_l2tpv3_tunnel_dump ");
+
+ FINISH;
+}
+
+static void *vl_api_vxlan_add_del_tunnel_t_print
+ (vl_api_vxlan_add_del_tunnel_t * mp, void *handle)
+{
+ u8 *s;
+ s = format (0, "SCRIPT: vxlan_add_del_tunnel ");
+
+ ip46_address_t src, dst;
+
+ ip46_from_addr_buf (mp->is_ipv6, mp->dst_address, &dst);
+ ip46_from_addr_buf (mp->is_ipv6, mp->src_address, &src);
+
+ u8 is_grp = ip46_address_is_multicast (&dst);
+ char *dst_name = is_grp ? "group" : "dst";
+
+ s = format (s, "src %U ", format_ip46_address, &src, IP46_TYPE_ANY);
+ s = format (s, "%s %U ", dst_name, format_ip46_address,
+ &dst, IP46_TYPE_ANY);
+
+ if (is_grp)
+ s = format (s, "mcast_sw_if_index %d ", ntohl (mp->mcast_sw_if_index));
+
+ if (mp->encap_vrf_id)
+ s = format (s, "encap-vrf-id %d ", ntohl (mp->encap_vrf_id));
+
+ s = format (s, "decap-next %d ", ntohl (mp->decap_next_index));
+
+ s = format (s, "vni %d ", ntohl (mp->vni));
+
+  if (mp->is_add == 0)
+    s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_vxlan_tunnel_dump_t_print
+ (vl_api_vxlan_tunnel_dump_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: vxlan_tunnel_dump ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ FINISH;
+}
+
+static void *vl_api_gre_add_del_tunnel_t_print
+ (vl_api_gre_add_del_tunnel_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: gre_add_del_tunnel ");
+
+ s = format (s, "dst %U ", format_ip46_address,
+ (ip46_address_t *) & (mp->dst_address),
+ mp->is_ipv6 ? IP46_TYPE_IP6 : IP46_TYPE_IP4);
+
+ s = format (s, "src %U ", format_ip46_address,
+ (ip46_address_t *) & (mp->src_address),
+ mp->is_ipv6 ? IP46_TYPE_IP6 : IP46_TYPE_IP4);
+
+ if (mp->teb)
+ s = format (s, "teb ");
+
+ if (mp->outer_fib_id)
+ s = format (s, "outer-fib-id %d ", ntohl (mp->outer_fib_id));
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_gre_tunnel_dump_t_print
+ (vl_api_gre_tunnel_dump_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: gre_tunnel_dump ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ FINISH;
+}
+
+static void *vl_api_l2_fib_clear_table_t_print
+ (vl_api_l2_fib_clear_table_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: l2_fib_clear_table ");
+
+ FINISH;
+}
+
+static void *vl_api_l2_interface_efp_filter_t_print
+ (vl_api_l2_interface_efp_filter_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: l2_interface_efp_filter ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+ if (mp->enable_disable)
+ s = format (s, "enable ");
+ else
+ s = format (s, "disable ");
+
+ FINISH;
+}
+
+static void *vl_api_l2_interface_vlan_tag_rewrite_t_print
+ (vl_api_l2_interface_vlan_tag_rewrite_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: l2_interface_vlan_tag_rewrite ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+ s = format (s, "vtr_op %d ", ntohl (mp->vtr_op));
+ s = format (s, "push_dot1q %d ", ntohl (mp->push_dot1q));
+ s = format (s, "tag1 %d ", ntohl (mp->tag1));
+ s = format (s, "tag2 %d ", ntohl (mp->tag2));
+
+ FINISH;
+}
+
+static void *vl_api_create_vhost_user_if_t_print
+ (vl_api_create_vhost_user_if_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: create_vhost_user_if ");
+
+ s = format (s, "socket %s ", mp->sock_filename);
+ if (mp->is_server)
+ s = format (s, "server ");
+ if (mp->renumber)
+ s = format (s, "renumber %d ", ntohl (mp->custom_dev_instance));
+ if (mp->tag[0])
+ s = format (s, "tag %s", mp->tag);
+
+ FINISH;
+}
+
+static void *vl_api_modify_vhost_user_if_t_print
+ (vl_api_modify_vhost_user_if_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: modify_vhost_user_if ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+ s = format (s, "socket %s ", mp->sock_filename);
+ if (mp->is_server)
+ s = format (s, "server ");
+ if (mp->renumber)
+ s = format (s, "renumber %d ", ntohl (mp->custom_dev_instance));
+
+ FINISH;
+}
+
+static void *vl_api_delete_vhost_user_if_t_print
+ (vl_api_delete_vhost_user_if_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: delete_vhost_user_if ");
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_vhost_user_dump_t_print
+ (vl_api_sw_interface_vhost_user_dump_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: sw_interface_vhost_user_dump ");
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_dump_t_print
+ (vl_api_sw_interface_dump_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: sw_interface_dump ");
+
+ if (mp->name_filter_valid)
+ s = format (s, "name_filter %s ", mp->name_filter);
+ else
+ s = format (s, "all ");
+
+ FINISH;
+}
+
+static void *vl_api_l2_fib_table_dump_t_print
+ (vl_api_l2_fib_table_dump_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: l2_fib_table_dump ");
+
+ s = format (s, "bd_id %d ", ntohl (mp->bd_id));
+
+ FINISH;
+}
+
+static void *vl_api_control_ping_t_print
+ (vl_api_control_ping_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: control_ping ");
+
+ FINISH;
+}
+
+static void *vl_api_want_interface_events_t_print
+ (vl_api_want_interface_events_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: want_interface_events pid %d enable %d ",
+ ntohl (mp->pid), ntohl (mp->enable_disable));
+
+ FINISH;
+}
+
+static void *vl_api_cli_request_t_print
+ (vl_api_cli_request_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: cli_request ");
+
+ FINISH;
+}
+
+static void *vl_api_cli_inband_t_print
+ (vl_api_cli_inband_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: cli_inband ");
+
+ FINISH;
+}
+
+static void *vl_api_memclnt_create_t_print
+ (vl_api_memclnt_create_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: memclnt_create name %s ", mp->name);
+
+ FINISH;
+}
+
+static void *vl_api_show_version_t_print
+ (vl_api_show_version_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: show_version ");
+
+ FINISH;
+}
+
+static void *vl_api_vxlan_gpe_add_del_tunnel_t_print
+ (vl_api_vxlan_gpe_add_del_tunnel_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: vxlan_gpe_add_del_tunnel ");
+
+ s = format (s, "local %U ", format_ip46_address, &mp->local, mp->is_ipv6);
+
+ s = format (s, "remote %U ", format_ip46_address, &mp->remote, mp->is_ipv6);
+
+ s = format (s, "protocol %d ", ntohl (mp->protocol));
+
+ s = format (s, "vni %d ", ntohl (mp->vni));
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ if (mp->encap_vrf_id)
+ s = format (s, "encap-vrf-id %d ", ntohl (mp->encap_vrf_id));
+
+ if (mp->decap_vrf_id)
+ s = format (s, "decap-vrf-id %d ", ntohl (mp->decap_vrf_id));
+
+ FINISH;
+}
+
+static void *vl_api_vxlan_gpe_tunnel_dump_t_print
+ (vl_api_vxlan_gpe_tunnel_dump_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: vxlan_gpe_tunnel_dump ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ FINISH;
+}
+
+static void *vl_api_interface_name_renumber_t_print
+ (vl_api_interface_name_renumber_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: interface_renumber ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ s = format (s, "new_show_dev_instance %d ",
+ ntohl (mp->new_show_dev_instance));
+
+ FINISH;
+}
+
+static void *vl_api_want_ip4_arp_events_t_print
+ (vl_api_want_ip4_arp_events_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: want_ip4_arp_events ");
+ s = format (s, "pid %d address %U ", mp->pid,
+ format_ip4_address, &mp->address);
+ if (mp->enable_disable == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_want_ip6_nd_events_t_print
+ (vl_api_want_ip6_nd_events_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: want_ip6_nd_events ");
+ s = format (s, "pid %d address %U ", mp->pid,
+ format_ip6_address, mp->address);
+ if (mp->enable_disable == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_input_acl_set_interface_t_print
+ (vl_api_input_acl_set_interface_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: input_acl_set_interface ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+ s = format (s, "ip4-table %d ", ntohl (mp->ip4_table_index));
+ s = format (s, "ip6-table %d ", ntohl (mp->ip6_table_index));
+ s = format (s, "l2-table %d ", ntohl (mp->l2_table_index));
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_ip_address_dump_t_print
+ (vl_api_ip_address_dump_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: ip6_address_dump ");
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+ s = format (s, "is_ipv6 %d ", mp->is_ipv6 != 0);
+
+ FINISH;
+}
+
+static void *
+vl_api_ip_dump_t_print (vl_api_ip_dump_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: ip_dump ");
+ s = format (s, "is_ipv6 %d ", mp->is_ipv6 != 0);
+
+ FINISH;
+}
+
+static void *vl_api_cop_interface_enable_disable_t_print
+ (vl_api_cop_interface_enable_disable_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: cop_interface_enable_disable ");
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+ if (mp->enable_disable)
+ s = format (s, "enable ");
+ else
+ s = format (s, "disable ");
+
+ FINISH;
+}
+
+static void *vl_api_cop_whitelist_enable_disable_t_print
+ (vl_api_cop_whitelist_enable_disable_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: cop_whitelist_enable_disable ");
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+ s = format (s, "fib-id %d ", ntohl (mp->fib_id));
+ if (mp->ip4)
+ s = format (s, "ip4 ");
+ if (mp->ip6)
+ s = format (s, "ip6 ");
+ if (mp->default_cop)
+ s = format (s, "default ");
+
+ FINISH;
+}
+
+static void *vl_api_af_packet_create_t_print
+ (vl_api_af_packet_create_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: af_packet_create ");
+ s = format (s, "host_if_name %s ", mp->host_if_name);
+ if (mp->use_random_hw_addr)
+ s = format (s, "hw_addr random ");
+ else
+ s = format (s, "hw_addr %U ", format_ethernet_address, mp->hw_addr);
+
+ FINISH;
+}
+
+static void *vl_api_af_packet_delete_t_print
+ (vl_api_af_packet_delete_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: af_packet_delete ");
+ s = format (s, "host_if_name %s ", mp->host_if_name);
+
+ FINISH;
+}
+
+static u8 *
+format_policer_action (u8 * s, va_list * va)
+{
+ u32 action = va_arg (*va, u32);
+ u32 dscp = va_arg (*va, u32);
+ char *t = 0;
+
+ if (action == SSE2_QOS_ACTION_DROP)
+ s = format (s, "drop");
+ else if (action == SSE2_QOS_ACTION_TRANSMIT)
+ s = format (s, "transmit");
+ else if (action == SSE2_QOS_ACTION_MARK_AND_TRANSMIT)
+ {
+ s = format (s, "mark-and-transmit ");
+ switch (dscp)
+ {
+#define _(v,f,str) case VNET_DSCP_##f: t = str; break;
+ foreach_vnet_dscp
+#undef _
+ default:
+ break;
+ }
+ s = format (s, "%s", t);
+ }
+
+ return s;
+}
+
+static void *vl_api_policer_add_del_t_print
+ (vl_api_policer_add_del_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: policer_add_del ");
+ s = format (s, "name %s ", mp->name);
+ s = format (s, "cir %d ", mp->cir);
+ s = format (s, "eir %d ", mp->eir);
+ s = format (s, "cb %d ", mp->cb);
+ s = format (s, "eb %d ", mp->eb);
+
+ switch (mp->rate_type)
+ {
+ case SSE2_QOS_RATE_KBPS:
+ s = format (s, "rate_type kbps ");
+ break;
+ case SSE2_QOS_RATE_PPS:
+ s = format (s, "rate_type pps ");
+ break;
+ default:
+ break;
+ }
+
+ switch (mp->round_type)
+ {
+ case SSE2_QOS_ROUND_TO_CLOSEST:
+ s = format (s, "round_type closest ");
+ break;
+ case SSE2_QOS_ROUND_TO_UP:
+ s = format (s, "round_type up ");
+ break;
+ case SSE2_QOS_ROUND_TO_DOWN:
+ s = format (s, "round_type down ");
+ break;
+ default:
+ break;
+ }
+
+ switch (mp->type)
+ {
+ case SSE2_QOS_POLICER_TYPE_1R2C:
+ s = format (s, "type 1r2c ");
+ break;
+ case SSE2_QOS_POLICER_TYPE_1R3C_RFC_2697:
+ s = format (s, "type 1r3c ");
+ break;
+ case SSE2_QOS_POLICER_TYPE_2R3C_RFC_2698:
+ s = format (s, "type 2r3c-2698 ");
+ break;
+ case SSE2_QOS_POLICER_TYPE_2R3C_RFC_4115:
+ s = format (s, "type 2r3c-4115 ");
+ break;
+ case SSE2_QOS_POLICER_TYPE_2R3C_RFC_MEF5CF1:
+ s = format (s, "type 2r3c-mef5cf1 ");
+ break;
+ default:
+ break;
+ }
+
+ s = format (s, "conform_action %U ", format_policer_action,
+ mp->conform_action_type, mp->conform_dscp);
+ s = format (s, "exceed_action %U ", format_policer_action,
+ mp->exceed_action_type, mp->exceed_dscp);
+ s = format (s, "violate_action %U ", format_policer_action,
+ mp->violate_action_type, mp->violate_dscp);
+
+ if (mp->color_aware)
+ s = format (s, "color-aware ");
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_policer_dump_t_print
+ (vl_api_policer_dump_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: policer_dump ");
+ if (mp->match_name_valid)
+ s = format (s, "name %s ", mp->match_name);
+
+ FINISH;
+}
+
+static void *vl_api_policer_classify_set_interface_t_print
+ (vl_api_policer_classify_set_interface_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: policer_classify_set_interface ");
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+ if (mp->ip4_table_index != ~0)
+ s = format (s, "ip4-table %d ", ntohl (mp->ip4_table_index));
+ if (mp->ip6_table_index != ~0)
+ s = format (s, "ip6-table %d ", ntohl (mp->ip6_table_index));
+ if (mp->l2_table_index != ~0)
+ s = format (s, "l2-table %d ", ntohl (mp->l2_table_index));
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_policer_classify_dump_t_print
+ (vl_api_policer_classify_dump_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: policer_classify_dump ");
+ switch (mp->type)
+ {
+ case POLICER_CLASSIFY_TABLE_IP4:
+ s = format (s, "type ip4 ");
+ break;
+ case POLICER_CLASSIFY_TABLE_IP6:
+ s = format (s, "type ip6 ");
+ break;
+ case POLICER_CLASSIFY_TABLE_L2:
+ s = format (s, "type l2 ");
+ break;
+ default:
+ break;
+ }
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_clear_stats_t_print
+ (vl_api_sw_interface_clear_stats_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: sw_interface_clear_stats ");
+ if (mp->sw_if_index != ~0)
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ FINISH;
+}
+
+static void *vl_api_mpls_tunnel_dump_t_print
+ (vl_api_mpls_tunnel_dump_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: mpls_tunnel_dump ");
+
+ s = format (s, "tunnel_index %d ", ntohl (mp->tunnel_index));
+
+ FINISH;
+}
+
+static void *vl_api_mpls_fib_dump_t_print
+ (vl_api_mpls_fib_dump_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: mpls_fib_decap_dump ");
+
+ FINISH;
+}
+
+static void *vl_api_ip_fib_dump_t_print
+ (vl_api_ip_fib_dump_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: ip_fib_dump ");
+
+ FINISH;
+}
+
+static void *vl_api_ip6_fib_dump_t_print
+ (vl_api_ip6_fib_dump_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: ip6_fib_dump ");
+
+ FINISH;
+}
+
+static void *vl_api_classify_table_ids_t_print
+ (vl_api_classify_table_ids_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: classify_table_ids ");
+
+ FINISH;
+}
+
+static void *vl_api_classify_table_by_interface_t_print
+ (vl_api_classify_table_by_interface_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: classify_table_by_interface ");
+ if (mp->sw_if_index != ~0)
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ FINISH;
+}
+
+static void *vl_api_classify_table_info_t_print
+ (vl_api_classify_table_info_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: classify_table_info ");
+ if (mp->table_id != ~0)
+ s = format (s, "table_id %d ", ntohl (mp->table_id));
+
+ FINISH;
+}
+
+static void *vl_api_classify_session_dump_t_print
+ (vl_api_classify_session_dump_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: classify_session_dump ");
+ if (mp->table_id != ~0)
+ s = format (s, "table_id %d ", ntohl (mp->table_id));
+
+ FINISH;
+}
+
+static void *vl_api_set_ipfix_exporter_t_print
+ (vl_api_set_ipfix_exporter_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: set_ipfix_exporter ");
+
+ s = format (s, "collector-address %U ", format_ip4_address,
+ (ip4_address_t *) mp->collector_address);
+ s = format (s, "collector-port %d ", ntohs (mp->collector_port));
+ s = format (s, "src-address %U ", format_ip4_address,
+ (ip4_address_t *) mp->src_address);
+ s = format (s, "vrf-id %d ", ntohl (mp->vrf_id));
+ s = format (s, "path-mtu %d ", ntohl (mp->path_mtu));
+ s = format (s, "template-interval %d ", ntohl (mp->template_interval));
+ s = format (s, "udp-checksum %d ", mp->udp_checksum);
+
+ FINISH;
+}
+
+static void *vl_api_ipfix_exporter_dump_t_print
+ (vl_api_ipfix_exporter_dump_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: ipfix_exporter_dump ");
+
+ FINISH;
+}
+
+static void *vl_api_set_ipfix_classify_stream_t_print
+ (vl_api_set_ipfix_classify_stream_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: set_ipfix_classify_stream ");
+
+ s = format (s, "domain-id %d ", ntohl (mp->domain_id));
+ s = format (s, "src-port %d ", ntohs (mp->src_port));
+
+ FINISH;
+}
+
+static void *vl_api_ipfix_classify_stream_dump_t_print
+ (vl_api_ipfix_classify_stream_dump_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: ipfix_classify_stream_dump ");
+
+ FINISH;
+}
+
+static void *vl_api_ipfix_classify_table_add_del_t_print
+ (vl_api_ipfix_classify_table_add_del_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: ipfix_classify_table_add_del ");
+
+ s = format (s, "table-id %d ", ntohl (mp->table_id));
+ s = format (s, "ip-version %d ", mp->ip_version);
+ s = format (s, "transport-protocol %d ", mp->transport_protocol);
+
+ FINISH;
+}
+
+static void *vl_api_ipfix_classify_table_dump_t_print
+ (vl_api_ipfix_classify_table_dump_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: ipfix_classify_table_dump ");
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_span_enable_disable_t_print
+ (vl_api_sw_interface_span_enable_disable_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: sw_interface_span_enable_disable ");
+ s = format (s, "src_sw_if_index %u ", ntohl (mp->sw_if_index_from));
+ s = format (s, "dst_sw_if_index %u ", ntohl (mp->sw_if_index_to));
+
+ switch (mp->state)
+ {
+ case 0:
+ s = format (s, "disable ");
+ break;
+ case 1:
+ s = format (s, "rx ");
+ break;
+ case 2:
+ s = format (s, "tx ");
+ break;
+ case 3:
+ default:
+ s = format (s, "both ");
+ break;
+ }
+
+ FINISH;
+}
+
+static void *
+vl_api_sw_interface_span_dump_t_print (vl_api_sw_interface_span_dump_t * mp,
+ void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: sw_interface_span_dump ");
+
+ FINISH;
+}
+
+static void *vl_api_get_next_index_t_print
+ (vl_api_get_next_index_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: get_next_index ");
+ s = format (s, "node-name %s ", mp->node_name);
+ s = format (s, "next-node-name %s ", mp->next_name);
+
+ FINISH;
+}
+
+static void *vl_api_pg_create_interface_t_print
+ (vl_api_pg_create_interface_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: pg_create_interface ");
+ s = format (0, "if_id %d", ntohl (mp->interface_id));
+
+ FINISH;
+}
+
+static void *vl_api_pg_capture_t_print
+ (vl_api_pg_capture_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: pg_capture ");
+ s = format (0, "if_id %d ", ntohl (mp->interface_id));
+ s = format (0, "pcap %s", mp->pcap_file_name);
+ if (mp->count != ~0)
+ s = format (s, "count %d ", ntohl (mp->count));
+ if (!mp->is_enabled)
+ s = format (s, "disable");
+
+ FINISH;
+}
+
+static void *vl_api_pg_enable_disable_t_print
+ (vl_api_pg_enable_disable_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: pg_enable_disable ");
+ if (ntohl (mp->stream_name_length) > 0)
+ s = format (s, "stream %s", mp->stream_name);
+ if (!mp->is_enabled)
+ s = format (s, "disable");
+
+ FINISH;
+}
+
+static void *vl_api_ip_source_and_port_range_check_add_del_t_print
+ (vl_api_ip_source_and_port_range_check_add_del_t * mp, void *handle)
+{
+ u8 *s;
+ int i;
+
+ s = format (0, "SCRIPT: ip_source_and_port_range_check_add_del ");
+ if (mp->is_ipv6)
+ s = format (s, "%U/%d ", format_ip6_address, mp->address,
+ mp->mask_length);
+ else
+ s = format (s, "%U/%d ", format_ip4_address, mp->address,
+ mp->mask_length);
+
+ for (i = 0; i < mp->number_of_ranges; i++)
+ {
+ s = format (s, "range %d - %d ", mp->low_ports[i], mp->high_ports[i]);
+ }
+
+ s = format (s, "vrf %d ", ntohl (mp->vrf_id));
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_ip_source_and_port_range_check_interface_add_del_t_print
+ (vl_api_ip_source_and_port_range_check_interface_add_del_t * mp,
+ void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: ip_source_and_port_range_check_interface_add_del ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ if (mp->tcp_out_vrf_id != ~0)
+ s = format (s, "tcp-out-vrf %d ", ntohl (mp->tcp_out_vrf_id));
+
+ if (mp->udp_out_vrf_id != ~0)
+ s = format (s, "udp-out-vrf %d ", ntohl (mp->udp_out_vrf_id));
+
+ if (mp->tcp_in_vrf_id != ~0)
+ s = format (s, "tcp-in-vrf %d ", ntohl (mp->tcp_in_vrf_id));
+
+ if (mp->udp_in_vrf_id != ~0)
+ s = format (s, "udp-in-vrf %d ", ntohl (mp->udp_in_vrf_id));
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_lisp_enable_disable_t_print
+ (vl_api_lisp_enable_disable_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: lisp_enable_disable %s",
+ mp->is_en ? "enable" : "disable");
+
+ FINISH;
+}
+
+static void *vl_api_lisp_gpe_add_del_iface_t_print
+ (vl_api_lisp_gpe_add_del_iface_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: lisp_gpe_add_del_iface ");
+
+ s = format (s, "%s ", mp->is_add ? "up" : "down");
+ s = format (s, "vni %d ", mp->vni);
+ s = format (s, "%s %d ", mp->is_l2 ? "bd_id" : "table_id", mp->dp_table);
+
+ FINISH;
+}
+
+static void *vl_api_lisp_pitr_set_locator_set_t_print
+ (vl_api_lisp_pitr_set_locator_set_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: lisp_pitr_set_locator_set ");
+
+ if (mp->is_add)
+ s = format (s, "locator-set %s ", mp->ls_name);
+ else
+ s = format (s, "del");
+
+ FINISH;
+}
+
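+/* EID type codes used by the LISP messages below:
+ * 0 = IPv4 prefix, 1 = IPv6 prefix, 3 = MAC address. */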
+static u8 *
+format_lisp_flat_eid (u8 * s, va_list * args)
+{
+ u32 type = va_arg (*args, u32);
+ u8 *eid = va_arg (*args, u8 *);
+ u32 eid_len = va_arg (*args, u32);
+
+ switch (type)
+ {
+ case 0:
+ return format (s, "%U/%d", format_ip4_address, eid, eid_len);
+ case 1:
+ return format (s, "%U/%d", format_ip6_address, eid, eid_len);
+ case 3:
+ return format (s, "%U", format_ethernet_address, eid);
+ }
+ return 0;
+}
+
+/** Used for transferring locators via VPP API */
+typedef CLIB_PACKED (struct
+ {
+ u8 is_ip4;
+ /**< is locator an IPv4 address */
+ u8 priority;
+ /**< locator priority */
+ u8 weight;
+ /**< locator weight */
+ u8 addr[16];
+ /**< IPv4/IPv6 address */
+ }) rloc_t;
+
+static u8 *
+format_rloc (u8 * s, va_list * args)
+{
+ rloc_t *rloc = va_arg (*args, rloc_t *);
+
+ if (rloc->is_ip4)
+ s = format (s, "%U ", format_ip4_address, rloc->addr);
+ else
+ s = format (s, "%U ", format_ip6_address, rloc->addr);
+
+ s = format (s, "p %d w %d", rloc->priority, rloc->weight);
+
+ return s;
+}
+
+static void *vl_api_lisp_add_del_remote_mapping_t_print
+ (vl_api_lisp_add_del_remote_mapping_t * mp, void *handle)
+{
+ u8 *s;
+ u32 i, rloc_num = 0;
+
+ s = format (0, "SCRIPT: lisp_add_del_remote_mapping ");
+
+ if (mp->del_all)
+ s = format (s, "del-all ");
+
+ s = format (s, "%s ", mp->is_add ? "add" : "del");
+ s = format (s, "vni %d ", clib_net_to_host_u32 (mp->vni));
+
+ s = format (s, "eid %U ", format_lisp_flat_eid,
+ mp->eid_type, mp->eid, mp->eid_len);
+
+ if (mp->is_src_dst)
+ {
+ s = format (s, "seid %U ", format_lisp_flat_eid,
+ mp->eid_type, mp->seid, mp->seid_len);
+ }
+
+ rloc_num = clib_net_to_host_u32 (mp->rloc_num);
+
+ if (0 == rloc_num)
+ s = format (s, "action %d", mp->action);
+ else
+ {
+ rloc_t *rloc = (rloc_t *) mp->rlocs;
+ for (i = 0; i < rloc_num; i++)
+ s = format (s, "%U ", format_rloc, &rloc[i]);
+ }
+
+ FINISH;
+}
+
+static void *vl_api_lisp_add_del_adjacency_t_print
+ (vl_api_lisp_add_del_adjacency_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: lisp_add_del_adjacency ");
+
+ s = format (s, "%s ", mp->is_add ? "add" : "del");
+ s = format (s, "vni %d ", clib_net_to_host_u32 (mp->vni));
+ s = format (s, "reid %U leid %U ",
+ format_lisp_flat_eid, mp->eid_type, mp->reid, mp->reid_len,
+ format_lisp_flat_eid, mp->eid_type, mp->leid, mp->leid_len);
+
+ FINISH;
+}
+
+static void *vl_api_lisp_add_del_map_request_itr_rlocs_t_print
+ (vl_api_lisp_add_del_map_request_itr_rlocs_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: lisp_add_del_map_request_itr_rlocs ");
+
+ if (mp->is_add)
+ s = format (s, "%s", mp->locator_set_name);
+ else
+ s = format (s, "del");
+
+ FINISH;
+}
+
+static void *vl_api_lisp_eid_table_add_del_map_t_print
+ (vl_api_lisp_eid_table_add_del_map_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: lisp_eid_table_add_del_map ");
+
+ if (!mp->is_add)
+ s = format (s, "del ");
+
+ s = format (s, "vni %d ", clib_net_to_host_u32 (mp->vni));
+ s = format (s, "%s %d ",
+ mp->is_l2 ? "bd_index" : "vrf",
+ clib_net_to_host_u32 (mp->dp_table));
+ FINISH;
+}
+
+static void *vl_api_lisp_add_del_local_eid_t_print
+ (vl_api_lisp_add_del_local_eid_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: lisp_add_del_local_eid ");
+
+ if (!mp->is_add)
+ s = format (s, "del ");
+
+ s = format (s, "vni %d ", clib_net_to_host_u32 (mp->vni));
+ s = format (s, "eid %U ", format_lisp_flat_eid, mp->eid_type, mp->eid,
+ mp->prefix_len);
+ s = format (s, "locator-set %s ", mp->locator_set_name);
+ if (*mp->key)
+ {
+ u32 key_id = mp->key_id;
+ s = format (s, "key-id %U", format_hmac_key_id, key_id);
+ s = format (s, "secret-key %s", mp->key);
+ }
+ FINISH;
+}
+
+static void *vl_api_lisp_gpe_add_del_fwd_entry_t_print
+ (vl_api_lisp_gpe_add_del_fwd_entry_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: lisp_gpe_add_del_fwd_entry TODO");
+
+ FINISH;
+}
+
+static void *vl_api_lisp_add_del_map_resolver_t_print
+ (vl_api_lisp_add_del_map_resolver_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: lisp_add_del_map_resolver ");
+
+ if (!mp->is_add)
+ s = format (s, "del ");
+
+ if (mp->is_ipv6)
+ s = format (s, "%U ", format_ip6_address, mp->ip_address);
+ else
+ s = format (s, "%U ", format_ip4_address, mp->ip_address);
+
+ FINISH;
+}
+
+static void *vl_api_lisp_gpe_enable_disable_t_print
+ (vl_api_lisp_gpe_enable_disable_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: lisp_gpe_enable_disable ");
+
+ s = format (s, "%s ", mp->is_en ? "enable" : "disable");
+
+ FINISH;
+}
+
+typedef CLIB_PACKED (struct
+ {
+ u32 sw_if_index;
+ /**< locator sw_if_index */
+ u8 priority;
+ /**< locator priority */
+ u8 weight;
+ /**< locator weight */
+ }) ls_locator_t;
+
+static u8 *
+format_locator (u8 * s, va_list * args)
+{
+ ls_locator_t *l = va_arg (*args, ls_locator_t *);
+
+ return format (s, "sw_if_index %d p %d w %d",
+ l->sw_if_index, l->priority, l->weight);
+}
+
+static void *vl_api_lisp_add_del_locator_set_t_print
+ (vl_api_lisp_add_del_locator_set_t * mp, void *handle)
+{
+ u8 *s;
+ u32 loc_num = 0, i;
+ ls_locator_t *locs;
+
+ s = format (0, "SCRIPT: lisp_add_del_locator_set ");
+
+ if (!mp->is_add)
+ s = format (s, "del ");
+
+ s = format (s, "locator-set %s ", mp->locator_set_name);
+
+ loc_num = clib_net_to_host_u32 (mp->locator_num);
+ locs = (ls_locator_t *) mp->locators;
+
+ for (i = 0; i < loc_num; i++)
+ s = format (s, "%U ", format_locator, &locs[i]);
+
+ FINISH;
+}
+
+static void *vl_api_lisp_add_del_locator_t_print
+ (vl_api_lisp_add_del_locator_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: lisp_add_del_locator ");
+
+ if (!mp->is_add)
+ s = format (s, "del ");
+
+ s = format (s, "locator-set %s ", mp->locator_set_name);
+ s = format (s, "sw_if_index %d ", mp->sw_if_index);
+ s = format (s, "p %d w %d ", mp->priority, mp->weight);
+
+ FINISH;
+}
+
+static void *vl_api_lisp_locator_set_dump_t_print
+ (vl_api_lisp_locator_set_dump_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: lisp_locator_set_dump ");
+ if (mp->filter == 1)
+ s = format (s, "local");
+ else if (mp->filter == 2)
+ s = format (s, "remote");
+
+ FINISH;
+}
+
+static void *vl_api_lisp_locator_dump_t_print
+ (vl_api_lisp_locator_dump_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: lisp_locator_dump ");
+ if (mp->is_index_set)
+ s = format (s, "ls_index %d", clib_net_to_host_u32 (mp->ls_index));
+ else
+ s = format (s, "ls_name %s", mp->ls_name);
+
+ FINISH;
+}
+
+static void *vl_api_lisp_map_request_mode_t_print
+ (vl_api_lisp_map_request_mode_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: lisp_map_request_mode ");
+
+ switch (mp->mode)
+ {
+ case 0:
+ s = format (s, "dst-only");
+ break;
+ case 1:
+ s = format (s, "src-dst");
+ default:
+ break;
+ }
+
+ FINISH;
+}
+
+static void *vl_api_lisp_eid_table_dump_t_print
+ (vl_api_lisp_eid_table_dump_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: lisp_eid_table_dump ");
+
+ if (mp->eid_set)
+ {
+ s = format (s, "vni %d ", clib_net_to_host_u32 (mp->vni));
+ s = format (s, "eid %U ", format_lisp_flat_eid, mp->eid_type,
+ mp->eid, mp->prefix_length);
+ switch (mp->filter)
+ {
+ case 1:
+ s = format (s, "local ");
+ break;
+ case 2:
+ s = format (s, "remote ");
+ break;
+ }
+ }
+
+ FINISH;
+}
+
+static void *vl_api_lisp_rloc_probe_enable_disable_t_print
+ (vl_api_lisp_rloc_probe_enable_disable_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: lisp_rloc_probe_enable_disable ");
+ if (mp->is_enabled)
+ s = format (s, "enable");
+ else
+ s = format (s, "disable");
+
+ FINISH;
+}
+
+static void *vl_api_lisp_map_register_enable_disable_t_print
+ (vl_api_lisp_map_register_enable_disable_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: lisp_map_register_enable_disable ");
+ if (mp->is_enabled)
+ s = format (s, "enable");
+ else
+ s = format (s, "disable");
+
+ FINISH;
+}
+
+static void *vl_api_lisp_adjacencies_get_t_print
+ (vl_api_lisp_adjacencies_get_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: lisp_adjacencies_get ");
+ s = format (s, "vni %d", clib_net_to_host_u32 (mp->vni));
+
+ FINISH;
+}
+
+static void *vl_api_lisp_eid_table_map_dump_t_print
+ (vl_api_lisp_eid_table_map_dump_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: lisp_eid_table_map_dump ");
+
+ if (mp->is_l2)
+ s = format (s, "l2");
+ else
+ s = format (s, "l3");
+
+ FINISH;
+}
+
+static void *vl_api_ipsec_gre_add_del_tunnel_t_print
+ (vl_api_ipsec_gre_add_del_tunnel_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: ipsec_gre_add_del_tunnel ");
+
+ s = format (s, "dst %U ", format_ip4_address,
+ (ip4_address_t *) & (mp->dst_address));
+
+ s = format (s, "src %U ", format_ip4_address,
+ (ip4_address_t *) & (mp->src_address));
+
+ s = format (s, "local_sa %d ", ntohl (mp->local_sa_id));
+
+ s = format (s, "remote_sa %d ", ntohl (mp->remote_sa_id));
+
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_ipsec_gre_tunnel_dump_t_print
+ (vl_api_ipsec_gre_tunnel_dump_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: ipsec_gre_tunnel_dump ");
+
+ if (mp->sw_if_index != ~0)
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+
+ FINISH;
+}
+
+static void *vl_api_l2_interface_pbb_tag_rewrite_t_print
+ (vl_api_l2_interface_pbb_tag_rewrite_t * mp, void *handle)
+{
+ u8 *s;
+ u32 vtr_op = ntohl (mp->vtr_op);
+
+ s = format (0, "SCRIPT: l2_interface_pbb_tag_rewrite ");
+
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+ s = format (s, "vtr_op %d ", vtr_op);
+ if (vtr_op != L2_VTR_DISABLED && vtr_op != L2_VTR_POP_2)
+ {
+ if (vtr_op == L2_VTR_TRANSLATE_2_2)
+ s = format (s, "%d ", ntohs (mp->outer_tag));
+ s = format (s, "dmac %U ", format_ethernet_address, &mp->b_dmac);
+ s = format (s, "smac %U ", format_ethernet_address, &mp->b_smac);
+ s = format (s, "sid %d ", ntohl (mp->i_sid));
+ s = format (s, "vlanid %d ", ntohs (mp->b_vlanid));
+ }
+
+ FINISH;
+}
+
+static void *vl_api_flow_classify_set_interface_t_print
+ (vl_api_flow_classify_set_interface_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: flow_classify_set_interface ");
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+ if (mp->ip4_table_index != ~0)
+ s = format (s, "ip4-table %d ", ntohl (mp->ip4_table_index));
+ if (mp->ip6_table_index != ~0)
+ s = format (s, "ip6-table %d ", ntohl (mp->ip6_table_index));
+ if (mp->is_add == 0)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *
+vl_api_punt_t_print (vl_api_punt_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: punt ");
+
+ if (mp->ipv != (u8) ~ 0)
+ s = format (s, "ip %d ", mp->ipv);
+
+ s = format (s, "protocol %d ", mp->l4_protocol);
+
+ if (mp->l4_port != (u16) ~ 0)
+ s = format (s, "port %d ", ntohs (mp->l4_port));
+
+ if (!mp->is_add)
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_flow_classify_dump_t_print
+ (vl_api_flow_classify_dump_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: flow_classify_dump ");
+ switch (mp->type)
+ {
+ case FLOW_CLASSIFY_TABLE_IP4:
+ s = format (s, "type ip4 ");
+ break;
+ case FLOW_CLASSIFY_TABLE_IP6:
+ s = format (s, "type ip6 ");
+ break;
+ default:
+ break;
+ }
+
+ FINISH;
+}
+
+static void *vl_api_get_first_msg_id_t_print
+ (vl_api_get_first_msg_id_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: get_first_msg_id %s ", mp->name);
+
+ FINISH;
+}
+
+static void *vl_api_ioam_enable_t_print
+ (vl_api_ioam_enable_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: ioam_enable ");
+
+ if (mp->trace_enable)
+ s = format (s, "trace enabled ");
+
+ if (mp->pot_enable)
+ s = format (s, "POT enabled ");
+
+ if (mp->seqno)
+ s = format (s, "Seqno enabled ");
+
+ if (mp->analyse)
+ s = format (s, "Analyse enabled ");
+
+ FINISH;
+}
+
+static void *vl_api_ioam_disable_t_print
+ (vl_api_ioam_disable_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: ioam_disable ");
+ s = format (s, "trace disabled");
+ s = format (s, "POT disabled");
+ s = format (s, "Seqno disabled");
+ s = format (s, "Analyse disabled");
+
+ FINISH;
+}
+
+static void *vl_api_feature_enable_disable_t_print
+ (vl_api_feature_enable_disable_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: feature_enable_disable ");
+ s = format (s, "arc_name %s ", mp->arc_name);
+ s = format (s, "feature_name %s ", mp->feature_name);
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+ if (!mp->enable)
+ s = format (s, "disable");
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_tag_add_del_t_print
+ (vl_api_sw_interface_tag_add_del_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: sw_interface_tag_add_del ");
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+ if (mp->is_add)
+ s = format (s, "tag %s ", mp->tag);
+ else
+ s = format (s, "del ");
+
+ FINISH;
+}
+
+static void *vl_api_sw_interface_set_mtu_t_print
+ (vl_api_sw_interface_set_mtu_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: sw_interface_set_mtu ");
+ s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
+ s = format (s, "tag %d ", ntohs (mp->mtu));
+
+ FINISH;
+}
+
+#define foreach_custom_print_no_arg_function \
+_(lisp_eid_table_vni_dump) \
+_(lisp_map_resolver_dump) \
+_(lisp_map_server_dump) \
+_(show_lisp_rloc_probe_state) \
+_(show_lisp_map_register_state) \
+_(show_lisp_map_request_mode) \
+_(lisp_gpe_tunnel_dump)
+
+#define _(f) \
+static void * vl_api_ ## f ## _t_print \
+ (vl_api_ ## f ## _t * mp, void * handle) \
+{ \
+ u8 * s; \
+ s = format (0, "SCRIPT: " #f ); \
+ FINISH; \
+}
+foreach_custom_print_no_arg_function
+#undef _
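+
+/* For illustration, the macro above expands e.g. lisp_map_resolver_dump
+ * into:
+ *
+ *   static void *vl_api_lisp_map_resolver_dump_t_print
+ *     (vl_api_lisp_map_resolver_dump_t * mp, void *handle)
+ *   {
+ *     u8 *s;
+ *     s = format (0, "SCRIPT: lisp_map_resolver_dump");
+ *     FINISH;
+ *   }
+ */
+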
+#define foreach_custom_print_function \
+_(CREATE_LOOPBACK, create_loopback) \
+_(SW_INTERFACE_SET_FLAGS, sw_interface_set_flags) \
+_(SW_INTERFACE_ADD_DEL_ADDRESS, sw_interface_add_del_address) \
+_(SW_INTERFACE_SET_TABLE, sw_interface_set_table) \
+_(SW_INTERFACE_SET_MPLS_ENABLE, sw_interface_set_mpls_enable) \
+_(SW_INTERFACE_SET_VPATH, sw_interface_set_vpath) \
+_(SW_INTERFACE_SET_VXLAN_BYPASS, sw_interface_set_vxlan_bypass) \
+_(TAP_CONNECT, tap_connect) \
+_(TAP_MODIFY, tap_modify) \
+_(TAP_DELETE, tap_delete) \
+_(SW_INTERFACE_TAP_DUMP, sw_interface_tap_dump) \
+_(IP_ADD_DEL_ROUTE, ip_add_del_route) \
+_(PROXY_ARP_ADD_DEL, proxy_arp_add_del) \
+_(PROXY_ARP_INTFC_ENABLE_DISABLE, proxy_arp_intfc_enable_disable) \
+_(MPLS_TUNNEL_ADD_DEL, mpls_tunnel_add_del) \
+_(SW_INTERFACE_SET_UNNUMBERED, sw_interface_set_unnumbered) \
+_(IP_NEIGHBOR_ADD_DEL, ip_neighbor_add_del) \
+_(RESET_VRF, reset_vrf) \
+_(CREATE_VLAN_SUBIF, create_vlan_subif) \
+_(CREATE_SUBIF, create_subif) \
+_(OAM_ADD_DEL, oam_add_del) \
+_(RESET_FIB, reset_fib) \
+_(DHCP_PROXY_CONFIG, dhcp_proxy_config) \
+_(DHCP_PROXY_SET_VSS, dhcp_proxy_set_vss) \
+_(SET_IP_FLOW_HASH, set_ip_flow_hash) \
+_(SW_INTERFACE_IP6_SET_LINK_LOCAL_ADDRESS, \
+ sw_interface_ip6_set_link_local_address) \
+_(SW_INTERFACE_IP6ND_RA_PREFIX, sw_interface_ip6nd_ra_prefix) \
+_(SW_INTERFACE_IP6ND_RA_CONFIG, sw_interface_ip6nd_ra_config) \
+_(SET_ARP_NEIGHBOR_LIMIT, set_arp_neighbor_limit) \
+_(L2_PATCH_ADD_DEL, l2_patch_add_del) \
+_(SR_TUNNEL_ADD_DEL, sr_tunnel_add_del) \
+_(SR_POLICY_ADD_DEL, sr_policy_add_del) \
+_(SR_MULTICAST_MAP_ADD_DEL, sr_multicast_map_add_del) \
+_(SW_INTERFACE_SET_L2_XCONNECT, sw_interface_set_l2_xconnect) \
+_(L2FIB_ADD_DEL, l2fib_add_del) \
+_(L2_FLAGS, l2_flags) \
+_(BRIDGE_FLAGS, bridge_flags) \
+_(CLASSIFY_ADD_DEL_TABLE, classify_add_del_table) \
+_(CLASSIFY_ADD_DEL_SESSION, classify_add_del_session) \
+_(SW_INTERFACE_SET_L2_BRIDGE, sw_interface_set_l2_bridge) \
+_(SW_INTERFACE_SET_DPDK_HQOS_PIPE, sw_interface_set_dpdk_hqos_pipe) \
+_(SW_INTERFACE_SET_DPDK_HQOS_SUBPORT, sw_interface_set_dpdk_hqos_subport)\
+_(SW_INTERFACE_SET_DPDK_HQOS_TCTBL, sw_interface_set_dpdk_hqos_tctbl) \
+_(BRIDGE_DOMAIN_ADD_DEL, bridge_domain_add_del) \
+_(BRIDGE_DOMAIN_DUMP, bridge_domain_dump) \
+_(CLASSIFY_SET_INTERFACE_IP_TABLE, classify_set_interface_ip_table) \
+_(CLASSIFY_SET_INTERFACE_L2_TABLES, classify_set_interface_l2_tables) \
+_(ADD_NODE_NEXT, add_node_next) \
+_(DHCP_PROXY_CONFIG_2, dhcp_proxy_config_2) \
+_(DHCP_CLIENT_CONFIG, dhcp_client_config) \
+_(L2TPV3_CREATE_TUNNEL, l2tpv3_create_tunnel) \
+_(L2TPV3_SET_TUNNEL_COOKIES, l2tpv3_set_tunnel_cookies) \
+_(L2TPV3_INTERFACE_ENABLE_DISABLE, l2tpv3_interface_enable_disable) \
+_(L2TPV3_SET_LOOKUP_KEY, l2tpv3_set_lookup_key) \
+_(SW_IF_L2TPV3_TUNNEL_DUMP, sw_if_l2tpv3_tunnel_dump) \
+_(VXLAN_ADD_DEL_TUNNEL, vxlan_add_del_tunnel) \
+_(VXLAN_TUNNEL_DUMP, vxlan_tunnel_dump) \
+_(GRE_ADD_DEL_TUNNEL, gre_add_del_tunnel) \
+_(GRE_TUNNEL_DUMP, gre_tunnel_dump) \
+_(L2_FIB_CLEAR_TABLE, l2_fib_clear_table) \
+_(L2_INTERFACE_EFP_FILTER, l2_interface_efp_filter) \
+_(L2_INTERFACE_VLAN_TAG_REWRITE, l2_interface_vlan_tag_rewrite) \
+_(CREATE_VHOST_USER_IF, create_vhost_user_if) \
+_(MODIFY_VHOST_USER_IF, modify_vhost_user_if) \
+_(DELETE_VHOST_USER_IF, delete_vhost_user_if) \
+_(SW_INTERFACE_DUMP, sw_interface_dump) \
+_(CONTROL_PING, control_ping) \
+_(WANT_INTERFACE_EVENTS, want_interface_events) \
+_(CLI_REQUEST, cli_request) \
+_(CLI_INBAND, cli_inband) \
+_(MEMCLNT_CREATE, memclnt_create) \
+_(SW_INTERFACE_VHOST_USER_DUMP, sw_interface_vhost_user_dump) \
+_(SHOW_VERSION, show_version) \
+_(L2_FIB_TABLE_DUMP, l2_fib_table_dump) \
+_(VXLAN_GPE_ADD_DEL_TUNNEL, vxlan_gpe_add_del_tunnel) \
+_(VXLAN_GPE_TUNNEL_DUMP, vxlan_gpe_tunnel_dump) \
+_(INTERFACE_NAME_RENUMBER, interface_name_renumber) \
+_(WANT_IP4_ARP_EVENTS, want_ip4_arp_events) \
+_(WANT_IP6_ND_EVENTS, want_ip6_nd_events) \
+_(INPUT_ACL_SET_INTERFACE, input_acl_set_interface) \
+_(IP_ADDRESS_DUMP, ip_address_dump) \
+_(IP_DUMP, ip_dump) \
+_(DELETE_LOOPBACK, delete_loopback) \
+_(BD_IP_MAC_ADD_DEL, bd_ip_mac_add_del) \
+_(COP_INTERFACE_ENABLE_DISABLE, cop_interface_enable_disable) \
+_(COP_WHITELIST_ENABLE_DISABLE, cop_whitelist_enable_disable) \
+_(AF_PACKET_CREATE, af_packet_create) \
+_(AF_PACKET_DELETE, af_packet_delete) \
+_(SW_INTERFACE_CLEAR_STATS, sw_interface_clear_stats) \
+_(MPLS_FIB_DUMP, mpls_fib_dump) \
+_(MPLS_TUNNEL_DUMP, mpls_tunnel_dump) \
+_(CLASSIFY_TABLE_IDS,classify_table_ids) \
+_(CLASSIFY_TABLE_BY_INTERFACE, classify_table_by_interface) \
+_(CLASSIFY_TABLE_INFO,classify_table_info) \
+_(CLASSIFY_SESSION_DUMP,classify_session_dump) \
+_(SET_IPFIX_EXPORTER, set_ipfix_exporter) \
+_(IPFIX_EXPORTER_DUMP, ipfix_exporter_dump) \
+_(SET_IPFIX_CLASSIFY_STREAM, set_ipfix_classify_stream) \
+_(IPFIX_CLASSIFY_STREAM_DUMP, ipfix_classify_stream_dump) \
+_(IPFIX_CLASSIFY_TABLE_ADD_DEL, ipfix_classify_table_add_del) \
+_(IPFIX_CLASSIFY_TABLE_DUMP, ipfix_classify_table_dump) \
+_(SW_INTERFACE_SPAN_ENABLE_DISABLE, sw_interface_span_enable_disable) \
+_(SW_INTERFACE_SPAN_DUMP, sw_interface_span_dump) \
+_(GET_NEXT_INDEX, get_next_index) \
+_(PG_CREATE_INTERFACE,pg_create_interface) \
+_(PG_CAPTURE, pg_capture) \
+_(PG_ENABLE_DISABLE, pg_enable_disable) \
+_(POLICER_ADD_DEL, policer_add_del) \
+_(POLICER_DUMP, policer_dump) \
+_(POLICER_CLASSIFY_SET_INTERFACE, policer_classify_set_interface) \
+_(POLICER_CLASSIFY_DUMP, policer_classify_dump) \
+_(IP_SOURCE_AND_PORT_RANGE_CHECK_ADD_DEL, \
+ ip_source_and_port_range_check_add_del) \
+_(IP_SOURCE_AND_PORT_RANGE_CHECK_INTERFACE_ADD_DEL, \
+ ip_source_and_port_range_check_interface_add_del) \
+_(LISP_ENABLE_DISABLE, lisp_enable_disable) \
+_(LISP_GPE_ENABLE_DISABLE, lisp_gpe_enable_disable) \
+_(LISP_GPE_ADD_DEL_IFACE, lisp_gpe_add_del_iface) \
+_(LISP_PITR_SET_LOCATOR_SET, lisp_pitr_set_locator_set) \
+_(LISP_MAP_REQUEST_MODE, lisp_map_request_mode) \
+_(SHOW_LISP_MAP_REQUEST_MODE, show_lisp_map_request_mode) \
+_(LISP_ADD_DEL_REMOTE_MAPPING, lisp_add_del_remote_mapping) \
+_(LISP_ADD_DEL_ADJACENCY, lisp_add_del_adjacency) \
+_(LISP_ADD_DEL_MAP_REQUEST_ITR_RLOCS, \
+ lisp_add_del_map_request_itr_rlocs) \
+_(LISP_EID_TABLE_ADD_DEL_MAP, lisp_eid_table_add_del_map) \
+_(LISP_ADD_DEL_LOCAL_EID, lisp_add_del_local_eid) \
+_(LISP_GPE_ADD_DEL_FWD_ENTRY, lisp_gpe_add_del_fwd_entry) \
+_(LISP_ADD_DEL_LOCATOR_SET, lisp_add_del_locator_set) \
+_(LISP_ADD_DEL_MAP_RESOLVER, lisp_add_del_map_resolver) \
+_(LISP_ADD_DEL_LOCATOR, lisp_add_del_locator) \
+_(LISP_EID_TABLE_DUMP, lisp_eid_table_dump) \
+_(LISP_EID_TABLE_MAP_DUMP, lisp_eid_table_map_dump) \
+_(LISP_EID_TABLE_VNI_DUMP, lisp_eid_table_vni_dump) \
+_(LISP_GPE_TUNNEL_DUMP, lisp_gpe_tunnel_dump) \
+_(LISP_MAP_RESOLVER_DUMP, lisp_map_resolver_dump) \
+_(LISP_MAP_SERVER_DUMP, lisp_map_server_dump) \
+_(LISP_LOCATOR_SET_DUMP, lisp_locator_set_dump) \
+_(LISP_LOCATOR_DUMP, lisp_locator_dump) \
+_(LISP_ADJACENCIES_GET, lisp_adjacencies_get) \
+_(SHOW_LISP_RLOC_PROBE_STATE, show_lisp_rloc_probe_state) \
+_(SHOW_LISP_MAP_REGISTER_STATE, show_lisp_map_register_state) \
+_(LISP_RLOC_PROBE_ENABLE_DISABLE, lisp_rloc_probe_enable_disable) \
+_(LISP_MAP_REGISTER_ENABLE_DISABLE, lisp_map_register_enable_disable) \
+_(IPSEC_GRE_ADD_DEL_TUNNEL, ipsec_gre_add_del_tunnel) \
+_(IPSEC_GRE_TUNNEL_DUMP, ipsec_gre_tunnel_dump) \
+_(DELETE_SUBIF, delete_subif) \
+_(L2_INTERFACE_PBB_TAG_REWRITE, l2_interface_pbb_tag_rewrite) \
+_(PUNT, punt) \
+_(FLOW_CLASSIFY_SET_INTERFACE, flow_classify_set_interface) \
+_(FLOW_CLASSIFY_DUMP, flow_classify_dump) \
+_(GET_FIRST_MSG_ID, get_first_msg_id) \
+_(IOAM_ENABLE, ioam_enable) \
+_(IOAM_DISABLE, ioam_disable) \
+_(IP_FIB_DUMP, ip_fib_dump) \
+_(IP6_FIB_DUMP, ip6_fib_dump) \
+_(FEATURE_ENABLE_DISABLE, feature_enable_disable) \
+_(SW_INTERFACE_TAG_ADD_DEL, sw_interface_tag_add_del) \
+_(SW_INTERFACE_SET_MTU, sw_interface_set_mtu)
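+
+/* Install each print handler above in am->msg_print_handlers[], indexed
+ * by message id, so an API trace can be rendered in "custom dump"
+ * (script) form. The CLI that drives these handlers lives outside this
+ * file. */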
+void
+vl_msg_api_custom_dump_configure (api_main_t * am)
+{
+#define _(n,f) am->msg_print_handlers[VL_API_##n] \
+ = (void *) vl_api_##f##_t_print;
+ foreach_custom_print_function;
+#undef _
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vpp/api/gmon.c b/src/vpp/api/gmon.c
new file mode 100644
index 00000000..20deb6a2
--- /dev/null
+++ b/src/vpp/api/gmon.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright (c) 2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <netinet/in.h>
+#include <signal.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <time.h>
+#include <fcntl.h>
+#include <string.h>
+#include <vppinfra/clib.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/bitmap.h>
+#include <vppinfra/fifo.h>
+#include <vppinfra/time.h>
+#include <vppinfra/mheap.h>
+#include <vppinfra/heap.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/format.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vnet/api_errno.h>
+
+#include <svm/svmdb.h>
+
+typedef struct
+{
+ svmdb_client_t *svmdb_client;
+ f64 *vector_rate_ptr;
+ f64 *input_rate_ptr;
+ f64 *sig_error_rate_ptr;
+ pid_t *vpef_pid_ptr;
+ u64 last_sig_errors;
+ u64 current_sig_errors;
+ uword *sig_error_bitmap;
+ vlib_main_t *vlib_main;
+ vlib_main_t **my_vlib_mains;
+
+} gmon_main_t;
+
+#if DPDK == 0
+static inline u64
+vnet_get_aggregate_rx_packets (void)
+{
+ return 0;
+}
+#else
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/devices/dpdk/dpdk.h>
+#endif
+
+gmon_main_t gmon_main;
+
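+/* Sum the error counters whose indices are set in gm->sig_error_bitmap
+ * across all vlib_main_t instances, net of any counters_last_clear
+ * snapshot taken when the counters were last cleared. */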
+static u64
+get_significant_errors (gmon_main_t * gm)
+{
+ vlib_main_t *this_vlib_main;
+ vlib_error_main_t *em;
+ uword code;
+ int vm_index;
+ u64 significant_errors = 0;
+
+ /* *INDENT-OFF* */
+ clib_bitmap_foreach (code, gm->sig_error_bitmap,
+ ({
+ for (vm_index = 0; vm_index < vec_len (gm->my_vlib_mains); vm_index++)
+ {
+ this_vlib_main = gm->my_vlib_mains[vm_index];
+ em = &this_vlib_main->error_main;
+ significant_errors += em->counters[code] -
+ ((vec_len(em->counters_last_clear) > code) ?
+ em->counters_last_clear[code] : 0);
+ }
+ }));
+ /* *INDENT-ON* */
+
+ return (significant_errors);
+}
+
+static clib_error_t *
+publish_pid (vlib_main_t * vm)
+{
+ gmon_main_t *gm = &gmon_main;
+
+ *gm->vpef_pid_ptr = getpid ();
+
+ return 0;
+}
+
+VLIB_API_INIT_FUNCTION (publish_pid);
+
+
+static uword
+gmon_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
+{
+ f64 vector_rate;
+ u64 input_packets, last_input_packets, new_sig_errors;
+ f64 last_runtime, dt, now;
+ gmon_main_t *gm = &gmon_main;
+ int i;
+
+ last_runtime = 0.0;
+ last_input_packets = 0;
+
+ /* Initial wait for the world to settle down */
+ vlib_process_suspend (vm, 5.0);
+
+ if (vec_len (vlib_mains) == 0)
+ vec_add1 (gm->my_vlib_mains, &vlib_global_main);
+ else
+ {
+ for (i = 0; i < vec_len (vlib_mains); i++)
+ vec_add1 (gm->my_vlib_mains, vlib_mains[i]);
+ }
+
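+ /* Every 5 seconds, publish the current vector rate, the aggregate rx
+  * packet rate, and the rate of "significant" errors (selected via the
+  * CLI below) into the shared-memory cells obtained in gmon_init (). */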
+ while (1)
+ {
+ vlib_process_suspend (vm, 5.0);
+ vector_rate = vlib_last_vector_length_per_node (vm);
+ *gm->vector_rate_ptr = vector_rate;
+ now = vlib_time_now (vm);
+ dt = now - last_runtime;
+ input_packets = vnet_get_aggregate_rx_packets ();
+ *gm->input_rate_ptr = (f64) (input_packets - last_input_packets) / dt;
+ last_runtime = now;
+ last_input_packets = input_packets;
+
+ new_sig_errors = get_significant_errors (gm);
+ *gm->sig_error_rate_ptr =
+ ((f64) (new_sig_errors - gm->last_sig_errors)) / dt;
+ gm->last_sig_errors = new_sig_errors;
+ }
+
+ return 0; /* not so much */
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (gmon_process_node,static) = {
+ .function = gmon_process,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "gmon-process",
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+gmon_init (vlib_main_t * vm)
+{
+ gmon_main_t *gm = &gmon_main;
+ api_main_t *am = &api_main;
+ pid_t *swp = 0;
+ f64 *v = 0;
+ clib_error_t *error;
+ svmdb_map_args_t _ma, *ma = &_ma;
+
+ if ((error = vlib_call_init_function (vm, vpe_api_init)))
+ return (error);
+
+ if ((error = vlib_call_init_function (vm, vlibmemory_init)))
+ return (error);
+
+ gm->vlib_main = vm;
+
+ memset (ma, 0, sizeof (*ma));
+ ma->root_path = am->root_path;
+ ma->uid = am->api_uid;
+ ma->gid = am->api_gid;
+
+ gm->svmdb_client = svmdb_map (ma);
+
+ /* Find or create, set to zero */
+ vec_add1 (v, 0.0);
+ svmdb_local_set_vec_variable (gm->svmdb_client,
+ "vpp_vector_rate", (char *) v, sizeof (*v));
+ vec_free (v);
+ vec_add1 (v, 0.0);
+ svmdb_local_set_vec_variable (gm->svmdb_client,
+ "vpp_input_rate", (char *) v, sizeof (*v));
+ vec_free (v);
+ vec_add1 (v, 0.0);
+ svmdb_local_set_vec_variable (gm->svmdb_client,
+ "vpp_sig_error_rate",
+ (char *) v, sizeof (*v));
+ vec_free (v);
+
+ vec_add1 (swp, 0);
+ svmdb_local_set_vec_variable (gm->svmdb_client,
+ "vpp_pid", (char *) swp, sizeof (*swp));
+ vec_free (swp);
+
+ /* the value cells will never move, so acquire references to them */
+ gm->vector_rate_ptr =
+ svmdb_local_get_variable_reference (gm->svmdb_client,
+ SVMDB_NAMESPACE_VEC,
+ "vpp_vector_rate");
+ gm->input_rate_ptr =
+ svmdb_local_get_variable_reference (gm->svmdb_client,
+ SVMDB_NAMESPACE_VEC,
+ "vpp_input_rate");
+ gm->sig_error_rate_ptr =
+ svmdb_local_get_variable_reference (gm->svmdb_client,
+ SVMDB_NAMESPACE_VEC,
+ "vpp_sig_error_rate");
+ gm->vpef_pid_ptr =
+ svmdb_local_get_variable_reference (gm->svmdb_client,
+ SVMDB_NAMESPACE_VEC, "vpp_pid");
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (gmon_init);
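+
+/* Minimal consumer sketch (an illustration, not part of this file): a
+ * process that maps the same svmdb region can read the cells published
+ * by gmon_init () above, e.g.
+ *
+ *   svmdb_map_args_t _ma, *ma = &_ma;
+ *   svmdb_client_t *c;
+ *   f64 *rate;
+ *
+ *   memset (ma, 0, sizeof (*ma));  // root_path/uid/gid as appropriate
+ *   c = svmdb_map (ma);
+ *   rate = svmdb_local_get_variable_reference
+ *     (c, SVMDB_NAMESPACE_VEC, "vpp_vector_rate");
+ *   if (rate)
+ *     fformat (stdout, "vector rate %.2f\n", rate[0]);
+ */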
+
+static clib_error_t *
+gmon_exit (vlib_main_t * vm)
+{
+ gmon_main_t *gm = &gmon_main;
+
+ if (gm->vector_rate_ptr)
+ {
+ *gm->vector_rate_ptr = 0.0;
+ *gm->vpef_pid_ptr = 0;
+ *gm->input_rate_ptr = 0.0;
+ *gm->sig_error_rate_ptr = 0.0;
+ svm_region_unmap ((void *) gm->svmdb_client->db_rp);
+ vec_free (gm->svmdb_client);
+ }
+ return 0;
+}
+
+VLIB_MAIN_LOOP_EXIT_FUNCTION (gmon_exit);
+
+static int
+significant_error_enable_disable (gmon_main_t * gm, u32 index, int enable)
+{
+ vlib_main_t *vm = gm->vlib_main;
+ vlib_error_main_t *em = &vm->error_main;
+
+ if (index >= vec_len (em->counters))
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+
+ gm->sig_error_bitmap =
+ clib_bitmap_set (gm->sig_error_bitmap, index, enable);
+ return 0;
+}
+
+static clib_error_t *
+set_significant_error_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ u32 index;
+ int enable = 1;
+ int rv;
+ gmon_main_t *gm = &gmon_main;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "%d", &index))
+ ;
+ else if (unformat (input, "disable"))
+ enable = 0;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+
+ rv = significant_error_enable_disable (gm, index, enable);
+
+ switch (rv)
+ {
+ case 0:
+ break;
+
+ default:
+ return clib_error_return
+ (0, "significant_error_enable_disable returned %d", rv);
+ }
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_significant_error_command, static) = {
+ .path = "set significant error",
+ .short_help = "set significant error <counter-index-nnn> [disable]",
+ .function = set_significant_error_command_fn,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vpp/api/json_format.c b/src/vpp/api/json_format.c
new file mode 100644
index 00000000..63454b87
--- /dev/null
+++ b/src/vpp/api/json_format.c
@@ -0,0 +1,304 @@
+/*
+ *------------------------------------------------------------------
+ * json_format.c
+ *
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+#include <inttypes.h>
+#include "json_format.h"
+#include <vnet/ip/ip.h>
+#include <vppinfra/vec.h>
+
+#define VAT_TAB_WIDTH 2
+
+typedef struct vat_print_ctx_s
+{
+ FILE *ofp;
+ u32 indent;
+} vat_print_ctx_t;
+
+/* Format an IP4 address. */
+static u8 *
+vat_json_format_ip4_address (u8 * s, va_list * args)
+{
+ u8 *a = va_arg (*args, u8 *);
+ return format (s, "%d.%d.%d.%d", a[0], a[1], a[2], a[3]);
+}
+
+/* Format an IP6 address, compressing the longest run of zero groups
+   with "::". */
+static u8 *
+vat_json_format_ip6_address (u8 * s, va_list * args)
+{
+ ip6_address_t *a = va_arg (*args, ip6_address_t *);
+ u32 i, i_max_n_zero, max_n_zeros, i_first_zero, n_zeros, last_double_colon;
+
+ i_max_n_zero = ARRAY_LEN (a->as_u16);
+ max_n_zeros = 0;
+ i_first_zero = i_max_n_zero;
+ n_zeros = 0;
+ for (i = 0; i < ARRAY_LEN (a->as_u16); i++)
+ {
+ u32 is_zero = a->as_u16[i] == 0;
+ if (is_zero && i_first_zero >= ARRAY_LEN (a->as_u16))
+ {
+ i_first_zero = i;
+ n_zeros = 0;
+ }
+ n_zeros += is_zero;
+ if ((!is_zero && n_zeros > max_n_zeros)
+ || (i + 1 >= ARRAY_LEN (a->as_u16) && n_zeros > max_n_zeros))
+ {
+ i_max_n_zero = i_first_zero;
+ max_n_zeros = n_zeros;
+ i_first_zero = ARRAY_LEN (a->as_u16);
+ n_zeros = 0;
+ }
+ }
+
+ last_double_colon = 0;
+ for (i = 0; i < ARRAY_LEN (a->as_u16); i++)
+ {
+ if (i == i_max_n_zero && max_n_zeros > 1)
+ {
+ s = format (s, "::");
+ i += max_n_zeros - 1;
+ last_double_colon = 1;
+ }
+ else
+ {
+ s = format (s, "%s%x",
+ (last_double_colon || i == 0) ? "" : ":",
+ clib_net_to_host_u16 (a->as_u16[i]));
+ last_double_colon = 0;
+ }
+ }
+
+ return s;
+}
+
+static void
+vat_json_indent_print (vat_print_ctx_t * ctx)
+{
+ int i;
+ for (i = 0; i < ctx->indent * VAT_TAB_WIDTH; i++)
+ {
+ fformat (ctx->ofp, " ");
+ }
+}
+
+static void
+vat_json_indent_line (vat_print_ctx_t * ctx, char *fmt, ...)
+{
+ va_list va;
+
+ vat_json_indent_print (ctx);
+ va_start (va, fmt);
+ va_fformat (ctx->ofp, fmt, &va);
+ va_end (va);
+}
+
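+/* Return 1 when every element of the array is numeric (INT or UINT);
+ * such arrays are printed on a single line below. */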
+static u8
+is_num_only (vat_json_node_t * p)
+{
+ vat_json_node_t *elem;
+ vec_foreach (elem, p)
+ {
+ if (VAT_JSON_INT != elem->type && VAT_JSON_UINT != elem->type)
+ {
+ return 0;
+ }
+ }
+ return 1;
+}
+
+static void
+vat_json_print_internal (vat_print_ctx_t * ctx, vat_json_node_t * node)
+{
+#define P(fmt,...) fformat(ctx->ofp, fmt, ##__VA_ARGS__)
+#define PL(fmt,...) fformat(ctx->ofp, fmt"\n", ##__VA_ARGS__)
+#define PPL(fmt,...) vat_json_indent_line(ctx, fmt"\n", ##__VA_ARGS__)
+#define PP(fmt,...) vat_json_indent_line(ctx, fmt, ##__VA_ARGS__)
+#define INCR (ctx->indent++)
+#define DECR (ctx->indent--)
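+ /* P: print, PL: print + newline, PP: print at the current indent,
+  * PPL: print at the current indent + newline; INCR/DECR adjust the
+  * indent level. */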
+
+ vat_json_pair_t *pair;
+ u32 i, count;
+ vat_json_node_t *elem;
+ u8 num_only = 0;
+
+ if (!node)
+ {
+ return;
+ }
+
+ switch (node->type)
+ {
+ case VAT_JSON_OBJECT:
+ count = vec_len (node->pairs);
+ if (count >= 1)
+ {
+ PL ("{");
+ INCR;
+ for (i = 0; i < count; i++)
+ {
+ pair = &node->pairs[i];
+ PP ("\"%s\": ", pair->name);
+ vat_json_print_internal (ctx, &pair->value);
+ if (i < count - 1)
+ {
+ P (",");
+ }
+ PL ();
+ }
+ DECR;
+ PP ("}");
+ }
+ else
+ {
+ P ("{}");
+ }
+ break;
+ case VAT_JSON_ARRAY:
+ num_only = is_num_only (node->array);
+ count = vec_len (node->array);
+ if (count >= 1)
+ {
+ if (num_only)
+ P ("[");
+ else
+ PL ("[ ");
+ INCR;
+ for (i = 0; i < count; i++)
+ {
+ elem = &node->array[i];
+ if (!num_only)
+ {
+ vat_json_indent_print (ctx);
+ }
+ vat_json_print_internal (ctx, elem);
+ if (i < count - 1)
+ {
+ if (num_only)
+ {
+ P (", ");
+ }
+ else
+ {
+ P (",");
+ }
+ }
+ if (!num_only)
+ PL ();
+ }
+ DECR;
+ if (!num_only)
+ PP ("]");
+ else
+ P ("]");
+ }
+ else
+ {
+ P ("[]");
+ }
+ break;
+ case VAT_JSON_INT:
+ P ("%d", node->sint);
+ break;
+ case VAT_JSON_UINT:
+ P ("%" PRIu64, node->uint);
+ break;
+ case VAT_JSON_REAL:
+ P ("%f", node->real);
+ break;
+ case VAT_JSON_STRING:
+ P ("\"%s\"", node->string);
+ break;
+ case VAT_JSON_IPV4:
+ P ("\"%U\"", vat_json_format_ip4_address, &node->ip4);
+ break;
+ case VAT_JSON_IPV6:
+ P ("\"%U\"", vat_json_format_ip6_address, &node->ip6);
+ break;
+ default:
+ break;
+ }
+#undef PPL
+#undef PP
+#undef PL
+#undef P
+}
+
+void
+vat_json_print (FILE * ofp, vat_json_node_t * node)
+{
+ vat_print_ctx_t ctx;
+ memset (&ctx, 0, sizeof ctx);
+ ctx.indent = 0;
+ ctx.ofp = ofp;
+ fformat (ofp, "\n");
+ vat_json_print_internal (&ctx, node);
+ fformat (ofp, "\n");
+}
+
+void
+vat_json_free (vat_json_node_t * node)
+{
+ int i = 0;
+
+ if (NULL == node)
+ {
+ return;
+ }
+ switch (node->type)
+ {
+ case VAT_JSON_OBJECT:
+ for (i = 0; i < vec_len (node->pairs); i++)
+ {
+ vat_json_free (&node->pairs[i].value);
+ }
+ if (NULL != node->pairs)
+ {
+ vec_free (node->pairs);
+ }
+ break;
+ case VAT_JSON_ARRAY:
+ for (i = 0; i < vec_len (node->array); i++)
+ {
+ vat_json_free (&node->array[i]);
+ }
+ if (NULL != node->array)
+ {
+ vec_free (node->array);
+ }
+ break;
+ case VAT_JSON_STRING:
+ if (NULL != node->string)
+ {
+ vec_free (node->string);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vpp/api/json_format.h b/src/vpp/api/json_format.h
new file mode 100644
index 00000000..154fb3df
--- /dev/null
+++ b/src/vpp/api/json_format.h
@@ -0,0 +1,254 @@
+/*
+ *------------------------------------------------------------------
+ * json_format.h
+ *
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef __JSON_FORMAT_H__
+#define __JSON_FORMAT_H__
+
+#include <vppinfra/clib.h>
+#include <vppinfra/format.h>
+#include <netinet/ip.h>
+
+/* JSON value type */
+typedef enum
+{
+ VAT_JSON_NONE,
+ VAT_JSON_OBJECT,
+ VAT_JSON_ARRAY,
+ VAT_JSON_STRING,
+ VAT_JSON_REAL,
+ VAT_JSON_UINT,
+ VAT_JSON_INT,
+ VAT_JSON_IPV4,
+ VAT_JSON_IPV6,
+ VAT_JSON_MAX
+} vat_json_val_type_t;
+
+typedef struct vat_json_node_s vat_json_node_t;
+typedef struct vat_json_pair_s vat_json_pair_t;
+
+/* JSON object structure */
+struct vat_json_node_s
+{
+ vat_json_val_type_t type;
+ union
+ {
+ vat_json_pair_t *pairs;
+ vat_json_node_t *array;
+ u8 *string;
+ struct in_addr ip4;
+ struct in6_addr ip6;
+ u64 uint;
+ i64 sint;
+ f64 real;
+ };
+};
+
+struct vat_json_pair_s
+{
+ const char *name;
+ vat_json_node_t value;
+};
+
+void vat_json_print (FILE * ofp, vat_json_node_t * node);
+void vat_json_free (vat_json_node_t * node);
+
+static_always_inline void
+vat_json_init_object (vat_json_node_t * json)
+{
+ json->type = VAT_JSON_OBJECT;
+ json->pairs = NULL;
+}
+
+static_always_inline void
+vat_json_init_array (vat_json_node_t * json)
+{
+ json->type = VAT_JSON_ARRAY;
+ json->array = NULL;
+}
+
+static_always_inline void
+vat_json_set_string (vat_json_node_t * json, u8 * str)
+{
+ json->type = VAT_JSON_STRING;
+ json->string = str;
+}
+
+static_always_inline void
+vat_json_set_string_copy (vat_json_node_t * json, const u8 * str)
+{
+ u8 *ns = NULL;
+ vec_validate (ns, strlen ((const char *) str));
+ strcpy ((char *) ns, (const char *) str);
+ vec_add1 (ns, '\0');
+ vat_json_set_string (json, ns);
+}
+
+static_always_inline void
+vat_json_set_int (vat_json_node_t * json, i64 num)
+{
+ json->type = VAT_JSON_INT;
+ json->sint = num;
+}
+
+static_always_inline void
+vat_json_set_uint (vat_json_node_t * json, u64 num)
+{
+ json->type = VAT_JSON_UINT;
+ json->uint = num;
+}
+
+static_always_inline void
+vat_json_set_real (vat_json_node_t * json, f64 real)
+{
+ json->type = VAT_JSON_REAL;
+ json->real = real;
+}
+
+static_always_inline void
+vat_json_set_ip4 (vat_json_node_t * json, struct in_addr ip4)
+{
+ json->type = VAT_JSON_IPV4;
+ json->ip4 = ip4;
+}
+
+static_always_inline void
+vat_json_set_ip6 (vat_json_node_t * json, struct in6_addr ip6)
+{
+ json->type = VAT_JSON_IPV6;
+ json->ip6 = ip6;
+}
+
+static_always_inline vat_json_node_t *
+vat_json_object_add (vat_json_node_t * json, const char *name)
+{
+ ASSERT (VAT_JSON_OBJECT == json->type);
+ uword pos = vec_len (json->pairs);
+ vec_validate (json->pairs, pos);
+ json->pairs[pos].name = name;
+ return &json->pairs[pos].value;
+}
+
+static_always_inline vat_json_node_t *
+vat_json_array_add (vat_json_node_t * json)
+{
+ ASSERT (VAT_JSON_ARRAY == json->type);
+ uword pos = vec_len (json->array);
+ vec_validate (json->array, pos);
+ return &json->array[pos];
+}
+
+static_always_inline vat_json_node_t *
+vat_json_object_add_list (vat_json_node_t * json, const char *name)
+{
+ vat_json_node_t *array_node = vat_json_object_add (json, name);
+ vat_json_init_array (array_node);
+ return array_node;
+}
+
+static_always_inline void
+vat_json_object_add_string_copy (vat_json_node_t * json,
+ const char *name, u8 * str)
+{
+ vat_json_set_string_copy (vat_json_object_add (json, name), str);
+}
+
+static_always_inline void
+vat_json_object_add_uint (vat_json_node_t * json,
+ const char *name, u64 number)
+{
+ vat_json_set_uint (vat_json_object_add (json, name), number);
+}
+
+static_always_inline void
+vat_json_object_add_int (vat_json_node_t * json, const char *name, i64 number)
+{
+ vat_json_set_int (vat_json_object_add (json, name), number);
+}
+
+static_always_inline void
+vat_json_object_add_real (vat_json_node_t * json, const char *name, f64 real)
+{
+ vat_json_set_real (vat_json_object_add (json, name), real);
+}
+
+static_always_inline void
+vat_json_object_add_ip4 (vat_json_node_t * json,
+ const char *name, struct in_addr ip4)
+{
+ vat_json_set_ip4 (vat_json_object_add (json, name), ip4);
+}
+
+static_always_inline void
+vat_json_object_add_ip6 (vat_json_node_t * json,
+ const char *name, struct in6_addr ip6)
+{
+ vat_json_set_ip6 (vat_json_object_add (json, name), ip6);
+}
+
+static_always_inline void
+vat_json_array_add_int (vat_json_node_t * json, i64 number)
+{
+ vat_json_set_int (vat_json_array_add (json), number);
+}
+
+static_always_inline void
+vat_json_array_add_uint (vat_json_node_t * json, u64 number)
+{
+ vat_json_set_uint (vat_json_array_add (json), number);
+}
+
+static_always_inline void
+vat_json_object_add_bytes (vat_json_node_t * json,
+ const char *name, u8 * array, uword size)
+{
+ ASSERT (VAT_JSON_OBJECT == json->type);
+ vat_json_node_t *json_array = vat_json_object_add (json, name);
+ vat_json_init_array (json_array);
+ int i;
+ for (i = 0; i < size; i++)
+ {
+ vat_json_array_add_uint (json_array, array[i]);
+ }
+}
+
+static_always_inline vat_json_node_t *
+vat_json_object_get_element (vat_json_node_t * json, const char *name)
+{
+ int i = 0;
+
+ ASSERT (VAT_JSON_OBJECT == json->type);
+ for (i = 0; i < vec_len (json->pairs); i++)
+ {
+ if (0 == strcmp (json->pairs[i].name, name))
+ {
+ return &json->pairs[i].value;
+ }
+ }
+ return NULL;
+}
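+
+/* Usage sketch (field names and values are illustrative only, assuming
+ * nothing beyond the helpers declared above):
+ *
+ *   vat_json_node_t root;
+ *
+ *   vat_json_init_object (&root);
+ *   vat_json_object_add_uint (&root, "sw_if_index", 1);
+ *   vat_json_object_add_string_copy (&root, "name", (u8 *) "loop0");
+ *   vat_json_print (stdout, &root);
+ *   vat_json_free (&root);
+ */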
+
+#endif /* __JSON_FORMAT_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vpp/api/summary_stats_client.c b/src/vpp/api/summary_stats_client.c
new file mode 100644
index 00000000..03999567
--- /dev/null
+++ b/src/vpp/api/summary_stats_client.c
@@ -0,0 +1,302 @@
+/*
+ *------------------------------------------------------------------
+ * summary_stats_client - poll vpp summary stats and send them to a collector
+ *
+ * Copyright (c) 2010 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <netinet/in.h>
+#include <netdb.h>
+#include <signal.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <time.h>
+#include <fcntl.h>
+#include <string.h>
+#include <vppinfra/clib.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/bitmap.h>
+#include <vppinfra/fifo.h>
+#include <vppinfra/time.h>
+#include <vppinfra/mheap.h>
+#include <vppinfra/heap.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/format.h>
+#include <vppinfra/error.h>
+
+#include <vnet/vnet.h>
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+
+#include <vpp/api/vpe_msg_enum.h>
+
+#include <vnet/ip/ip.h>
+
+#define f64_endian(a)
+#define f64_print(a,b)
+
+#define vl_typedefs /* define message structures */
+#include <vpp/api/vpe_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define endian-swap functions */
+#include <vpp/api/vpe_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...)
+#define vl_printfun
+#include <vpp/api/vpe_all_api_h.h>
+#undef vl_printfun
+
+vl_shmem_hdr_t *shmem_hdr;
+
+typedef struct
+{
+ volatile int sigterm_received;
+
+ struct sockaddr_in send_data_addr;
+ int send_data_socket;
+ u8 *display_name;
+
+ /* convenience */
+ unix_shared_memory_queue_t *vl_input_queue;
+ u32 my_client_index;
+} test_main_t;
+
+test_main_t test_main;
+
+/*
+ * Satisfy external references when -lvlib is not available.
+ */
+vlib_main_t vlib_global_main;
+vlib_main_t **vlib_mains;
+
+void
+vlib_cli_output (struct vlib_main_t *vm, char *fmt, ...)
+{
+ clib_warning ("vlib_cli_output callled...");
+}
+
+
+static void
+vl_api_vnet_summary_stats_reply_t_handler (vl_api_vnet_summary_stats_reply_t *
+ mp)
+{
+ test_main_t *tm = &test_main;
+ static u8 *sb;
+ int n;
+
+ printf ("total rx pkts %llu, total rx bytes %llu\n",
+ (unsigned long long) mp->total_pkts[0],
+ (unsigned long long) mp->total_bytes[0]);
+ printf ("total tx pkts %llu, total tx bytes %llu\n",
+ (unsigned long long) mp->total_pkts[1],
+ (unsigned long long) mp->total_bytes[1]);
+ printf ("vector rate %.2f\n", mp->vector_rate);
+
+ vec_reset_length (sb);
+ sb = format (sb, "%v,%.0f,%llu,%llu,%llu,%llu\n%c",
+ tm->display_name, mp->vector_rate,
+ (unsigned long long) mp->total_pkts[0],
+ (unsigned long long) mp->total_bytes[0],
+ (unsigned long long) mp->total_pkts[1],
+ (unsigned long long) mp->total_bytes[1], 0);
+
+ n = sendto (tm->send_data_socket, sb, vec_len (sb),
+ 0, (struct sockaddr *) &tm->send_data_addr,
+ sizeof (tm->send_data_addr));
+
+ if (n != vec_len (sb))
+ clib_unix_warning ("sendto");
+
+}
+
+#define foreach_api_msg \
+_(VNET_SUMMARY_STATS_REPLY, vnet_summary_stats_reply)
+
+int
+connect_to_vpe (char *name)
+{
+ int rv = 0;
+
+ rv = vl_client_connect_to_vlib ("/vpe-api", name, 32);
+
+#define _(N,n) \
+ vl_msg_api_set_handlers(VL_API_##N, #n, \
+ vl_api_##n##_t_handler, \
+ vl_noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_api_msg;
+#undef _
+
+ shmem_hdr = api_main.shmem_hdr;
+
+ return rv;
+}
+
+int
+disconnect_from_vpe (void)
+{
+ vl_client_disconnect_from_vlib ();
+ return 0;
+}
+
+static void
+sigterm_handler (int sig)
+{
+ test_main_t *tm = &test_main;
+ tm->sigterm_received = 1;
+}
+
+/* Parse an IP4 address %d.%d.%d.%d. */
+uword
+unformat_ip4_address (unformat_input_t * input, va_list * args)
+{
+ u8 *result = va_arg (*args, u8 *);
+ unsigned a[4];
+
+ if (!unformat (input, "%d.%d.%d.%d", &a[0], &a[1], &a[2], &a[3]))
+ return 0;
+
+ if (a[0] >= 256 || a[1] >= 256 || a[2] >= 256 || a[3] >= 256)
+ return 0;
+
+ result[0] = a[0];
+ result[1] = a[1];
+ result[2] = a[2];
+ result[3] = a[3];
+
+ return 1;
+}
+
+int
+main (int argc, char **argv)
+{
+ api_main_t *am = &api_main;
+ test_main_t *tm = &test_main;
+ vl_api_vnet_get_summary_stats_t *mp;
+ unformat_input_t _input, *input = &_input;
+ clib_error_t *error = 0;
+ ip4_address_t collector_ip;
+ u8 *display_name = 0;
+ u16 collector_port = 7654;
+
+ collector_ip.as_u32 = (u32) ~ 0;
+
+ unformat_init_command_line (input, argv);
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "collector-ip %U",
+ unformat_ip4_address, &collector_ip))
+ ;
+ else if (unformat (input, "display-name %v", &display_name))
+ ;
+ else if (unformat (input, "collector-port %d", &collector_port))
+ ;
+ else
+ {
+ error =
+ clib_error_return
+ (0, "Usage: %s collector-ip <ip>\n"
+ " [display-name <string>] [collector-port <num>]\n"
+ " port defaults to 7654", argv[0]);
+ break;
+ }
+ }
+
+ if (error == 0 && collector_ip.as_u32 == (u32) ~ 0)
+ error = clib_error_return (0, "collector-ip not set...\n");
+
+
+ if (error)
+ {
+ clib_error_report (error);
+ exit (1);
+ }
+
+ if (display_name == 0)
+ {
+ display_name = format (0, "vpe-to-%d.%d.%d.%d",
+ collector_ip.as_u8[0],
+ collector_ip.as_u8[1],
+ collector_ip.as_u8[2], collector_ip.as_u8[3]);
+ }
+
+
+ connect_to_vpe ("test_client");
+
+ tm->vl_input_queue = shmem_hdr->vl_input_queue;
+ tm->my_client_index = am->my_client_index;
+ tm->display_name = display_name;
+
+ signal (SIGTERM, sigterm_handler);
+ signal (SIGINT, sigterm_handler);
+ signal (SIGQUIT, sigterm_handler);
+
+ /* UDP socket used to send records to the collector */
+ tm->send_data_socket = socket (PF_INET, SOCK_DGRAM, IPPROTO_UDP);
+ if (tm->send_data_socket < 0)
+ {
+ clib_unix_warning ("data_rx_socket");
+ exit (1);
+ }
+
+ memset (&tm->send_data_addr, 0, sizeof (tm->send_data_addr));
+ tm->send_data_addr.sin_family = AF_INET;
+ tm->send_data_addr.sin_addr.s_addr = collector_ip.as_u32;
+ tm->send_data_addr.sin_port = htons (collector_port);
+
+ fformat (stdout, "Send SIGINT or SIGTERM to quit...\n");
+
+ while (1)
+ {
+ sleep (5);
+
+ if (tm->sigterm_received)
+ break;
+ /* Poll for stats */
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_VNET_GET_SUMMARY_STATS);
+ mp->client_index = tm->my_client_index;
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+ }
+
+ fformat (stdout, "Exiting...\n");
+
+ disconnect_from_vpe ();
+ exit (0);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
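Each datagram sent by the reply handler above is one plain-text record of the form <display-name>,<vector-rate>,<rx-pkts>,<rx-bytes>,<tx-pkts>,<tx-bytes>. For reference only (not part of this patch), a minimal stand-alone collector bound to the default port 7654 could look like the sketch below, using nothing beyond standard POSIX sockets:

/* Minimal UDP collector sketch for the summary-stats records above.
 * Not part of this patch; standard POSIX sockets only. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

int
main (void)
{
  int fd = socket (AF_INET, SOCK_DGRAM, IPPROTO_UDP);
  struct sockaddr_in addr;
  char buf[1024];
  ssize_t n;

  if (fd < 0)
    {
      perror ("socket");
      return 1;
    }

  memset (&addr, 0, sizeof (addr));
  addr.sin_family = AF_INET;
  addr.sin_addr.s_addr = htonl (INADDR_ANY);
  addr.sin_port = htons (7654);	/* default collector-port */

  if (bind (fd, (struct sockaddr *) &addr, sizeof (addr)) < 0)
    {
      perror ("bind");
      return 1;
    }

  /* Each datagram is one CSV record; print it as received. */
  while ((n = recvfrom (fd, buf, sizeof (buf) - 1, 0, NULL, NULL)) >= 0)
    {
      buf[n] = 0;
      fputs (buf, stdout);
      fflush (stdout);
    }

  close (fd);
  return 0;
}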
diff --git a/src/vpp/api/test_client.c b/src/vpp/api/test_client.c
new file mode 100644
index 00000000..5c568950
--- /dev/null
+++ b/src/vpp/api/test_client.c
@@ -0,0 +1,1531 @@
+/*
+ *------------------------------------------------------------------
+ * test_client.c - binary API test client
+ *
+ * Copyright (c) 2010 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <netinet/in.h>
+#include <signal.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <time.h>
+#include <fcntl.h>
+#include <string.h>
+#include <vppinfra/clib.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/bitmap.h>
+#include <vppinfra/fifo.h>
+#include <vppinfra/time.h>
+#include <vppinfra/mheap.h>
+#include <vppinfra/heap.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/format.h>
+#include <vppinfra/error.h>
+
+#include <vnet/vnet.h>
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+
+#include <vpp/api/vpe_msg_enum.h>
+
+#include <vnet/ip/ip.h>
+#include <vnet/interface.h>
+
+#define f64_endian(a)
+#define f64_print(a,b)
+
+#define vl_typedefs /* define message structures */
+#include <vpp/api/vpe_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define endian-swap functions */
+#include <vpp/api/vpe_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...)
+#define vl_printfun
+#include <vpp/api/vpe_all_api_h.h>
+#undef vl_printfun
+
+vl_shmem_hdr_t *shmem_hdr;
+
+typedef struct
+{
+ int link_events_on;
+ int stats_on;
+ int oam_events_on;
+
+ /* convenience */
+ unix_shared_memory_queue_t *vl_input_queue;
+ u32 my_client_index;
+} test_main_t;
+
+test_main_t test_main;
+
+/*
+ * Satisfy external references when -lvlib is not available.
+ */
+vlib_main_t vlib_global_main;
+vlib_main_t **vlib_mains;
+
+void
+vlib_cli_output (struct vlib_main_t *vm, char *fmt, ...)
+{
+ clib_warning ("vlib_cli_output callled...");
+}
+
+u8 *
+format_ethernet_address (u8 * s, va_list * args)
+{
+ u8 *a = va_arg (*args, u8 *);
+
+ return format (s, "%02x:%02x:%02x:%02x:%02x:%02x",
+ a[0], a[1], a[2], a[3], a[4], a[5]);
+}
+
+static void
+vl_api_sw_interface_details_t_handler (vl_api_sw_interface_details_t * mp)
+{
+ char *duplex, *speed;
+
+ switch (mp->link_duplex << VNET_HW_INTERFACE_FLAG_DUPLEX_SHIFT)
+ {
+ case VNET_HW_INTERFACE_FLAG_HALF_DUPLEX:
+ duplex = "half";
+ break;
+ case VNET_HW_INTERFACE_FLAG_FULL_DUPLEX:
+ duplex = "full";
+ break;
+ default:
+ duplex = "bogus";
+ break;
+ }
+ switch (mp->link_speed << VNET_HW_INTERFACE_FLAG_SPEED_SHIFT)
+ {
+ case VNET_HW_INTERFACE_FLAG_SPEED_10M:
+ speed = "10Mbps";
+ break;
+ case VNET_HW_INTERFACE_FLAG_SPEED_100M:
+ speed = "100Mbps";
+ break;
+ case VNET_HW_INTERFACE_FLAG_SPEED_1G:
+ speed = "1Gbps";
+ break;
+ case VNET_HW_INTERFACE_FLAG_SPEED_10G:
+ speed = "10Gbps";
+ break;
+ case VNET_HW_INTERFACE_FLAG_SPEED_40G:
+ speed = "40Gbps";
+ break;
+ case VNET_HW_INTERFACE_FLAG_SPEED_100G:
+ speed = "100Gbps";
+ break;
+ default:
+ speed = "bogus";
+ break;
+ }
+ fformat (stdout, "details: %s sw_if_index %d sup_sw_if_index %d "
+ "link_duplex %s link_speed %s",
+ mp->interface_name, ntohl (mp->sw_if_index),
+ ntohl (mp->sup_sw_if_index), duplex, speed);
+
+ if (mp->l2_address_length)
+ fformat (stdout, " l2 address: %U\n",
+ format_ethernet_address, mp->l2_address);
+ else
+ fformat (stdout, "\n");
+}
+
+static void
+vl_api_sw_interface_set_flags_t_handler (vl_api_sw_interface_set_flags_t * mp)
+{
+ fformat (stdout, "set flags: sw_if_index %d, admin %s link %s\n",
+ ntohl (mp->sw_if_index),
+ mp->admin_up_down ? "up" : "down",
+ mp->link_up_down ? "up" : "down");
+}
+
+static void
+ vl_api_sw_interface_set_flags_reply_t_handler
+ (vl_api_sw_interface_set_flags_reply_t * mp)
+{
+ fformat (stdout, "set flags reply: reply %d\n", ntohl (mp->retval));
+}
+
+static void
+ vl_api_want_interface_events_reply_t_handler
+ (vl_api_want_interface_events_reply_t * mp)
+{
+}
+
+static void
+vl_api_want_stats_reply_t_handler (vl_api_want_stats_reply_t * mp)
+{
+ fformat (stdout, "want stats reply %d\n", ntohl (mp->retval));
+}
+
+static void
+vl_api_want_oam_events_reply_t_handler (vl_api_want_oam_events_reply_t * mp)
+{
+ fformat (stdout, "want oam reply %d\n", ntohl (mp->retval));
+}
+
+static void
+vl_api_ip_add_del_route_reply_t_handler (vl_api_ip_add_del_route_reply_t * mp)
+{
+ fformat (stdout, "add_route reply %d\n", ntohl (mp->retval));
+}
+
+static void
+ vl_api_sw_interface_add_del_address_reply_t_handler
+ (vl_api_sw_interface_add_del_address_reply_t * mp)
+{
+ fformat (stdout, "add_del_address reply %d\n", ntohl (mp->retval));
+}
+
+static void
+ vl_api_sw_interface_set_table_reply_t_handler
+ (vl_api_sw_interface_set_table_reply_t * mp)
+{
+ fformat (stdout, "set_table reply %d\n", ntohl (mp->retval));
+}
+
+static void
+vl_api_tap_connect_reply_t_handler (vl_api_tap_connect_reply_t * mp)
+{
+ fformat (stdout, "tap connect reply %d, sw_if_index %d\n",
+ ntohl (mp->retval), ntohl (mp->sw_if_index));
+}
+
+static void
+vl_api_create_vlan_subif_reply_t_handler (vl_api_create_vlan_subif_reply_t *
+ mp)
+{
+ fformat (stdout, "create vlan subif reply %d, sw_if_index %d\n",
+ ntohl (mp->retval), ntohl (mp->sw_if_index));
+}
+
+static void vl_api_proxy_arp_add_del_reply_t_handler
+ (vl_api_proxy_arp_add_del_reply_t * mp)
+{
+ fformat (stdout, "add del proxy arp reply %d\n", ntohl (mp->retval));
+}
+
+static void vl_api_proxy_arp_intfc_enable_disable_reply_t_handler
+ (vl_api_proxy_arp_intfc_enable_disable_reply_t * mp)
+{
+ fformat (stdout, "proxy arp intfc ena/dis reply %d\n", ntohl (mp->retval));
+}
+
+static void vl_api_ip_neighbor_add_del_reply_t_handler
+ (vl_api_ip_neighbor_add_del_reply_t * mp)
+{
+ fformat (stdout, "ip neighbor add del reply %d\n", ntohl (mp->retval));
+}
+
+static void
+vl_api_vnet_interface_counters_t_handler (vl_api_vnet_interface_counters_t *
+ mp)
+{
+ char *counter_name;
+ u32 count, sw_if_index;
+ int i;
+
+ count = ntohl (mp->count);
+ sw_if_index = ntohl (mp->first_sw_if_index);
+ if (mp->is_combined == 0)
+ {
+ u64 *vp, v;
+ vp = (u64 *) mp->data;
+
+ switch (mp->vnet_counter_type)
+ {
+ case VNET_INTERFACE_COUNTER_DROP:
+ counter_name = "drop";
+ break;
+ case VNET_INTERFACE_COUNTER_PUNT:
+ counter_name = "punt";
+ break;
+ case VNET_INTERFACE_COUNTER_IP4:
+ counter_name = "ip4";
+ break;
+ case VNET_INTERFACE_COUNTER_IP6:
+ counter_name = "ip6";
+ break;
+ case VNET_INTERFACE_COUNTER_RX_NO_BUF:
+ counter_name = "rx-no-buf";
+ break;
+ case VNET_INTERFACE_COUNTER_RX_MISS:
+ counter_name = "rx-miss";
+ break;
+ case VNET_INTERFACE_COUNTER_RX_ERROR:
+ counter_name = "rx-error";
+ break;
+ case VNET_INTERFACE_COUNTER_TX_ERROR:
+ counter_name = "tx-error (fifo-full)";
+ break;
+ default:
+ counter_name = "bogus";
+ break;
+ }
+ for (i = 0; i < count; i++)
+ {
+ v = clib_mem_unaligned (vp, u64);
+ v = clib_net_to_host_u64 (v);
+ vp++;
+ fformat (stdout, "%d.%s %lld\n", sw_if_index, counter_name, v);
+ sw_if_index++;
+ }
+ }
+ else
+ {
+ vlib_counter_t *vp;
+ u64 packets, bytes;
+ vp = (vlib_counter_t *) mp->data;
+
+ switch (mp->vnet_counter_type)
+ {
+ case VNET_INTERFACE_COUNTER_RX:
+ counter_name = "rx";
+ break;
+ case VNET_INTERFACE_COUNTER_TX:
+ counter_name = "tx";
+ break;
+ default:
+ counter_name = "bogus";
+ break;
+ }
+ for (i = 0; i < count; i++)
+ {
+ packets = clib_mem_unaligned (&vp->packets, u64);
+ packets = clib_net_to_host_u64 (packets);
+ bytes = clib_mem_unaligned (&vp->bytes, u64);
+ bytes = clib_net_to_host_u64 (bytes);
+ vp++;
+ fformat (stdout, "%d.%s.packets %lld\n",
+ sw_if_index, counter_name, packets);
+ fformat (stdout, "%d.%s.bytes %lld\n",
+ sw_if_index, counter_name, bytes);
+ sw_if_index++;
+ }
+ }
+}
+
+/* Format an IP4 address. */
+u8 *
+format_ip4_address (u8 * s, va_list * args)
+{
+ u8 *a = va_arg (*args, u8 *);
+ return format (s, "%d.%d.%d.%d", a[0], a[1], a[2], a[3]);
+}
+
+/* Format an IP4 route destination and length. */
+u8 *
+format_ip4_address_and_length (u8 * s, va_list * args)
+{
+ u8 *a = va_arg (*args, u8 *);
+ u8 l = va_arg (*args, u32);
+ return format (s, "%U/%d", format_ip4_address, a, l);
+}
+
+static void
+vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
+{
+ int i;
+ vl_api_ip4_fib_counter_t *ctrp;
+ u32 count;
+
+ count = ntohl (mp->count);
+
+ fformat (stdout, "fib id %d, count this msg %d\n",
+ ntohl (mp->vrf_id), count);
+
+ ctrp = mp->c;
+ for (i = 0; i < count; i++)
+ {
+ fformat (stdout, "%U: %lld packets, %lld bytes\n",
+ format_ip4_address_and_length, &ctrp->address,
+ (u32) ctrp->address_length,
+ clib_net_to_host_u64 (ctrp->packets),
+ clib_net_to_host_u64 (ctrp->bytes));
+ ctrp++;
+ }
+}
+
+/* Format an IP6 address. */
+u8 *
+format_ip6_address (u8 * s, va_list * args)
+{
+ ip6_address_t *a = va_arg (*args, ip6_address_t *);
+ u32 i, i_max_n_zero, max_n_zeros, i_first_zero, n_zeros, last_double_colon;
+
+ i_max_n_zero = ARRAY_LEN (a->as_u16);
+ max_n_zeros = 0;
+ i_first_zero = i_max_n_zero;
+ n_zeros = 0;
+ for (i = 0; i < ARRAY_LEN (a->as_u16); i++)
+ {
+ u32 is_zero = a->as_u16[i] == 0;
+ if (is_zero && i_first_zero >= ARRAY_LEN (a->as_u16))
+ {
+ i_first_zero = i;
+ n_zeros = 0;
+ }
+ n_zeros += is_zero;
+ if ((!is_zero && n_zeros > max_n_zeros)
+ || (i + 1 >= ARRAY_LEN (a->as_u16) && n_zeros > max_n_zeros))
+ {
+ i_max_n_zero = i_first_zero;
+ max_n_zeros = n_zeros;
+ i_first_zero = ARRAY_LEN (a->as_u16);
+ n_zeros = 0;
+ }
+ }
+
+ last_double_colon = 0;
+ for (i = 0; i < ARRAY_LEN (a->as_u16); i++)
+ {
+ if (i == i_max_n_zero && max_n_zeros > 1)
+ {
+ s = format (s, "::");
+ i += max_n_zeros - 1;
+ last_double_colon = 1;
+ }
+ else
+ {
+ s = format (s, "%s%x",
+ (last_double_colon || i == 0) ? "" : ":",
+ clib_net_to_host_u16 (a->as_u16[i]));
+ last_double_colon = 0;
+ }
+ }
+
+ return s;
+}
+
+/* Format an IP6 route destination and length. */
+u8 *
+format_ip6_address_and_length (u8 * s, va_list * args)
+{
+ ip6_address_t *a = va_arg (*args, ip6_address_t *);
+ u8 l = va_arg (*args, u32);
+ return format (s, "%U/%d", format_ip6_address, a, l);
+}
+
+static void
+vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
+{
+ int i;
+ vl_api_ip6_fib_counter_t *ctrp;
+ u32 count;
+
+ count = ntohl (mp->count);
+
+ fformat (stdout, "fib id %d, count this msg %d\n",
+ ntohl (mp->vrf_id), count);
+
+ ctrp = mp->c;
+ for (i = 0; i < count; i++)
+ {
+ fformat (stdout, "%U: %lld packets, %lld bytes\n",
+ format_ip6_address_and_length, &ctrp->address,
+ (u32) ctrp->address_length,
+ clib_net_to_host_u64 (ctrp->packets),
+ clib_net_to_host_u64 (ctrp->bytes));
+ ctrp++;
+ }
+}
+
+static void
+vl_api_oam_event_t_handler (vl_api_oam_event_t * mp)
+{
+ fformat (stdout, "OAM: %U now %s\n",
+ format_ip4_address, &mp->dst_address,
+ mp->state == 1 ? "alive" : "dead");
+}
+
+static void
+vl_api_oam_add_del_reply_t_handler (vl_api_oam_add_del_reply_t * mp)
+{
+ fformat (stdout, "oam add del reply %d\n", ntohl (mp->retval));
+}
+
+static void
+vl_api_reset_fib_reply_t_handler (vl_api_reset_fib_reply_t * mp)
+{
+ fformat (stdout, "fib reset reply %d\n", ntohl (mp->retval));
+}
+
+static void
+vl_api_dhcp_proxy_set_vss_reply_t_handler (vl_api_dhcp_proxy_set_vss_reply_t *
+ mp)
+{
+ fformat (stdout, "dhcp proxy set vss reply %d\n", ntohl (mp->retval));
+}
+
+static void
+vl_api_dhcp_proxy_config_reply_t_handler (vl_api_dhcp_proxy_config_reply_t *
+ mp)
+{
+ fformat (stdout, "dhcp proxy config reply %d\n", ntohl (mp->retval));
+}
+
+static void
+vl_api_set_ip_flow_hash_reply_t_handler (vl_api_set_ip_flow_hash_reply_t * mp)
+{
+ fformat (stdout, "set ip flow hash reply %d\n", ntohl (mp->retval));
+}
+
+static void
+ vl_api_sw_interface_ip6nd_ra_config_reply_t_handler
+ (vl_api_sw_interface_ip6nd_ra_config_reply_t * mp)
+{
+ fformat (stdout, "ip6 nd ra-config reply %d\n", ntohl (mp->retval));
+}
+
+static void
+ vl_api_sw_interface_ip6nd_ra_prefix_reply_t_handler
+ (vl_api_sw_interface_ip6nd_ra_prefix_reply_t * mp)
+{
+ fformat (stdout, "ip6 nd ra-prefix reply %d\n", ntohl (mp->retval));
+}
+
+static void
+ vl_api_sw_interface_ip6_enable_disable_reply_t_handler
+ (vl_api_sw_interface_ip6_enable_disable_reply_t * mp)
+{
+ fformat (stdout, "ip6 enable/disable reply %d\n", ntohl (mp->retval));
+}
+
+static void
+ vl_api_sw_interface_ip6_set_link_local_address_reply_t_handler
+ (vl_api_sw_interface_ip6_set_link_local_address_reply_t * mp)
+{
+ fformat (stdout, "ip6 set link-local address reply %d\n",
+ ntohl (mp->retval));
+}
+
+static void vl_api_create_loopback_reply_t_handler
+ (vl_api_create_loopback_reply_t * mp)
+{
+ fformat (stdout, "create loopback status %d, sw_if_index %d\n",
+ ntohl (mp->retval), ntohl (mp->sw_if_index));
+}
+
+static void
+vl_api_sr_tunnel_add_del_reply_t_handler (vl_api_sr_tunnel_add_del_reply_t *
+ mp)
+{
+ fformat (stdout, "sr tunnel add/del reply %d\n", ntohl (mp->retval));
+}
+
+static void vl_api_l2_patch_add_del_reply_t_handler
+ (vl_api_l2_patch_add_del_reply_t * mp)
+{
+ fformat (stdout, "l2 patch reply %d\n", ntohl (mp->retval));
+}
+
+static void vl_api_sw_interface_set_l2_xconnect_reply_t_handler
+ (vl_api_sw_interface_set_l2_xconnect_reply_t * mp)
+{
+ fformat (stdout, "l2_xconnect reply %d\n", ntohl (mp->retval));
+}
+
+static void vl_api_sw_interface_set_l2_bridge_reply_t_handler
+ (vl_api_sw_interface_set_l2_bridge_reply_t * mp)
+{
+ fformat (stdout, "l2_bridge reply %d\n", ntohl (mp->retval));
+}
+
+static void
+noop_handler (void *notused)
+{
+}
+
+#define vl_api_vnet_ip4_fib_counters_t_endian noop_handler
+#define vl_api_vnet_ip4_fib_counters_t_print noop_handler
+#define vl_api_vnet_ip6_fib_counters_t_endian noop_handler
+#define vl_api_vnet_ip6_fib_counters_t_print noop_handler
+
+#define foreach_api_msg \
+_(SW_INTERFACE_DETAILS, sw_interface_details) \
+_(SW_INTERFACE_SET_FLAGS, sw_interface_set_flags) \
+_(SW_INTERFACE_SET_FLAGS_REPLY, sw_interface_set_flags_reply) \
+_(WANT_INTERFACE_EVENTS_REPLY, want_interface_events_reply) \
+_(WANT_STATS_REPLY, want_stats_reply) \
+_(WANT_OAM_EVENTS_REPLY, want_oam_events_reply) \
+_(OAM_EVENT, oam_event) \
+_(OAM_ADD_DEL_REPLY, oam_add_del_reply) \
+_(VNET_INTERFACE_COUNTERS, vnet_interface_counters) \
+_(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \
+_(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \
+_(IP_ADD_DEL_ROUTE_REPLY, ip_add_del_route_reply) \
+_(SW_INTERFACE_ADD_DEL_ADDRESS_REPLY, sw_interface_add_del_address_reply) \
+_(SW_INTERFACE_SET_TABLE_REPLY, sw_interface_set_table_reply) \
+_(TAP_CONNECT_REPLY, tap_connect_reply) \
+_(CREATE_VLAN_SUBIF_REPLY, create_vlan_subif_reply) \
+_(PROXY_ARP_ADD_DEL_REPLY, proxy_arp_add_del_reply) \
+_(PROXY_ARP_INTFC_ENABLE_DISABLE_REPLY, proxy_arp_intfc_enable_disable_reply) \
+_(IP_NEIGHBOR_ADD_DEL_REPLY, ip_neighbor_add_del_reply) \
+_(RESET_FIB_REPLY, reset_fib_reply) \
+_(DHCP_PROXY_CONFIG_REPLY, dhcp_proxy_config_reply) \
+_(DHCP_PROXY_SET_VSS_REPLY, dhcp_proxy_set_vss_reply) \
+_(SET_IP_FLOW_HASH_REPLY, set_ip_flow_hash_reply) \
+_(SW_INTERFACE_IP6ND_RA_CONFIG_REPLY, sw_interface_ip6nd_ra_config_reply) \
+_(SW_INTERFACE_IP6ND_RA_PREFIX_REPLY, sw_interface_ip6nd_ra_prefix_reply) \
+_(SW_INTERFACE_IP6_ENABLE_DISABLE_REPLY, sw_interface_ip6_enable_disable_reply) \
+_(SW_INTERFACE_IP6_SET_LINK_LOCAL_ADDRESS_REPLY, sw_interface_ip6_set_link_local_address_reply) \
+_(CREATE_LOOPBACK_REPLY, create_loopback_reply) \
+_(L2_PATCH_ADD_DEL_REPLY, l2_patch_add_del_reply) \
+_(SR_TUNNEL_ADD_DEL_REPLY,sr_tunnel_add_del_reply) \
+_(SW_INTERFACE_SET_L2_XCONNECT_REPLY, sw_interface_set_l2_xconnect_reply) \
+_(SW_INTERFACE_SET_L2_BRIDGE_REPLY, sw_interface_set_l2_bridge_reply)
+
+int
+connect_to_vpe (char *name)
+{
+ int rv = 0;
+
+ rv = vl_client_connect_to_vlib ("/vpe-api", name, 32);
+
+#define _(N,n) \
+ vl_msg_api_set_handlers(VL_API_##N, #n, \
+ vl_api_##n##_t_handler, \
+ noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_api_msg;
+#undef _
+
+ shmem_hdr = api_main.shmem_hdr;
+
+ return rv;
+}
+
+int
+disconnect_from_vpe (void)
+{
+ vl_client_disconnect_from_vlib ();
+ return 0;
+}
+
+void
+link_up_down_enable_disable (test_main_t * tm, int enable)
+{
+ vl_api_want_interface_events_t *mp;
+
+ /* Request admin / link up down messages */
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_EVENTS);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->enable_disable = enable;
+ mp->pid = getpid ();
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+ tm->link_events_on = enable;
+}
+
+void
+stats_enable_disable (test_main_t * tm, int enable)
+{
+ vl_api_want_stats_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_WANT_STATS);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->enable_disable = enable;
+ mp->pid = getpid ();
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+ tm->stats_on = enable;
+}
+
+void
+oam_events_enable_disable (test_main_t * tm, int enable)
+{
+ vl_api_want_oam_events_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_WANT_OAM_EVENTS);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->enable_disable = enable;
+ mp->pid = getpid ();
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+ tm->oam_events_on = enable;
+}
+
+void
+oam_add_del (test_main_t * tm, int is_add)
+{
+ vl_api_oam_add_del_t *mp;
+ ip4_address_t tmp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_OAM_ADD_DEL);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->is_add = is_add;
+
+ tmp.as_u32 = ntohl (0xc0a80101); /* 192.168.1.1 */
+ clib_memcpy (mp->src_address, tmp.as_u8, 4);
+
+ tmp.as_u32 = ntohl (0xc0a80103); /* 192.168.1.3 */
+ clib_memcpy (mp->dst_address, tmp.as_u8, 4);
+
+ mp->vrf_id = 0;
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+}
+
+void
+dump (test_main_t * tm)
+{
+ vl_api_sw_interface_dump_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_DUMP);
+ mp->client_index = tm->my_client_index;
+ mp->name_filter_valid = 1;
+ strncpy ((char *) mp->name_filter, "eth", sizeof (mp->name_filter) - 1);
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+}
+
+void
+add_del_ip4_route (test_main_t * tm, int enable_disable)
+{
+ vl_api_ip_add_del_route_t *mp;
+ u32 tmp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_IP_ADD_DEL_ROUTE);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->table_id = ntohl (0);
+ mp->create_vrf_if_needed = 1;
+
+ mp->next_hop_sw_if_index = ntohl (5);
+ mp->is_add = enable_disable;
+ mp->next_hop_weight = 1;
+
+ /* Next hop: 6.0.0.1 */
+ tmp = ntohl (0x06000001);
+ clib_memcpy (mp->next_hop_address, &tmp, sizeof (tmp));
+
+ /* Destination: 0.0.0.0/0 (default route) */
+ tmp = ntohl (0x0);
+ clib_memcpy (mp->dst_address, &tmp, sizeof (tmp));
+ mp->dst_address_length = 0;
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+}
+
+void
+add_del_ip6_route (test_main_t * tm, int enable_disable)
+{
+ vl_api_ip_add_del_route_t *mp;
+ u64 tmp[2];
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_IP_ADD_DEL_ROUTE);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->next_hop_sw_if_index = ntohl (5);
+ mp->is_add = enable_disable;
+ mp->is_ipv6 = 1;
+ mp->next_hop_weight = 1;
+ mp->dst_address_length = 64;
+
+ /* add/del dabe::/64 via db01::11 */
+
+ tmp[0] = clib_host_to_net_u64 (0xdabe000000000000ULL);
+ tmp[1] = clib_host_to_net_u64 (0x0ULL);
+ clib_memcpy (mp->dst_address, &tmp[0], 8);
+ clib_memcpy (&mp->dst_address[8], &tmp[1], 8);
+
+ tmp[0] = clib_host_to_net_u64 (0xdb01000000000000ULL);
+ tmp[1] = clib_host_to_net_u64 (0x11ULL);
+ clib_memcpy (mp->next_hop_address, &tmp[0], 8);
+ clib_memcpy (&mp->next_hop_address[8], &tmp[1], 8);
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+}
+
+void
+add_del_interface_address (test_main_t * tm, int enable_disable)
+{
+ vl_api_sw_interface_add_del_address_t *mp;
+ u32 tmp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_ADD_DEL_ADDRESS);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->sw_if_index = ntohl (5);
+ mp->is_add = enable_disable;
+ mp->address_length = 8;
+
+ tmp = ntohl (0x01020304);
+ clib_memcpy (mp->address, &tmp, 4);
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+}
+
+void
+add_del_v6_interface_address (test_main_t * tm, int enable_disable)
+{
+ vl_api_sw_interface_add_del_address_t *mp;
+ u64 tmp[2];
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_ADD_DEL_ADDRESS);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->is_ipv6 = 1;
+ mp->sw_if_index = ntohl (5);
+ mp->is_add = enable_disable;
+ mp->address_length = 64;
+
+ tmp[0] = clib_host_to_net_u64 (0xdb01000000000000ULL);
+ tmp[1] = clib_host_to_net_u64 (0x11ULL);
+
+ clib_memcpy (mp->address, &tmp[0], 8);
+ clib_memcpy (&mp->address[8], &tmp[1], 8);
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+}
+
+void
+del_all_interface_addresses (test_main_t * tm)
+{
+ vl_api_sw_interface_add_del_address_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_ADD_DEL_ADDRESS);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->sw_if_index = ntohl (5);
+ mp->del_all = 1;
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+}
+
+void
+set_interface_table (test_main_t * tm, int is_ipv6, u32 vrf_id)
+{
+ vl_api_sw_interface_set_table_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_SET_TABLE);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->sw_if_index = ntohl (5);
+ mp->is_ipv6 = is_ipv6;
+ mp->vrf_id = ntohl (vrf_id);
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+}
+
+void
+connect_unix_tap (test_main_t * tm, char *name)
+{
+ vl_api_tap_connect_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_TAP_CONNECT);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ strncpy ((char *) mp->tap_name, name, sizeof (mp->tap_name) - 1);
+ mp->use_random_mac = 1;
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+}
+
+void
+create_vlan_subif (test_main_t * tm, u32 vlan_id)
+{
+ vl_api_create_vlan_subif_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_CREATE_VLAN_SUBIF);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->sw_if_index = ntohl (5);
+ mp->vlan_id = ntohl (vlan_id);
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+}
+
+void
+add_del_proxy_arp (test_main_t * tm, int is_add)
+{
+ vl_api_proxy_arp_add_del_t *mp;
+ u32 tmp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_PROXY_ARP_ADD_DEL);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->vrf_id = ntohl (11);
+ mp->is_add = is_add;
+
+ /* proxy fib 11, 1.1.1.1 -> 1.1.1.10 */
+ tmp = ntohl (0x01010101);
+ clib_memcpy (mp->low_address, &tmp, 4);
+
+ tmp = ntohl (0x0101010a);
+ clib_memcpy (mp->hi_address, &tmp, 4);
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+}
+
+void
+proxy_arp_intfc_enable_disable (test_main_t * tm, int enable_disable)
+{
+ vl_api_proxy_arp_intfc_enable_disable_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_PROXY_ARP_INTFC_ENABLE_DISABLE);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->sw_if_index = ntohl (6);
+ mp->enable_disable = enable_disable;
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+}
+
+void
+add_ip4_neighbor (test_main_t * tm, int add_del)
+{
+ vl_api_ip_neighbor_add_del_t *mp;
+ u32 tmp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_IP_NEIGHBOR_ADD_DEL);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->vrf_id = ntohl (11);
+ mp->sw_if_index = ntohl (6);
+ mp->is_add = add_del;
+
+ memset (mp->mac_address, 0xbe, sizeof (mp->mac_address));
+
+ tmp = ntohl (0x0101010a);
+ clib_memcpy (mp->dst_address, &tmp, 4);
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+}
+
+void
+add_ip6_neighbor (test_main_t * tm, int add_del)
+{
+ vl_api_ip_neighbor_add_del_t *mp;
+ u64 tmp[2];
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_IP_NEIGHBOR_ADD_DEL);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->vrf_id = ntohl (11);
+ mp->sw_if_index = ntohl (6);
+ mp->is_add = add_del;
+ mp->is_ipv6 = 1;
+
+ memset (mp->mac_address, 0xbe, sizeof (mp->mac_address));
+
+ tmp[0] = clib_host_to_net_u64 (0xdb01000000000000ULL);
+ tmp[1] = clib_host_to_net_u64 (0x11ULL);
+
+ clib_memcpy (mp->dst_address, &tmp[0], 8);
+ clib_memcpy (&mp->dst_address[8], &tmp[1], 8);
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+}
+
+void
+reset_fib (test_main_t * tm, u8 is_ip6)
+{
+ vl_api_reset_fib_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_RESET_FIB);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->vrf_id = ntohl (11);
+ mp->is_ipv6 = is_ip6;
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+}
+
+void
+dhcpv6_set_vss (test_main_t * tm)
+{
+ vl_api_dhcp_proxy_set_vss_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_DHCP_PROXY_SET_VSS);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->oui = ntohl (6);
+ mp->fib_id = ntohl (60);
+ mp->is_add = 1;
+ mp->is_ipv6 = 1;
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+}
+
+void
+dhcpv4_set_vss (test_main_t * tm)
+{
+ vl_api_dhcp_proxy_set_vss_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_DHCP_PROXY_SET_VSS);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->oui = ntohl (4);
+ mp->fib_id = ntohl (40);
+ mp->is_add = 1;
+ mp->is_ipv6 = 0;
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+}
+
+void
+dhcp_set_vss (test_main_t * tm)
+{
+ dhcpv4_set_vss (tm);
+ dhcpv6_set_vss (tm);
+}
+
+void
+dhcp_set_proxy (test_main_t * tm, int ipv6)
+{
+ vl_api_dhcp_proxy_config_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_DHCP_PROXY_CONFIG);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->vrf_id = ntohl (0);
+ mp->is_ipv6 = ipv6;
+ mp->insert_circuit_id = 1;
+ mp->is_add = 1;
+ mp->dhcp_server[0] = 0x20;
+ mp->dhcp_server[1] = 0x01;
+ mp->dhcp_server[2] = 0xab;
+ mp->dhcp_server[3] = 0xcd;
+ mp->dhcp_server[4] = 0x12;
+ mp->dhcp_server[5] = 0x34;
+ mp->dhcp_server[6] = 0xfe;
+ mp->dhcp_server[7] = 0xdc;
+ mp->dhcp_server[14] = 0;
+ mp->dhcp_server[15] = 0x2;
+
+ mp->dhcp_src_address[0] = 0x20;
+ mp->dhcp_src_address[1] = 0x01;
+ mp->dhcp_src_address[2] = 0xab;
+ mp->dhcp_src_address[3] = 0xcd;
+ mp->dhcp_src_address[4] = 0x12;
+ mp->dhcp_src_address[5] = 0x34;
+ mp->dhcp_src_address[6] = 0x56;
+ mp->dhcp_src_address[7] = 0x78;
+ mp->dhcp_src_address[14] = 0;
+ mp->dhcp_src_address[15] = 0x2;
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+}
+
+void
+set_ip_flow_hash (test_main_t * tm, u8 is_ip6)
+{
+ vl_api_set_ip_flow_hash_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_SET_IP_FLOW_HASH);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->vrf_id = 0;
+ mp->is_ipv6 = is_ip6;
+ mp->dst = 1;
+ mp->reverse = 1;
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+}
+
+void
+ip6nd_ra_config (test_main_t * tm, int is_no)
+{
+ vl_api_sw_interface_ip6nd_ra_config_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->sw_if_index = ntohl (5);
+ mp->is_no = is_no;
+
+ mp->suppress = 1;
+
+
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_IP6ND_RA_CONFIG);
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+}
+
+void
+ip6nd_ra_prefix (test_main_t * tm, int is_no)
+{
+ vl_api_sw_interface_ip6nd_ra_prefix_t *mp;
+ u64 tmp[2];
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->sw_if_index = ntohl (5);
+ mp->is_no = is_no;
+
+ mp->use_default = 1;
+
+
+ tmp[0] = clib_host_to_net_u64 (0xdb01000000000000ULL);
+ tmp[1] = clib_host_to_net_u64 (0x11ULL);
+
+
+ clib_memcpy (mp->address, &tmp[0], 8);
+ clib_memcpy (&mp->address[8], &tmp[1], 8);
+
+ mp->address_length = 64;
+
+
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_IP6ND_RA_PREFIX);
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+}
+
+void
+ip6_enable_disable (test_main_t * tm, int enable)
+{
+ vl_api_sw_interface_ip6_enable_disable_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->sw_if_index = ntohl (5);
+ mp->enable = (enable == 1);
+
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_IP6_ENABLE_DISABLE);
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+}
+
+void
+loop_create (test_main_t * tm)
+{
+ vl_api_create_loopback_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+
+ mp->_vl_msg_id = ntohs (VL_API_CREATE_LOOPBACK);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+}
+
+void
+ip6_set_link_local_address (test_main_t * tm)
+{
+ vl_api_sw_interface_ip6_set_link_local_address_t *mp;
+ u64 tmp[2];
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->sw_if_index = ntohl (5);
+
+ tmp[0] = clib_host_to_net_u64 (0xfe80000000000000ULL);
+ tmp[1] = clib_host_to_net_u64 (0x11ULL);
+
+ clib_memcpy (mp->address, &tmp[0], 8);
+ clib_memcpy (&mp->address[8], &tmp[1], 8);
+
+ mp->address_length = 64;
+
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_IP6_SET_LINK_LOCAL_ADDRESS);
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+}
+
+
+void
+set_flags (test_main_t * tm, int up_down)
+{
+ vl_api_sw_interface_set_flags_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_SET_FLAGS);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->sw_if_index = ntohl (5);
+ mp->admin_up_down = up_down;
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+
+}
+
+void
+l2_patch_add_del (test_main_t * tm, int is_add)
+{
+ vl_api_l2_patch_add_del_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_L2_PATCH_ADD_DEL);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->is_add = is_add;
+ mp->rx_sw_if_index = ntohl (1);
+ mp->tx_sw_if_index = ntohl (2);
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+}
+
+void
+l2_xconnect (test_main_t * tm)
+{
+ vl_api_sw_interface_set_l2_xconnect_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_SET_L2_XCONNECT);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->rx_sw_if_index = ntohl (5);
+ mp->tx_sw_if_index = ntohl (6);
+ mp->enable = 1;
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+}
+
+void
+l2_bridge (test_main_t * tm)
+{
+ vl_api_sw_interface_set_l2_bridge_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_SET_L2_BRIDGE);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+ mp->rx_sw_if_index = ntohl (5);
+ mp->bd_id = ntohl (6);
+ mp->bvi = ntohl (1);
+ mp->shg = ntohl (0);
+ mp->enable = 1;
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+}
+
+int
+main (int argc, char **argv)
+{
+ api_main_t *am = &api_main;
+ test_main_t *tm = &test_main;
+ int ch;
+
+ connect_to_vpe ("test_client");
+
+ tm->vl_input_queue = shmem_hdr->vl_input_queue;
+ tm->my_client_index = am->my_client_index;
+
+ fformat (stdout, "Type 'h' for help, 'q' to quit...\n");
+
+ while (1)
+ {
+ ch = getchar ();
+ switch (ch)
+ {
+ case 'q':
+ goto done;
+ case 'd':
+ dump (tm);
+ break;
+ case 'L':
+ link_up_down_enable_disable (tm, 1 /* enable_disable */ );
+ break;
+ case 'l':
+ link_up_down_enable_disable (tm, 0 /* enable_disable */ );
+ break;
+ case 'S':
+ stats_enable_disable (tm, 1 /* enable_disable */ );
+ break;
+ case 's':
+ stats_enable_disable (tm, 0 /* enable_disable */ );
+ break;
+ case '3':
+ add_del_ip4_route (tm, 0 /* add */ );
+ break;
+ case '4':
+ add_del_ip4_route (tm, 1 /* add */ );
+ break;
+ case '5':
+ add_del_ip6_route (tm, 0 /* add */ );
+ break;
+ case '6':
+ add_del_ip6_route (tm, 1 /* add */ );
+ break;
+ case 'A':
+ add_del_interface_address (tm, 1 /* add */ );
+ break;
+ case 'a':
+ add_del_interface_address (tm, 0 /* add */ );
+ break;
+ case 'B':
+ add_del_v6_interface_address (tm, 1 /* add */ );
+ break;
+ case 'b':
+ add_del_v6_interface_address (tm, 0 /* add */ );
+ break;
+ case 'E':
+ l2_patch_add_del (tm, 1 /* is_add */ );
+ break;
+ case 'e':
+ l2_patch_add_del (tm, 0 /* is_add */ );
+ break;
+ case 'z':
+ del_all_interface_addresses (tm);
+ break;
+ case 't':
+ set_interface_table (tm, 0 /* is_ipv6 */ ,
+ 11 /* my amp goes to 11 */ );
+ break;
+ case 'T':
+ set_interface_table (tm, 1 /* is_ipv6 */ ,
+ 12 /* my amp goes to 12 */ );
+ break;
+
+ case 'u':
+ create_vlan_subif (tm, 123);
+ break;
+
+ case 'c':
+ connect_unix_tap (tm, "foo");
+ break;
+
+ case 'n':
+ add_ip4_neighbor (tm, 1 /* is_add */ );
+ add_ip6_neighbor (tm, 1 /* is_add */ );
+ break;
+
+ case 'N':
+ add_ip4_neighbor (tm, 0 /* is_add */ );
+ add_ip6_neighbor (tm, 0 /* is_add */ );
+ break;
+
+ case 'p':
+ add_del_proxy_arp (tm, 1 /* add */ );
+ break;
+
+ case 'i':
+ proxy_arp_intfc_enable_disable (tm, 1 /* enable */ );
+ break;
+
+ case 'O':
+ oam_events_enable_disable (tm, 0 /* enable */ );
+ break;
+
+ case 'o':
+ oam_events_enable_disable (tm, 1 /* enable */ );
+ break;
+
+ case '0':
+ oam_add_del (tm, 0 /* is_add */ );
+ break;
+
+ case '1':
+ oam_add_del (tm, 1 /* is_add */ );
+ break;
+
+ case 'r':
+ reset_fib (tm, 0 /* is_ip6 */ );
+ break;
+
+ case 'R':
+ reset_fib (tm, 1 /* is_ip6 */ );
+ break;
+
+ case 'j':
+ dhcp_set_vss (tm);
+ break;
+
+ case 'k':
+ dhcp_set_proxy (tm, 0);
+ break;
+
+ case 'K':
+ dhcp_set_proxy (tm, 1 /*ipv6 */ );
+ break;
+
+ case 'v':
+ set_ip_flow_hash (tm, 0 /* is_ip6 */ );
+ break;
+
+ case 'V':
+ ip6_set_link_local_address (tm);
+ break;
+
+ case 'w':
+ ip6_enable_disable (tm, 1 /* enable */ );
+ break;
+
+ case 'W':
+ ip6_enable_disable (tm, 0 /* disable */ );
+ break;
+
+ case 'x':
+ ip6nd_ra_config (tm, 0 /* is_no */ );
+ break;
+ case 'X':
+ ip6nd_ra_config (tm, 1 /* is_no */ );
+ break;
+ case 'y':
+ ip6nd_ra_prefix (tm, 0 /* is_no */ );
+ break;
+ case 'Y':
+ ip6nd_ra_prefix (tm, 1 /* is_no */ );
+ break;
+
+ case '7':
+ loop_create (tm);
+ break;
+
+ case 'F':
+ set_flags (tm, 1 /* up_down */ );
+ break;
+
+ case 'f':
+ set_flags (tm, 0 /* up_down */ );
+ break;
+
+ case '@':
+ l2_xconnect (tm);
+ break;
+
+ case '#':
+ l2_bridge (tm);
+ break;
+
+ case 'h':
+ fformat (stdout, "q=quit,d=dump,L=link evts on,l=link evts off\n");
+ fformat (stdout, "S=stats on,s=stats off\n");
+ fformat (stdout, "4=add v4 route, 3=del v4 route\n");
+ fformat (stdout, "6=add v6 route, 5=del v6 route\n");
+ fformat (stdout, "A=add v4 intfc route, a=del v4 intfc route\n");
+ fformat (stdout, "B=add v6 intfc route, b=del v6 intfc route\n");
+ fformat (stdout, "z=del all intfc routes\n");
+ fformat (stdout, "t=set v4 intfc table, T=set v6 intfc table\n");
+ fformat (stdout, "c=connect unix tap\n");
+ fformat (stdout,
+ "j=set dhcpv4 and v6 link-address/option-82 params\n");
+ fformat (stdout, "k=set dhcpv4 relay agent params\n");
+ fformat (stdout, "K=set dhcpv6 relay agent params\n");
+ fformat (stdout, "E=add l2 patch, e=del l2 patch\n");
+ fformat (stdout, "V=ip6 set link-local address \n");
+ fformat (stdout, "w=ip6 enable \n");
+ fformat (stdout, "W=ip6 disable \n");
+ fformat (stdout, "x=ip6 nd config \n");
+ fformat (stdout, "X=no ip6 nd config\n");
+ fformat (stdout, "y=ip6 nd prefix \n");
+ fformat (stdout, "Y=no ip6 nd prefix\n");
+ fformat (stdout, "@=l2 xconnect\n");
+ fformat (stdout, "#=l2 bridge\n");
+ break;
+
+ default:
+ break;
+ }
+
+ }
+
+done:
+
+ if (tm->link_events_on)
+ link_up_down_enable_disable (tm, 0 /* enable */ );
+ if (tm->stats_on)
+ stats_enable_disable (tm, 0 /* enable */ );
+ if (tm->oam_events_on)
+ oam_events_enable_disable (tm, 0 /* enable */ );
+
+ disconnect_from_vpe ();
+ exit (0);
+}
+
+#undef vl_api_version
+#define vl_api_version(n,v) static u32 vpe_api_version = v;
+#include <vpp/api/vpe.api.h>
+#undef vl_api_version
+
+void
+vl_client_add_api_signatures (vl_api_memclnt_create_t * mp)
+{
+ /*
+ * Send the main API signature in slot 0. This bit of code must
+ * match the checks in ../vpe/api/api.c: vl_msg_api_version_check().
+ */
+ mp->api_versions[0] = clib_host_to_net_u32 (vpe_api_version);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vpp/api/test_ha.c b/src/vpp/api/test_ha.c
new file mode 100644
index 00000000..3264d5f9
--- /dev/null
+++ b/src/vpp/api/test_ha.c
@@ -0,0 +1,249 @@
+/*
+ *------------------------------------------------------------------
+ * test_ha.c - simple binary API keepalive test client
+ *
+ * Copyright (c) 2010 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <netinet/in.h>
+#include <signal.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <time.h>
+#include <fcntl.h>
+#include <string.h>
+#include <vppinfra/clib.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/bitmap.h>
+#include <vppinfra/fifo.h>
+#include <vppinfra/time.h>
+#include <vppinfra/mheap.h>
+#include <vppinfra/heap.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/format.h>
+#include <vppinfra/error.h>
+
+#include <vnet/vnet.h>
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+#include <svm/svm.h>
+#include <svm/svmdb.h>
+
+#include <vpp/api/vpe_msg_enum.h>
+
+#include <vnet/ip/ip.h>
+
+#define f64_endian(a)
+#define f64_print(a,b)
+
+#define vl_typedefs /* define message structures */
+#include <vpp/api/vpe_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define endian-swap functions */
+#include <vpp/api/vpe_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...)
+#define vl_printfun
+#include <vpp/api/vpe_all_api_h.h>
+#undef vl_printfun
+
+vl_shmem_hdr_t *shmem_hdr;
+
+typedef struct
+{
+ u32 pings_sent;
+ u32 pings_replied;
+ volatile u32 signal_received;
+
+ /* convenience */
+ unix_shared_memory_queue_t *vl_input_queue;
+ u32 my_client_index;
+ svmdb_client_t *svmdb_client;
+} test_main_t;
+
+test_main_t test_main;
+
+static void vl_api_control_ping_reply_t_handler
+ (vl_api_control_ping_reply_t * mp)
+{
+ test_main_t *tm = &test_main;
+
+ fformat (stdout, "control ping reply from pid %d\n", ntohl (mp->vpe_pid));
+ tm->pings_replied++;
+}
+
+vlib_main_t vlib_global_main;
+vlib_main_t **vlib_mains;
+
+void
+vlib_cli_output (struct vlib_main_t *vm, char *fmt, ...)
+{
+ clib_warning ("BUG: vlib_cli_output callled...");
+}
+
+#define foreach_api_msg \
+_(CONTROL_PING_REPLY,control_ping_reply)
+
+void
+ping (test_main_t * tm)
+{
+ vl_api_control_ping_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_CONTROL_PING);
+ mp->client_index = tm->my_client_index;
+ mp->context = 0xdeadbeef;
+
+ vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
+}
+
+static void
+noop_handler (void *notused)
+{
+}
+
+int
+connect_to_vpe (char *name)
+{
+ int rv = 0;
+ test_main_t *tm = &test_main;
+ api_main_t *am = &api_main;
+
+ rv = vl_client_connect_to_vlib ("/vpe-api", name, 32);
+ if (rv < 0)
+ return rv;
+
+#define _(N,n) \
+ vl_msg_api_set_handlers(VL_API_##N, #n, \
+ vl_api_##n##_t_handler, \
+ noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_api_msg;
+#undef _
+
+ shmem_hdr = api_main.shmem_hdr;
+ tm->vl_input_queue = shmem_hdr->vl_input_queue;
+ tm->my_client_index = am->my_client_index;
+ return 0;
+}
+
+int
+disconnect_from_vpe (void)
+{
+ vl_client_disconnect_from_vlib ();
+
+ return 0;
+}
+
+void
+signal_handler (int signo)
+{
+ test_main_t *tm = &test_main;
+
+ tm->signal_received = 1;
+}
+
+
+int
+main (int argc, char **argv)
+{
+ test_main_t *tm = &test_main;
+ api_main_t *am = &api_main;
+ u32 swt_pid = 0;
+ int connected = 0;
+
+ signal (SIGINT, signal_handler);
+
+ while (1)
+ {
+ if (tm->signal_received)
+ break;
+
+ if (am->shmem_hdr)
+ swt_pid = am->shmem_hdr->vl_pid;
+
+ /* If kill returns 0, the vpe-f process is alive */
+ if (kill (swt_pid, 0) == 0)
+ {
+ /* Try to connect */
+ if (connected == 0)
+ {
+ fformat (stdout, "Connect to VPE-f\n");
+ if (connect_to_vpe ("test_ha_client") >= 0)
+ {
+ tm->pings_sent = 0;
+ tm->pings_replied = 0;
+ connected = 1;
+ }
+ else
+ {
+ fformat (stdout, "Connect failed, sleep and retry...\n");
+ sleep (1);
+ continue;
+ }
+ }
+ tm->pings_sent++;
+ ping (tm);
+
+ sleep (1);
+
+ /* haven't heard back in 3 seconds, disconnect / reconnect */
+ if ((tm->pings_replied + 3) <= tm->pings_sent)
+ {
+ fformat (stdout, "VPE-f pid %d not responding\n", swt_pid);
+ swt_pid = 0;
+ disconnect_from_vpe ();
+ connected = 0;
+ }
+ }
+ else
+ {
+ if (connected)
+ {
+ fformat (stdout, "VPE-f pid %d died\n", swt_pid);
+ swt_pid = 0;
+ disconnect_from_vpe ();
+ connected = 0;
+ }
+ sleep (1);
+ }
+ }
+
+ fformat (stdout, "Signal received, graceful exit\n");
+ disconnect_from_vpe ();
+ exit (0);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vpp/api/vat.h b/src/vpp/api/vat.h
new file mode 120000
index 00000000..3adbdbae
--- /dev/null
+++ b/src/vpp/api/vat.h
@@ -0,0 +1 @@
+../../vat/vat.h \ No newline at end of file
diff --git a/src/vpp/api/vpe.api b/src/vpp/api/vpe.api
new file mode 100644
index 00000000..abd0e8f1
--- /dev/null
+++ b/src/vpp/api/vpe.api
@@ -0,0 +1,2782 @@
+/*
+ * Copyright (c) 2015-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** \file
+
+ This file defines vpe control-plane API messages which are generally
+ called through a shared memory interface.
+*/
+
+/*
+ * Note: API placement cleanup in progress
+ * If you're looking for interface APIs, please
+ * see .../vnet/vnet/{interface.api,interface_api.c}
+ * IP APIs: see .../vnet/vnet/ip/{ip.api, ip_api.c}
+ * TAP APIs: see .../vnet/vnet/unix/{tap.api, tap_api.c}
+ * VXLAN APIs: see .../vnet/vnet/vxlan/{vxlan.api, vxlan_api.c}
+ * AF-PACKET APIs: ... see /vnet/devices/af_packet/{af_packet.api, af_packet_api.c}
+ * NETMAP APIs: see ... /vnet/vnet/devices/netmap/{netmap.api, netmap_api.c}
+ * VHOST-USER APIs: see .../vnet/devices/virtio/{vhost_user.api, vhost_user_api.c}
+ * VXLAN GPE APIs: see .../vnet/vnet/vxlan-gpe/{vxlan_gpe.api, vxlan_gpe_api.c}
+ * GRE APIs: see .../vnet/vnet/gre/{gre.api, gre_api.c}
+ * L2TP APIs: see .../vnet/vnet/l2tp/{l2tp.api, l2tp_api.c}
+ * BFD APIs: see .../vnet/vnet/bfd/{bfd.api, bfd_api.c}
+ * IPSEC APIs: see .../vnet/vnet/ipsec/{ipsec.api, ipsec_api.c}
+ * IPSEC-GRE APIs: see .../vnet/vnet/ipsec-gre/{ipsec_gre.api, ipsec_gre_api.c}
+ * LISP APIs: see .../vnet/vnet/lisp/{lisp.api, lisp_api.c}
+ * LISP-GPE APIs: see .../vnet/vnet/lisp-gpe/{lisp_gpe.api, lisp_gpe_api.c}
+ */
+
+/** \brief Create a new subinterface with the given vlan id
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - software index of the new vlan's parent interface
+ @param vlan_id - vlan tag of the new interface
+*/
+define create_vlan_subif
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u32 vlan_id;
+};
+
+/** \brief Reply for the vlan subinterface create request
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+ @param sw_if_index - software index allocated for the new subinterface
+*/
+define create_vlan_subif_reply
+{
+ u32 context;
+ i32 retval;
+ u32 sw_if_index;
+};
+
+/** \brief Enable or Disable MPLS on an interface
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - index of the interface
+ @param enable - if non-zero enable, else disable
+*/
+define sw_interface_set_mpls_enable
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 enable;
+};
+
+/** \brief Reply for MPLS state on an interface
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define sw_interface_set_mpls_enable_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief MPLS route add / del
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param mr_label - The MPLS label value
+ @param mr_eos - The End of stack bit
+ @param mr_table_id - The MPLS table-id the route is added in
+ @param mr_classify_table_index - If this is a classify route,
+ this is the classify table index
+ @param mr_create_table_if_needed - If the MPLS or IP tables do not exist,
+ create them
+ @param mr_is_add - Is this a route add or delete
+ @param mr_is_classify - Is this route result a classify
+ @param mr_is_multipath - Is this route update a multipath - i.e. is this
+ a path addition to an existing route
+ @param mr_is_resolve_host - Recurse resolution constraint via a host prefix
+ @param mr_is_resolve_attached - Recurse resolution constraint via attached prefix
+ @param mr_next_hop_proto_is_ip4 - The next-hop is IPV4
+ @param mr_next_hop_weight - The weight, for UCMP
+ @param mr_next_hop[16] - the next-hop address
+ @param mr_next_hop_sw_if_index - the next-hop SW interface
+ @param mr_next_hop_table_id - the next-hop table-id (if appropriate)
+ @param mr_next_hop_n_out_labels - the number of labels in the label stack
+ @param mr_next_hop_out_label_stack - the next-hop output label stack, outer most first
+ @param next_hop_via_label - The next-hop is a resolved via a local label
+*/
+define mpls_route_add_del
+{
+ u32 client_index;
+ u32 context;
+ u32 mr_label;
+ u8 mr_eos;
+ u32 mr_table_id;
+ u32 mr_classify_table_index;
+ u8 mr_create_table_if_needed;
+ u8 mr_is_add;
+ u8 mr_is_classify;
+ u8 mr_is_multipath;
+ u8 mr_is_resolve_host;
+ u8 mr_is_resolve_attached;
+ u8 mr_next_hop_proto_is_ip4;
+ u8 mr_next_hop_weight;
+ u8 mr_next_hop[16];
+ u8 mr_next_hop_n_out_labels;
+ u32 mr_next_hop_sw_if_index;
+ u32 mr_next_hop_table_id;
+ u32 mr_next_hop_via_label;
+ u32 mr_next_hop_out_label_stack[mr_next_hop_n_out_labels];
+};
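+
+/* Illustrative only: mr_next_hop_out_label_stack is a variable-length
+ array sized by mr_next_hop_n_out_labels, so a client allocates the
+ message with room for the labels appended. A hedged sketch, with
+ label_stack and nh_sw_if_index as hypothetical variables and am the
+ client's api_main_t, as in the earlier create_vlan_subif sketch:
+
+ u32 i, n = vec_len (label_stack); // out-labels, outermost first
+ vl_api_mpls_route_add_del_t *mp;
+ mp = vl_msg_api_alloc (sizeof (*mp) + n * sizeof (u32));
+ memset (mp, 0, sizeof (*mp) + n * sizeof (u32));
+ mp->_vl_msg_id = ntohs (VL_API_MPLS_ROUTE_ADD_DEL);
+ // client_index / context filled as in the earlier sketch
+ mp->mr_is_add = 1;
+ mp->mr_label = htonl (100);
+ mp->mr_eos = 1;
+ mp->mr_next_hop_proto_is_ip4 = 1;
+ mp->mr_next_hop_weight = 1;
+ mp->mr_next_hop_sw_if_index = htonl (nh_sw_if_index);
+ mp->mr_next_hop_n_out_labels = n;
+ for (i = 0; i < n; i++)
+ mp->mr_next_hop_out_label_stack[i] = htonl (label_stack[i]);
+ vl_msg_api_send_shmem (am->shmem_hdr->vl_input_queue, (u8 *) & mp);
+*/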
+
+/** \brief Reply for MPLS route add / del request
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define mpls_route_add_del_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Dump MPLS fib table
+ @param client_index - opaque cookie to identify the sender
+*/
+define mpls_fib_dump
+{
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief FIB path
+ @param sw_if_index - index of the interface
+ @param weight - The weight, for UCMP
+ @param is_local - local if non-zero, else remote
+ @param is_drop - Drop the packet
+ @param is_unreach - Drop the packet and rate limit send ICMP unreachable
+ @param is_prohibit - Drop the packet and rate limit send ICMP prohibited
+ @param afi - the afi of the next hop, IP46_TYPE_IP4=1, IP46_TYPE_IP6=2
+ @param next_hop[16] - the next hop address
+
+ WARNING: this type is replicated, pending cleanup completion
+
+*/
+typeonly manual_print manual_endian define fib_path2
+{
+ u32 sw_if_index;
+ u32 weight;
+ u8 is_local;
+ u8 is_drop;
+ u8 is_unreach;
+ u8 is_prohibit;
+ u8 afi;
+ u8 next_hop[16];
+};
+
+/** \brief MPLS FIB table response
+ @param table_id - MPLS fib table id
+ @param eos_bit - End-of-stack bit
+ @param label - MPLS label value
+ @param count - the number of fib_path2 entries in path
+ @param path - array of fib_path2 structures
+*/
+manual_endian manual_print define mpls_fib_details
+{
+ u32 context;
+ u32 table_id;
+ u8 eos_bit;
+ u32 label;
+ u32 count;
+ vl_api_fib_path2_t path[count];
+};
+
+/** \brief Bind/Unbind an MPLS local label to an IP prefix, i.e. create
+ a per-prefix label entry.
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param mb_mpls_table_id - The MPLS table-id the MPLS entry will be added in
+ @param mb_label - The MPLS label value to bind
+ @param mb_ip_table_id - The IP table-id of the IP prefix to bind to.
+ @param mb_create_table_if_needed - Create either/both tables if required.
+ @param mb_is_bind - Bind or unbind
+ @param mb_is_ip4 - The prefix to bind to is IPv4
+ @param mb_address_length - Length of IP prefix
+ @param mb_address[16] - IP prefix
+*/
+define mpls_ip_bind_unbind
+{
+ u32 client_index;
+ u32 context;
+ u32 mb_mpls_table_id;
+ u32 mb_label;
+ u32 mb_ip_table_id;
+ u8 mb_create_table_if_needed;
+ u8 mb_is_bind;
+ u8 mb_is_ip4;
+ u8 mb_address_length;
+ u8 mb_address[16];
+};
+
+/** \brief Reply for MPLS IP bind/unbind request
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define mpls_ip_bind_unbind_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief MPLS tunnel add / del request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param mt_is_add - Is this a tunnel add or delete
+ @param mt_sw_if_index - The SW interface index of the tunnel to delete
+ @param mt_l2_only -
+ @param mt_next_hop_proto_is_ip4 - The next-hop is IPV4
+ @param mt_next_hop_weight - The weight, for UCMP
+ @param mt_next_hop[16] - the next-hop address
+ @param mt_next_hop_sw_if_index - the next-hop SW interface
+ @param mt_next_hop_table_id - the next-hop table-id (if appropriate)
+ @param mt_next_hop_n_out_labels - the number of next-hop output labels
+ @param mt_next_hop_out_label_stack - the next-hop output label stack, outermost first
+*/
+define mpls_tunnel_add_del
+{
+ u32 client_index;
+ u32 context;
+ u32 mt_sw_if_index;
+ u8 mt_is_add;
+ u8 mt_l2_only;
+ u8 mt_next_hop_proto_is_ip4;
+ u8 mt_next_hop_weight;
+ u8 mt_next_hop[16];
+ u8 mt_next_hop_n_out_labels;
+ u32 mt_next_hop_sw_if_index;
+ u32 mt_next_hop_table_id;
+ u32 mt_next_hop_out_label_stack[mt_next_hop_n_out_labels];
+};
+
+/** \brief Reply for MPLS tunnel add / del request
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+ @param sw_if_index - SW interface index of the tunnel created
+*/
+define mpls_tunnel_add_del_reply
+{
+ u32 context;
+ i32 retval;
+ u32 sw_if_index;
+};
+
+/** \brief Dump MPLS tunnel table
+ @param client_index - opaque cookie to identify the sender
+ @param tunnel_index - MPLS tunnel identifier, or -1 for all tunnels
+*/
+define mpls_tunnel_dump
+{
+ u32 client_index;
+ u32 context;
+ i32 tunnel_index;
+};
+
+/** \brief MPLS tunnel operational state response
+ @param tunnel_index - MPLS tunnel identifier
+ @param mt_l2_only -
+ @param mt_sw_if_index - SW interface index of the tunnel
+ @param mt_next_hop_proto_is_ip4 - the next-hop is IPV4
+ @param mt_next_hop[16] - the next-hop address
+ @param mt_next_hop_sw_if_index - the next-hop SW interface
+ @param mt_next_hop_table_id - the next-hop table-id
+ @param mt_next_hop_n_labels - number of labels in the label stack
+ @param mt_next_hop_out_labels - the tunnel's output label stack
+*/
+define mpls_tunnel_details
+{
+ u32 context;
+ u32 tunnel_index;
+ u8 mt_l2_only;
+ u8 mt_sw_if_index;
+ u8 mt_next_hop_proto_is_ip4;
+ u8 mt_next_hop[16];
+ u32 mt_next_hop_sw_if_index;
+ u32 mt_next_hop_table_id;
+ u32 mt_next_hop_n_labels;
+ u32 mt_next_hop_out_labels[mt_next_hop_n_labels];
+};
+
+/** \brief Proxy ARP add / del request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param vrf_id - VRF / Fib table ID
+ @param is_add - 1 if adding the Proxy ARP range, 0 if deleting
+ @param low_address[4] - Low address of the Proxy ARP range
+ @param hi_address[4] - High address of the Proxy ARP range
+*/
+define proxy_arp_add_del
+{
+ u32 client_index;
+ u32 context;
+ u32 vrf_id;
+ u8 is_add;
+ u8 low_address[4];
+ u8 hi_address[4];
+};
+
+/** \brief Reply for proxy arp add / del request
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define proxy_arp_add_del_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Proxy ARP interface enable / disable request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - Which interface to enable / disable Proxy Arp on
+ @param enable_disable - 1 to enable Proxy ARP on interface, 0 to disable
+*/
+define proxy_arp_intfc_enable_disable
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ /* 1 = on, 0 = off */
+ u8 enable_disable;
+};
+
+/** \brief Reply for Proxy ARP interface enable / disable request
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define proxy_arp_intfc_enable_disable_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Reset VRF (remove all routes etc) request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_ipv6 - 1 for an IPv6 VRF, 0 for IPv4
+ @param vrf_id - ID of the FIB table / VRF to reset
+*/
+define reset_vrf
+{
+ u32 client_index;
+ u32 context;
+ u8 is_ipv6;
+ u32 vrf_id;
+};
+
+/** \brief Reply for Reset VRF request
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define reset_vrf_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Is Address Reachable request - DISABLED
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param next_hop_sw_if_index - index of interface used to get to next hop
+ @param is_ipv6 - 1 for IPv6, 0 for IPv4
+ @param is_error - address not found or does not match intf
+ @param address[] - Address in question
+*/
+define is_address_reachable
+{
+ u32 client_index; /* (api_main_t *) am->my_client_index */
+ u32 context;
+ u32 next_hop_sw_if_index;
+ u8 is_known; /* on reply, this is the answer */
+ u8 is_ipv6;
+ u8 is_error; /* address not found or does not match intf */
+ u8 address[16];
+};
+
+/** \brief Want Stats, register for stats updates
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param enable_disable - 1 = enable stats, 0 = disable
+ @param pid - pid of process requesting stats updates
+*/
+define want_stats
+{
+ u32 client_index;
+ u32 context;
+ u32 enable_disable;
+ u32 pid;
+};
+
+/** \brief Reply for Want Stats request
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define want_stats_reply
+{
+ u32 context;
+ i32 retval;
+};
+
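+/* The per-prefix counter records below are pushed asynchronously by the
+ stats process to clients that registered via want_stats; each
+ vnet_ip4_fib_counters / vnet_ip6_fib_counters message carries the
+ counters for one VRF as a variable-length array of count entries. */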
+typeonly manual_print manual_endian define ip4_fib_counter
+{
+ u32 address;
+ u8 address_length;
+ u64 packets;
+ u64 bytes;
+};
+
+manual_print manual_endian define vnet_ip4_fib_counters
+{
+ u32 vrf_id;
+ u32 count;
+ vl_api_ip4_fib_counter_t c[count];
+};
+
+typeonly manual_print manual_endian define ip6_fib_counter
+{
+ u64 address[2];
+ u8 address_length;
+ u64 packets;
+ u64 bytes;
+};
+
+manual_print manual_endian define vnet_ip6_fib_counters
+{
+ u32 vrf_id;
+ u32 count;
+ vl_api_ip6_fib_counter_t c[count];
+};
+
+/** \brief Request for a single block of summary stats
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+define vnet_get_summary_stats
+{
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief Reply for vnet_get_summary_stats request
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for request
+ @param total_pkts -
+ @param total_bytes -
+ @param vector_rate -
+*/
+define vnet_summary_stats_reply
+{
+ u32 context;
+ i32 retval;
+ u64 total_pkts[2];
+ u64 total_bytes[2];
+ f64 vector_rate;
+};
+
+/** \brief OAM event structure
+ @param dst_address[] -
+ @param state -
+*/
+define oam_event
+{
+ u8 dst_address[4];
+ u8 state;
+};
+
+/** \brief Want OAM events request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param enable_disable- enable if non-zero, else disable
+ @param pid - pid of the requesting process
+*/
+define want_oam_events
+{
+ u32 client_index;
+ u32 context;
+ u32 enable_disable;
+ u32 pid;
+};
+
+/** \brief Want OAM events response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the want oam stats request
+*/
+define want_oam_events_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief OAM add / del target request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param vrf_id - vrf_id of the target
+ @param src_address[] - source address to use for the updates
+ @param dst_address[] - destination address of the target
+ @param is_add - add target if non-zero, else delete
+*/
+define oam_add_del
+{
+ u32 client_index;
+ u32 context;
+ u32 vrf_id;
+ u8 src_address[4];
+ u8 dst_address[4];
+ u8 is_add;
+};
+
+/** \brief OAM add / del target response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code of the request
+*/
+define oam_add_del_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Reset fib table request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param vrf_id - vrf/table id of the fib table to reset
+ @param is_ipv6 - an ipv6 fib to reset if non-zero, else ipv4
+*/
+define reset_fib
+{
+ u32 client_index;
+ u32 context;
+ u32 vrf_id;
+ u8 is_ipv6;
+};
+
+/** \brief Reset fib response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the reset fib request
+*/
+define reset_fib_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief DHCP Proxy config add / del request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param vrf_id - vrf id
+ @param is_ipv6 - ipv6 if non-zero, else ipv4
+ @param is_add - add the config if non-zero, else delete
+ @param insert_circuit_id - option82 suboption 1 fib number
+ @param dhcp_server[] - server address
+ @param dhcp_src_address[] - source address the proxy uses for relayed DHCP packets
+*/
+define dhcp_proxy_config
+{
+ u32 client_index;
+ u32 context;
+ u32 vrf_id;
+ u8 is_ipv6;
+ u8 is_add;
+ u8 insert_circuit_id;
+ u8 dhcp_server[16];
+ u8 dhcp_src_address[16];
+};
+
+/** \brief DHCP Proxy config response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define dhcp_proxy_config_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief DHCP Proxy set / unset vss request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param tbl_id - table id
+ @param oui - first part of vpn id
+ @param fib_id - second part of vpn id
+ @param is_ipv6 - ip6 if non-zero, else ip4
+ @param is_add - set vss if non-zero, else delete
+*/
+define dhcp_proxy_set_vss
+{
+ u32 client_index;
+ u32 context;
+ u32 tbl_id;
+ u32 oui;
+ u32 fib_id;
+ u8 is_ipv6;
+ u8 is_add;
+};
+
+/** \brief DHCP proxy set / unset vss response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define dhcp_proxy_set_vss_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Create loopback interface request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param mac_address - mac addr to assign to the interface if non-zero
+*/
+define create_loopback
+{
+ u32 client_index;
+ u32 context;
+ u8 mac_address[6];
+};
+
+/** \brief Create loopback interface response
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - sw index of the interface that was created
+ @param retval - return code for the request
+*/
+define create_loopback_reply
+{
+ u32 context;
+ i32 retval;
+ u32 sw_if_index;
+};
+
+/** \brief Delete loopback interface request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - sw index of the interface that was created
+*/
+define delete_loopback
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+};
+
+/** \brief Delete loopback interface response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define delete_loopback_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Control ping from client to api server request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+define control_ping
+{
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief Control ping from the client to the server response
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+ @param vpe_pid - the pid of the vpe, returned by the server
+*/
+define control_ping_reply
+{
+ u32 context;
+ i32 retval;
+ u32 client_index;
+ u32 vpe_pid;
+};
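+
+/* Usage note (assumed, common binary-API client practice rather than
+ anything mandated by this file): *_details replies carry no explicit
+ end-of-stream marker, so clients typically send a control_ping right
+ after a *_dump request and treat the matching control_ping_reply
+ (correlated via context) as end of the dump. Sketch, with am the
+ client's api_main_t and dump_context a hypothetical variable:
+
+ vl_api_control_ping_t *mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_CONTROL_PING);
+ mp->client_index = am->my_client_index;
+ mp->context = dump_context; // same context used for the preceding dump
+ vl_msg_api_send_shmem (am->shmem_hdr->vl_input_queue, (u8 *) & mp);
+ // details messages received before the control_ping_reply with this
+ // context belong to the dump
+*/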
+
+/** \brief Process a vpe parser cli string request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param cmd_in_shmem - pointer to cli command string
+*/
+define cli_request
+{
+ u32 client_index;
+ u32 context;
+ u64 cmd_in_shmem;
+};
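+
+/** \brief Process a vpe parser cli string request, with the command
+ string carried inband in the message body
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param length - length of the cli command string
+ @param cmd - the cli command string
+*/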
+define cli_inband
+{
+ u32 client_index;
+ u32 context;
+ u32 length;
+ u8 cmd[length];
+};
+
+/** \brief vpe parser cli string response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for request
+ @param reply_in_shmem - Reply string from cli processing if any
+*/
+define cli_reply
+{
+ u32 context;
+ i32 retval;
+ u64 reply_in_shmem;
+};
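+
+/** \brief vpe parser cli string response, with the reply string
+ carried inband in the message body
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for request
+ @param length - length of the reply string
+ @param reply - reply string from cli processing, if any
+*/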
+define cli_inband_reply
+{
+ u32 context;
+ i32 retval;
+ u32 length;
+ u8 reply[length];
+};
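+
+/* Illustrative only: unlike cli_request, which passes a pointer to a
+ string in shared memory, cli_inband carries the command bytes in the
+ message itself, so the message is allocated with room for the string.
+ A hedged sketch (cmd is a hypothetical NUL-terminated C string, am the
+ client's api_main_t):
+
+ u32 len = strlen (cmd);
+ vl_api_cli_inband_t *mp;
+ mp = vl_msg_api_alloc (sizeof (*mp) + len);
+ memset (mp, 0, sizeof (*mp) + len);
+ mp->_vl_msg_id = ntohs (VL_API_CLI_INBAND);
+ mp->client_index = am->my_client_index;
+ mp->context = 0;
+ mp->length = htonl (len);
+ memcpy (mp->cmd, cmd, len);
+ vl_msg_api_send_shmem (am->shmem_hdr->vl_input_queue, (u8 *) & mp);
+*/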
+
+/** \brief Set max allowed ARP or ip6 neighbor entries request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_ipv6 - neighbor limit if non-zero, else ARP limit
+ @param arp_neighbor_limit - the new limit, defaults are ~ 50k
+*/
+define set_arp_neighbor_limit
+{
+ u32 client_index;
+ u32 context;
+ u8 is_ipv6;
+ u32 arp_neighbor_limit;
+};
+
+/** \brief Set max allowed ARP or ip6 neighbor entries response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for request
+*/
+define set_arp_neighbor_limit_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief L2 interface patch add / del request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param rx_sw_if_index - receive side interface
+ @param tx_sw_if_index - transmit side interface
+ @param is_add - if non-zero set up the interface patch, else remove it
+*/
+define l2_patch_add_del
+{
+ u32 client_index;
+ u32 context;
+ u32 rx_sw_if_index;
+ u32 tx_sw_if_index;
+ u8 is_add;
+};
+
+/** \brief L2 interface patch add / del response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define l2_patch_add_del_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IPv6 segment routing tunnel add / del request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add the tunnel if non-zero, else delete it
+ @param name[] - tunnel name (len. 64)
+ @param src_address[] -
+ @param dst_address[] -
+ @param dst_mask_width -
+ @param inner_vrf_id -
+ @param outer_vrf_id -
+ @param flags_net_byte_order -
+ @param n_segments -
+ @param n_tags -
+ @param segs_and_tags[] -
+ @param policy_name[] - name of policy to associate this tunnel to (len. 64)
+*/
+define sr_tunnel_add_del
+{
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u8 name[64];
+ u8 src_address[16];
+ u8 dst_address[16];
+ u8 dst_mask_width;
+ u32 inner_vrf_id;
+ u32 outer_vrf_id;
+ u16 flags_net_byte_order;
+ u8 n_segments;
+ u8 n_tags;
+ u8 policy_name[64];
+ u8 segs_and_tags[0];
+};
+
+/** \brief IPv6 segment routing tunnel add / del response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+*/
+define sr_tunnel_add_del_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IPv6 segment routing policy add / del request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add the policy if non-zero, else delete it
+ @param name[] - policy name (len. 64)
+ @param tunnel_names[] -
+*/
+define sr_policy_add_del
+{
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u8 name[64];
+ u8 tunnel_names[0];
+};
+
+/** \brief IPv6 segment routing policy add / del response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+*/
+define sr_policy_add_del_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IPv6 segment routing multicast map to policy add / del request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add the mapping if non-zero, else delete it
+ @param multicast_address[] - IP6 multicast address
+ @param policy_name[] - policy name (len. 64)
+*/
+define sr_multicast_map_add_del
+{
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u8 multicast_address[16];
+ u8 policy_name[64];
+};
+
+/** \brief IPv6 segment routing multicast map to policy add / del response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+*/
+define sr_multicast_map_add_del_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Interface set vpath request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface used to reach neighbor
+ @param enable - if non-zero enable, else disable
+*/
+define sw_interface_set_vpath
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 enable;
+};
+
+/** \brief Interface set vpath response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define sw_interface_set_vpath_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Set L2 XConnect between two interfaces request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param rx_sw_if_index - Receive interface index
+ @param tx_sw_if_index - Transmit interface index
+ @param enable - enable xconnect if not 0, else set to L3 mode
+*/
+define sw_interface_set_l2_xconnect
+{
+ u32 client_index;
+ u32 context;
+ u32 rx_sw_if_index;
+ u32 tx_sw_if_index;
+ u8 enable;
+};
+
+/** \brief Set L2 XConnect response
+ @param context - sender context, to match reply w/ request
+ @param retval - L2 XConnect request return code
+*/
+define sw_interface_set_l2_xconnect_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Interface bridge mode request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param rx_sw_if_index - the interface
+ @param bd_id - bridge domain id
+ @param bvi - Setup interface as a bvi, bridge mode only
+ @param shg - Split horizon group, for bridge mode only
+ @param enable - Enable bridge mode if not 0, else set to L3 mode
+*/
+define sw_interface_set_l2_bridge
+{
+ u32 client_index;
+ u32 context;
+ u32 rx_sw_if_index;
+ u32 bd_id;
+ u8 shg;
+ u8 bvi;
+ u8 enable;
+};
+
+/** \brief Interface bridge mode response
+ @param context - sender context, to match reply w/ request
+ @param retval - Bridge mode request return code
+*/
+define sw_interface_set_l2_bridge_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief L2 FIB add entry request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param mac - the entry's mac address
+ @param bd_id - the entry's bridge domain id
+ @param sw_if_index - the interface
+ @param is_add - If non zero add the entry, else delete it
+ @param static_mac - the entry is statically configured
+ @param filter_mac - the entry is a mac filter entry
+ @param bvi_mac - the mac address is a bridge virtual interface
+*/
+define l2fib_add_del
+{
+ u32 client_index;
+ u32 context;
+ u64 mac;
+ u32 bd_id;
+ u32 sw_if_index;
+ u8 is_add;
+ u8 static_mac;
+ u8 filter_mac;
+ u8 bvi_mac;
+};
+
+/** \brief L2 FIB add entry response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the add l2fib entry request
+*/
+define l2fib_add_del_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Set L2 flags request !!! TODO - need more info, feature bits in l2_input.h
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface
+ @param is_set - if non-zero, set the bits, else clear them
+ @param feature_bitmap - non-zero bits to set or clear
+*/
+define l2_flags
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 is_set;
+ u32 feature_bitmap;
+};
+
+/** \brief Set L2 bits response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the set l2 bits request
+*/
+define l2_flags_reply
+{
+ u32 context;
+ i32 retval;
+ u32 resulting_feature_bitmap;
+};
+
+/** \brief Set bridge flags (such as L2_LEARN, L2_FWD, L2_FLOOD,
+ L2_UU_FLOOD, or L2_ARP_TERM) request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param bd_id - the bridge domain to set the flags for
+ @param is_set - if non-zero, set the flags, else clear them
+ @param feature_bitmap - bits that are non-zero to set or clear
+*/
+define bridge_flags
+{
+ u32 client_index;
+ u32 context;
+ u32 bd_id;
+ u8 is_set;
+ u32 feature_bitmap;
+};
+
+/** \brief Set bridge flags response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the set bridge flags request
+ @param resulting_feature_bitmap - the feature bitmap value after the request is implemented
+*/
+define bridge_flags_reply
+{
+ u32 context;
+ i32 retval;
+ u32 resulting_feature_bitmap;
+};
+
+/** \brief Set bridge domain ip to mac entry request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param bd_id - the bridge domain in which to add/delete the entry
+ @param is_add - if non-zero, add the entry, else clear it
+ @param is_ipv6 - if non-zero, ipv6 address, else ipv4 address
+ @param ip_address - IP address
+ @param mac_address - MAC address
+*/
+define bd_ip_mac_add_del
+{
+ u32 client_index;
+ u32 context;
+ u32 bd_id;
+ u8 is_add;
+ u8 is_ipv6;
+ u8 ip_address[16];
+ u8 mac_address[6];
+};
+
+/** \brief Set bridge domain ip to mac entry response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define bd_ip_mac_add_del_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Add/Delete classification table request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - if non-zero add the table, else delete it
+ @param del_chain - if non-zero delete the whole chain of tables
+ @param table_index - if add, returns index of the created table, else specifies the table to delete
+ @param nbuckets - number of buckets when adding a table
+ @param memory_size - memory size when adding a table
+ @param match_n_vectors - number of match vectors
+ @param next_table_index - index of next table
+ @param miss_next_index - index of miss table
+ @param current_data_flag - option to use current node's packet payload
+ as the starting point from where packets are classified,
+ This option is only valid for L2/L3 input ACL for now.
+ 0: by default, classify data from the buffer's start location
+ 1: classify packets from VPP node’s current data pointer
+ @param current_data_offset - a signed value to shift the start location of
+ the packet to be classified
+ For example, if input IP ACL node is used, L2 header’s first byte
+ can be accessible by configuring current_data_offset to -14
+ if there is no vlan tag.
+ This is valid only if current_data_flag is set to 1.
+ @param mask[] - match mask
+*/
+define classify_add_del_table
+{
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u8 del_chain;
+ u32 table_index;
+ u32 nbuckets;
+ u32 memory_size;
+ u32 skip_n_vectors;
+ u32 match_n_vectors;
+ u32 next_table_index;
+ u32 miss_next_index;
+ u32 current_data_flag;
+ i32 current_data_offset;
+ u8 mask[0];
+};
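+
+/* Illustrative only: mask[] is a variable-length field appended to the
+ message, applied after skip_n_vectors 16-byte vectors are skipped. The
+ relationship between the mask length and match_n_vectors below is an
+ assumption of this sketch, not a statement from this file; mask is a
+ hypothetical byte vector and am the client's api_main_t.
+
+ vl_api_classify_add_del_table_t *mp;
+ mp = vl_msg_api_alloc (sizeof (*mp) + vec_len (mask));
+ memset (mp, 0, sizeof (*mp) + vec_len (mask));
+ mp->_vl_msg_id = ntohs (VL_API_CLASSIFY_ADD_DEL_TABLE);
+ // client_index / context filled as in the earlier sketches
+ mp->is_add = 1;
+ mp->table_index = ~0; // ask VPP to allocate a new table
+ mp->nbuckets = htonl (32);
+ mp->memory_size = htonl (2 << 20);
+ mp->skip_n_vectors = htonl (0);
+ mp->match_n_vectors = htonl (vec_len (mask) / 16);
+ mp->next_table_index = htonl (~0);
+ mp->miss_next_index = htonl (~0);
+ clib_memcpy (mp->mask, mask, vec_len (mask));
+ vl_msg_api_send_shmem (am->shmem_hdr->vl_input_queue, (u8 *) & mp);
+*/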
+
+/** \brief Add/Delete classification table response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the table add/del request
+ @param new_table_index - for add, returned index of the new table
+ @param skip_n_vectors - for add, returned value of skip_n_vectors in table
+ @param match_n_vectors - for add, returned value of match_n_vectors in table
+*/
+define classify_add_del_table_reply
+{
+ u32 context;
+ i32 retval;
+ u32 new_table_index;
+ u32 skip_n_vectors;
+ u32 match_n_vectors;
+};
+
+/** \brief Classify add / del session request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add session if non-zero, else delete
+ @param table_index - index of the table to add/del the session, required
+ @param hit_next_index - for add, hit_next_index of new session, required
+ @param opaque_index - for add, opaque_index of new session
+ @param advance - for add, advance value for session
+ @param action -
+ 0: no action (by default)
+ metadata is not used.
+ 1: Classified IP packets will be looked up from the
+ specified ipv4 fib table (configured by metadata as VRF id).
+ Only valid for L3 input ACL node
+ 2: Classified IP packets will be looked up from the
+ specified ipv6 fib table (configured by metadata as VRF id).
+ Only valid for L3 input ACL node
+ @param metadata - valid only if action != 0
+ VRF id if action is 1 or 2.
+ @param match[] - for add, match value for session, required
+*/
+define classify_add_del_session
+{
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u32 table_index;
+ u32 hit_next_index;
+ u32 opaque_index;
+ i32 advance;
+ u8 action;
+ u32 metadata;
+ u8 match[0];
+};
+
+/** \brief Classify add / del session response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the add/del session request
+*/
+define classify_add_del_session_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Set/unset the classification table for an interface request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_ipv6 - ipv6 if non-zero, else ipv4
+ @param sw_if_index - interface to associate with the table
+ @param table_index - index of the table, if ~0 unset the table
+*/
+define classify_set_interface_ip_table
+{
+ u32 client_index;
+ u32 context;
+ u8 is_ipv6;
+ u32 sw_if_index;
+ u32 table_index; /* ~0 => off */
+};
+
+/** \brief Set/unset interface classification table response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code
+*/
+define classify_set_interface_ip_table_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Set/unset l2 classification tables for an interface request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface to set/unset tables for
+ @param ip4_table_index - ip4 index, use ~0 for all 3 indexes to unset
+ @param ip6_table_index - ip6 index
+ @param other_table_index - other index
+ @param is_input - if non-zero, set the input classify tables, else output
+*/
+define classify_set_interface_l2_tables
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ /* 3 x ~0 => off */
+ u32 ip4_table_index;
+ u32 ip6_table_index;
+ u32 other_table_index;
+ u8 is_input;
+};
+
+/** \brief Set/unset l2 classification tables for an interface response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define classify_set_interface_l2_tables_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Get node index using name request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param node_name[] - name of the node
+*/
+define get_node_index
+{
+ u32 client_index;
+ u32 context;
+ u8 node_name[64];
+};
+
+/** \brief Get node index using name response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+ @param node_index - index of the desired node if found, else ~0
+*/
+define get_node_index_reply
+{
+ u32 context;
+ i32 retval;
+ u32 node_index;
+};
+
+/** \brief Set the next node for a given node request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param node_name[] - node to add the next node to
+ @param next_name[] - node to add as the next node
+*/
+define add_node_next
+{
+ u32 client_index;
+ u32 context;
+ u8 node_name[64];
+ u8 next_name[64];
+};
+
+/** \brief Set the next node for a given node response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the add next node request
+ @param next_index - the index of the next node if success, else ~0
+*/
+define add_node_next_reply
+{
+ u32 context;
+ i32 retval;
+ u32 next_index;
+};
+
+/** \brief DHCP Proxy config 2 add / del request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param rx_vrf_id - receive vrf id
+ @param server_vrf_id - server vrf id
+ @param is_ipv6 - ipv6 if non-zero, else ipv4
+ @param is_add - add the config if non-zero, else delete
+ @param insert_circuit_id - option82 suboption 1 fib number
+ @param dhcp_server[] - server address
+ @param dhcp_src_address[] - source address the proxy uses for relayed DHCP packets
+*/
+define dhcp_proxy_config_2
+{
+ u32 client_index;
+ u32 context;
+ u32 rx_vrf_id;
+ u32 server_vrf_id;
+ u8 is_ipv6;
+ u8 is_add;
+ u8 insert_circuit_id;
+ u8 dhcp_server[16];
+ u8 dhcp_src_address[16];
+};
+
+/** \brief DHCP Proxy config 2 add / del response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for request
+*/
+define dhcp_proxy_config_2_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief L2 fib clear table request, clear all mac entries in the l2 fib
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+define l2_fib_clear_table
+{
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief L2 fib clear table response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define l2_fib_clear_table_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief L2 interface ethernet flow point filtering enable/disable request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface to enable/disable filtering on
+ @param enable_disable - if non-zero enable filtering, else disable
+*/
+define l2_interface_efp_filter
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u32 enable_disable;
+};
+
+/** \brief L2 interface ethernet flow point filtering response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define l2_interface_efp_filter_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief L2 interface vlan tag rewrite configure request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface the operation is applied to
+ @param vtr_op - Choose from l2_vtr_op_t enum values
+ @param push_dot1q - if non-zero, ethertype of the first pushed tag is dot1q, else dot1ad
+ @param tag1 - Needed for any push or translate vtr op
+ @param tag2 - Needed for any push 2 or translate x-2 vtr ops
+*/
+define l2_interface_vlan_tag_rewrite
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u32 vtr_op;
+ u32 push_dot1q; // ethertype of first pushed tag is dot1q/dot1ad
+ u32 tag1; // first pushed tag
+ u32 tag2; // second pushed tag
+};
+
+/** \brief L2 interface vlan tag rewrite response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define l2_interface_vlan_tag_rewrite_reply
+{
+ u32 context;
+ i32 retval;
+};
+
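+/** \brief Create a new sub-interface with the given sub-interface id and
+ vlan matching flags (the fields map directly onto the sub-interface template)
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - software index of the parent interface
+ @param sub_id - id of the sub-interface to create
+ @param no_tags - match untagged packets
+ @param one_tag - match single-tagged packets
+ @param two_tags - match double-tagged packets
+ @param dot1ad - 0 = dot1q, 1 = dot1ad
+ @param exact_match - match the tag stack exactly
+ @param default_sub - this is the default sub-interface
+ @param outer_vlan_id_any - match any outer vlan id
+ @param inner_vlan_id_any - match any inner vlan id
+ @param outer_vlan_id - outer vlan id to match
+ @param inner_vlan_id - inner vlan id to match
+*/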
+define create_subif
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u32 sub_id;
+
+ /* These fields map directly onto the subif template */
+ u8 no_tags;
+ u8 one_tag;
+ u8 two_tags;
+ u8 dot1ad; // 0 = dot1q, 1=dot1ad
+ u8 exact_match;
+ u8 default_sub;
+ u8 outer_vlan_id_any;
+ u8 inner_vlan_id_any;
+ u16 outer_vlan_id;
+ u16 inner_vlan_id;
+};
+
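+/** \brief Reply for the sub-interface create request
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+ @param sw_if_index - software index allocated for the new sub-interface
+*/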
+define create_subif_reply
+{
+ u32 context;
+ i32 retval;
+ u32 sw_if_index;
+};
+
+/** \brief show version
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+define show_version
+{
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief show version response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+ @param program - name of the program (vpe)
+ @param version - version of the program
+ @param build_date - date the program was built
+ @param build_directory - root of the workspace where the program was built
+*/
+define show_version_reply
+{
+ u32 context;
+ i32 retval;
+ u8 program[32];
+ u8 version[32];
+ u8 build_date[32];
+ u8 build_directory[256];
+};
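+
+/* Illustrative only: a minimal client-side reply handler for
+ show_version_reply, assuming the usual generated handler naming and
+ that the string fields are NUL-terminated (an assumption of this
+ sketch, not a guarantee made by this file).
+
+ static void
+ vl_api_show_version_reply_t_handler (vl_api_show_version_reply_t * mp)
+ {
+ if (ntohl (mp->retval) == 0)
+ fformat (stdout, "%s %s built %s in %s\n", mp->program,
+ mp->version, mp->build_date, mp->build_directory);
+ }
+*/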
+
+/** \brief l2 fib table entry structure
+ @param bd_id - the l2 fib / bridge domain table id
+ @param mac - the entry's mac address
+ @param sw_if_index - index of the interface
+ @param static_mac - the entry is statically configured.
+ @param filter_mac - the entry is a mac filter entry.
+ @param bvi_mac - the mac address is a bridge virtual interface
+*/
+define l2_fib_table_entry
+{
+ u32 context;
+ u32 bd_id;
+ u64 mac;
+ u32 sw_if_index;
+ u8 static_mac;
+ u8 filter_mac;
+ u8 bvi_mac;
+};
+
+/** \brief Dump l2 fib (aka bridge domain) table
+ @param client_index - opaque cookie to identify the sender
+ @param bd_id - the l2 fib / bridge domain table identifier
+*/
+define l2_fib_table_dump
+{
+ u32 client_index;
+ u32 context;
+ u32 bd_id;
+};
+
+/* Gross kludge, DGMS */
+define interface_name_renumber
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u32 new_show_dev_instance;
+};
+
+define interface_name_renumber_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Register for ip4 arp resolution events
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param enable_disable - 1 => register for events, 0 => cancel registration
+ @param pid - sender's pid
+ @param address - the exact ip4 address of interest
+*/
+define want_ip4_arp_events
+{
+ u32 client_index;
+ u32 context;
+ u8 enable_disable;
+ u32 pid;
+ u32 address;
+};
+
+/** \brief Reply for interface events registration
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define want_ip4_arp_events_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Tell client about an ip4 arp resolution event
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param address - the exact ip4 address of interest
+ @param pid - client pid registered to receive notification
+ @param sw_if_index - interface which received ARP packet
+ @param new_mac - the new mac address
+ @param mac_ip - 0: resolution event, 1: mac/ip binding in bd
+*/
+define ip4_arp_event
+{
+ u32 client_index;
+ u32 context;
+ u32 address;
+ u32 pid;
+ u32 sw_if_index;
+ u8 new_mac[6];
+ u8 mac_ip;
+};
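+
+/* Usage note (assumed client-side pattern, not mandated by this file):
+ after a successful want_ip4_arp_events registration, VPP sends
+ ip4_arp_event messages asynchronously, so a client simply installs a
+ handler for them. Sketch, assuming vnet's format helpers are available
+ in the client:
+
+ static void
+ vl_api_ip4_arp_event_t_handler (vl_api_ip4_arp_event_t * mp)
+ {
+ // address is 4 bytes carried in a u32, in network byte order
+ fformat (stdout, "arp %s event: ip %U mac %U\n",
+ mp->mac_ip ? "mac/ip binding" : "resolution",
+ format_ip4_address, &mp->address,
+ format_ethernet_address, mp->new_mac);
+ }
+*/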
+
+/** \brief Register for ip6 nd resolution events
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param enable_disable - 1 => register for events, 0 => cancel registration
+ @param pid - sender's pid
+ @param address - the exact ip6 address of interest
+*/
+define want_ip6_nd_events
+{
+ u32 client_index;
+ u32 context;
+ u8 enable_disable;
+ u32 pid;
+ u8 address[16];
+};
+
+/** \brief Reply for ip6 nd resolution events registration
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define want_ip6_nd_events_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Tell client about an ip6 nd resolution or mac/ip event
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param pid - client pid registered to receive notification
+ @param sw_if_index - interface which received the neighbor discovery packet
+ @param address - the exact ip6 address of interest
+ @param new_mac - the new mac address
+ @param mac_ip - 0: resolution event, 1: mac/ip binding in bd
+*/
+define ip6_nd_event
+{
+ u32 client_index;
+ u32 context;
+ u32 pid;
+ u32 sw_if_index;
+ u8 address[16];
+ u8 new_mac[6];
+ u8 mac_ip;
+};
+
+/** \brief L2 bridge domain add or delete request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param bd_id - the bridge domain to create
+ @param flood - enable/disable bcast/mcast flooding in the bd
+ @param uu_flood - enable/disable unknown unicast flood in the bd
+ @param forward - enable/disable forwarding on all interfaces in the bd
+ @param learn - enable/disable learning on all interfaces in the bd
+ @param arp_term - enable/disable arp termination in the bd
+ @param mac_age - mac aging time in min, 0 for disabled
+ @param is_add - add or delete flag
+*/
+define bridge_domain_add_del
+{
+ u32 client_index;
+ u32 context;
+ u32 bd_id;
+ u8 flood;
+ u8 uu_flood;
+ u8 forward;
+ u8 learn;
+ u8 arp_term;
+ u8 mac_age;
+ u8 is_add;
+};
+
+/** \brief L2 bridge domain add or delete response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define bridge_domain_add_del_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief L2 bridge domain request operational state details
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param bd_id - the bridge domain id desired or ~0 to request all bds
+*/
+define bridge_domain_dump
+{
+ u32 client_index;
+ u32 context;
+ u32 bd_id;
+};
+
+/** \brief L2 bridge domain operational state response
+ @param bd_id - the bridge domain id
+ @param flood - bcast/mcast flooding state on all interfaces in the bd
+ @param uu_flood - unknown unicast flooding state on all interfaces in the bd
+ @param forward - forwarding state on all interfaces in the bd
+ @param learn - learning state on all interfaces in the bd
+ @param arp_term - arp termination state on all interfaces in the bd
+ @param mac_age - mac aging time in min, 0 for disabled
+ @param bvi_sw_if_index - sw_if_index of the BVI interface for the bd
+ @param n_sw_ifs - number of sw_if_index's in the domain
+*/
+define bridge_domain_details
+{
+ u32 context;
+ u32 bd_id;
+ u8 flood;
+ u8 uu_flood;
+ u8 forward;
+ u8 learn;
+ u8 arp_term;
+ u8 mac_age;
+ u32 bvi_sw_if_index;
+ u32 n_sw_ifs;
+};
+
+/** \brief L2 bridge domain sw interface operational state response
+ @param bd_id - the bridge domain id
+ @param sw_if_index - sw_if_index in the domain
+ @param shg - split horizon group for the interface
+*/
+define bridge_domain_sw_if_details
+{
+ u32 context;
+ u32 bd_id;
+ u32 sw_if_index;
+ u8 shg;
+};
+
+/** \brief DHCP Client config add / del request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - index of the interface for DHCP client
+ @param hostname - hostname
+ @param is_add - add the config if non-zero, else delete
+ @param want_dhcp_event - DHCP event sent to the sender
+ via dhcp_compl_event API message if non-zero
+ @param pid - sender's pid
+*/
+define dhcp_client_config
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 hostname[64];
+ u8 is_add;
+ u8 want_dhcp_event;
+ u32 pid;
+};
+
+/** \brief DHCP Client config response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define dhcp_client_config_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Set/unset input ACL interface
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface to set/unset input ACL
+ @param ip4_table_index - ip4 classify table index (~0 for skip)
+ @param ip6_table_index - ip6 classify table index (~0 for skip)
+ @param l2_table_index - l2 classify table index (~0 for skip)
+ @param is_add - Set input ACL if non-zero, else unset
+ Note: User is recommended to use just one valid table_index per call.
+ (ip4_table_index, ip6_table_index, or l2_table_index)
+*/
+define input_acl_set_interface
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u32 ip4_table_index;
+ u32 ip6_table_index;
+ u32 l2_table_index;
+ u8 is_add;
+};
+
+/** \brief Set/unset input ACL interface response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define input_acl_set_interface_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Tell client about a DHCP completion event
+ @param client_index - opaque cookie to identify the sender
+ @param pid - client pid registered to receive notification
+ @param hostname - hostname
+ @param is_ipv6 - if non-zero the address is ipv6, else ipv4
+ @param host_address - Host IP address
+ @param router_address - Router IP address
+ @param host_mac - Host MAC address
+*/
+define dhcp_compl_event
+{
+ u32 client_index;
+ u32 pid;
+ u8 hostname[64];
+ u8 is_ipv6;
+ u8 host_address[16];
+ u8 router_address[16];
+ u8 host_mac[6];
+};
+
+/** \brief cop: enable/disable junk filtration features on an interface
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - desired interface
+ @param enable_disable - 1 => enable, 0 => disable
+*/
+
+define cop_interface_enable_disable
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 enable_disable;
+};
+
+/** \brief cop: interface enable/disable junk filtration reply
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+
+define cop_interface_enable_disable_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief cop: enable/disable whitelist filtration features on an interface
+ Note: the supplied fib_id must match in order to remove the feature!
+
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface handle, physical interfaces only
+ @param fib_id - fib identifier for the whitelist / blacklist fib
+ @param ip4 - 1 => enable ip4 filtration, 0=> disable ip4 filtration
+ @param ip6 - 1 => enable ip6 filtration, 0=> disable ip6 filtration
+ @param default_cop - 1 => enable non-ip4, non-ip6 filtration, 0 => disable it
+*/
+
+define cop_whitelist_enable_disable
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u32 fib_id;
+ u8 ip4;
+ u8 ip6;
+ u8 default_cop;
+};
+
+/** \brief cop: whitelist enable/disable filtration reply
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+
+define cop_whitelist_enable_disable_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief get_node_graph - get a copy of the vpp node graph
+ including the current set of graph arcs.
+
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+
+define get_node_graph
+{
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief get_node_graph_reply
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+ @param reply_in_shmem - result from vlib_node_serialize, in shared
+ memory. Process with vlib_node_unserialize, remember to switch
+ heaps and free the result.
+*/
+
+define get_node_graph_reply
+{
+ u32 context;
+ i32 retval;
+ u64 reply_in_shmem;
+};
+
+/** \brief IOAM enable : Enable in-band OAM
+ @param id - profile id
+ @param seqno - To enable Seqno Processing
+ @param analyse - Enabling analysis of iOAM at decap node
+ @param pot_enable - Proof of Transit enabled or not flag
+ @param trace_enable - iOAM Trace enabled or not flag
+*/
+define ioam_enable
+{
+ u32 client_index;
+ u32 context;
+ u16 id;
+ u8 seqno;
+ u8 analyse;
+ u8 pot_enable;
+ u8 trace_enable;
+ u32 node_id;
+};
+
+/** \brief iOAM Trace profile add / del response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+*/
+define ioam_enable_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief iOAM disable
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param id - profile id
+*/
+define ioam_disable
+{
+ u32 client_index;
+ u32 context;
+ u16 id;
+};
+
+/** \brief iOAM disable response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+*/
+define ioam_disable_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Add/del policer
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add policer if non-zero, else delete
+ @param name - policer name
+ @param cir - CIR
+ @param eir - EIR
+ @param cb - Committed Burst
+ @param eb - Excess or Peak Burst
+ @param rate_type - rate type
+ @param round_type - rounding type
+ @param type - policer algorithm
+ @param color_aware - 0=color-blind, 1=color-aware
+ @param conform_action_type - conform action type
+ @param conform_dscp - DSCP for conform mark-and-transmit action
+ @param exceed_action_type - exceed action type
+ @param exceed_dscp - DSCP for exceed mark-and-transmit action
+ @param violate_action_type - violate action type
+ @param violate_dscp - DSCP for violate mark-and-transmit action
+*/
+define policer_add_del
+{
+ u32 client_index;
+ u32 context;
+
+ u8 is_add;
+ u8 name[64];
+ u32 cir;
+ u32 eir;
+ u64 cb;
+ u64 eb;
+ u8 rate_type;
+ u8 round_type;
+ u8 type;
+ u8 color_aware;
+ u8 conform_action_type;
+ u8 conform_dscp;
+ u8 exceed_action_type;
+ u8 exceed_dscp;
+ u8 violate_action_type;
+ u8 violate_dscp;
+};
+
+/** \brief Add/del policer response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+ @param policer_index - for add, returned index of the new policer
+*/
+define policer_add_del_reply
+{
+ u32 context;
+ i32 retval;
+ u32 policer_index;
+};
+
+/** \brief Get list of policers
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param match_name_valid - if 0 request all policers otherwise use match_name
+ @param match_name - policer name
+*/
+define policer_dump
+{
+ u32 client_index;
+ u32 context;
+
+ u8 match_name_valid;
+ u8 match_name[64];
+};
+
+/** \brief Policer operational state response.
+ @param context - sender context, to match reply w/ request
+ @param name - policer name
+ @param cir - CIR
+ @param eir - EIR
+ @param cb - Committed Burst
+ @param eb - Excess or Peak Burst
+ @param rate_type - rate type
+ @param round_type - rounding type
+ @param type - policer algorithm
+ @param conform_action_type - conform action type
+ @param conform_dscp - DSCP for conform mark-and-transmit action
+ @param exceed_action_type - exceed action type
+ @param exceed_dscp - DSCP for exceed mark-and-transmit action
+ @param violate_action_type - violate action type
+ @param violate_dscp - DSCP for violate mark-and-transmit action
+ @param single_rate - 1 = single rate policer, 0 = two rate policer
+ @param color_aware - for hierarchical policing
+ @param scale - power-of-2 shift amount for lower rates
+ @param cir_tokens_per_period - number of tokens for each period
+ @param pir_tokens_per_period - number of tokens for each period for 2-rate policer
+ @param current_limit - current limit
+ @param current_bucket - current bucket
+ @param extended_limit - extended limit
+ @param extended_bucket - extended bucket
+ @param last_update_time - last update time
+*/
+define policer_details
+{
+ u32 context;
+
+ u8 name[64];
+ u32 cir;
+ u32 eir;
+ u64 cb;
+ u64 eb;
+ u8 rate_type;
+ u8 round_type;
+ u8 type;
+ u8 conform_action_type;
+ u8 conform_dscp;
+ u8 exceed_action_type;
+ u8 exceed_dscp;
+ u8 violate_action_type;
+ u8 violate_dscp;
+ u8 single_rate;
+ u8 color_aware;
+ u32 scale;
+ u32 cir_tokens_per_period;
+ u32 pir_tokens_per_period;
+ u32 current_limit;
+ u32 current_bucket;
+ u32 extended_limit;
+ u32 extended_bucket;
+ u64 last_update_time;
+};
+
+/** \brief Set/unset policer classify interface
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface to set/unset policer classify
+ @param ip4_table_index - ip4 classify table index (~0 for skip)
+ @param ip6_table_index - ip6 classify table index (~0 for skip)
+ @param l2_table_index - l2 classify table index (~0 for skip)
+ @param is_add - Set if non-zero, else unset
+ Note: User is recommended to use just one valid table_index per call.
+ (ip4_table_index, ip6_table_index, or l2_table_index)
+*/
+define policer_classify_set_interface
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u32 ip4_table_index;
+ u32 ip6_table_index;
+ u32 l2_table_index;
+ u8 is_add;
+};
+
+/** \brief Set/unset policer classify interface response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+*/
+define policer_classify_set_interface_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Get list of policer classify interfaces and tables
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param type - classify table type
+*/
+define policer_classify_dump
+{
+ u32 client_index;
+ u32 context;
+ u8 type;
+};
+
+/** \brief Policer classify operational state response.
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - software interface index
+ @param table_index - classify table index
+*/
+define policer_classify_details
+{
+ u32 context;
+ u32 sw_if_index;
+ u32 table_index;
+};
+
+/** \brief Classify get table IDs request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+define classify_table_ids
+{
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief Reply for classify get table IDs request
+ @param context - sender context which was passed in the request
+ @param count - number of ids returned in response
+ @param ids - array of classify table ids
+*/
+define classify_table_ids_reply
+{
+ u32 context;
+ i32 retval;
+ u32 count;
+ u32 ids[count];
+};
+
+/** \brief Classify table ids by interface index request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - index of the interface
+*/
+define classify_table_by_interface
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+};
+
+/** \brief Reply for classify table id by interface index request
+ @param context - sender context which was passed in the request
+ @param retval - return code for the request
+ @param sw_if_index - index of the interface
+ @param l2_table_id - l2 classify table index
+ @param ip4_table_id - ip4 classify table index
+ @param ip6_table_id - ip6 classify table index
+*/
+define classify_table_by_interface_reply
+{
+ u32 context;
+ i32 retval;
+ u32 sw_if_index;
+ u32 l2_table_id;
+ u32 ip4_table_id;
+ u32 ip6_table_id;
+};
+
+/** \brief Classify table info
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param table_id - classify table index
+*/
+define classify_table_info
+{
+ u32 client_index;
+ u32 context;
+ u32 table_id;
+};
+
+/** \brief Reply for classify table info request
+ @param context - sender context which was passed in the request
+ @param retval - return code for the request
+ @param table_id - classify table index
+ @param nbuckets - number of buckets when adding a table
+ @param match_n_vectors - number of match vectors
+ @param skip_n_vectors - number of skip vectors
+ @param active_sessions - number of sessions (active entries)
+ @param next_table_index - index of next table
+ @param miss_next_index - index of miss table
+ @param mask[] - match mask
+*/
+define classify_table_info_reply
+{
+ u32 context;
+ i32 retval;
+ u32 table_id;
+ u32 nbuckets;
+ u32 match_n_vectors;
+ u32 skip_n_vectors;
+ u32 active_sessions;
+ u32 next_table_index;
+ u32 miss_next_index;
+ u32 mask_length;
+ u8 mask[mask_length];
+};
+
+/** \brief Classify sessions dump request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param table_id - classify table index
+*/
+define classify_session_dump
+{
+ u32 client_index;
+ u32 context;
+ u32 table_id;
+};
+
+/** \brief Reply for classify table session dump request
+ @param context - sender context which was passed in the request
+ @param retval - return code for the request
+ @param table_id - classify table index
+ @param hit_next_index - hit_next_index of session
+ @param opaque_index - for add, opaque_index of session
+ @param advance - advance value of session
+ @param match[] - match value for session
+*/
+define classify_session_details
+{
+ u32 context;
+ i32 retval;
+ u32 table_id;
+ u32 hit_next_index;
+ i32 advance;
+ u32 opaque_index;
+ u32 match_length;
+ u8 match[match_length];
+};
+
+/** \brief Configure IPFIX exporter process request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param collector_address - address of IPFIX collector
+ @param collector_port - port of IPFIX collector
+ @param src_address - address of IPFIX exporter
+ @param vrf_id - VRF / fib table ID
+ @param path_mtu - Path MTU between exporter and collector
+ @param template_interval - number of seconds after which to resend template
+ @param udp_checksum - UDP checksum calculation enable flag
+*/
+define set_ipfix_exporter
+{
+ u32 client_index;
+ u32 context;
+ u8 collector_address[16];
+ u16 collector_port;
+ u8 src_address[16];
+ u32 vrf_id;
+ u32 path_mtu;
+ u32 template_interval;
+ u8 udp_checksum;
+};
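+
+/*
+ * Request-fill sketch, assuming the generated vl_api_set_ipfix_exporter_t
+ * struct and that an IPv4 collector/source address occupies the first four
+ * bytes of the 16-byte field (an assumption; the encoding is not spelled out
+ * here):
+ *
+ *   vl_api_set_ipfix_exporter_t *mp = vl_msg_api_alloc (sizeof (*mp));
+ *   memset (mp, 0, sizeof (*mp));
+ *   mp->_vl_msg_id = htons (VL_API_SET_IPFIX_EXPORTER);
+ *   clib_memcpy (mp->collector_address, &collector_ip4, 4);
+ *   mp->collector_port = htons (4739);
+ *   clib_memcpy (mp->src_address, &src_ip4, 4);
+ *   mp->vrf_id = htonl (0);
+ *   mp->path_mtu = htonl (1450);
+ *   mp->template_interval = htonl (20);
+ *   mp->udp_checksum = 1;
+ */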
+
+/** \brief Reply to IPFIX exporter configure request
+ @param context - sender context which was passed in the request
+*/
+define set_ipfix_exporter_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IPFIX exporter dump request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+define ipfix_exporter_dump
+{
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief Reply to IPFIX exporter dump request
+ @param context - sender context which was passed in the request
+ @param collector_address - address of IPFIX collector
+ @param collector_port - port of IPFIX collector
+ @param src_address - address of IPFIX exporter
+ @param vrf_id - VRF / fib table ID
+ @param path_mtu - Path MTU between exporter and collector
+ @param template_interval - number of seconds after which to resend template
+ @param udp_checksum - UDP checksum calculation enable flag
+*/
+define ipfix_exporter_details
+{
+ u32 context;
+ u8 collector_address[16];
+ u16 collector_port;
+ u8 src_address[16];
+ u32 vrf_id;
+ u32 path_mtu;
+ u32 template_interval;
+ u8 udp_checksum;
+};
+
+/** \brief IPFIX classify stream configure request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param domain_id - domain ID reported in IPFIX messages for classify stream
+ @param src_port - source port of UDP session for classify stream
+*/
+define set_ipfix_classify_stream {
+ u32 client_index;
+ u32 context;
+ u32 domain_id;
+ u16 src_port;
+};
+
+/** \brief IPFIX classify stream configure response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+*/
+define set_ipfix_classify_stream_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IPFIX classify stream dump request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+define ipfix_classify_stream_dump {
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief Reply to IPFIX classify stream dump request
+ @param context - sender context, to match reply w/ request
+ @param domain_id - domain ID reported in IPFIX messages for classify stream
+ @param src_port - source port of UDP session for classify stream
+*/
+define ipfix_classify_stream_details {
+ u32 context;
+ u32 domain_id;
+ u16 src_port;
+};
+
+/** \brief IPFIX add or delete classifier table request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param table_id - classifier table ID
+ @param ip_version - version of IP used in the classifier table
+ @param transport_protocol - transport protocol used in the classifier table or 255 for unspecified
+*/
+define ipfix_classify_table_add_del {
+ u32 client_index;
+ u32 context;
+ u32 table_id;
+ u8 ip_version;
+ u8 transport_protocol;
+ u8 is_add;
+};
+
+/** \brief IPFIX add classifier table response
+ @param context - sender context which was passed in the request
+*/
+define ipfix_classify_table_add_del_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief IPFIX classify tables dump request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+define ipfix_classify_table_dump {
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief Reply to IPFIX classify tables dump request
+ @param context - sender context, to match reply w/ request
+ @param table_id - classifier table ID
+ @param ip_version - version of IP used in the classifier table
+ @param transport_protocol - transport protocol used in the classifier table or 255 for unspecified
+*/
+define ipfix_classify_table_details {
+ u32 context;
+ u32 table_id;
+ u8 ip_version;
+ u8 transport_protocol;
+};
+
+/** \brief Set/unset flow classify interface
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface to set/unset flow classify
+ @param ip4_table_index - ip4 classify table index (~0 for skip)
+ @param ip6_table_index - ip6 classify table index (~0 for skip)
+ @param is_add - Set if non-zero, else unset
+ Note: User is recommended to use just one valid table_index per call.
+ (ip4_table_index or ip6_table_index)
+*/
+define flow_classify_set_interface {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u32 ip4_table_index;
+ u32 ip6_table_index;
+ u8 is_add;
+};
+
+/** \brief Set/unset flow classify interface response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+*/
+define flow_classify_set_interface_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Get list of flow classify interfaces and tables
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param type - classify table type
+*/
+define flow_classify_dump {
+ u32 client_index;
+ u32 context;
+ u8 type;
+};
+
+/** \brief Flow classify operational state response.
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - software interface index
+ @param table_index - classify table index
+*/
+define flow_classify_details {
+ u32 context;
+ u32 sw_if_index;
+ u32 table_index;
+};
+
+/** \brief Query relative index via node names
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param node_name - name of node to find relative index from
+ @param next_name - next node from node_name to find relative index of
+*/
+define get_next_index
+{
+ u32 client_index;
+ u32 context;
+ u8 node_name[64];
+ u8 next_name[64];
+};
+
+/** \brief Reply for get next node index
+ @param context - sender context which was passed in the request
+ @param retval - return value
+ @param next_index - index of the next_node
+*/
+define get_next_index_reply
+{
+ u32 context;
+ i32 retval;
+ u32 next_index;
+};
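+
+/*
+ * Handler-side sketch of the lookup this request implies, using the vlib
+ * node-by-name and next-index helpers; the exact calls are a best guess at
+ * the implementation, not something the definition itself fixes. The node
+ * names are taken from elsewhere in this patch and are only examples.
+ *
+ *   vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) "l2t-l2-input");
+ *   vlib_node_t *next = vlib_get_node_by_name (vm, (u8 *) "error-drop");
+ *   u32 next_index = ~0;
+ *
+ *   if (node && next)
+ *     next_index = vlib_node_get_next (vm, node->index, next->index);
+ *
+ * A next_index of ~0 means the second node is not registered as a next of
+ * the first, which would map to a non-zero retval in the reply.
+ */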
+
+/** \brief PacketGenerator create interface request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param interface_id - interface index
+*/
+define pg_create_interface
+{
+ u32 client_index;
+ u32 context;
+ u32 interface_id;
+};
+
+/** \brief PacketGenerator create interface response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+ @param sw_if_index - software interface index of the new pg interface
+*/
+define pg_create_interface_reply
+{
+ u32 context;
+ i32 retval;
+ u32 sw_if_index;
+};
+
+/** \brief PacketGenerator capture packets on given interface request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param interface_id - pg interface index
+ @param is_enabled - 1 if enabling the capture, 0 if disabling
+ @param count - number of packets to be captured
+ @param pcap_file - pcap file name in which to store captured packets
+*/
+define pg_capture
+{
+ u32 client_index;
+ u32 context;
+ u32 interface_id;
+ u8 is_enabled;
+ u32 count;
+ u32 pcap_name_length;
+ u8 pcap_file_name[pcap_name_length];
+};
+
+/** \brief PacketGenerator capture packets response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+*/
+define pg_capture_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Enable / disable packet generator request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_enabled - 1 if enabling streams, 0 if disabling
+ @param stream_name - name of the stream to be enabled/disabled; if not specified, all streams are handled
+*/
+define pg_enable_disable
+{
+ u32 client_index;
+ u32 context;
+ u8 is_enabled;
+ u32 stream_name_length;
+ u8 stream_name[stream_name_length];
+};
+
+/** \brief Reply for enable / disable packet generator
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define pg_enable_disable_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Configure IP source and L4 port-range check
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_ipv6 - 1 if source address type is IPv6
+ @param is_add - 1 if add, 0 if delete
+ @param mask_length - mask length for address entry
+ @param address - array of address bytes
+ @param number_of_ranges - length of low_port and high_port arrays (must match)
+ @param low_ports[32] - up to 32 low end of port range entries (must have corresponding high_ports entry)
+ @param high_ports[32] - up to 32 high end of port range entries (must have corresponding low_ports entry)
+ @param vrf_id - fib table/vrf id to associate the source and port-range check with
+ @note To specify a single port, set the low_ports and high_ports entries to the same value
+*/
+define ip_source_and_port_range_check_add_del
+{
+ u32 client_index;
+ u32 context;
+ u8 is_ipv6;
+ u8 is_add;
+ u8 mask_length;
+ u8 address[16];
+ u8 number_of_ranges;
+ u16 low_ports[32];
+ u16 high_ports[32];
+ u32 vrf_id;
+};
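+
+/*
+ * Fill-in sketch for the paired port arrays, assuming the generated
+ * vl_api_ip_source_and_port_range_check_add_del_t struct (names per the
+ * usual convention, not guaranteed here). Entry i of low_ports pairs with
+ * entry i of high_ports; a single port uses the same value in both.
+ *
+ *   mp->is_ipv6 = 0;
+ *   mp->is_add = 1;
+ *   mp->mask_length = 24;
+ *   clib_memcpy (mp->address, &prefix_ip4, 4);
+ *   mp->number_of_ranges = 2;
+ *   mp->low_ports[0] = htons (80);
+ *   mp->high_ports[0] = htons (80);
+ *   mp->low_ports[1] = htons (8000);
+ *   mp->high_ports[1] = htons (8100);
+ *   mp->vrf_id = htonl (0);
+ */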
+
+/** \brief Configure IP source and L4 port-range check reply
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+*/
+define ip_source_and_port_range_check_add_del_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Set interface source and L4 port-range request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - 1 if adding the binding, 0 if deleting it
+ @param sw_if_index - interface index
+ @param tcp_in_vrf_id - VRF of the source and TCP port-range check applied to received packets
+ @param tcp_out_vrf_id - VRF of the source and TCP port-range check applied to transmitted packets
+ @param udp_in_vrf_id - VRF of the source and UDP port-range check applied to received packets
+ @param udp_out_vrf_id - VRF of the source and UDP port-range check applied to transmitted packets
+*/
+define ip_source_and_port_range_check_interface_add_del
+{
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u32 sw_if_index;
+ u32 tcp_in_vrf_id;
+ u32 tcp_out_vrf_id;
+ u32 udp_in_vrf_id;
+ u32 udp_out_vrf_id;
+};
+
+/** \brief Set interface source and L4 port-range response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+*/
+define ip_source_and_port_range_check_interface_add_del_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Delete sub interface request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - sw index of the interface that was created by create_subif
+*/
+define delete_subif {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+};
+
+/** \brief Delete sub interface response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define delete_subif_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief DPDK interface HQoS pipe profile set request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - the interface
+ @param subport - subport ID
+ @param pipe - pipe ID within its subport
+ @param profile - pipe profile ID
+*/
+define sw_interface_set_dpdk_hqos_pipe {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u32 subport;
+ u32 pipe;
+ u32 profile;
+};
+
+/** \brief DPDK interface HQoS pipe profile set reply
+ @param context - sender context, to match reply w/ request
+ @param retval - request return code
+*/
+define sw_interface_set_dpdk_hqos_pipe_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief DPDK interface HQoS subport parameters set request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - the interface
+ @param subport - subport ID
+ @param tb_rate - subport token bucket rate (measured in bytes/second)
+ @param tb_size - subport token bucket size (measured in credits)
+ @param tc_rate - subport traffic class 0 .. 3 rates (measured in bytes/second)
+ @param tc_period - enforcement period for rates (measured in milliseconds)
+*/
+define sw_interface_set_dpdk_hqos_subport {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u32 subport;
+ u32 tb_rate;
+ u32 tb_size;
+ u32 tc_rate[4];
+ u32 tc_period;
+};
+
+/** \brief DPDK interface HQoS subport parameters set reply
+ @param context - sender context, to match reply w/ request
+ @param retval - request return code
+*/
+define sw_interface_set_dpdk_hqos_subport_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief DPDK interface HQoS tctbl entry set request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - the interface
+ @param entry - entry index ID
+ @param tc - traffic class (0 .. 3)
+ @param queue - traffic class queue (0 .. 3)
+*/
+define sw_interface_set_dpdk_hqos_tctbl {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u32 entry;
+ u32 tc;
+ u32 queue;
+};
+
+/** \brief DPDK interface HQoS tctbl entry set reply
+ @param context - sender context, to match reply w/ request
+ @param retval - request return code
+*/
+define sw_interface_set_dpdk_hqos_tctbl_reply {
+ u32 context;
+ i32 retval;
+};
+
+/** \brief L2 interface pbb tag rewrite configure request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface the operation is applied to
+ @param vtr_op - Choose from l2_vtr_op_t enum values
+ @param outer_tag - needed for translate_qinq vtr op only
+ @param b_dmac - B-tag remote mac address, needed for any push or translate_qinq vtr op
+ @param b_smac - B-tag local mac address, needed for any push or translate qinq vtr op
+ @param b_vlanid - B-tag vlanid, needed for any push or translate qinq vtr op
+ @param i_sid - I-tag service id, needed for any push or translate qinq vtr op
+*/
+define l2_interface_pbb_tag_rewrite
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u32 vtr_op;
+ u16 outer_tag;
+ u8 b_dmac[6];
+ u8 b_smac[6];
+ u16 b_vlanid;
+ u32 i_sid;
+};
+
+/** \brief L2 interface pbb tag rewrite response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+*/
+define l2_interface_pbb_tag_rewrite_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Punt traffic to the host
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - add punt if non-zero, else delete
+ @param ipv - L3 protocol: 4 = IPv4, 6 = IPv6, ~0 = all
+ @param l4_protocol - L4 protocol to be punted, only UDP (0x11) is supported
+ @param l4_port - TCP/UDP port to be punted
+*/
+define punt {
+ u32 client_index;
+ u32 context;
+ u8 is_add;
+ u8 ipv;
+ u8 l4_protocol;
+ u16 l4_port;
+};
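+
+/*
+ * Example field values for punting one UDP port to the host, assuming the
+ * generated vl_api_punt_t struct (an assumption):
+ *
+ *   mp->is_add = 1;
+ *   mp->ipv = ~0;                  both IPv4 and IPv6
+ *   mp->l4_protocol = 0x11;        UDP, the only supported protocol
+ *   mp->l4_port = htons (5060);
+ */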
+
+/** \brief Reply to the punt request
+ @param context - sender context which was passed in the request
+ @param retval - return code of punt request
+*/
+define punt_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/** \brief Feature path enable/disable request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - the interface
+ @param enable - 1 = on, 0 = off
+ @param arc_name - name of the feature arc
+ @param feature_name - name of the feature within the arc
+*/
+define feature_enable_disable {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 enable;
+ u8 arc_name[64];
+ u8 feature_name[64];
+};
+
+/** \brief Reply to the feature path enable/disable request
+ @param context - sender context which was passed in the request
+ @param retval - return code for the request
+*/
+define feature_enable_disable_reply
+{
+ u32 context;
+ i32 retval;
+};
+
+/*
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vpp/api/vpe_all_api_h.h b/src/vpp/api/vpe_all_api_h.h
new file mode 100644
index 00000000..397cd807
--- /dev/null
+++ b/src/vpp/api/vpe_all_api_h.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Add to the bottom of the #include list, or elves will steal your
+ * keyboard in the middle of the night!
+ */
+
+/* Include the (first) vlib-api API definition layer */
+#include <vlibmemory/vl_memory_api_h.h>
+
+/* Include the (second) vnet API definition layer */
+#define included_from_layer_3
+#include <vnet/vnet_all_api_h.h>
+#undef included_from_layer_3
+
+/* Include the current layer (third) vpp API definition layer */
+#include <vpp/api/vpe.api.h>
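+
+/*
+ * Layering sketch: vpe.api.h is conventionally generated from vpe.api by the
+ * API compiler at build time, so the include order above mirrors the
+ * message-id layering:
+ *
+ *   layer 1  vlibmemory   memory/client infrastructure messages
+ *   layer 2  vnet         vnet-level API definitions
+ *   layer 3  vpp          the definitions in src/vpp/api/vpe.api
+ *
+ * The "generated at build time" part is the conventional flow, stated here
+ * as context rather than something this header enforces.
+ */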
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vpp/api/vpe_msg_enum.h b/src/vpp/api/vpe_msg_enum.h
new file mode 100644
index 00000000..4fcc1c8c
--- /dev/null
+++ b/src/vpp/api/vpe_msg_enum.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef included_vpe_msg_enum_h
+#define included_vpe_msg_enum_h
+
+#include <vppinfra/byte_order.h>
+
+#define vl_msg_id(n,h) n,
+typedef enum
+{
+ VL_ILLEGAL_MESSAGE_ID = 0,
+#include <vpp/api/vpe_all_api_h.h>
+ VL_MSG_FIRST_AVAILABLE,
+} vl_msg_id_t;
+#undef vl_msg_id
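+
+/*
+ * Expansion sketch: the included API layers provide lines of the form
+ * vl_msg_id(VL_API_FOO, vl_api_foo_t_handler), so with the #define above the
+ * include collapses to plain enum members, roughly:
+ *
+ *   typedef enum
+ *   {
+ *     VL_ILLEGAL_MESSAGE_ID = 0,
+ *     VL_API_GET_NEXT_INDEX,
+ *     VL_API_GET_NEXT_INDEX_REPLY,
+ *     ...
+ *     VL_MSG_FIRST_AVAILABLE,
+ *   } vl_msg_id_t;
+ *
+ * The member names shown are illustrative; the real list is whatever
+ * vpe_all_api_h.h pulls in at compile time.
+ */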
+
+#endif /* included_vpe_msg_enum_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vpp/api/vpp_get_metrics.c b/src/vpp/api/vpp_get_metrics.c
new file mode 100644
index 00000000..3474133d
--- /dev/null
+++ b/src/vpp/api/vpp_get_metrics.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <pwd.h>
+#include <grp.h>
+#include <netinet/in.h>
+#include <signal.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <time.h>
+#include <fcntl.h>
+#include <string.h>
+#include <vppinfra/clib.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/bitmap.h>
+#include <vppinfra/fifo.h>
+#include <vppinfra/time.h>
+#include <vppinfra/mheap.h>
+#include <vppinfra/heap.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/format.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vnet/api_errno.h>
+
+#include <svm/svmdb.h>
+
+svmdb_client_t *c;
+volatile int signal_received;
+
+static void
+unix_signal_handler (int signum, siginfo_t * si, ucontext_t * uc)
+{
+ static int once;
+
+ if (once)
+ exit (1);
+
+ once = 1;
+ signal_received = 1;
+}
+
+static void
+setup_signal_handlers (void)
+{
+ uword i;
+ struct sigaction sa;
+
+ for (i = 1; i < 32; i++)
+ {
+ memset (&sa, 0, sizeof (sa));
+ sa.sa_sigaction = (void *) unix_signal_handler;
+ sa.sa_flags = SA_SIGINFO;
+
+ switch (i)
+ {
+ /* these signals take the default action */
+ case SIGABRT:
+ case SIGKILL:
+ case SIGSTOP:
+ case SIGUSR1:
+ case SIGUSR2:
+ continue;
+
+ /* ignore SIGPIPE, SIGCHLD */
+ case SIGPIPE:
+ case SIGCHLD:
+ sa.sa_sigaction = (void *) SIG_IGN;
+ break;
+
+ /* catch and handle all other signals */
+ default:
+ break;
+ }
+
+ if (sigaction (i, &sa, 0) < 0)
+ {
+ clib_unix_warning ("sigaction %d", (int) i);
+ return;
+ }
+ }
+}
+
+int
+main (int argc, char **argv)
+{
+ unformat_input_t input;
+ char *chroot_path = 0;
+ u8 *chroot_path_u8;
+ int interval = 0;
+ f64 *vector_ratep, *rx_ratep, *sig_error_ratep;
+ pid_t *vpp_pidp;
+ svmdb_map_args_t _ma, *ma = &_ma;
+ int uid, gid, rv;
+ struct passwd _pw, *pw;
+ struct group _grp, *grp;
+ char *s, buf[128];
+
+ unformat_init_command_line (&input, argv);
+
+ uid = geteuid ();
+ gid = getegid ();
+
+ while (unformat_check_input (&input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (&input, "chroot %s", &chroot_path_u8))
+ {
+ chroot_path = (char *) chroot_path_u8;
+ }
+ else if (unformat (&input, "interval %d", &interval))
+ ;
+ else if (unformat (&input, "uid %d", &uid))
+ ;
+ else if (unformat (&input, "gid %d", &gid))
+ ;
+ else if (unformat (&input, "uid %s", &s))
+ {
+ /* lookup the username */
+ pw = NULL;
+ rv = getpwnam_r (s, &_pw, buf, sizeof (buf), &pw);
+ if (rv < 0)
+ {
+ fformat (stderr, "cannot fetch username %s", s);
+ exit (1);
+ }
+ if (pw == NULL)
+ {
+ fformat (stderr, "username %s does not exist", s);
+ exit (1);
+ }
+ vec_free (s);
+ uid = pw->pw_uid;
+ }
+ else if (unformat (&input, "gid %s", &s))
+ {
+ /* lookup the group name */
+ grp = NULL;
+ rv = getgrnam_r (s, &_grp, buf, sizeof (buf), &grp);
+ if (rv != 0)
+ {
+ fformat (stderr, "cannot fetch group %s", s);
+ exit (1);
+ }
+ if (grp == NULL)
+ {
+ fformat (stderr, "group %s does not exist", s);
+ exit (1);
+ }
+ vec_free (s);
+ gid = grp->gr_gid;
+ }
+ else
+ {
+ fformat (stderr,
+ "usage: vpp_get_metrics [chroot <path>] [interval <nn>]\n");
+ exit (1);
+ }
+ }
+
+ setup_signal_handlers ();
+
+ memset (ma, 0, sizeof (*ma));
+ ma->root_path = chroot_path;
+ ma->uid = uid;
+ ma->gid = gid;
+
+ c = svmdb_map (ma);
+
+ vpp_pidp =
+ svmdb_local_get_variable_reference (c, SVMDB_NAMESPACE_VEC, "vpp_pid");
+ vector_ratep =
+ svmdb_local_get_variable_reference (c, SVMDB_NAMESPACE_VEC,
+ "vpp_vector_rate");
+ rx_ratep =
+ svmdb_local_get_variable_reference (c, SVMDB_NAMESPACE_VEC,
+ "vpp_input_rate");
+ sig_error_ratep =
+ svmdb_local_get_variable_reference (c, SVMDB_NAMESPACE_VEC,
+ "vpp_sig_error_rate");
+
+ /*
+ * Make sure vpp is actually running. Otherwise, there's every
+ * chance that the database region will be wiped out by the
+ * process monitor script
+ */
+
+ if (vpp_pidp == 0 || vector_ratep == 0 || rx_ratep == 0
+ || sig_error_ratep == 0)
+ {
+ fformat (stdout, "vpp not running\n");
+ exit (1);
+ }
+
+ do
+ {
+ /*
+ * Once vpp exits, the svm db region will be recreated...
+ * Can't use kill (*vpp_pidp, 0) if running as non-root /
+ * accessing the shared-VM database via group perms.
+ */
+ if (*vpp_pidp == 0)
+ {
+ fformat (stdout, "vpp not running\n");
+ exit (1);
+ }
+ fformat (stdout,
+ "%d: vpp_vector_rate=%.2f, vpp_input_rate=%f, vpp_sig_error_rate=%f\n",
+ *vpp_pidp, *vector_ratep, *rx_ratep, *sig_error_ratep);
+
+ if (interval)
+ sleep (interval);
+ if (signal_received)
+ break;
+ }
+ while (interval);
+
+ svmdb_unmap (c);
+ exit (0);
+}
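+
+/*
+ * Invocation sketch (paths and values are placeholders, not defaults):
+ *
+ *   vpp_get_metrics interval 5
+ *   vpp_get_metrics chroot /run/vpp uid vpp gid vpp
+ *
+ * Each pass prints one line in the format used above, i.e.
+ * "<pid>: vpp_vector_rate=..., vpp_input_rate=..., vpp_sig_error_rate=...",
+ * then sleeps for the configured interval until a signal arrives.
+ */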
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vpp/app/l2t.c b/src/vpp/app/l2t.c
new file mode 100644
index 00000000..45dd2807
--- /dev/null
+++ b/src/vpp/app/l2t.c
@@ -0,0 +1,557 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+
+#if DPDK == 0
+#include <vnet/devices/pci/ixge.h>
+#else
+#include <vnet/devices/dpdk/dpdk.h>
+#endif
+
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <app/l2t.h>
+
+l2t_main_t l2t_main;
+
+/* $$$$ unused?
+ * get_interface_ethernet_address
+ * paints the ethernet address for a given interface
+ * into the supplied destination
+ */
+void
+get_interface_ethernet_address (l2t_main_t * lm, u8 * dst, u32 sw_if_index)
+{
+ ethernet_main_t *em = ethernet_get_main (lm->vlib_main);
+ ethernet_interface_t *ei;
+ vnet_hw_interface_t *hi;
+
+ hi = vnet_get_sup_hw_interface (lm->vnet_main, sw_if_index);
+ ei = pool_elt_at_index (em->interfaces, hi->hw_instance);
+ clib_memcpy (dst, ei->address, sizeof (ei->address));
+}
+
+/* packet trace format function */
+u8 *
+format_l2t_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ l2t_trace_t *t = va_arg (*args, l2t_trace_t *);
+
+ if (t->is_user_to_network)
+ s = format (s, "L2T: %U (client) -> %U (our) session %d",
+ format_ip6_address, &t->client_address,
+ format_ip6_address, &t->our_address, t->session_index);
+ else
+ s = format (s, "L2T: %U (our) -> %U (client) session %d)",
+ format_ip6_address, &t->our_address,
+ format_ip6_address, &t->client_address, t->session_index);
+ return s;
+}
+
+u8 *
+format_l2t_session (u8 * s, va_list * args)
+{
+ l2t_session_t *session = va_arg (*args, l2t_session_t *);
+ l2t_main_t *lm = &l2t_main;
+ u32 counter_index;
+ vlib_counter_t v;
+
+ s = format (s, "[%d] %U (our) %U (client) vlan-id %d rx_sw_if_index %d\n",
+ session - lm->sessions,
+ format_ip6_address, &session->our_address,
+ format_ip6_address, &session->client_address,
+ clib_net_to_host_u16 (session->vlan_id), session->sw_if_index);
+
+ s = format (s, " local cookie %llx remote cookie %llx\n",
+ clib_net_to_host_u64 (session->local_cookie),
+ clib_net_to_host_u64 (session->remote_cookie));
+
+ if (session->cookie_flags & L2TP_COOKIE_ROLLOVER_LOCAL)
+ {
+ s = format (s, " local rollover cookie %llx\n",
+ clib_net_to_host_u64 (session->lcl_ro_cookie));
+ }
+
+ s = format (s, " local session-id %d remote session-id %d\n",
+ clib_net_to_host_u32 (session->local_session_id),
+ clib_net_to_host_u32 (session->remote_session_id));
+
+ s = format (s, " l2 specific sublayer %s\n",
+ session->l2_sublayer_present ? "present" : "absent");
+
+ counter_index =
+ session_index_to_counter_index (session - lm->sessions,
+ SESSION_COUNTER_USER_TO_NETWORK);
+
+ vlib_get_combined_counter (&lm->counter_main, counter_index, &v);
+ if (v.packets != 0)
+ s = format (s, " user-to-net: %llu pkts %llu bytes\n",
+ v.packets, v.bytes);
+
+ vlib_get_combined_counter (&lm->counter_main, counter_index + 1, &v);
+
+ if (v.packets != 0)
+ s = format (s, " net-to-user: %llu pkts %llu bytes\n",
+ v.packets, v.bytes);
+ return s;
+}
+
+static clib_error_t *
+show_session_summary_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ l2t_main_t *lm = &l2t_main;
+
+ vlib_cli_output (vm, "%d active sessions\n", pool_elts (lm->sessions));
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+static VLIB_CLI_COMMAND (show_session_summary_command) = {
+ .path = "show session",
+ .short_help = "show session summary",
+ .function = show_session_summary_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+show_session_detail_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ l2t_session_t *session;
+ l2t_main_t *lm = &l2t_main;
+
+ /* *INDENT-OFF* */
+ pool_foreach (session, lm->sessions,
+ ({
+ vlib_cli_output (vm, "%U", format_l2t_session, session);
+ }));
+ /* *INDENT-ON* */
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+static VLIB_CLI_COMMAND (show_session_detail_command) = {
+ .path = "show session detail",
+ .short_help = "show session table detail",
+ .function = show_session_detail_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+test_counters_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ l2t_session_t *session;
+ l2t_main_t *lm = &l2t_main;
+ u32 session_index;
+ u32 counter_index;
+ u32 nincr = 0;
+
+ /* *INDENT-OFF* */
+ pool_foreach (session, lm->sessions,
+ ({
+ session_index = session - lm->sessions;
+ counter_index =
+ session_index_to_counter_index (session_index,
+ SESSION_COUNTER_USER_TO_NETWORK);
+ vlib_increment_combined_counter (&lm->counter_main,
+ counter_index,
+ 1/*pkt*/, 1111 /*bytes*/);
+ vlib_increment_combined_counter (&lm->counter_main,
+ counter_index+1,
+ 1/*pkt*/, 2222 /*bytes*/);
+ nincr++;
+ }));
+ /* *INDENT-ON* */
+ vlib_cli_output (vm, "Incremented %d active counters\n", nincr);
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+static VLIB_CLI_COMMAND (test_counters_command) = {
+ .path = "test counters",
+ .short_help = "increment all active counters",
+ .function = test_counters_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+clear_counters_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ l2t_session_t *session;
+ l2t_main_t *lm = &l2t_main;
+ u32 session_index;
+ u32 counter_index;
+ u32 nincr = 0;
+
+ /* *INDENT-OFF* */
+ pool_foreach (session, lm->sessions,
+ ({
+ session_index = session - lm->sessions;
+ counter_index =
+ session_index_to_counter_index (session_index,
+ SESSION_COUNTER_USER_TO_NETWORK);
+ vlib_zero_combined_counter (&lm->counter_main, counter_index);
+ vlib_zero_combined_counter (&lm->counter_main, counter_index+1);
+ nincr++;
+ }));
+ /* *INDENT-ON* */
+ vlib_cli_output (vm, "Cleared %d active counters\n", nincr);
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+static VLIB_CLI_COMMAND (clear_counters_command) = {
+ .path = "clear counters",
+ .short_help = "clear all active counters",
+ .function = clear_counters_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+l2tp_session_add_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ ip6_address_t client_address, our_address;
+ ip6_address_t *dst_address_copy, *src_address_copy;
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u32 vlan_id;
+ u32 sw_if_index = (u32) ~ 0;
+ l2t_main_t *lm = &l2t_main;
+ l2t_session_t *s;
+ uword *p;
+ vnet_hw_interface_t *hi;
+ vnet_sw_interface_t *si;
+ u32 next_index;
+ uword vlan_and_sw_if_index_key;
+ u32 counter_index;
+ u64 local_cookie = (u64) ~ 0, remote_cookie = (u64) ~ 0;
+ u32 local_session_id = 1, remote_session_id = 1;
+ int our_address_set = 0, client_address_set = 0;
+ int l2_sublayer_present = 0;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "client %U",
+ unformat_ip6_address, &client_address))
+ client_address_set = 1;
+ else if (unformat (line_input, "our %U",
+ unformat_ip6_address, &our_address))
+ our_address_set = 1;
+ else if (unformat (line_input, "vlan %d", &vlan_id))
+ ;
+ else if (unformat (line_input, "l2-interface %U",
+ unformat_vnet_sw_interface,
+ vnet_get_main (), &sw_if_index))
+ ;
+ else if (unformat (line_input, "interface %U",
+ unformat_vnet_sw_interface,
+ vnet_get_main (), &sw_if_index))
+ ;
+ else if (unformat (line_input, "local-cookie %llx", &local_cookie))
+ ;
+ else if (unformat (line_input, "remote-cookie %llx", &remote_cookie))
+ ;
+ else if (unformat (line_input, "local-session-id %d",
+ &local_session_id))
+ ;
+ else if (unformat (line_input, "remote-session-id %d",
+ &remote_session_id))
+ ;
+ else if (unformat (line_input, "l2-sublayer-present"))
+ l2_sublayer_present = 1;
+ else
+ return clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+ }
+
+ unformat_free (line_input);
+
+ if (sw_if_index == (u32) ~ 0)
+ return clib_error_return (0, "l2-interface not specified");
+ if (our_address_set == 0)
+ return clib_error_return (0, "our address not specified");
+ if (client_address_set == 0)
+ return clib_error_return (0, "client address not specified");
+
+ remote_session_id = clib_host_to_net_u32 (remote_session_id);
+ local_session_id = clib_host_to_net_u32 (local_session_id);
+
+ switch (lm->lookup_type)
+ {
+ case L2T_LOOKUP_SRC_ADDRESS:
+ p = hash_get_mem (lm->session_by_src_address, &client_address);
+ if (p)
+ return clib_error_return
+ (0, "Session w/ client address %U already exists",
+ format_ip6_address, &client_address);
+ break;
+
+ case L2T_LOOKUP_DST_ADDRESS:
+ p = hash_get_mem (lm->session_by_dst_address, &our_address);
+ if (p)
+ return clib_error_return
+ (0, "Session w/ our address %U already exists",
+ format_ip6_address, &our_address);
+ break;
+
+ case L2T_LOOKUP_SESSION_ID:
+ p = hash_get (lm->session_by_session_id, local_session_id);
+ if (p)
+ return clib_error_return
+ (0,
+ "Session w/ local session id %d already exists",
+ clib_net_to_host_u32 (local_session_id));
+ break;
+
+ default:
+ ASSERT (0);
+ }
+
+ pool_get (lm->sessions, s);
+ memset (s, 0, sizeof (*s));
+ clib_memcpy (&s->our_address, &our_address, sizeof (s->our_address));
+ clib_memcpy (&s->client_address, &client_address,
+ sizeof (s->client_address));
+ s->sw_if_index = sw_if_index;
+ s->vlan_id = clib_host_to_net_u16 (vlan_id);
+ s->local_cookie = clib_host_to_net_u64 (local_cookie);
+ l2tp_session_set_remote_cookie (s, remote_cookie);
+ s->local_session_id = local_session_id;
+ s->remote_session_id = remote_session_id;
+ s->l2_sublayer_present = l2_sublayer_present;
+
+ hi = vnet_get_sup_hw_interface (lm->vnet_main, sw_if_index);
+ si = vnet_get_sup_sw_interface (lm->vnet_main, sw_if_index);
+
+ next_index = vlib_node_add_next (vm, l2t_ip6_node.index,
+ hi->output_node_index);
+ s->l2_output_next_index = next_index;
+ s->l2_output_sw_if_index = si->sw_if_index;
+
+ /* Setup hash table entries */
+ switch (lm->lookup_type)
+ {
+ case L2T_LOOKUP_SRC_ADDRESS:
+ src_address_copy = clib_mem_alloc (sizeof (*src_address_copy));
+ clib_memcpy (src_address_copy, &client_address,
+ sizeof (*src_address_copy));
+ hash_set_mem (lm->session_by_src_address, src_address_copy,
+ s - lm->sessions);
+ break;
+ case L2T_LOOKUP_DST_ADDRESS:
+ dst_address_copy = clib_mem_alloc (sizeof (*dst_address_copy));
+ clib_memcpy (dst_address_copy, &our_address,
+ sizeof (*dst_address_copy));
+ hash_set_mem (lm->session_by_dst_address, dst_address_copy,
+ s - lm->sessions);
+ break;
+ case L2T_LOOKUP_SESSION_ID:
+ hash_set (lm->session_by_session_id, local_session_id,
+ s - lm->sessions);
+ break;
+
+ default:
+ ASSERT (0);
+ }
+
+ vlan_and_sw_if_index_key = ((uword) (s->vlan_id) << 32) | sw_if_index;
+ hash_set (lm->session_by_vlan_and_rx_sw_if_index,
+ vlan_and_sw_if_index_key, s - lm->sessions);
+
+ /* validate counters */
+ counter_index =
+ session_index_to_counter_index (s - lm->sessions,
+ SESSION_COUNTER_USER_TO_NETWORK);
+ vlib_validate_counter (&lm->counter_main, counter_index);
+ vlib_validate_counter (&lm->counter_main, counter_index + 1);
+
+ /* Set promiscuous mode on the l2 interface */
+ ethernet_set_flags (lm->vnet_main, hi->hw_if_index,
+ ETHERNET_INTERFACE_FLAG_ACCEPT_ALL);
+ vnet_hw_interface_rx_redirect_to_node (lm->vnet_main, hi->hw_if_index,
+ l2t_l2_node.index);
+ return 0;
+}
+
+/* *INDENT-OFF* */
+static VLIB_CLI_COMMAND (l2tp_session_add_command) = {
+ .path = "l2tp session add",
+ .short_help =
+ "l2tp session add client <ip6> our <ip6> vlan <id> local-cookie <hex> remote-cookie <hex> local-session <dec> remote-session <dec> l2-interface <int>",
+ .function = l2tp_session_add_command_fn,
+};
+/* *INDENT-ON* */
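+
+/*
+ * CLI sketch matching the parser above (addresses, cookies and the interface
+ * name are placeholders):
+ *
+ *   l2tp session add client 2001:db8::2 our 2001:db8::1 vlan 100
+ *     local-cookie 1234 remote-cookie abcd
+ *     local-session-id 1 remote-session-id 1
+ *     l2-interface GigabitEthernet0/8/0
+ */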
+
+static clib_error_t *
+l2tp_session_del_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ l2t_main_t *lm = &l2t_main;
+ u32 session_index;
+ l2t_session_t *s;
+ hash_pair_t *hp;
+ void *key;
+ uword vlan_and_sw_if_index_key;
+
+ if (!unformat (input, "%d", &session_index))
+ return clib_error_return (0, "missing session index: '%U'",
+ format_unformat_error, input);
+
+ if (pool_is_free_index (lm->sessions, session_index))
+ return clib_error_return (0, "session %d not in use", session_index);
+
+ s = pool_elt_at_index (lm->sessions, session_index);
+
+ switch (lm->lookup_type)
+ {
+ case L2T_LOOKUP_SRC_ADDRESS:
+ hp = hash_get_pair_mem (lm->session_by_src_address, &s->client_address);
+ if (hp)
+ {
+ key = (void *) (hp->key);
+ hash_unset_mem (lm->session_by_src_address, &s->client_address);
+ clib_mem_free (key);
+ }
+ else
+ clib_warning ("session %d src address key %U AWOL",
+ s - lm->sessions,
+ format_ip6_address, &s->client_address);
+ break;
+
+ case L2T_LOOKUP_DST_ADDRESS:
+ hp = hash_get_pair_mem (lm->session_by_dst_address, &s->our_address);
+ if (hp)
+ {
+ key = (void *) (hp->key);
+ hash_unset_mem (lm->session_by_dst_address, &s->our_address);
+ clib_mem_free (key);
+ }
+ else
+ clib_warning ("session %d dst address key %U AWOL",
+ s - lm->sessions, format_ip6_address, &s->our_address);
+ break;
+
+ case L2T_LOOKUP_SESSION_ID:
+ hash_unset (lm->session_by_session_id, s->local_session_id);
+ break;
+
+ default:
+ ASSERT (0);
+ }
+
+ vlan_and_sw_if_index_key = ((uword) (s->vlan_id) << 32) | s->sw_if_index;
+
+ hash_unset (lm->session_by_vlan_and_rx_sw_if_index,
+ vlan_and_sw_if_index_key);
+
+ pool_put (lm->sessions, s);
+ return 0;
+}
+
+/* *INDENT-OFF* */
+static VLIB_CLI_COMMAND (l2tp_session_del_command) = {
+ .path = "l2tp session delete",
+ .short_help =
+ "l2tp session delete <session-id>",
+ .function = l2tp_session_del_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+l2tp_session_cookie_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ l2t_main_t *lm = &l2t_main;
+ u32 session_index;
+ l2t_session_t *s;
+ u64 lcl_ro_cookie = (u64) ~ 0, rem_ro_cookie = (u64) ~ 0;
+ u8 cookie_flags = 0;
+
+ if (!unformat (input, "%d", &session_index))
+ return clib_error_return (0, "missing session index: '%U'",
+ format_unformat_error, input);
+
+ if (pool_is_free_index (lm->sessions, session_index))
+ return clib_error_return (0, "session %d not in use", session_index);
+
+ s = pool_elt_at_index (lm->sessions, session_index);
+
+ if (unformat (input, "commit"))
+ {
+ if (!s->cookie_flags)
+ {
+ return clib_error_return (0, "no rollover cookie ready to commit");
+ }
+ else
+ {
+ l2tp_session_cookie_commit (s);
+ return 0;
+ }
+ }
+ if (!unformat (input, "rollover"))
+ return clib_error_return (0, "missing 'commit|rollover': '%U'",
+ format_unformat_error, input);
+ if (unformat (input, "local %llx", &lcl_ro_cookie))
+ {
+ cookie_flags |= L2TP_COOKIE_ROLLOVER_LOCAL;
+ l2tp_session_set_local_rollover_cookie (s, lcl_ro_cookie);
+ }
+ if (unformat (input, "remote %llx", &rem_ro_cookie))
+ {
+ cookie_flags |= L2TP_COOKIE_ROLLOVER_REMOTE;
+ l2tp_session_set_remote_cookie (s, rem_ro_cookie);
+ }
+ if (!cookie_flags)
+ return clib_error_return (0, "no rollover cookie specified");
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+static VLIB_CLI_COMMAND (l2tp_session_cookie_command) = {
+ .path = "l2tp session cookie",
+ .short_help =
+ "l2tp session cookie <session id> commit|rollover [local <hex>] [remote <hex>]",
+ .function = l2tp_session_cookie_command_fn,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vpp/app/l2t_l2.c b/src/vpp/app/l2t_l2.c
new file mode 100644
index 00000000..07d30d9a
--- /dev/null
+++ b/src/vpp/app/l2t_l2.c
@@ -0,0 +1,267 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+
+#if DPDK == 0
+#include <vnet/devices/pci/ixgev.h>
+#include <vnet/devices/pci/ixge.h>
+#include <vnet/devices/pci/ige.h>
+#else
+#include <vnet/devices/dpdk/dpdk.h>
+#endif
+
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <app/l2t.h>
+
+l2t_main_t l2t_main;
+
+/* Statistics (not really errors) */
+#define foreach_l2t_l2_error \
+_(NETWORK_TO_USER, "L2 network to user (ip6) pkts")
+
+static char *l2t_l2_error_strings[] = {
+#define _(sym,string) string,
+ foreach_l2t_l2_error
+#undef _
+};
+
+typedef enum
+{
+#define _(sym,str) L2T_L2_ERROR_##sym,
+ foreach_l2t_l2_error
+#undef _
+ L2T_L2_N_ERROR,
+} l2t_l2_error_t;
+
+/*
+ * Packets go to ethernet-input when they don't match a mapping
+ */
+typedef enum
+{
+ L2T_L2_NEXT_DROP,
+ L2T_L2_NEXT_ETHERNET_INPUT,
+ L2T_L2_NEXT_IP6_LOOKUP,
+ L2T_L2_N_NEXT,
+} l2t_l2_next_t;
+
+vlib_node_registration_t l2t_l2_node;
+
+#define NSTAGES 3
+
+static inline void
+stage0 (vlib_main_t * vm, vlib_node_runtime_t * node, u32 buffer_index)
+{
+ vlib_buffer_t *b = vlib_get_buffer (vm, buffer_index);
+ vlib_prefetch_buffer_header (b, STORE);
+ CLIB_PREFETCH (b->data, 2 * CLIB_CACHE_LINE_BYTES, STORE);
+}
+
+static inline void
+stage1 (vlib_main_t * vm, vlib_node_runtime_t * node, u32 bi)
+{
+ vlib_buffer_t *b = vlib_get_buffer (vm, bi);
+ l2t_main_t *lm = &l2t_main;
+ ethernet_header_t *eh;
+ ethernet_vlan_header_t *vh;
+ u32 session_index;
+ uword *p;
+ uword vlan_and_sw_if_index_key;
+
+ /* just in case, needed to test with the tun/tap device */
+ vlib_buffer_reset (b);
+
+ eh = vlib_buffer_get_current (b);
+
+ /* Not a VLAN pkt? send to ethernet-input... */
+ if (PREDICT_FALSE (eh->type != clib_host_to_net_u16 (0x8100)))
+ {
+ vnet_buffer (b)->l2t.next_index = L2T_L2_NEXT_ETHERNET_INPUT;
+ return;
+ }
+ vh = (ethernet_vlan_header_t *) (eh + 1);
+
+ /* look up session */
+ vlan_and_sw_if_index_key = ((uword) (vh->priority_cfi_and_id) << 32)
+ | vnet_buffer (b)->sw_if_index[VLIB_RX];
+
+ p = hash_get (lm->session_by_vlan_and_rx_sw_if_index,
+ vlan_and_sw_if_index_key);
+
+ if (PREDICT_FALSE (p == 0))
+ {
+ /* $$$ drop here if not for our MAC? */
+ vnet_buffer (b)->l2t.next_index = L2T_L2_NEXT_ETHERNET_INPUT;
+ return;
+ }
+ else
+ {
+ session_index = p[0];
+ }
+
+ /* Remember mapping index, prefetch the mini counter */
+ vnet_buffer (b)->l2t.next_index = L2T_L2_NEXT_IP6_LOOKUP;
+ vnet_buffer (b)->l2t.session_index = session_index;
+
+ /* Each mapping has 2 x (pkt, byte) counters, hence the shift */
+ CLIB_PREFETCH (lm->counter_main.mini + (p[0] << 1), CLIB_CACHE_LINE_BYTES,
+ STORE);
+}
+
+static inline u32
+last_stage (vlib_main_t * vm, vlib_node_runtime_t * node, u32 bi)
+{
+ vlib_buffer_t *b = vlib_get_buffer (vm, bi);
+ l2t_main_t *lm = &l2t_main;
+ ethernet_header_t *eh = vlib_buffer_get_current (b);
+ vlib_node_t *n = vlib_get_node (vm, l2t_l2_node.index);
+ u32 node_counter_base_index = n->error_heap_index;
+ vlib_error_main_t *em = &vm->error_main;
+ l2tpv3_header_t *l2t; /* l2tpv3 header */
+ ethernet_vlan_header_t *vh; /* 802.1q vlan header */
+ u32 counter_index;
+ l2t_session_t *s;
+ ip6_header_t *ip6;
+ u16 payload_ethertype;
+ u8 dst_mac_address[6];
+ u8 src_mac_address[6];
+ u16 payload_length;
+ i32 backup;
+
+ /* Other-than-output pkt? We're done... */
+ if (vnet_buffer (b)->l2t.next_index != L2T_L2_NEXT_IP6_LOOKUP)
+ return vnet_buffer (b)->l2t.next_index;
+
+ vh = (ethernet_vlan_header_t *) (eh + 1);
+
+ em->counters[node_counter_base_index + L2T_L2_ERROR_NETWORK_TO_USER] += 1;
+
+ counter_index =
+ session_index_to_counter_index (vnet_buffer (b)->l2t.session_index,
+ SESSION_COUNTER_NETWORK_TO_USER);
+
+ /* per-mapping byte stats include the ethernet header */
+ vlib_increment_combined_counter (&lm->counter_main, counter_index,
+ 1 /* packet_increment */ ,
+ vlib_buffer_length_in_chain (vm, b) +
+ sizeof (ethernet_header_t));
+
+ s = pool_elt_at_index (lm->sessions, vnet_buffer (b)->l2t.session_index);
+
+ /* Save src/dst MAC addresses */
+#define _(i) dst_mac_address[i] = eh->dst_address[i];
+ _(0) _(1) _(2) _(3) _(4) _(5);
+#undef _
+#define _(i) src_mac_address[i] = eh->src_address[i];
+ _(0) _(1) _(2) _(3) _(4) _(5);
+#undef _
+
+ payload_ethertype = vh->type;
+
+ /* Splice out the 802.1q vlan tag */
+ vlib_buffer_advance (b, 4);
+ eh = vlib_buffer_get_current (b);
+
+ /* restore src/dst MAC addresses */
+#define _(i) eh->dst_address[i] = dst_mac_address[i];
+ _(0) _(1) _(2) _(3) _(4) _(5);
+#undef _
+#define _(i) eh->src_address[i] = src_mac_address[i];
+ _(0) _(1) _(2) _(3) _(4) _(5);
+#undef _
+ eh->type = payload_ethertype;
+
+ /* Paint on an l2tpv3 hdr */
+ backup = sizeof (*l2t);
+#if 0
+ /* back up 4 bytes less if no l2 sublayer */
+ backup -= s->l2_sublayer_present ? 0 : 4;
+#endif
+
+ vlib_buffer_advance (b, -backup);
+ l2t = vlib_buffer_get_current (b);
+
+ l2t->session_id = s->remote_session_id;
+ l2t->cookie = s->remote_cookie;
+
+#if 0
+ if (s->l2_sublayer_present)
+ l2t->l2_specific_sublayer = 0;
+#endif
+
+ /* Paint on an ip6 header */
+ vlib_buffer_advance (b, -(sizeof (*ip6)));
+ ip6 = vlib_buffer_get_current (b);
+
+ ip6->ip_version_traffic_class_and_flow_label =
+ clib_host_to_net_u32 (0x6 << 28);
+
+ /* calculate ip6 payload length */
+ payload_length = vlib_buffer_length_in_chain (vm, b);
+ payload_length -= sizeof (*ip6);
+
+ ip6->payload_length = clib_host_to_net_u16 (payload_length);
+ ip6->protocol = 0x73; /* l2tpv3 */
+ ip6->hop_limit = 0xff;
+ ip6->src_address.as_u64[0] = s->our_address.as_u64[0];
+ ip6->src_address.as_u64[1] = s->our_address.as_u64[1];
+ ip6->dst_address.as_u64[0] = s->client_address.as_u64[0];
+ ip6->dst_address.as_u64[1] = s->client_address.as_u64[1];
+
+ return L2T_L2_NEXT_IP6_LOOKUP;
+}
+
+#include <vnet/pipeline.h>
+
+static uword
+l2t_l2_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return dispatch_pipeline (vm, node, frame);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (l2t_l2_node) = {
+ .function = l2t_l2_node_fn,
+ .name = "l2t-l2-input",
+ .vector_size = sizeof (u32),
+ .format_trace = format_l2t_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(l2t_l2_error_strings),
+ .error_strings = l2t_l2_error_strings,
+
+ .n_next_nodes = L2T_L2_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [L2T_L2_NEXT_IP6_LOOKUP] = "ip6-lookup",
+ [L2T_L2_NEXT_ETHERNET_INPUT] = "ethernet-input",
+ [L2T_L2_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (l2t_l2_node, l2t_l2_node_fn);
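+
+/*
+ * Pipeline sketch: with NSTAGES == 3, <vnet/pipeline.h> builds the
+ * dispatch_pipeline() used above so that, per packet,
+ *
+ *   stage0     prefetches the buffer header and data
+ *   stage1     does the VLAN/session lookup and picks a next index
+ *   last_stage rewrites the frame to l2tpv3-over-ip6 and returns the next
+ *
+ * with the stages staggered across the vector so each one works on data the
+ * previous iteration prefetched. This is a summary of the pattern, not a
+ * substitute for reading pipeline.h itself.
+ */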
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vpp/app/sticky_hash.c b/src/vpp/app/sticky_hash.c
new file mode 100644
index 00000000..5569c677
--- /dev/null
+++ b/src/vpp/app/sticky_hash.c
@@ -0,0 +1,581 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/l2/l2_classify.h>
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ip/ip_packet.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vppinfra/error.h>
+
+typedef struct
+{
+ u32 fwd_entry_index;
+ u32 rev_entry_index;
+ /* Not strictly needed, for show command */
+ u32 fib_index;
+} sticky_hash_session_t;
+
+typedef struct
+{
+ u32 cached_next_index;
+
+ /* next index added to l2_classify */
+ u32 fwd_miss_next_index;
+
+ /* session pool */
+ sticky_hash_session_t *sessions;
+
+ /* Forward and reverse data session setup buffers */
+ u8 fdata[3 * sizeof (u32x4)];
+ u8 rdata[3 * sizeof (u32x4)];
+
+ /* convenience variables */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+ vnet_classify_main_t *vnet_classify_main;
+ l2_input_classify_main_t *l2_input_classify_main;
+}
+sticky_hash_main_t;
+
+typedef struct
+{
+ /* $$$$ fill in with per-pkt trace data */
+ u32 next_index;
+ u32 sw_if_index;
+} sticky_hash_miss_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_sticky_hash_miss_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ sticky_hash_miss_trace_t *t = va_arg (*args, sticky_hash_miss_trace_t *);
+
+ s = format (s, "STICKY_HASH_MISS: sw_if_index %d", t->sw_if_index);
+ return s;
+}
+
+typedef CLIB_PACKED (struct
+ {
+ ethernet_header_t eh; ip4_header_t ip;
+ }) classify_data_or_mask_t;
+
+sticky_hash_main_t sticky_hash_main;
+
+vlib_node_registration_t sticky_hash_miss_node;
+
+#define foreach_sticky_hash_miss_error \
+_(MISSES, "forward flow classify misses")
+
+typedef enum
+{
+#define _(sym,str) STICKY_HASH_MISS_ERROR_##sym,
+ foreach_sticky_hash_miss_error
+#undef _
+ STICKY_HASH_MISS_N_ERROR,
+} sticky_hash_miss_error_t;
+
+static char *sticky_hash_miss_error_strings[] = {
+#define _(sym,string) string,
+ foreach_sticky_hash_miss_error
+#undef _
+};
+
+/*
+ * To drop a pkt and increment one of the previous counters:
+ *
+ * set b0->error = error_node->errors[STICKY_HASH_MISS_ERROR_EXAMPLE];
+ * set next0 to a disposition index bound to "error-drop".
+ *
+ * To manually increment the specific counter STICKY_HASH_MISS_ERROR_EXAMPLE:
+ *
+ * vlib_node_t *n = vlib_get_node (vm, sticky_hash_miss.index);
+ * u32 node_counter_base_index = n->error_heap_index;
+ * vlib_error_main_t * em = &vm->error_main;
+ * em->counters[node_counter_base_index + STICKY_HASH_MISS_ERROR_EXAMPLE] += 1;
+ *
+ */
+
+typedef enum
+{
+ STICKY_HASH_MISS_NEXT_IP4_INPUT,
+ STICKY_HASH_MISS_N_NEXT,
+} sticky_hash_miss_next_t;
+
+static uword
+sticky_hash_miss_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next;
+ sticky_hash_miss_next_t next_index;
+ sticky_hash_main_t *mp = &sticky_hash_main;
+ vlib_node_t *n = vlib_get_node (vm, sticky_hash_miss_node.index);
+ u32 node_counter_base_index = n->error_heap_index;
+ vlib_error_main_t *em = &vm->error_main;
+ vnet_classify_main_t *cm = mp->vnet_classify_main;
+ ip4_main_t *im = &ip4_main;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0;
+ u32 sw_if_index0;
+ u32 fib_index0, ft_index0, rt_index0;
+ vnet_classify_table_3_t *ft0, *rt0;
+ vnet_classify_entry_3_t *fe0, *re0;
+ classify_data_or_mask_t *h0;
+ u8 was_found0;
+ ip4_fib_t *fib0;
+ sticky_hash_session_t *s;
+ u32 tmp;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ next0 = mp->cached_next_index;
+
+ h0 = vlib_buffer_get_current (b0);
+
+ /* Add forward and reverse entries for this flow */
+ clib_memcpy (mp->fdata, h0, sizeof (mp->fdata));
+ clib_memcpy (mp->rdata, h0, sizeof (mp->rdata));
+
+ h0 = (classify_data_or_mask_t *) (mp->rdata);
+
+ /* swap src + dst addresses to form reverse data */
+ tmp = h0->ip.src_address.as_u32;
+ h0->ip.src_address.as_u32 = h0->ip.dst_address.as_u32;
+ h0->ip.dst_address.as_u32 = tmp;
+
+ /* dig up fwd + rev tables */
+ fib_index0 = vec_elt (im->fib_index_by_sw_if_index, sw_if_index0);
+ fib0 = vec_elt_at_index (im->fibs, fib_index0);
+
+ ft_index0 = fib0->fwd_classify_table_index;
+ rt_index0 = fib0->rev_classify_table_index;
+
+ ft0 = (vnet_classify_table_3_t *)
+ pool_elt_at_index (cm->tables, ft_index0);
+ rt0 = (vnet_classify_table_3_t *)
+ pool_elt_at_index (cm->tables, rt_index0);
+
+ fe0 =
+ vnet_classify_find_or_add_entry_3 (ft0, mp->fdata, &was_found0);
+ fe0->next_index = L2_INPUT_CLASSIFY_NEXT_IP4_INPUT;
+ fe0->advance = sizeof (ethernet_header_t);
+
+ re0 = vnet_classify_find_or_add_entry_3 (rt0, mp->rdata, 0);
+ re0->next_index = L2_INPUT_CLASSIFY_NEXT_IP4_INPUT; /* $$$ FIXME */
+ re0->advance = sizeof (ethernet_header_t);
+
+ /* Note: we could get a whole vector of misses for the same sess */
+ if (was_found0 == 0)
+ {
+ pool_get (mp->sessions, s);
+
+ fe0->opaque_index = s - mp->sessions;
+ re0->opaque_index = s - mp->sessions;
+
+ s->fwd_entry_index = fe0 - ft0->entries;
+ s->rev_entry_index = re0 - rt0->entries;
+ s->fib_index = fib_index0;
+ }
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ sticky_hash_miss_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ }
+
+ em->counters[node_counter_base_index +
+ STICKY_HASH_MISS_ERROR_MISSES] += 1;
+
+ vlib_buffer_advance (b0, sizeof (ethernet_header_t));
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (sticky_hash_miss_node) = {
+ .function = sticky_hash_miss_node_fn,
+ .name = "sticky-hash-miss",
+ .vector_size = sizeof (u32),
+ .format_trace = format_sticky_hash_miss_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(sticky_hash_miss_error_strings),
+ .error_strings = sticky_hash_miss_error_strings,
+
+ .n_next_nodes = STICKY_HASH_MISS_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [STICKY_HASH_MISS_NEXT_IP4_INPUT] = "ip4-input",
+ },
+};
+/* *INDENT-ON* */
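+
+/*
+ * Flow-creation sketch for one classifier miss (names taken from the code
+ * above; the control flow is a summary):
+ *
+ *   miss on fwd table -> add fwd entry keyed on (src, dst) as received
+ *                     -> add rev entry keyed on (dst, src), i.e. swapped
+ *                     -> both entries record one sticky_hash_session_t index
+ *
+ * Later packets of the flow, in either direction, match the classifier
+ * directly and are steered to ip4-input without revisiting this miss node.
+ */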
+
+clib_error_t *
+sticky_hash_miss_init (vlib_main_t * vm)
+{
+ sticky_hash_main_t *mp = &sticky_hash_main;
+
+ mp->vlib_main = vm;
+ mp->vnet_main = vnet_get_main ();
+ mp->vnet_classify_main = &vnet_classify_main;
+ mp->l2_input_classify_main = &l2_input_classify_main;
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (sticky_hash_miss_init);
+
+static int ip4_sticky_hash_enable_disable
+ (sticky_hash_main_t * mp,
+ u32 fwd_sw_if_index, u8 * fwd_mask,
+ u32 rev_sw_if_index, u8 * rev_mask, u32 nbuckets, int enable_disable)
+{
+ ip4_main_t *im = &ip4_main;
+ u32 fib_index;
+ ip4_fib_t *fib;
+ vnet_classify_main_t *cm = mp->vnet_classify_main;
+ l2_input_classify_main_t *l2cm = mp->l2_input_classify_main;
+ vnet_classify_table_3_t *ft, *rt;
+
+ fib_index = vec_elt (im->fib_index_by_sw_if_index, fwd_sw_if_index);
+ fib = vec_elt_at_index (im->fibs, fib_index);
+
+ if (fib->fwd_classify_table_index == ~0)
+ {
+ /* Set up forward table */
+ ft = (vnet_classify_table_3_t *)
+ vnet_classify_new_table (cm, fwd_mask, nbuckets,
+ 0 /* skip */ , 3 /* match */ );
+ fib->fwd_classify_table_index
+ = ft - (vnet_classify_table_3_t *) cm->tables;
+ mp->fwd_miss_next_index =
+ vlib_node_add_next (mp->vlib_main, l2_input_classify_node.index,
+ sticky_hash_miss_node.index);
+ ft->miss_next_index = mp->fwd_miss_next_index;
+
+ /* Set up reverse table */
+ rt = (vnet_classify_table_3_t *)
+ vnet_classify_new_table (cm, rev_mask, nbuckets,
+ 0 /* skip */ , 3 /* match */ );
+ fib->rev_classify_table_index
+ = rt - (vnet_classify_table_3_t *) cm->tables;
+ }
+
+ vec_validate
+ (l2cm->classify_table_index_by_sw_if_index[L2_INPUT_CLASSIFY_TABLE_IP4],
+ fwd_sw_if_index);
+
+ vec_validate
+ (l2cm->classify_table_index_by_sw_if_index[L2_INPUT_CLASSIFY_TABLE_IP6],
+ fwd_sw_if_index);
+
+ vec_validate
+ (l2cm->classify_table_index_by_sw_if_index[L2_INPUT_CLASSIFY_TABLE_OTHER],
+ fwd_sw_if_index);
+
+ l2cm->classify_table_index_by_sw_if_index[L2_INPUT_CLASSIFY_TABLE_IP4]
+ [fwd_sw_if_index] = fib->fwd_classify_table_index;
+
+ l2cm->classify_table_index_by_sw_if_index[L2_INPUT_CLASSIFY_TABLE_IP6]
+ [fwd_sw_if_index] = ~0;
+
+ l2cm->classify_table_index_by_sw_if_index[L2_INPUT_CLASSIFY_TABLE_OTHER]
+ [fwd_sw_if_index] = ~0;
+
+
+ vec_validate
+ (l2cm->classify_table_index_by_sw_if_index[L2_INPUT_CLASSIFY_TABLE_IP4],
+ rev_sw_if_index);
+
+ vec_validate
+ (l2cm->classify_table_index_by_sw_if_index[L2_INPUT_CLASSIFY_TABLE_IP6],
+ rev_sw_if_index);
+
+ vec_validate
+ (l2cm->classify_table_index_by_sw_if_index[L2_INPUT_CLASSIFY_TABLE_OTHER],
+ rev_sw_if_index);
+
+
+ l2cm->classify_table_index_by_sw_if_index[L2_INPUT_CLASSIFY_TABLE_IP4]
+ [rev_sw_if_index] = fib->rev_classify_table_index;
+
+ l2cm->classify_table_index_by_sw_if_index[L2_INPUT_CLASSIFY_TABLE_IP6]
+ [rev_sw_if_index] = ~0;
+
+ l2cm->classify_table_index_by_sw_if_index[L2_INPUT_CLASSIFY_TABLE_OTHER]
+ [rev_sw_if_index] = ~0;
+
+ vnet_l2_input_classify_enable_disable (fwd_sw_if_index, enable_disable);
+ vnet_l2_input_classify_enable_disable (rev_sw_if_index, enable_disable);
+ return 0;
+}
+
+static clib_error_t *
+ip4_sticky_hash_init_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ u32 fwd_sw_if_index = ~0, rev_sw_if_index = ~0;
+ int enable_disable = 1;
+ u32 nbuckets = 2;
+ int rv;
+ sticky_hash_main_t *mp = &sticky_hash_main;
+ classify_data_or_mask_t fwd_mask, rev_mask;
+ u8 *fm = 0, *rm = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat
+ (input, "fwd %U", unformat_vnet_sw_interface, mp->vnet_main,
+ &fwd_sw_if_index))
+ ;
+ else if (unformat
+ (input, "rev %U", unformat_vnet_sw_interface, mp->vnet_main,
+ &rev_sw_if_index))
+ ;
+ else if (unformat (input, "nbuckets %d", &nbuckets))
+ ;
+ else if (unformat (input, "disable"))
+ enable_disable = 0;
+
+ else
+ break;
+ }
+
+ nbuckets = 1 << max_log2 (nbuckets);
+
+ if (fwd_sw_if_index == ~0)
+ return clib_error_return (0, "fwd interface not set");
+
+ if (rev_sw_if_index == ~0)
+ return clib_error_return (0, "rev interface not set");
+
+ if (!is_pow2 (nbuckets))
+ return clib_error_return (0, "nbuckets %d not a power of 2", nbuckets);
+
+ ASSERT (sizeof (fwd_mask) <= 3 * sizeof (u32x4));
+
+ /* Mask on src/dst address, depending on direction */
+ memset (&fwd_mask, 0, sizeof (fwd_mask));
+ memset (&fwd_mask.ip.src_address, 0xff, 4);
+
+ memset (&rev_mask, 0, sizeof (rev_mask));
+ memset (&rev_mask.ip.dst_address, 0xff, 4);
+
+ vec_validate (fm, 3 * sizeof (u32x4) - 1);
+ vec_validate (rm, 3 * sizeof (u32x4) - 1);
+
+ clib_memcpy (fm, &fwd_mask, sizeof (fwd_mask));
+ clib_memcpy (rm, &rev_mask, sizeof (rev_mask));
+
+ rv = ip4_sticky_hash_enable_disable (mp, fwd_sw_if_index, fm,
+ rev_sw_if_index, rm,
+ nbuckets, enable_disable);
+
+ vec_free (fm);
+ vec_free (rm);
+ switch (rv)
+ {
+ case 0:
+ return 0;
+
+ default:
+ return clib_error_return (0,
+ "ip4_sticky_hash_enable_disable returned %d",
+ rv);
+ }
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (sticky_hash_init_command, static) = {
+ .path = "ip sticky classify",
+ .short_help = "ip sticky classify fwd <intfc> rev <intfc> "
+ "[nbuckets <nn>][disable]",
+ .function = ip4_sticky_hash_init_command_fn,
+};
+/* *INDENT-ON* */
+
+
+u8 *
+format_sticky_hash_session (u8 * s, va_list * args)
+{
+ sticky_hash_main_t *mp = va_arg (*args, sticky_hash_main_t *);
+ sticky_hash_session_t *session = va_arg (*args, sticky_hash_session_t *);
+ vnet_classify_table_3_t *t;
+ vnet_classify_entry_3_t *e;
+ ip4_main_t *im = &ip4_main;
+ vnet_classify_main_t *cm = mp->vnet_classify_main;
+ ip4_fib_t *fib;
+ classify_data_or_mask_t *match;
+
+ fib = vec_elt_at_index (im->fibs, session->fib_index);
+
+ t = (vnet_classify_table_3_t *)
+ pool_elt_at_index (cm->tables, fib->fwd_classify_table_index);
+ e = pool_elt_at_index (t->entries, session->fwd_entry_index);
+ match = (classify_data_or_mask_t *) (e->key);
+
+ s = format
+ (s,
+ "[%6d] fwd src %U next index %d session %d fib %d\n"
+ " hits %lld last-heard %.6f\n",
+ e - t->entries,
+ format_ip4_address, &match->ip.src_address,
+ e->next_index, e->opaque_index, fib->table_id, e->hits, e->last_heard);
+
+ if (e->opaque_index != session - mp->sessions)
+ s = format (s, "WARNING: forward session index mismatch!\n");
+
+ t = (vnet_classify_table_3_t *)
+ pool_elt_at_index (cm->tables, fib->rev_classify_table_index);
+ e = pool_elt_at_index (t->entries, session->rev_entry_index);
+ match = (classify_data_or_mask_t *) (e->key);
+
+ s = format
+ (s,
+ "[%6d] rev dst %U next index %d session %d\n"
+ " hits %lld last-heard %.6f\n",
+ e - t->entries,
+ format_ip4_address, &match->ip.dst_address,
+ e->next_index, e->opaque_index, e->hits, e->last_heard);
+
+ if (e->opaque_index != session - mp->sessions)
+ s = format (s, "WARNING: reverse session index mismatch!\n");
+ s = format (s, "---------\n");
+
+ return s;
+}
+
+static clib_error_t *
+show_ip4_sticky_hash_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ sticky_hash_main_t *mp = &sticky_hash_main;
+ sticky_hash_session_t *s;
+ int verbose = 0;
+ int dump_classifier_tables = 0;
+ ip4_fib_t *fib;
+ ip4_main_t *im4 = &ip4_main;
+ vnet_classify_main_t *cm = mp->vnet_classify_main;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "verbose"))
+ verbose = 1;
+ else if (unformat (input, "dump-tables")
+ || unformat (input, "dump-classifier-tables"))
+ dump_classifier_tables = 1;
+ else
+ break;
+ }
+
+ if (pool_elts (mp->sessions) == 0)
+ vlib_cli_output (vm, "No ip sticky hash sessions");
+
+
+ vlib_cli_output (vm, "%d active sessions\n", pool_elts (mp->sessions));
+
+ vec_foreach (fib, im4->fibs)
+ {
+ if (fib->fwd_classify_table_index != ~0)
+ vlib_cli_output (vm, "fib %d fwd table: \n%U",
+ fib->table_id,
+ format_classify_table,
+ cm,
+ pool_elt_at_index
+ (cm->tables, fib->fwd_classify_table_index),
+ dump_classifier_tables);
+ if (fib->rev_classify_table_index != ~0)
+ vlib_cli_output (vm, "fib %d rev table: \n%U",
+ fib->table_id,
+ format_classify_table,
+ cm,
+ pool_elt_at_index
+ (cm->tables, fib->rev_classify_table_index),
+ dump_classifier_tables);
+ }
+
+ if (verbose)
+ {
+ /* *INDENT-OFF* */
+ pool_foreach (s, mp->sessions,
+ ({
+ vlib_cli_output (vm, "%U", format_sticky_hash_session, mp, s);
+ }));
+ /* *INDENT-ON* */
+ }
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_sticky_hash_command, static) = {
+ .path = "show sticky classify",
+ .short_help = "Display sticky classifier tables",
+ .function = show_ip4_sticky_hash_command_fn,
+};
+/* *INDENT-ON* */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vpp/app/version.c b/src/vpp/app/version.c
new file mode 100644
index 00000000..60844c98
--- /dev/null
+++ b/src/vpp/app/version.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vlib/vlib.h>
+#include <vppinfra/cpu.h>
+#include <vpp/app/version.h>
+
+#if DPDK > 0
+#include <rte_version.h>
+#include <vnet/vnet.h>
+#include <vnet/devices/dpdk/dpdk.h>
+#endif /* DPDK */
+
+static char *vpe_version_string =
+ "vpp v" VPP_BUILD_VER
+ " built by " VPP_BUILD_USER " on " VPP_BUILD_HOST " at " VPP_BUILD_DATE;
+
+static char *vpe_compiler =
+#if defined(__INTEL_COMPILER)
+#define __(x) #x
+#define _(x) __(x)
+ "icc " _(__INTEL_COMPILER) " (" __VERSION__ ")";
+#undef _
+#undef __
+#elif defined(__clang__)
+ "Clang/LLVM " __clang_version__;
+#elif defined (__GNUC__)
+ "GCC " __VERSION__;
+#else
+ "unknown compiler";
+#endif
+
+static clib_error_t *
+show_vpe_version_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ if (unformat (input, "verbose"))
+ {
+#define _(a,b,c) vlib_cli_output (vm, "%-25s " b, a ":", c);
+ _("Version", "%s", "v" VPP_BUILD_VER);
+ _("Compiled by", "%s", VPP_BUILD_USER);
+ _("Compile host", "%s", VPP_BUILD_HOST);
+ _("Compile date", "%s", VPP_BUILD_DATE);
+ _("Compile location", "%s", VPP_BUILD_TOPDIR);
+ _("Compiler", "%s", vpe_compiler);
+ _("Current PID", "%d", getpid ());
+#if DPDK > 0
+ _("DPDK Version", "%s", rte_version ());
+ _("DPDK EAL init args", "%s", dpdk_config_main.eal_init_args_str);
+#endif
+#undef _
+ }
+ else
+ vlib_cli_output (vm, "%s", vpe_version_string);
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_vpe_version_command, static) = {
+ .path = "show version",
+ .short_help = "show version information",
+ .function = show_vpe_version_command_fn,
+};
+/* *INDENT-ON* */
+
+char *
+vpe_api_get_build_directory (void)
+{
+ return VPP_BUILD_TOPDIR;
+}
+
+char *
+vpe_api_get_version (void)
+{
+ return VPP_BUILD_VER;
+}
+
+char *
+vpe_api_get_build_date (void)
+{
+ return VPP_BUILD_DATE;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vpp/app/vpe_cli.c b/src/vpp/app/vpe_cli.c
new file mode 100644
index 00000000..a26bf71f
--- /dev/null
+++ b/src/vpp/app/vpe_cli.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/adj/adj.h>
+#include <vnet/fib/fib_table.h>
+
+typedef struct
+{
+ u8 mac_addr[6];
+} mac_addr_t;
+
+static clib_error_t *
+virtual_ip_cmd_fn_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ vnet_main_t *vnm = vnet_get_main ();
+ ip46_address_t next_hop, *next_hops;
+ fib_route_path_t *rpaths;
+ fib_prefix_t prefix;
+ u8 mac_addr[6];
+ mac_addr_t *mac_addrs = 0;
+ u32 sw_if_index;
+ u32 i;
+
+ next_hops = NULL;
+ rpaths = NULL;
+ prefix.fp_len = 32;
+ prefix.fp_proto = FIB_PROTOCOL_IP4;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ if (!unformat (line_input, "%U %U",
+ unformat_ip4_address, &prefix.fp_addr.ip4,
+ unformat_vnet_sw_interface, vnm, &sw_if_index))
+ goto barf;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "mac %U",
+ unformat_ethernet_address, &mac_addr))
+ {
+ mac_addr_t *ma;
+ vec_add2 (mac_addrs, ma, 1);
+ clib_memcpy (ma, mac_addr, sizeof (mac_addr));
+ }
+ else if (unformat (line_input, "next-hop %U",
+ unformat_ip4_address, &next_hop.ip4))
+ {
+ vec_add1 (next_hops, next_hop);
+ }
+ else
+ {
+ barf:
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ }
+ if (vec_len (mac_addrs) == 0 || vec_len (mac_addrs) != vec_len (next_hops))
+ goto barf;
+
+ /* Create / delete special interface route /32's */
+
+ for (i = 0; i < vec_len (mac_addrs); i++)
+ {
+ fib_route_path_t *rpath;
+
+ adj_nbr_add_or_lock_w_rewrite (FIB_PROTOCOL_IP4,
+ VNET_LINK_IP4,
+ &next_hops[i],
+ sw_if_index, mac_addrs[i].mac_addr);
+
+ vec_add2 (rpaths, rpath, 1);
+
+ rpath->frp_proto = FIB_PROTOCOL_IP4;
+ rpath->frp_addr = next_hops[i];
+ rpath->frp_sw_if_index = sw_if_index;
+ rpath->frp_fib_index = ~0;
+ rpath->frp_weight = 1;
+ rpath->frp_label_stack = NULL;
+ }
+
+ fib_table_entry_path_add2 (0, // default FIB table
+ &prefix,
+ FIB_SOURCE_CLI, FIB_ENTRY_FLAG_NONE, rpaths);
+
+ vec_free (mac_addrs);
+ vec_free (next_hops);
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (virtual_ip_cmd_fn_command, static) = {
+ .path = "ip virtual",
+ .short_help = "ip virtual <addr> <interface> [mac <Mi> next-hop <ip4-addr>]+",
+ .function = virtual_ip_cmd_fn_command_fn,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vpp/conf/80-vpp.conf b/src/vpp/conf/80-vpp.conf
new file mode 100644
index 00000000..8fdf184c
--- /dev/null
+++ b/src/vpp/conf/80-vpp.conf
@@ -0,0 +1,15 @@
+# Number of 2MB hugepages desired
+vm.nr_hugepages=1024
+
+# Must be greater than or equal to (2 * vm.nr_hugepages).
+vm.max_map_count=3096
+
+# Group ID allowed to create shared memory segments backed by hugepages (0 = root group)
+vm.hugetlb_shm_group=0
+
+# Shared Memory Max must be greater than or equal to the total size of hugepages.
+# For 2MB pages, TotalHugepageSize = vm.nr_hugepages * 2 * 1024 * 1024
+# If the existing kernel.shmmax setting (cat /proc/sys/kernel/shmmax)
+# is greater than the calculated TotalHugepageSize then set this parameter
+# to the current shmmax value.
+kernel.shmmax=2147483648
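+
+# With the defaults above: 1024 hugepages * 2 MB = 2048 MB = 2147483648 bytes,
+# which matches the kernel.shmmax value set here. Likewise vm.max_map_count=3096
+# satisfies the (2 * vm.nr_hugepages) = 2048 minimum.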
diff --git a/src/vpp/conf/startup.conf b/src/vpp/conf/startup.conf
new file mode 100644
index 00000000..bce00202
--- /dev/null
+++ b/src/vpp/conf/startup.conf
@@ -0,0 +1,99 @@
+
+unix {
+ nodaemon
+ log /tmp/vpp.log
+ full-coredump
+}
+
+api-trace {
+ on
+}
+
+api-segment {
+ gid vpp
+}
+
+cpu {
+ ## In VPP there is one main thread; optionally, the user can create worker thread(s)
+ ## The main thread and worker thread(s) can be pinned to CPU core(s) manually or automatically
+
+ ## Manual pinning of thread(s) to CPU core(s)
+
+ ## Set logical CPU core where main thread runs
+ # main-core 1
+
+ ## Set logical CPU core(s) where worker threads are running
+ # corelist-workers 2-3,18-19
+
+ ## Automatic pinning of thread(s) to CPU core(s)
+
+ ## Sets the number of CPU core(s) to be skipped (1 ... N-1)
+ ## Skipped CPU core(s) are not used for pinning main thread and worker thread(s).
+ ## The main thread is automatically pinned to the first available CPU core and worker(s)
+ ## are pinned to the next free CPU core(s) after the core assigned to the main thread
+ # skip-cores 4
+
+ ## Specify the number of workers to be created
+ ## Workers are pinned to N consecutive CPU cores while skipping "skip-cores" CPU core(s)
+ ## and the main thread's CPU core
+ # workers 2
+
+ ## Set scheduling policy and priority of main and worker threads
+
+ ## Scheduling policy options are: other (SCHED_OTHER), batch (SCHED_BATCH)
+ ## idle (SCHED_IDLE), fifo (SCHED_FIFO), rr (SCHED_RR)
+ # scheduler-policy fifo
+
+ ## Scheduling priority is used only for "real-time" policies (fifo and rr),
+ ## and has to be in the range of priorities supported for a particular policy
+ # scheduler-priority 50
+}
+
+dpdk {
+ ## Change default settings for all interfaces
+ # dev default {
+ ## Number of receive queues, enables RSS
+ ## Default is 1
+ # num-rx-queues 3
+
+ ## Number of transmit queues. Default is equal
+ ## to the number of worker threads, or 1 if there are no worker threads
+ # num-tx-queues 3
+
+ ## Number of descriptors in transmit and receive rings
+ ## increasing or reducing the number can impact performance
+ ## Default is 1024 for both rx and tx
+ # num-rx-desc 512
+ # num-tx-desc 512
+
+ ## VLAN strip offload mode for interface
+ ## Default is off
+ # vlan-strip-offload on
+ # }
+
+ ## Whitelist specific interface by specifying PCI address
+ # dev 0000:02:00.0
+
+ ## Whitelist specific interface by specifying PCI address and in
+ ## addition specify custom parameters for this interface
+ # dev 0000:02:00.1 {
+ # num-rx-queues 2
+ # }
+
+ ## Change the UIO driver used by VPP. Options are: uio_pci_generic, vfio-pci
+ ## and igb_uio (default)
+ # uio-driver uio_pci_generic
+
+ ## Disable multi-segment buffers; improves performance but
+ ## disables Jumbo MTU support
+ # no-multi-seg
+
+ ## Increase the number of buffers allocated; needed only in scenarios with
+ ## a large number of interfaces and worker threads. Value is per CPU socket.
+ ## Default is 32768
+ # num-mbufs 128000
+
+ ## Change hugepage allocation per socket; needed only if there is a need for
+ ## a larger number of mbufs. Default is 256M on each detected CPU socket
+ # socket-mem 2048,2048
+}
diff --git a/src/vpp/conf/startup.uiopcigeneric.conf b/src/vpp/conf/startup.uiopcigeneric.conf
new file mode 100644
index 00000000..03a89dff
--- /dev/null
+++ b/src/vpp/conf/startup.uiopcigeneric.conf
@@ -0,0 +1,18 @@
+
+unix {
+ nodaemon
+ log /tmp/vpp.log
+ full-coredump
+}
+
+dpdk {
+ uio-driver uio_pci_generic
+}
+
+api-trace {
+ on
+}
+
+api-segment {
+ gid vpp
+}
diff --git a/src/vpp/oam/oam.c b/src/vpp/oam/oam.c
new file mode 100644
index 00000000..07e17b64
--- /dev/null
+++ b/src/vpp/oam/oam.c
@@ -0,0 +1,648 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vpp/oam/oam.h>
+
+#if DPDK > 0
+#include <vnet/devices/dpdk/dpdk.h>
+#endif
+
+oam_main_t oam_main;
+
+static vlib_node_registration_t oam_node;
+
+static void
+init_oam_packet_template (oam_main_t * om, oam_target_t * t)
+{
+ oam_template_t *h;
+ int i;
+ ip_csum_t sum;
+ u16 csum;
+
+ vec_validate_aligned (t->template, 0, CLIB_CACHE_LINE_BYTES);
+
+ h = t->template;
+ memset (h, 0, sizeof (*h));
+
+ h->ip4.src_address.as_u32 = t->src_address.as_u32;
+ h->ip4.dst_address.as_u32 = t->dst_address.as_u32;
+ h->ip4.ip_version_and_header_length = 0x45;
+ h->ip4.length = clib_host_to_net_u16 (sizeof (*h));
+ h->ip4.ttl = 64; /* as in linux */
+ h->ip4.protocol = IP_PROTOCOL_ICMP;
+ h->ip4.checksum = ip4_header_checksum (&h->ip4);
+
+ /*
+ * Template has seq = 0. Each time we send one of these puppies,
+ * change the sequence number and fix the execrated checksum
+ */
+ h->icmp.type = ICMP4_echo_request;
+ h->id = clib_host_to_net_u16 (t->id);
+
+ for (i = 0; i < ARRAY_LEN (h->data); i++)
+ h->data[i] = 'A' + i;
+
+ sum = ip_incremental_checksum (0, &h->icmp,
+ sizeof (h->icmp) + sizeof (h->id) +
+ sizeof (h->seq) + sizeof (h->data));
+ csum = ~ip_csum_fold (sum);
+ h->icmp.checksum = csum;
+}
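+
+/*
+ * When a probe is sent (see oam_process below), only the seq field of the
+ * template changes, so the ICMP checksum is patched incrementally with
+ * ip_csum_update() instead of being recomputed: per RFC 1624,
+ * HC' = ~(~HC + ~m + m'), with m the old seq (0) and m' the new seq.
+ */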
+
+int
+vpe_oam_add_del_target (ip4_address_t * src_address,
+ ip4_address_t * dst_address, u32 fib_id, int is_add)
+{
+ u64 key;
+ uword *p;
+ oam_main_t *om = &oam_main;
+ oam_target_t *t;
+ ip4_main_t *im = &ip4_main;
+ u32 fib_index;
+
+ /* Make sure the FIB actually exists */
+ p = hash_get (im->fib_index_by_table_id, fib_id);
+ if (!p)
+ return VNET_API_ERROR_NO_SUCH_FIB;
+
+ fib_index = p[0];
+
+ key = ((u64) fib_index << 32) | (dst_address->as_u32);
+ p = hash_get (om->target_by_address_and_fib_id, key);
+
+ if (is_add)
+ {
+ if (p)
+ return VNET_API_ERROR_INVALID_REGISTRATION; /* already there... */
+
+ pool_get (om->targets, t);
+ memset (t, 0, sizeof (*t));
+ t->src_address.as_u32 = src_address->as_u32;
+ t->dst_address.as_u32 = dst_address->as_u32;
+ t->fib_id = fib_id;
+ t->fib_index = fib_index;
+ t->state = OAM_STATE_DEAD;
+ t->last_heard_time = vlib_time_now (om->vlib_main);
+ t->last_heard_seq = (u16) ~ om->misses_allowed;
+ t->id = (u16) random_u32 (&om->random_seed);
+ t->seq = 1;
+ init_oam_packet_template (om, t);
+ hash_set (om->target_by_address_and_fib_id, key, t - om->targets);
+ }
+ else
+ {
+ if (!p)
+ return VNET_API_ERROR_NO_SUCH_ENTRY; /* no such oam target */
+ t = pool_elt_at_index (om->targets, p[0]);
+ vec_free (t->template);
+ hash_unset (om->target_by_address_and_fib_id, key);
+ pool_put (om->targets, t);
+ }
+ return 0;
+}
+
+static clib_error_t *
+oam_add_del_target_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ int is_add = -1;
+ ip4_address_t src_address;
+ int src_set = 0;
+ ip4_address_t dst_address;
+ int dst_set = 0;
+ u32 fib_id = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "add"))
+ is_add = 1;
+ else if (unformat (input, "del"))
+ is_add = 0;
+ else if (unformat (input, "src %U", unformat_ip4_address, &src_address))
+ src_set = 1;
+ else if (unformat (input, "dst %U", unformat_ip4_address, &dst_address))
+ dst_set = 1;
+ else if (unformat (input, "fib %d", &fib_id))
+ ;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+
+ if (is_add == -1)
+ return clib_error_return (0, "missing add / del qualifier");
+ if (src_set == 0)
+ return clib_error_return (0, "src address not set");
+ if (dst_set == 0)
+ return clib_error_return (0, "dst address not set");
+
+ (void) vpe_oam_add_del_target (&src_address, &dst_address, fib_id, is_add);
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (oam_add_del_target_command, static) = {
+ .path = "oam",
+ .short_help = "oam [add|del] src <ip4-address> dst <ip4-address> [fib <fib-id>]",
+ .function = oam_add_del_target_command_fn,
+};
+/* *INDENT-ON* */
+
+static uword
+oam_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f_arg)
+{
+ oam_main_t *om = &oam_main;
+ uword *event_data = 0;
+ oam_target_t *t;
+ oam_template_t *h0;
+ u32 bi0;
+ u16 new_seq;
+ ip_csum_t sum0;
+ vlib_frame_t *f;
+ u32 *to_next, *from;
+ u32 ip4_lookup_node_index;
+ vlib_node_t *ip4_lookup_node;
+ vlib_buffer_t *b0;
+ static u32 *buffers;
+ oam_template_copy_t *copy_src, *copy_dst;
+ void send_oam_event (oam_target_t * t);
+ u32 nalloc;
+
+ /* Enqueue pkts to ip4-lookup */
+ ip4_lookup_node = vlib_get_node_by_name (vm, (u8 *) "ip4-lookup");
+ ip4_lookup_node_index = ip4_lookup_node->index;
+
+ while (1)
+ {
+ /* Only timeout events at the moment */
+ vlib_process_wait_for_event_or_clock (vm, om->interval);
+ vec_reset_length (event_data);
+
+ if (pool_elts (om->targets) == 0)
+ continue;
+
+ if (vec_len (buffers) < pool_elts (om->targets))
+ vec_validate (buffers, pool_elts (om->targets) - 1);
+
+ nalloc = vlib_buffer_alloc (vm, buffers, pool_elts (om->targets));
+ if (nalloc < pool_elts (om->targets))
+ {
+ vlib_buffer_free (vm, buffers, nalloc);
+ continue;
+ }
+
+ f = vlib_get_frame_to_node (vm, ip4_lookup_node_index);
+ f->n_vectors = 0;
+ to_next = vlib_frame_vector_args (f);
+ from = buffers;
+
+ /* *INDENT-OFF* */
+ pool_foreach (t, om->targets,
+ ({
+ /* State transition announcement... */
+ if ((t->seq - t->last_heard_seq) >= om->misses_allowed)
+ {
+ if (t->state == OAM_STATE_ALIVE)
+ {
+ if (CLIB_DEBUG > 0)
+ clib_warning ("oam target %U now DEAD",
+ format_ip4_address, &t->dst_address);
+ t->state = OAM_STATE_DEAD;
+ send_oam_event (t);
+ }
+ }
+ else
+ {
+ if (t->state == OAM_STATE_DEAD)
+ {
+ if (CLIB_DEBUG > 0)
+ clib_warning ("oam target %U now ALIVE",
+ format_ip4_address, &t->dst_address);
+ t->state = OAM_STATE_ALIVE;
+ send_oam_event (t);
+ }
+ }
+
+ /* Send a new icmp */
+ t->seq++;
+ new_seq = clib_host_to_net_u16 (t->seq);
+
+ bi0 = from[0];
+ from++;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = 0;
+ vnet_buffer (b0)->sw_if_index [VLIB_TX] = t->fib_index;
+
+ /* Marginally faster than memcpy, probably */
+ copy_dst = (oam_template_copy_t *) b0->data;
+ copy_src = (oam_template_copy_t *) t->template;
+
+ copy_dst->v8[0] = copy_src->v8[0];
+ copy_dst->v8[1] = copy_src->v8[1];
+ copy_dst->v8[2] = copy_src->v8[2];
+ copy_dst->v8[3] = copy_src->v8[3];
+ copy_dst->v4 = copy_src->v4;
+
+ b0->current_data = 0;
+ b0->current_length = sizeof (*t->template);
+ h0 = vlib_buffer_get_current (b0);
+
+ sum0 = h0->icmp.checksum;
+ sum0 = ip_csum_update(sum0, 0 /* old seq */,
+ new_seq, oam_template_t, seq);
+ h0->seq = new_seq;
+ h0->icmp.checksum = ip_csum_fold (sum0);
+
+ to_next[0] = bi0;
+ to_next++;
+ f->n_vectors++;
+ if (f->n_vectors == VLIB_FRAME_SIZE)
+ {
+ clib_warning ("Too many OAM clients...");
+ goto out;
+ }
+ }));
+ /* *INDENT-ON* */
+
+ out:
+ vlib_put_frame_to_node (vm, ip4_lookup_node_index, f);
+ }
+ return 0; /* not so much */
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (oam_process_node,static) = {
+ .function = oam_process,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "vpe-oam-process",
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+oam_config (vlib_main_t * vm, unformat_input_t * input)
+{
+ oam_main_t *om = &oam_main;
+ f64 interval;
+ u32 misses_allowed;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "interval %f", &interval))
+ om->interval = interval;
+ else if (unformat (input, "misses-allowed %d", &misses_allowed))
+ om->misses_allowed = misses_allowed;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ return 0;
+}
+
+VLIB_CONFIG_FUNCTION (oam_config, "oam");
+
+static clib_error_t *
+oam_init (vlib_main_t * vm)
+{
+ oam_main_t *om = &oam_main;
+
+ om->vlib_main = vm;
+ om->vnet_main = vnet_get_main ();
+ om->interval = 2.04;
+ om->misses_allowed = 3;
+ om->random_seed = (u32) (vlib_time_now (vm) * 1e6);
+ om->target_by_address_and_fib_id = hash_create (0, sizeof (uword));
+ om->icmp_id = random_u32 (&om->random_seed);
+
+ ip4_icmp_register_type (vm, ICMP4_echo_reply, oam_node.index);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (oam_init);
+
+static u8 *
+format_oam_target (u8 * s, va_list * args)
+{
+ oam_target_t *t = va_arg (*args, oam_target_t *);
+ int verbose = va_arg (*args, int);
+
+ if (t == 0)
+ return format (s, "%=6s%=14s%=14s%=12s%=10s",
+ "Fib", "Src", "Dst", "Last Heard", "State");
+
+ s = format (s, "%=6d%=14U%=14U%=12.2f%=10s",
+ t->fib_id,
+ format_ip4_address, &t->src_address,
+ format_ip4_address, &t->dst_address,
+ t->last_heard_time,
+ (t->state == OAM_STATE_ALIVE) ? "alive" : "dead");
+ if (verbose)
+ s = format (s, " seq %d last_heard_seq %d", t->seq, t->last_heard_seq);
+
+ return s;
+}
+
+static clib_error_t *
+show_oam_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ oam_main_t *om = &oam_main;
+ oam_target_t *t;
+ int verbose = 0;
+
+ if (unformat (input, "verbose") || unformat (input, "v"))
+ verbose = 1;
+
+ /* print header */
+ vlib_cli_output (vm, "%U", format_oam_target, 0, verbose);
+
+ /* *INDENT-OFF* */
+ pool_foreach (t, om->targets,
+ ({
+ vlib_cli_output (vm, "%U", format_oam_target, t, verbose);
+ }));
+ /* *INDENT-ON* */
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_oam_command, static) = {
+ .path = "show oam",
+ .short_help = "show oam",
+ .function = show_oam_command_fn,
+};
+/* *INDENT-ON* */
+
+typedef struct
+{
+ u32 target_pool_index;
+ ip4_address_t address;
+} oam_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_swap_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ oam_trace_t *t = va_arg (*args, oam_trace_t *);
+
+ s = format (s, "OAM: rx from address %U, target index %d",
+ format_ip4_address, &t->address, t->target_pool_index);
+ return s;
+}
+
+
+#define foreach_oam_error \
+_(PROCESSED, "vpe icmp4 oam replies processed") \
+_(DROPPED, "icmp4 replies dropped (no registration)")
+
+typedef enum
+{
+#define _(sym,str) OAM_ERROR_##sym,
+ foreach_oam_error
+#undef _
+ OAM_N_ERROR,
+} oam_error_t;
+
+static char *oam_error_strings[] = {
+#define _(sym,string) string,
+ foreach_oam_error
+#undef _
+};
+
+/*
+ * To drop a pkt and increment one of the previous counters:
+ *
+ * set b0->error = error_node->errors[OAM_ERROR_EXAMPLE];
+ * set next0 to a disposition index bound to "error-drop".
+ *
+ * To manually increment the specific counter OAM_ERROR_EXAMPLE:
+ *
+ * vlib_node_t *n = vlib_get_node (vm, oam.index);
+ * u32 node_counter_base_index = n->error_heap_index;
+ * vlib_error_main_t * em = &vm->error_main;
+ * em->counters[node_counter_base_index + OAM_ERROR_EXAMPLE] += 1;
+ *
+ */
+
+typedef enum
+{
+ OAM_NEXT_DROP,
+ OAM_NEXT_PUNT,
+ OAM_N_NEXT,
+} oam_next_t;
+
+static uword
+oam_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next;
+ oam_next_t next_index;
+ oam_main_t *om = &oam_main;
+ u32 next0 = OAM_NEXT_DROP; /* all pkts go to the hopper... */
+ u32 next1 = OAM_NEXT_DROP;
+ uword *u0, *u1;
+ oam_template_t *oam0, *oam1;
+ u32 fib_index0, fib_index1;
+ u64 key0, key1;
+ oam_target_t *t0, *t1;
+ ip4_main_t *im = &ip4_main;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ u32 sw_if_index0, sw_if_index1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ /* speculatively enqueue b0 and b1 to the current next frame */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+
+ oam0 = vlib_buffer_get_current (b0);
+ oam1 = vlib_buffer_get_current (b1);
+ fib_index0 = vec_elt (im->fib_index_by_sw_if_index, sw_if_index0);
+ fib_index1 = vec_elt (im->fib_index_by_sw_if_index, sw_if_index1);
+
+ key0 = ((u64) fib_index0 << 32) | oam0->ip4.src_address.as_u32;
+ u0 = hash_get (om->target_by_address_and_fib_id, key0);
+ if (u0)
+ {
+ t0 = pool_elt_at_index (om->targets, u0[0]);
+ t0->last_heard_time = vlib_time_now (vm);
+ t0->last_heard_seq = clib_net_to_host_u16 (oam0->seq);
+ b0->error = node->errors[OAM_ERROR_PROCESSED];
+ }
+ else
+ b0->error = node->errors[OAM_ERROR_DROPPED];
+
+ key1 = ((u64) fib_index1 << 32) | oam1->ip4.src_address.as_u32;
+ u1 = hash_get (om->target_by_address_and_fib_id, key1);
+ if (u1)
+ {
+ t1 = pool_elt_at_index (om->targets, u1[0]);
+ t1->last_heard_time = vlib_time_now (vm);
+ t1->last_heard_seq = clib_net_to_host_u16 (oam1->seq);
+ b1->error = node->errors[OAM_ERROR_PROCESSED];
+ }
+ else
+ b1->error = node->errors[OAM_ERROR_DROPPED];
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+ {
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ oam_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->target_pool_index = u0 ? u0[0] : (u32) ~ 0;
+ t->address.as_u32 = oam0->ip4.src_address.as_u32;
+ }
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ oam_trace_t *t = vlib_add_trace (vm, node, b1, sizeof (*t));
+ t->target_pool_index = u1 ? u1[0] : (u32) ~ 0;
+ t->address.as_u32 = oam1->ip4.src_address.as_u32;
+
+ }
+ }
+
+ if (vm->os_punt_frame)
+ next0 = next1 = OAM_NEXT_PUNT;
+
+ /* verify speculative enqueues, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0, sw_if_index0;
+ vlib_buffer_t *b0;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+ oam0 = vlib_buffer_get_current (b0);
+ fib_index0 = vec_elt (im->fib_index_by_sw_if_index, sw_if_index0);
+
+ key0 = ((u64) fib_index0 << 32) | oam0->ip4.src_address.as_u32;
+ u0 = hash_get (om->target_by_address_and_fib_id, key0);
+ if (u0)
+ {
+ t0 = pool_elt_at_index (om->targets, u0[0]);
+ t0->last_heard_time = vlib_time_now (vm);
+ t0->last_heard_seq = clib_net_to_host_u16 (oam0->seq);
+ b0->error = node->errors[OAM_ERROR_PROCESSED];
+ }
+ else
+ b0->error = node->errors[OAM_ERROR_DROPPED];
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ oam_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->target_pool_index = u0 ? u0[0] : (u32) ~ 0;
+ t->address.as_u32 = oam0->ip4.src_address.as_u32;
+ }
+
+ if (vm->os_punt_frame)
+ next0 = OAM_NEXT_PUNT;
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (oam_node,static) = {
+ .function = oam_node_fn,
+ .name = "vpe-icmp4-oam",
+ .vector_size = sizeof (u32),
+ .format_trace = format_swap_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(oam_error_strings),
+ .error_strings = oam_error_strings,
+
+ .n_next_nodes = OAM_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [OAM_NEXT_DROP] = "error-drop",
+ [OAM_NEXT_PUNT] = "error-punt",
+ },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vpp/oam/oam.h b/src/vpp/oam/oam.h
new file mode 100644
index 00000000..f6af9788
--- /dev/null
+++ b/src/vpp/oam/oam.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_oam_h__
+#define __included_oam_h__
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/interface.h>
+
+/* 36 octets, make a note of it... */
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ ip4_header_t ip4;
+ icmp46_header_t icmp;
+ u16 id;
+ u16 seq;
+ u8 data[8];
+}) oam_template_t;
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ u64 v8[4];
+ u32 v4;
+}) oam_template_copy_t;
+/* *INDENT-ON* */
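+
+/*
+ * Size check: ip4 (20) + icmp (4) + id (2) + seq (2) + data (8) = 36 octets,
+ * i.e. exactly what oam_template_copy_t moves as 4 x u64 + 1 x u32 (32 + 4).
+ */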
+
+typedef enum
+{
+ OAM_STATE_UNKNOWN = 0,
+ OAM_STATE_ALIVE,
+ OAM_STATE_DEAD,
+} oam_state_t;
+
+typedef struct
+{
+ ip4_address_t src_address;
+ ip4_address_t dst_address;
+ u32 fib_id;
+ u32 fib_index;
+ f64 last_heard_time;
+ u16 seq;
+ u16 last_heard_seq;
+ u16 id;
+ u8 state;
+ oam_template_t *template;
+} oam_target_t;
+
+typedef struct
+{
+ /* OAM targets */
+ oam_target_t *targets;
+ uword *target_by_address_and_fib_id;
+
+ /* Config parameters */
+ f64 interval;
+ u32 misses_allowed;
+
+ /* random number seed */
+ u32 random_seed;
+ u16 icmp_id;
+
+ /* oam packet template */
+ vlib_packet_template_t packet_template;
+
+ /* convenience */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+} oam_main_t;
+
+int vpe_oam_add_del_target (ip4_address_t * src_address,
+ ip4_address_t * dst_address,
+ u32 fib_id, int is_add);
+
+#endif /* __included_oam_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vpp/stats/stats.c b/src/vpp/stats/stats.c
new file mode 100644
index 00000000..391e02f6
--- /dev/null
+++ b/src/vpp/stats/stats.c
@@ -0,0 +1,987 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vpp/stats/stats.h>
+#include <signal.h>
+#include <vlib/threads.h>
+#include <vnet/fib/fib_entry.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/dpo/load_balance.h>
+
+#define STATS_DEBUG 0
+
+stats_main_t stats_main;
+
+#include <vnet/ip/ip.h>
+
+#include <vpp/api/vpe_msg_enum.h>
+
+#define f64_endian(a)
+#define f64_print(a,b)
+
+#define vl_typedefs /* define message structures */
+#include <vpp/api/vpe_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define message structures */
+#include <vpp/api/vpe_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <vpp/api/vpe_all_api_h.h>
+#undef vl_printfun
+
+#define foreach_stats_msg \
+_(WANT_STATS, want_stats) \
+_(WANT_STATS_REPLY, want_stats_reply) \
+_(VNET_INTERFACE_COUNTERS, vnet_interface_counters) \
+_(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \
+_(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters)
+
+/* These constants ensure msg sizes <= 1024, aka ring allocation */
+#define SIMPLE_COUNTER_BATCH_SIZE 126
+#define COMBINED_COUNTER_BATCH_SIZE 63
+#define IP4_FIB_COUNTER_BATCH_SIZE 48
+#define IP6_FIB_COUNTER_BATCH_SIZE 30
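+
+/*
+ * Rough sizing: a simple counter is a u64 (8 bytes), so 126 * 8 = 1008 bytes
+ * of payload; a combined counter is a vlib_counter_t (2 x u64 = 16 bytes),
+ * so 63 * 16 = 1008 bytes. The FIB batch sizes are chosen the same way for
+ * the larger per-route records, leaving room for the message header within
+ * the 1024-byte ring entries.
+ */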
+
+/* 5 ms (1000 ns/us * 1000 us/ms * 5 ms) */
+#define STATS_RELEASE_DELAY_NS (1000 * 1000 * 5)
+
+void
+dslock (stats_main_t * sm, int release_hint, int tag)
+{
+ u32 thread_id;
+ data_structure_lock_t *l = sm->data_structure_lock;
+
+ if (PREDICT_FALSE (l == 0))
+ return;
+
+ thread_id = os_get_cpu_number ();
+ if (l->lock && l->thread_id == thread_id)
+ {
+ l->count++;
+ return;
+ }
+
+ if (release_hint)
+ l->release_hint++;
+
+ while (__sync_lock_test_and_set (&l->lock, 1))
+ /* zzzz */ ;
+ l->tag = tag;
+ l->thread_id = thread_id;
+ l->count = 1;
+}
+
+void
+stats_dslock_with_hint (int hint, int tag)
+{
+ stats_main_t *sm = &stats_main;
+ dslock (sm, hint, tag);
+}
+
+void
+dsunlock (stats_main_t * sm)
+{
+ u32 thread_id;
+ data_structure_lock_t *l = sm->data_structure_lock;
+
+ if (PREDICT_FALSE (l == 0))
+ return;
+
+ thread_id = os_get_cpu_number ();
+ ASSERT (l->lock && l->thread_id == thread_id);
+ l->count--;
+ if (l->count == 0)
+ {
+ l->tag = -l->tag;
+ l->release_hint = 0;
+ CLIB_MEMORY_BARRIER ();
+ l->lock = 0;
+ }
+}
+
+void
+stats_dsunlock (int hint, int tag)
+{
+ stats_main_t *sm = &stats_main;
+ dsunlock (sm);
+}
+
+static void
+do_simple_interface_counters (stats_main_t * sm)
+{
+ vl_api_vnet_interface_counters_t *mp = 0;
+ vnet_interface_main_t *im = sm->interface_main;
+ api_main_t *am = sm->api_main;
+ vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
+ unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
+ vlib_simple_counter_main_t *cm;
+ u32 items_this_message = 0;
+ u64 v, *vp = 0;
+ int i;
+
+ /*
+ * Prevent interface registration from expanding / moving the vectors...
+ * That tends never to happen, so we can hold this lock for a while.
+ */
+ vnet_interface_counter_lock (im);
+
+ vec_foreach (cm, im->sw_if_counters)
+ {
+
+ for (i = 0; i < vec_len (cm->maxi); i++)
+ {
+ if (mp == 0)
+ {
+ items_this_message = clib_min (SIMPLE_COUNTER_BATCH_SIZE,
+ vec_len (cm->maxi) - i);
+
+ mp = vl_msg_api_alloc_as_if_client
+ (sizeof (*mp) + items_this_message * sizeof (v));
+ mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COUNTERS);
+ mp->vnet_counter_type = cm - im->sw_if_counters;
+ mp->is_combined = 0;
+ mp->first_sw_if_index = htonl (i);
+ mp->count = 0;
+ vp = (u64 *) mp->data;
+ }
+ v = vlib_get_simple_counter (cm, i);
+ clib_mem_unaligned (vp, u64) = clib_host_to_net_u64 (v);
+ vp++;
+ mp->count++;
+ if (mp->count == items_this_message)
+ {
+ mp->count = htonl (items_this_message);
+ /* Send to the main thread... */
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+ mp = 0;
+ }
+ }
+ ASSERT (mp == 0);
+ }
+ vnet_interface_counter_unlock (im);
+}
+
+static void
+do_combined_interface_counters (stats_main_t * sm)
+{
+ vl_api_vnet_interface_counters_t *mp = 0;
+ vnet_interface_main_t *im = sm->interface_main;
+ api_main_t *am = sm->api_main;
+ vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
+ unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
+ vlib_combined_counter_main_t *cm;
+ u32 items_this_message = 0;
+ vlib_counter_t v, *vp = 0;
+ int i;
+
+ vnet_interface_counter_lock (im);
+
+ vec_foreach (cm, im->combined_sw_if_counters)
+ {
+
+ for (i = 0; i < vec_len (cm->maxi); i++)
+ {
+ if (mp == 0)
+ {
+ items_this_message = clib_min (COMBINED_COUNTER_BATCH_SIZE,
+ vec_len (cm->maxi) - i);
+
+ mp = vl_msg_api_alloc_as_if_client
+ (sizeof (*mp) + items_this_message * sizeof (v));
+ mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COUNTERS);
+ mp->vnet_counter_type = cm - im->combined_sw_if_counters;
+ mp->is_combined = 1;
+ mp->first_sw_if_index = htonl (i);
+ mp->count = 0;
+ vp = (vlib_counter_t *) mp->data;
+ }
+ vlib_get_combined_counter (cm, i, &v);
+ clib_mem_unaligned (&vp->packets, u64)
+ = clib_host_to_net_u64 (v.packets);
+ clib_mem_unaligned (&vp->bytes, u64) = clib_host_to_net_u64 (v.bytes);
+ vp++;
+ mp->count++;
+ if (mp->count == items_this_message)
+ {
+ mp->count = htonl (items_this_message);
+ /* Send to the main thread... */
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+ mp = 0;
+ }
+ }
+ ASSERT (mp == 0);
+ }
+ vnet_interface_counter_unlock (im);
+}
+
+/* from .../vnet/vnet/ip/lookup.c. Yuck */
+typedef CLIB_PACKED (struct
+ {
+ ip4_address_t address;
+u32 address_length: 6;
+u32 index: 26;
+ }) ip4_route_t;
+
+static void
+ip46_fib_stats_delay (stats_main_t * sm, u32 sec, u32 nsec)
+{
+ struct timespec _req, *req = &_req;
+ struct timespec _rem, *rem = &_rem;
+
+ req->tv_sec = sec;
+ req->tv_nsec = nsec;
+ while (1)
+ {
+ if (nanosleep (req, rem) == 0)
+ break;
+ *req = *rem;
+ if (errno == EINTR)
+ continue;
+ clib_unix_warning ("nanosleep");
+ break;
+ }
+}
+
+static void
+do_ip4_fibs (stats_main_t * sm)
+{
+ ip4_main_t *im4 = &ip4_main;
+ api_main_t *am = sm->api_main;
+ vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
+ unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
+ static ip4_route_t *routes;
+ ip4_route_t *r;
+ fib_table_t *fib;
+ ip_lookup_main_t *lm = &im4->lookup_main;
+ static uword *results;
+ vl_api_vnet_ip4_fib_counters_t *mp = 0;
+ u32 items_this_message;
+ vl_api_ip4_fib_counter_t *ctrp = 0;
+ u32 start_at_fib_index = 0;
+ int i;
+
+again:
+ /* *INDENT-OFF* */
+ pool_foreach (fib, im4->fibs,
+ ({
+ /* We may have bailed out due to control-plane activity */
+ if ((fib - im4->fibs) < start_at_fib_index)
+ continue;
+
+ if (mp == 0)
+ {
+ items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
+ mp = vl_msg_api_alloc_as_if_client
+ (sizeof (*mp) +
+ items_this_message * sizeof (vl_api_ip4_fib_counter_t));
+ mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
+ mp->count = 0;
+ mp->vrf_id = ntohl (fib->ft_table_id);
+ ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
+ }
+ else
+ {
+ /* happens if the last FIB was empty... */
+ ASSERT (mp->count == 0);
+ mp->vrf_id = ntohl (fib->ft_table_id);
+ }
+
+ dslock (sm, 0 /* release hint */ , 1 /* tag */ );
+
+ vec_reset_length (routes);
+ vec_reset_length (results);
+
+ for (i = 0; i < ARRAY_LEN (fib->v4.fib_entry_by_dst_address); i++)
+ {
+ uword *hash = fib->v4.fib_entry_by_dst_address[i];
+ hash_pair_t *p;
+ ip4_route_t x;
+
+ x.address_length = i;
+
+ hash_foreach_pair (p, hash,
+ ({
+ x.address.data_u32 = p->key;
+ if (lm->fib_result_n_words > 1)
+ {
+ x.index = vec_len (results);
+ vec_add (results, p->value, lm->fib_result_n_words);
+ }
+ else
+ x.index = p->value[0];
+
+ vec_add1 (routes, x);
+ if (sm->data_structure_lock->release_hint)
+ {
+ start_at_fib_index = fib - im4->fibs;
+ dsunlock (sm);
+ ip46_fib_stats_delay (sm, 0 /* sec */,
+ STATS_RELEASE_DELAY_NS);
+ mp->count = 0;
+ ctrp = (vl_api_ip4_fib_counter_t *)mp->c;
+ goto again;
+ }
+ }));
+ }
+
+ vec_foreach (r, routes)
+ {
+ vlib_counter_t c;
+
+ vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
+ r->index, &c);
+ /*
+ * If it has actually
+ * seen at least one packet, send it.
+ */
+ if (c.packets > 0)
+ {
+
+ /* already in net byte order */
+ ctrp->address = r->address.as_u32;
+ ctrp->address_length = r->address_length;
+ ctrp->packets = clib_host_to_net_u64 (c.packets);
+ ctrp->bytes = clib_host_to_net_u64 (c.bytes);
+ mp->count++;
+ ctrp++;
+
+ if (mp->count == items_this_message)
+ {
+ mp->count = htonl (items_this_message);
+ /*
+ * If the main thread's input queue is stuffed,
+ * drop the data structure lock (which the main thread
+ * may want), and take a pause.
+ */
+ unix_shared_memory_queue_lock (q);
+ if (unix_shared_memory_queue_is_full (q))
+ {
+ dsunlock (sm);
+ vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
+ unix_shared_memory_queue_unlock (q);
+ mp = 0;
+ ip46_fib_stats_delay (sm, 0 /* sec */ ,
+ STATS_RELEASE_DELAY_NS);
+ goto again;
+ }
+ vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
+ unix_shared_memory_queue_unlock (q);
+
+ items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
+ mp = vl_msg_api_alloc_as_if_client
+ (sizeof (*mp) +
+ items_this_message * sizeof (vl_api_ip4_fib_counter_t));
+ mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
+ mp->count = 0;
+ mp->vrf_id = ntohl (fib->ft_table_id);
+ ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
+ }
+ } /* for each (mp or single) adj */
+ if (sm->data_structure_lock->release_hint)
+ {
+ start_at_fib_index = fib - im4->fibs;
+ dsunlock (sm);
+ ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
+ mp->count = 0;
+ ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
+ goto again;
+ }
+ } /* vec_foreach (routes) */
+
+ dsunlock (sm);
+
+ /* Flush any data from this fib */
+ if (mp->count)
+ {
+ mp->count = htonl (mp->count);
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+ mp = 0;
+ }
+ }));
+ /* *INDENT-ON* */
+
+ /* If e.g. the last FIB had no reportable routes, free the buffer */
+ if (mp)
+ vl_msg_api_free (mp);
+}
+
+typedef struct
+{
+ ip6_address_t address;
+ u32 address_length;
+ u32 index;
+} ip6_route_t;
+
+typedef struct
+{
+ u32 fib_index;
+ ip6_route_t **routep;
+ stats_main_t *sm;
+} add_routes_in_fib_arg_t;
+
+static void
+add_routes_in_fib (BVT (clib_bihash_kv) * kvp, void *arg)
+{
+ add_routes_in_fib_arg_t *ap = arg;
+ stats_main_t *sm = ap->sm;
+
+ if (sm->data_structure_lock->release_hint)
+ clib_longjmp (&sm->jmp_buf, 1);
+
+ if (kvp->key[2] >> 32 == ap->fib_index)
+ {
+ ip6_address_t *addr;
+ ip6_route_t *r;
+ addr = (ip6_address_t *) kvp;
+ vec_add2 (*ap->routep, r, 1);
+ r->address = addr[0];
+ r->address_length = kvp->key[2] & 0xFF;
+ r->index = kvp->value;
+ }
+}
+
+static void
+do_ip6_fibs (stats_main_t * sm)
+{
+ ip6_main_t *im6 = &ip6_main;
+ api_main_t *am = sm->api_main;
+ vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
+ unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
+ static ip6_route_t *routes;
+ ip6_route_t *r;
+ fib_table_t *fib;
+ static uword *results;
+ vl_api_vnet_ip6_fib_counters_t *mp = 0;
+ u32 items_this_message;
+ vl_api_ip6_fib_counter_t *ctrp = 0;
+ u32 start_at_fib_index = 0;
+ BVT (clib_bihash) * h = &im6->ip6_table[IP6_FIB_TABLE_FWDING].ip6_hash;
+ add_routes_in_fib_arg_t _a, *a = &_a;
+
+again:
+ /* *INDENT-OFF* */
+ pool_foreach (fib, im6->fibs,
+ ({
+ /* We may have bailed out due to control-plane activity */
+ if ((fib - im6->fibs) < start_at_fib_index)
+ continue;
+
+ if (mp == 0)
+ {
+ items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
+ mp = vl_msg_api_alloc_as_if_client
+ (sizeof (*mp) +
+ items_this_message * sizeof (vl_api_ip6_fib_counter_t));
+ mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
+ mp->count = 0;
+ mp->vrf_id = ntohl (fib->ft_table_id);
+ ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
+ }
+
+ dslock (sm, 0 /* release hint */ , 1 /* tag */ );
+
+ vec_reset_length (routes);
+ vec_reset_length (results);
+
+ a->fib_index = fib - im6->fibs;
+ a->routep = &routes;
+ a->sm = sm;
+
+ if (clib_setjmp (&sm->jmp_buf, 0) == 0)
+ {
+ start_at_fib_index = fib - im6->fibs;
+ BV (clib_bihash_foreach_key_value_pair) (h, add_routes_in_fib, a);
+ }
+ else
+ {
+ dsunlock (sm);
+ ip46_fib_stats_delay (sm, 0 /* sec */ ,
+ STATS_RELEASE_DELAY_NS);
+ mp->count = 0;
+ ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
+ goto again;
+ }
+
+ vec_foreach (r, routes)
+ {
+ vlib_counter_t c;
+
+ vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
+ r->index, &c);
+ /*
+ * If it has actually
+ * seen at least one packet, send it.
+ */
+ if (c.packets > 0)
+ {
+ /* already in net byte order */
+ ctrp->address[0] = r->address.as_u64[0];
+ ctrp->address[1] = r->address.as_u64[1];
+ ctrp->address_length = (u8) r->address_length;
+ ctrp->packets = clib_host_to_net_u64 (c.packets);
+ ctrp->bytes = clib_host_to_net_u64 (c.bytes);
+ mp->count++;
+ ctrp++;
+
+ if (mp->count == items_this_message)
+ {
+ mp->count = htonl (items_this_message);
+ /*
+ * If the main thread's input queue is stuffed,
+ * drop the data structure lock (which the main thread
+ * may want), and take a pause.
+ */
+ unix_shared_memory_queue_lock (q);
+ if (unix_shared_memory_queue_is_full (q))
+ {
+ dsunlock (sm);
+ vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
+ unix_shared_memory_queue_unlock (q);
+ mp = 0;
+ ip46_fib_stats_delay (sm, 0 /* sec */ ,
+ STATS_RELEASE_DELAY_NS);
+ goto again;
+ }
+ vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
+ unix_shared_memory_queue_unlock (q);
+
+ items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
+ mp = vl_msg_api_alloc_as_if_client
+ (sizeof (*mp) +
+ items_this_message * sizeof (vl_api_ip6_fib_counter_t));
+ mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
+ mp->count = 0;
+ mp->vrf_id = ntohl (fib->ft_table_id);
+ ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
+ }
+ }
+
+ if (sm->data_structure_lock->release_hint)
+ {
+ start_at_fib_index = fib - im6->fibs;
+ dsunlock (sm);
+ ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
+ mp->count = 0;
+ ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
+ goto again;
+ }
+ } /* vec_foreach (routes) */
+
+ dsunlock (sm);
+
+ /* Flush any data from this fib */
+ if (mp->count)
+ {
+ mp->count = htonl (mp->count);
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+ mp = 0;
+ }
+ }));
+ /* *INDENT-ON* */
+
+ /* If e.g. the last FIB had no reportable routes, free the buffer */
+ if (mp)
+ vl_msg_api_free (mp);
+}
+
+static void
+stats_thread_fn (void *arg)
+{
+ stats_main_t *sm = &stats_main;
+ vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+
+ /* stats thread wants no signals. */
+ {
+ sigset_t s;
+ sigfillset (&s);
+ pthread_sigmask (SIG_SETMASK, &s, 0);
+ }
+
+ if (vec_len (tm->thread_prefix))
+ vlib_set_thread_name ((char *)
+ format (0, "%v_stats%c", tm->thread_prefix, '\0'));
+
+ clib_mem_set_heap (w->thread_mheap);
+
+ while (1)
+ {
+ /* 10 second poll interval */
+ ip46_fib_stats_delay (sm, 10 /* secs */ , 0 /* nsec */ );
+
+ if (!(sm->enable_poller))
+ continue;
+ do_simple_interface_counters (sm);
+ do_combined_interface_counters (sm);
+ do_ip4_fibs (sm);
+ do_ip6_fibs (sm);
+ }
+}
+
+static void
+vl_api_vnet_interface_counters_t_handler (vl_api_vnet_interface_counters_t *
+ mp)
+{
+ vpe_client_registration_t *reg;
+ stats_main_t *sm = &stats_main;
+ unix_shared_memory_queue_t *q, *q_prev = NULL;
+ vl_api_vnet_interface_counters_t *mp_copy = NULL;
+ u32 mp_size;
+
+#if STATS_DEBUG > 0
+ char *counter_name;
+ u32 count, sw_if_index;
+ int i;
+#endif
+
+ mp_size = sizeof (*mp) + (ntohl (mp->count) *
+ (mp->is_combined ? sizeof (vlib_counter_t) :
+ sizeof (u64)));
+
+ /* *INDENT-OFF* */
+ pool_foreach(reg, sm->stats_registrations,
+ ({
+ q = vl_api_client_index_to_input_queue (reg->client_index);
+ if (q)
+ {
+ if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ {
+ mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
+ clib_memcpy(mp_copy, mp, mp_size);
+ vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
+ mp = mp_copy;
+ }
+ q_prev = q;
+ }
+ }));
+ /* *INDENT-ON* */
+
+#if STATS_DEBUG > 0
+ count = ntohl (mp->count);
+ sw_if_index = ntohl (mp->first_sw_if_index);
+ if (mp->is_combined == 0)
+ {
+ u64 *vp, v;
+ vp = (u64 *) mp->data;
+
+ switch (mp->vnet_counter_type)
+ {
+ case VNET_INTERFACE_COUNTER_DROP:
+ counter_name = "drop";
+ break;
+ case VNET_INTERFACE_COUNTER_PUNT:
+ counter_name = "punt";
+ break;
+ case VNET_INTERFACE_COUNTER_IP4:
+ counter_name = "ip4";
+ break;
+ case VNET_INTERFACE_COUNTER_IP6:
+ counter_name = "ip6";
+ break;
+ case VNET_INTERFACE_COUNTER_RX_NO_BUF:
+ counter_name = "rx-no-buff";
+ break;
+ case VNET_INTERFACE_COUNTER_RX_MISS:
+ counter_name = "rx-miss";
+ break;
+ case VNET_INTERFACE_COUNTER_RX_ERROR:
+ counter_name = "rx-error (fifo-full)";
+ break;
+ case VNET_INTERFACE_COUNTER_TX_ERROR:
+ counter_name = "tx-error (fifo-full)";
+ break;
+ default:
+ counter_name = "bogus";
+ break;
+ }
+ for (i = 0; i < count; i++)
+ {
+ v = clib_mem_unaligned (vp, u64);
+ v = clib_net_to_host_u64 (v);
+ vp++;
+ fformat (stdout, "%U.%s %lld\n", format_vnet_sw_if_index_name,
+ sm->vnet_main, sw_if_index, counter_name, v);
+ sw_if_index++;
+ }
+ }
+ else
+ {
+ vlib_counter_t *vp;
+ u64 packets, bytes;
+ vp = (vlib_counter_t *) mp->data;
+
+ switch (mp->vnet_counter_type)
+ {
+ case VNET_INTERFACE_COUNTER_RX:
+ counter_name = "rx";
+ break;
+ case VNET_INTERFACE_COUNTER_TX:
+ counter_name = "tx";
+ break;
+ default:
+ counter_name = "bogus";
+ break;
+ }
+ for (i = 0; i < count; i++)
+ {
+ packets = clib_mem_unaligned (&vp->packets, u64);
+ packets = clib_net_to_host_u64 (packets);
+ bytes = clib_mem_unaligned (&vp->bytes, u64);
+ bytes = clib_net_to_host_u64 (bytes);
+ vp++;
+ fformat (stdout, "%U.%s.packets %lld\n",
+ format_vnet_sw_if_index_name,
+ sm->vnet_main, sw_if_index, counter_name, packets);
+ fformat (stdout, "%U.%s.bytes %lld\n",
+ format_vnet_sw_if_index_name,
+ sm->vnet_main, sw_if_index, counter_name, bytes);
+ sw_if_index++;
+ }
+ }
+#endif
+ if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ {
+ vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+ }
+ else
+ {
+ vl_msg_api_free (mp);
+ }
+}
+
+static void
+vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
+{
+ vpe_client_registration_t *reg;
+ stats_main_t *sm = &stats_main;
+ unix_shared_memory_queue_t *q, *q_prev = NULL;
+ vl_api_vnet_ip4_fib_counters_t *mp_copy = NULL;
+ u32 mp_size;
+
+ mp_size = sizeof (*mp_copy) +
+ ntohl (mp->count) * sizeof (vl_api_ip4_fib_counter_t);
+
+ /* *INDENT-OFF* */
+ pool_foreach(reg, sm->stats_registrations,
+ ({
+ q = vl_api_client_index_to_input_queue (reg->client_index);
+ if (q)
+ {
+ if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ {
+ mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
+ clib_memcpy(mp_copy, mp, mp_size);
+ vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
+ mp = mp_copy;
+ }
+ q_prev = q;
+ }
+ }));
+ /* *INDENT-ON* */
+ if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ {
+ vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+ }
+ else
+ {
+ vl_msg_api_free (mp);
+ }
+}
+
+static void
+vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
+{
+ vpe_client_registration_t *reg;
+ stats_main_t *sm = &stats_main;
+ unix_shared_memory_queue_t *q, *q_prev = NULL;
+ vl_api_vnet_ip6_fib_counters_t *mp_copy = NULL;
+ u32 mp_size;
+
+ mp_size = sizeof (*mp_copy) +
+ ntohl (mp->count) * sizeof (vl_api_ip6_fib_counter_t);
+
+ /* *INDENT-OFF* */
+ pool_foreach(reg, sm->stats_registrations,
+ ({
+ q = vl_api_client_index_to_input_queue (reg->client_index);
+ if (q)
+ {
+ if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ {
+ mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
+ clib_memcpy(mp_copy, mp, mp_size);
+ vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
+ mp = mp_copy;
+ }
+ q_prev = q;
+ }
+ }));
+ /* *INDENT-ON* */
+ if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ {
+ vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+ }
+ else
+ {
+ vl_msg_api_free (mp);
+ }
+}
+
+static void
+vl_api_want_stats_reply_t_handler (vl_api_want_stats_reply_t * mp)
+{
+ clib_warning ("BUG");
+}
+
+static void
+vl_api_want_stats_t_handler (vl_api_want_stats_t * mp)
+{
+ stats_main_t *sm = &stats_main;
+ vpe_client_registration_t *rp;
+ vl_api_want_stats_reply_t *rmp;
+ uword *p;
+ i32 retval = 0;
+ unix_shared_memory_queue_t *q;
+
+ p = hash_get (sm->stats_registration_hash, mp->client_index);
+ if (p)
+ {
+ if (mp->enable_disable)
+ {
+ clib_warning ("pid %d: already enabled...", mp->pid);
+ retval = -2;
+ goto reply;
+ }
+ else
+ {
+ rp = pool_elt_at_index (sm->stats_registrations, p[0]);
+ pool_put (sm->stats_registrations, rp);
+ hash_unset (sm->stats_registration_hash, mp->client_index);
+ goto reply;
+ }
+ }
+ if (mp->enable_disable == 0)
+ {
+ clib_warning ("pid %d: already disabled...", mp->pid);
+ retval = -3;
+ goto reply;
+ }
+ pool_get (sm->stats_registrations, rp);
+ rp->client_index = mp->client_index;
+ rp->client_pid = mp->pid;
+ hash_set (sm->stats_registration_hash, rp->client_index,
+ rp - sm->stats_registrations);
+
+reply:
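+  /* The stats poller only runs while at least one client is registered */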
+ if (pool_elts (sm->stats_registrations))
+ sm->enable_poller = 1;
+ else
+ sm->enable_poller = 0;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+
+ if (!q)
+ return;
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs (VL_API_WANT_STATS_REPLY);
+ rmp->context = mp->context;
+ rmp->retval = retval;
+
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+}
+
+int
+stats_memclnt_delete_callback (u32 client_index)
+{
+ vpe_client_registration_t *rp;
+ stats_main_t *sm = &stats_main;
+ uword *p;
+
+ p = hash_get (sm->stats_registration_hash, client_index);
+ if (p)
+ {
+ rp = pool_elt_at_index (sm->stats_registrations, p[0]);
+ pool_put (sm->stats_registrations, rp);
+ hash_unset (sm->stats_registration_hash, client_index);
+ }
+
+ return 0;
+}
+
+#define vl_api_vnet_ip4_fib_counters_t_endian vl_noop_handler
+#define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler
+#define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler
+#define vl_api_vnet_ip6_fib_counters_t_print vl_noop_handler
+
+static clib_error_t *
+stats_init (vlib_main_t * vm)
+{
+ stats_main_t *sm = &stats_main;
+ api_main_t *am = &api_main;
+ void *vlib_worker_thread_bootstrap_fn (void *arg);
+
+ sm->vlib_main = vm;
+ sm->vnet_main = vnet_get_main ();
+ sm->interface_main = &vnet_get_main ()->interface_main;
+ sm->api_main = am;
+ sm->stats_poll_interval_in_seconds = 10;
+ sm->data_structure_lock =
+ clib_mem_alloc_aligned (sizeof (data_structure_lock_t),
+ CLIB_CACHE_LINE_BYTES);
+ memset (sm->data_structure_lock, 0, sizeof (*sm->data_structure_lock));
+
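+  /*
+   * Register a handler for every message in foreach_stats_msg via the
+   * X-macro below; the final argument disables API tracing for these
+   * messages.
+   */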
+#define _(N,n) \
+ vl_msg_api_set_handlers(VL_API_##N, #n, \
+ vl_api_##n##_t_handler, \
+ vl_noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 0 /* do NOT trace! */);
+ foreach_stats_msg;
+#undef _
+
+ /* tell the msg infra not to free these messages... */
+ am->message_bounce[VL_API_VNET_INTERFACE_COUNTERS] = 1;
+ am->message_bounce[VL_API_VNET_IP4_FIB_COUNTERS] = 1;
+ am->message_bounce[VL_API_VNET_IP6_FIB_COUNTERS] = 1;
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (stats_init);
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_THREAD (stats_thread_reg, static) = {
+ .name = "stats",
+ .function = stats_thread_fn,
+ .fixed_count = 1,
+ .count = 1,
+ .no_data_structure_clone = 1,
+ .use_pthreads = 1,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vpp/stats/stats.h b/src/vpp/stats/stats.h
new file mode 100644
index 00000000..118115be
--- /dev/null
+++ b/src/vpp/stats/stats.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_stats_h__
+#define __included_stats_h__
+
+#include <time.h>
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/interface.h>
+#include <pthread.h>
+#include <vlib/threads.h>
+#include <vlib/unix/unix.h>
+#include <vlibmemory/api.h>
+#include <vlibmemory/unix_shared_memory_queue.h>
+#include <vlibapi/api_helper_macros.h>
+
+typedef struct
+{
+ volatile u32 lock;
+ volatile u32 release_hint;
+ u32 thread_id;
+ u32 count;
+ int tag;
+} data_structure_lock_t;
+
+typedef struct
+{
+ void *mheap;
+ pthread_t thread_self;
+ pthread_t thread_handle;
+
+ u32 stats_poll_interval_in_seconds;
+ u32 enable_poller;
+
+ uword *stats_registration_hash;
+ vpe_client_registration_t *stats_registrations;
+
+ /* control-plane data structure lock */
+ data_structure_lock_t *data_structure_lock;
+
+ /* bail out of FIB walk if set */
+ clib_longjmp_t jmp_buf;
+
+ /* convenience */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+ vnet_interface_main_t *interface_main;
+ api_main_t *api_main;
+} stats_main_t;
+
+stats_main_t stats_main;
+
+void dslock (stats_main_t * sm, int release_hint, int tag);
+void dsunlock (stats_main_t * sm);
+
+#endif /* __included_stats_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vpp/vnet/main.c b/src/vpp/vnet/main.c
new file mode 100644
index 00000000..e4695e1e
--- /dev/null
+++ b/src/vpp/vnet/main.c
@@ -0,0 +1,415 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vppinfra/cpu.h>
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vnet/plugin/plugin.h>
+#include <vnet/ethernet/ethernet.h>
+
+#include <vpp/api/vpe_msg_enum.h>
+
+#if DPDK
+#include <vnet/devices/dpdk/dpdk.h>
+
+/*
+ * Called by the dpdk driver's rte_delay_us() function.
+ * Return 0 to have the dpdk do a regular delay loop.
+ * Return 1 to skip the delay loop because we are suspending
+ * the calling vlib process instead.
+ */
+int
+rte_delay_us_override (unsigned us)
+{
+ vlib_main_t *vm;
+
+ /* Don't bother intercepting for short delays */
+ if (us < 10)
+ return 0;
+
+ /*
+ * Only intercept if we are in a vlib process.
+ * If we are called from a vlib worker thread or the vlib main
+ * thread then do not intercept. (Must not be called from an
+ * independent pthread).
+ */
+ if (os_get_cpu_number () == 0)
+ {
+ /*
+ * We're in the vlib main thread or a vlib process. Make sure
+ * the process is running and we're not still initializing.
+ */
+ vm = vlib_get_main ();
+ if (vlib_in_process_context (vm))
+ {
+	  /* Only suspend for the admin_up_down_process */
+ vlib_process_t *proc = vlib_get_current_process (vm);
+ if (!(proc->flags & VLIB_PROCESS_IS_RUNNING) ||
+ (proc->node_runtime.function != admin_up_down_process))
+ return 0;
+
+ f64 delay = 1e-6 * us;
+ vlib_process_suspend (vm, delay);
+ return 1;
+ }
+ }
+  return 0; /* no override */
+}
+
+#if RTE_VERSION >= RTE_VERSION_NUM(16, 11, 0, 0)
+static void
+rte_delay_us_override_cb (unsigned us)
+{
+ if (rte_delay_us_override (us) == 0)
+ rte_delay_us_block (us);
+}
+#endif
+#endif
+
+static void
+vpe_main_init (vlib_main_t * vm)
+{
+ if (CLIB_DEBUG > 0)
+ vlib_unix_cli_set_prompt ("DBGvpp# ");
+ else
+ vlib_unix_cli_set_prompt ("vpp# ");
+
+ /* Turn off network stack components which we don't want */
+ vlib_mark_init_function_complete (vm, srp_init);
+
+#if DPDK
+#if RTE_VERSION >= RTE_VERSION_NUM(16, 11, 0, 0)
+ /* register custom delay function */
+ rte_delay_us_callback_register (rte_delay_us_override_cb);
+#endif
+#endif
+}
+
+/*
+ * Load plugins from /usr/lib/vpp_plugins by default
+ */
+char *vlib_plugin_path = "/usr/lib/vpp_plugins";
+
+void *
+vnet_get_handoff_structure (void)
+{
+ static vnet_plugin_handoff_t _rv, *rv = &_rv;
+
+ rv->vnet_main = vnet_get_main ();
+ rv->ethernet_main = &ethernet_main;
+ return (void *) rv;
+}
+
+int
+main (int argc, char *argv[])
+{
+ int i;
+ vlib_main_t *vm = &vlib_global_main;
+ void vl_msg_api_set_first_available_msg_id (u16);
+ uword main_heap_size = (1ULL << 30);
+ u8 *sizep;
+ u32 size;
+ void vlib_set_get_handoff_structure_cb (void *cb);
+
+#if __x86_64__
+ CLIB_UNUSED (const char *msg)
+ = "ERROR: This binary requires CPU with %s extensions.\n";
+#define _(a,b) \
+ if (!clib_cpu_supports_ ## a ()) \
+ { \
+ fprintf(stderr, msg, b); \
+ exit(1); \
+ }
+
+#if __AVX2__
+ _(avx2, "AVX2")
+#endif
+#if __AVX__
+ _(avx, "AVX")
+#endif
+#if __SSE4_2__
+ _(sse42, "SSE4.2")
+#endif
+#if __SSE4_1__
+ _(sse41, "SSE4.1")
+#endif
+#if __SSSE3__
+ _(ssse3, "SSSE3")
+#endif
+#if __SSE3__
+ _(sse3, "SSE3")
+#endif
+#undef _
+#endif
+ /*
+ * Load startup config from file.
+ * usage: vpp -c /etc/vpp/startup.conf
+ */
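+  /*
+   * The file is split into whitespace-separated tokens which are
+   * appended to argv; a token beginning with '#' causes the rest of
+   * that line to be ignored.
+   */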
+ if ((argc == 3) && !strncmp (argv[1], "-c", 2))
+ {
+ FILE *fp;
+ char inbuf[4096];
+ int argc_ = 1;
+ char **argv_ = NULL;
+ char *arg = NULL;
+ char *p;
+
+ fp = fopen (argv[2], "r");
+ if (fp == NULL)
+ {
+	  fprintf (stderr, "failed to open configuration file '%s'\n", argv[2]);
+ return 1;
+ }
+ argv_ = calloc (1, sizeof (char *));
+ if (argv_ == NULL)
+ return 1;
+ arg = strndup (argv[0], 1024);
+ if (arg == NULL)
+ return 1;
+ argv_[0] = arg;
+
+ while (1)
+ {
+ if (fgets (inbuf, 4096, fp) == 0)
+ break;
+ p = strtok (inbuf, " \t\n");
+ while (p != NULL)
+ {
+ if (*p == '#')
+ break;
+ argc_++;
+ char **tmp = realloc (argv_, argc_ * sizeof (char *));
+ if (tmp == NULL)
+ return 1;
+ argv_ = tmp;
+ arg = strndup (p, 1024);
+ if (arg == NULL)
+ return 1;
+ argv_[argc_ - 1] = arg;
+ p = strtok (NULL, " \t\n");
+ }
+ }
+
+ fclose (fp);
+
+ char **tmp = realloc (argv_, (argc_ + 1) * sizeof (char *));
+ if (tmp == NULL)
+ return 1;
+ argv_ = tmp;
+ argv_[argc_] = NULL;
+
+ argc = argc_;
+ argv = argv_;
+ }
+
+ /*
+ * Look for and parse the "heapsize" config parameter.
+ * Manual since none of the clib infra has been bootstrapped yet.
+ *
+ * Format: heapsize <nn>[mM][gG]
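+   * e.g. "heapsize 2g" selects a 2 << 30 byte main heap.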
+ */
+
+ for (i = 1; i < (argc - 1); i++)
+ {
+ if (!strncmp (argv[i], "plugin_path", 11))
+ {
+ if (i < (argc - 1))
+ vlib_plugin_path = argv[++i];
+ }
+ else if (!strncmp (argv[i], "heapsize", 8))
+ {
+ sizep = (u8 *) argv[i + 1];
+ size = 0;
+ while (*sizep >= '0' && *sizep <= '9')
+ {
+ size *= 10;
+ size += *sizep++ - '0';
+ }
+ if (size == 0)
+ {
+ fprintf
+ (stderr,
+	       "warning: heapsize parse error '%s', using default %lld\n",
+ argv[i], (long long int) main_heap_size);
+ goto defaulted;
+ }
+
+ main_heap_size = size;
+
+ if (*sizep == 'g' || *sizep == 'G')
+ main_heap_size <<= 30;
+ else if (*sizep == 'm' || *sizep == 'M')
+ main_heap_size <<= 20;
+ }
+ }
+
+defaulted:
+
+ /* Set up the plugin message ID allocator right now... */
+ vl_msg_api_set_first_available_msg_id (VL_MSG_FIRST_AVAILABLE);
+
+ /* Allocate main heap */
+ if (clib_mem_init (0, main_heap_size))
+ {
+ vm->init_functions_called = hash_create (0, /* value bytes */ 0);
+ vpe_main_init (vm);
+#if DPDK == 0
+ unix_physmem_init (vm, 0 /* fail_if_physical_memory_not_present */ );
+#endif
+ vlib_set_get_handoff_structure_cb (&vnet_get_handoff_structure);
+ return vlib_unix_main (argc, argv);
+ }
+ else
+ {
+ {
+ int rv __attribute__ ((unused)) =
+ write (2, "Main heap allocation failure!\r\n", 31);
+ }
+ return 1;
+ }
+}
+
+static clib_error_t *
+heapsize_config (vlib_main_t * vm, unformat_input_t * input)
+{
+ u32 junk;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "%dm", &junk)
+ || unformat (input, "%dM", &junk)
+ || unformat (input, "%dg", &junk) || unformat (input, "%dG", &junk))
+ return 0;
+ else
+ return clib_error_return (0, "unknown input '%U'",
+ format_unformat_error, input);
+ }
+ return 0;
+}
+
+VLIB_CONFIG_FUNCTION (heapsize_config, "heapsize");
+
+static clib_error_t *
+plugin_path_config (vlib_main_t * vm, unformat_input_t * input)
+{
+ u8 *junk;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "%s", &junk))
+ {
+ vec_free (junk);
+ return 0;
+ }
+ else
+ return clib_error_return (0, "unknown input '%U'",
+ format_unformat_error, input);
+ }
+ return 0;
+}
+
+VLIB_CONFIG_FUNCTION (plugin_path_config, "plugin_path");
+
+void vl_msg_api_post_mortem_dump (void);
+
+void
+os_panic (void)
+{
+ vl_msg_api_post_mortem_dump ();
+ abort ();
+}
+
+void vhost_user_unmap_all (void) __attribute__ ((weak));
+void
+vhost_user_unmap_all (void)
+{
+}
+
+void
+os_exit (int code)
+{
+ static int recursion_block;
+
+ if (code)
+ {
+ if (recursion_block)
+ abort ();
+
+ recursion_block = 1;
+
+ vl_msg_api_post_mortem_dump ();
+ vhost_user_unmap_all ();
+ abort ();
+ }
+ exit (code);
+}
+
+void
+vl_msg_api_barrier_sync (void)
+{
+ vlib_worker_thread_barrier_sync (vlib_get_main ());
+}
+
+void
+vl_msg_api_barrier_release (void)
+{
+ vlib_worker_thread_barrier_release (vlib_get_main ());
+}
+
+/* This application needs 1 thread stack for the stats pthread */
+u32
+vlib_app_num_thread_stacks_needed (void)
+{
+ return 1;
+}
+
+/*
+ * Depending on the configuration selected above,
+ * it may be necessary to generate stub graph nodes.
+ * It is never OK to ignore "node 'x' refers to unknown node 'y'"
+ * messages!
+ */
+
+#if CLIB_DEBUG > 0
+
+static clib_error_t *
+test_crash_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ u64 *p = (u64 *) 0xdefec8ed;
+
+ *p = 0xdeadbeef;
+
+ /* Not so much... */
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (test_crash_command, static) = {
+ .path = "test crash",
+ .short_help = "crash the bus!",
+ .function = test_crash_command_fn,
+};
+/* *INDENT-ON* */
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */