Diffstat (limited to 'src')
-rw-r--r--  src/vat/api_format.c                     |  537
-rw-r--r--  src/vnet/interface.api                   |   74
-rw-r--r--  src/vpp/CMakeLists.txt                   |    9
-rw-r--r--  src/vpp/api/api.c                        |    3
-rw-r--r--  src/vpp/api/summary_stats_client.c       |  302
-rw-r--r--  src/vpp/api/vpe.api                      |    1
-rw-r--r--  src/vpp/api/vpe_all_api_h.h              |    3
-rw-r--r--  src/vpp/stats/stats.api                  |  521
-rw-r--r--  src/vpp/stats/stats_to_be_deprecated.c   | 3186
-rw-r--r--  src/vpp/stats/stats_to_be_deprecated.h   |  175
10 files changed, 2 insertions, 4809 deletions
diff --git a/src/vat/api_format.c b/src/vat/api_format.c
index 4c43fcfaa20..fc4c38b52c8 100644
--- a/src/vat/api_format.c
+++ b/src/vat/api_format.c
@@ -2753,288 +2753,6 @@ static void vl_api_dhcp_compl_event_t_handler_json
/* JSON output not supported */
}
-static void
-set_simple_interface_counter (u8 vnet_counter_type, u32 sw_if_index,
- u32 counter)
-{
- vat_main_t *vam = &vat_main;
- static u64 default_counter = 0;
-
- vec_validate_init_empty (vam->simple_interface_counters, vnet_counter_type,
- NULL);
- vec_validate_init_empty (vam->simple_interface_counters[vnet_counter_type],
- sw_if_index, default_counter);
- vam->simple_interface_counters[vnet_counter_type][sw_if_index] = counter;
-}
-
-static void
-set_combined_interface_counter (u8 vnet_counter_type, u32 sw_if_index,
- interface_counter_t counter)
-{
- vat_main_t *vam = &vat_main;
- static interface_counter_t default_counter = { 0, };
-
- vec_validate_init_empty (vam->combined_interface_counters,
- vnet_counter_type, NULL);
- vec_validate_init_empty (vam->combined_interface_counters
- [vnet_counter_type], sw_if_index, default_counter);
- vam->combined_interface_counters[vnet_counter_type][sw_if_index] = counter;
-}
-
-static void vl_api_vnet_interface_simple_counters_t_handler
- (vl_api_vnet_interface_simple_counters_t * mp)
-{
- /* not supported */
-}
-
-static void vl_api_vnet_interface_combined_counters_t_handler
- (vl_api_vnet_interface_combined_counters_t * mp)
-{
- /* not supported */
-}
-
-static void vl_api_vnet_interface_simple_counters_t_handler_json
- (vl_api_vnet_interface_simple_counters_t * mp)
-{
- u64 *v_packets;
- u64 packets;
- u32 count;
- u32 first_sw_if_index;
- int i;
-
- count = ntohl (mp->count);
- first_sw_if_index = ntohl (mp->first_sw_if_index);
-
- v_packets = (u64 *) & mp->data;
- for (i = 0; i < count; i++)
- {
- packets = clib_net_to_host_u64 (clib_mem_unaligned (v_packets, u64));
- set_simple_interface_counter (mp->vnet_counter_type,
- first_sw_if_index + i, packets);
- v_packets++;
- }
-}
-
-static void vl_api_vnet_interface_combined_counters_t_handler_json
- (vl_api_vnet_interface_combined_counters_t * mp)
-{
- interface_counter_t counter;
- vlib_counter_t *v;
- u32 first_sw_if_index;
- int i;
- u32 count;
-
- count = ntohl (mp->count);
- first_sw_if_index = ntohl (mp->first_sw_if_index);
-
- v = (vlib_counter_t *) & mp->data;
- for (i = 0; i < count; i++)
- {
- counter.packets =
- clib_net_to_host_u64 (clib_mem_unaligned (&v->packets, u64));
- counter.bytes =
- clib_net_to_host_u64 (clib_mem_unaligned (&v->bytes, u64));
- set_combined_interface_counter (mp->vnet_counter_type,
- first_sw_if_index + i, counter);
- v++;
- }
-}
-
-static u32
-ip4_fib_counters_get_vrf_index_by_vrf_id (u32 vrf_id)
-{
- vat_main_t *vam = &vat_main;
- u32 i;
-
- for (i = 0; i < vec_len (vam->ip4_fib_counters_vrf_id_by_index); i++)
- {
- if (vam->ip4_fib_counters_vrf_id_by_index[i] == vrf_id)
- {
- return i;
- }
- }
- return ~0;
-}
-
-static u32
-ip6_fib_counters_get_vrf_index_by_vrf_id (u32 vrf_id)
-{
- vat_main_t *vam = &vat_main;
- u32 i;
-
- for (i = 0; i < vec_len (vam->ip6_fib_counters_vrf_id_by_index); i++)
- {
- if (vam->ip6_fib_counters_vrf_id_by_index[i] == vrf_id)
- {
- return i;
- }
- }
- return ~0;
-}
-
-static void vl_api_vnet_ip4_fib_counters_t_handler
- (vl_api_vnet_ip4_fib_counters_t * mp)
-{
- /* not supported */
-}
-
-static void vl_api_vnet_ip4_fib_counters_t_handler_json
- (vl_api_vnet_ip4_fib_counters_t * mp)
-{
- vat_main_t *vam = &vat_main;
- vl_api_ip4_fib_counter_t *v;
- ip4_fib_counter_t *counter;
- struct in_addr ip4;
- u32 vrf_id;
- u32 vrf_index;
- u32 count;
- int i;
-
- vrf_id = ntohl (mp->vrf_id);
- vrf_index = ip4_fib_counters_get_vrf_index_by_vrf_id (vrf_id);
- if (~0 == vrf_index)
- {
- vrf_index = vec_len (vam->ip4_fib_counters_vrf_id_by_index);
- vec_validate (vam->ip4_fib_counters_vrf_id_by_index, vrf_index);
- vam->ip4_fib_counters_vrf_id_by_index[vrf_index] = vrf_id;
- vec_validate (vam->ip4_fib_counters, vrf_index);
- vam->ip4_fib_counters[vrf_index] = NULL;
- }
-
- vec_free (vam->ip4_fib_counters[vrf_index]);
- v = (vl_api_ip4_fib_counter_t *) & mp->c;
- count = ntohl (mp->count);
- for (i = 0; i < count; i++)
- {
- vec_validate (vam->ip4_fib_counters[vrf_index], i);
- counter = &vam->ip4_fib_counters[vrf_index][i];
- clib_memcpy (&ip4, &v->address, sizeof (ip4));
- counter->address = ip4;
- counter->address_length = v->address_length;
- counter->packets = clib_net_to_host_u64 (v->packets);
- counter->bytes = clib_net_to_host_u64 (v->bytes);
- v++;
- }
-}
-
-static void vl_api_vnet_ip4_nbr_counters_t_handler
- (vl_api_vnet_ip4_nbr_counters_t * mp)
-{
- /* not supported */
-}
-
-static void vl_api_vnet_ip4_nbr_counters_t_handler_json
- (vl_api_vnet_ip4_nbr_counters_t * mp)
-{
- vat_main_t *vam = &vat_main;
- vl_api_ip4_nbr_counter_t *v;
- ip4_nbr_counter_t *counter;
- u32 sw_if_index;
- u32 count;
- int i;
-
- sw_if_index = ntohl (mp->sw_if_index);
- count = ntohl (mp->count);
- vec_validate (vam->ip4_nbr_counters, sw_if_index);
-
- if (mp->begin)
- vec_free (vam->ip4_nbr_counters[sw_if_index]);
-
- v = (vl_api_ip4_nbr_counter_t *) & mp->c;
- for (i = 0; i < count; i++)
- {
- vec_validate (vam->ip4_nbr_counters[sw_if_index], i);
- counter = &vam->ip4_nbr_counters[sw_if_index][i];
- counter->address.s_addr = v->address;
- counter->packets = clib_net_to_host_u64 (v->packets);
- counter->bytes = clib_net_to_host_u64 (v->bytes);
- counter->linkt = v->link_type;
- v++;
- }
-}
-
-static void vl_api_vnet_ip6_fib_counters_t_handler
- (vl_api_vnet_ip6_fib_counters_t * mp)
-{
- /* not supported */
-}
-
-static void vl_api_vnet_ip6_fib_counters_t_handler_json
- (vl_api_vnet_ip6_fib_counters_t * mp)
-{
- vat_main_t *vam = &vat_main;
- vl_api_ip6_fib_counter_t *v;
- ip6_fib_counter_t *counter;
- struct in6_addr ip6;
- u32 vrf_id;
- u32 vrf_index;
- u32 count;
- int i;
-
- vrf_id = ntohl (mp->vrf_id);
- vrf_index = ip6_fib_counters_get_vrf_index_by_vrf_id (vrf_id);
- if (~0 == vrf_index)
- {
- vrf_index = vec_len (vam->ip6_fib_counters_vrf_id_by_index);
- vec_validate (vam->ip6_fib_counters_vrf_id_by_index, vrf_index);
- vam->ip6_fib_counters_vrf_id_by_index[vrf_index] = vrf_id;
- vec_validate (vam->ip6_fib_counters, vrf_index);
- vam->ip6_fib_counters[vrf_index] = NULL;
- }
-
- vec_free (vam->ip6_fib_counters[vrf_index]);
- v = (vl_api_ip6_fib_counter_t *) & mp->c;
- count = ntohl (mp->count);
- for (i = 0; i < count; i++)
- {
- vec_validate (vam->ip6_fib_counters[vrf_index], i);
- counter = &vam->ip6_fib_counters[vrf_index][i];
- clib_memcpy (&ip6, &v->address, sizeof (ip6));
- counter->address = ip6;
- counter->address_length = v->address_length;
- counter->packets = clib_net_to_host_u64 (v->packets);
- counter->bytes = clib_net_to_host_u64 (v->bytes);
- v++;
- }
-}
-
-static void vl_api_vnet_ip6_nbr_counters_t_handler
- (vl_api_vnet_ip6_nbr_counters_t * mp)
-{
- /* not supported */
-}
-
-static void vl_api_vnet_ip6_nbr_counters_t_handler_json
- (vl_api_vnet_ip6_nbr_counters_t * mp)
-{
- vat_main_t *vam = &vat_main;
- vl_api_ip6_nbr_counter_t *v;
- ip6_nbr_counter_t *counter;
- struct in6_addr ip6;
- u32 sw_if_index;
- u32 count;
- int i;
-
- sw_if_index = ntohl (mp->sw_if_index);
- count = ntohl (mp->count);
- vec_validate (vam->ip6_nbr_counters, sw_if_index);
-
- if (mp->begin)
- vec_free (vam->ip6_nbr_counters[sw_if_index]);
-
- v = (vl_api_ip6_nbr_counter_t *) & mp->c;
- for (i = 0; i < count; i++)
- {
- vec_validate (vam->ip6_nbr_counters[sw_if_index], i);
- counter = &vam->ip6_nbr_counters[sw_if_index][i];
- clib_memcpy (&ip6, &v->address, sizeof (ip6));
- counter->address = ip6;
- counter->packets = clib_net_to_host_u64 (v->packets);
- counter->bytes = clib_net_to_host_u64 (v->bytes);
- v++;
- }
-}
-
static void vl_api_get_first_msg_id_reply_t_handler
(vl_api_get_first_msg_id_reply_t * mp)
{
@@ -5434,18 +5152,6 @@ static void vl_api_flow_classify_details_t_handler_json
vat_json_object_add_uint (node, "table_index", ntohl (mp->table_index));
}
-#define vl_api_vnet_interface_simple_counters_t_endian vl_noop_handler
-#define vl_api_vnet_interface_simple_counters_t_print vl_noop_handler
-#define vl_api_vnet_interface_combined_counters_t_endian vl_noop_handler
-#define vl_api_vnet_interface_combined_counters_t_print vl_noop_handler
-#define vl_api_vnet_ip4_fib_counters_t_endian vl_noop_handler
-#define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler
-#define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler
-#define vl_api_vnet_ip6_fib_counters_t_print vl_noop_handler
-#define vl_api_vnet_ip4_nbr_counters_t_endian vl_noop_handler
-#define vl_api_vnet_ip4_nbr_counters_t_print vl_noop_handler
-#define vl_api_vnet_ip6_nbr_counters_t_endian vl_noop_handler
-#define vl_api_vnet_ip6_nbr_counters_t_print vl_noop_handler
#define vl_api_one_adjacencies_get_reply_t_endian vl_noop_handler
#define vl_api_one_adjacencies_get_reply_t_print vl_noop_handler
#define vl_api_one_l2_arp_bd_get_reply_t_print vl_noop_handler
@@ -5556,7 +5262,6 @@ _(ikev2_initiate_rekey_child_sa_reply) \
_(delete_loopback_reply) \
_(bd_ip_mac_add_del_reply) \
_(want_interface_events_reply) \
-_(want_stats_reply) \
_(cop_interface_enable_disable_reply) \
_(cop_whitelist_enable_disable_reply) \
_(sw_interface_clear_stats_reply) \
@@ -5815,7 +5520,6 @@ _(BD_IP_MAC_ADD_DEL_REPLY, bd_ip_mac_add_del_reply) \
_(BD_IP_MAC_DETAILS, bd_ip_mac_details) \
_(DHCP_COMPL_EVENT, dhcp_compl_event) \
_(WANT_INTERFACE_EVENTS_REPLY, want_interface_events_reply) \
-_(WANT_STATS_REPLY, want_stats_reply) \
_(GET_FIRST_MSG_ID_REPLY, get_first_msg_id_reply) \
_(COP_INTERFACE_ENABLE_DISABLE_REPLY, cop_interface_enable_disable_reply) \
_(COP_WHITELIST_ENABLE_DISABLE_REPLY, cop_whitelist_enable_disable_reply) \
@@ -5961,13 +5665,7 @@ _(OUTPUT_ACL_SET_INTERFACE_REPLY, output_acl_set_interface_reply) \
_(QOS_RECORD_ENABLE_DISABLE_REPLY, qos_record_enable_disable_reply)
#define foreach_standalone_reply_msg \
-_(SW_INTERFACE_EVENT, sw_interface_event) \
-_(VNET_INTERFACE_SIMPLE_COUNTERS, vnet_interface_simple_counters) \
-_(VNET_INTERFACE_COMBINED_COUNTERS, vnet_interface_combined_counters) \
-_(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \
-_(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \
-_(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters) \
-_(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters)
+_(SW_INTERFACE_EVENT, sw_interface_event)
typedef struct
{
@@ -6141,205 +5839,6 @@ dump_ipv6_table (vat_main_t * vam)
return dump_ip_table (vam, 1);
}
-static char *
-counter_type_to_str (u8 counter_type, u8 is_combined)
-{
- if (!is_combined)
- {
- switch (counter_type)
- {
- case VNET_INTERFACE_COUNTER_DROP:
- return "drop";
- case VNET_INTERFACE_COUNTER_PUNT:
- return "punt";
- case VNET_INTERFACE_COUNTER_IP4:
- return "ip4";
- case VNET_INTERFACE_COUNTER_IP6:
- return "ip6";
- case VNET_INTERFACE_COUNTER_RX_NO_BUF:
- return "rx-no-buf";
- case VNET_INTERFACE_COUNTER_RX_MISS:
- return "rx-miss";
- case VNET_INTERFACE_COUNTER_RX_ERROR:
- return "rx-error";
- case VNET_INTERFACE_COUNTER_TX_ERROR:
- return "tx-error";
- default:
- return "INVALID-COUNTER-TYPE";
- }
- }
- else
- {
- switch (counter_type)
- {
- case VNET_INTERFACE_COUNTER_RX:
- return "rx";
- case VNET_INTERFACE_COUNTER_TX:
- return "tx";
- default:
- return "INVALID-COUNTER-TYPE";
- }
- }
-}
-
-static int
-dump_stats_table (vat_main_t * vam)
-{
- vat_json_node_t node;
- vat_json_node_t *msg_array;
- vat_json_node_t *msg;
- vat_json_node_t *counter_array;
- vat_json_node_t *counter;
- interface_counter_t c;
- u64 packets;
- ip4_fib_counter_t *c4;
- ip6_fib_counter_t *c6;
- ip4_nbr_counter_t *n4;
- ip6_nbr_counter_t *n6;
- int i, j;
-
- if (!vam->json_output)
- {
- clib_warning ("dump_stats_table supported only in JSON format");
- return -99;
- }
-
- vat_json_init_object (&node);
-
- /* interface counters */
- msg_array = vat_json_object_add (&node, "interface_counters");
- vat_json_init_array (msg_array);
- for (i = 0; i < vec_len (vam->simple_interface_counters); i++)
- {
- msg = vat_json_array_add (msg_array);
- vat_json_init_object (msg);
- vat_json_object_add_string_copy (msg, "vnet_counter_type",
- (u8 *) counter_type_to_str (i, 0));
- vat_json_object_add_int (msg, "is_combined", 0);
- counter_array = vat_json_object_add (msg, "data");
- vat_json_init_array (counter_array);
- for (j = 0; j < vec_len (vam->simple_interface_counters[i]); j++)
- {
- packets = vam->simple_interface_counters[i][j];
- vat_json_array_add_uint (counter_array, packets);
- }
- }
- for (i = 0; i < vec_len (vam->combined_interface_counters); i++)
- {
- msg = vat_json_array_add (msg_array);
- vat_json_init_object (msg);
- vat_json_object_add_string_copy (msg, "vnet_counter_type",
- (u8 *) counter_type_to_str (i, 1));
- vat_json_object_add_int (msg, "is_combined", 1);
- counter_array = vat_json_object_add (msg, "data");
- vat_json_init_array (counter_array);
- for (j = 0; j < vec_len (vam->combined_interface_counters[i]); j++)
- {
- c = vam->combined_interface_counters[i][j];
- counter = vat_json_array_add (counter_array);
- vat_json_init_object (counter);
- vat_json_object_add_uint (counter, "packets", c.packets);
- vat_json_object_add_uint (counter, "bytes", c.bytes);
- }
- }
-
- /* ip4 fib counters */
- msg_array = vat_json_object_add (&node, "ip4_fib_counters");
- vat_json_init_array (msg_array);
- for (i = 0; i < vec_len (vam->ip4_fib_counters); i++)
- {
- msg = vat_json_array_add (msg_array);
- vat_json_init_object (msg);
- vat_json_object_add_uint (msg, "vrf_id",
- vam->ip4_fib_counters_vrf_id_by_index[i]);
- counter_array = vat_json_object_add (msg, "c");
- vat_json_init_array (counter_array);
- for (j = 0; j < vec_len (vam->ip4_fib_counters[i]); j++)
- {
- counter = vat_json_array_add (counter_array);
- vat_json_init_object (counter);
- c4 = &vam->ip4_fib_counters[i][j];
- vat_json_object_add_ip4 (counter, "address", c4->address);
- vat_json_object_add_uint (counter, "address_length",
- c4->address_length);
- vat_json_object_add_uint (counter, "packets", c4->packets);
- vat_json_object_add_uint (counter, "bytes", c4->bytes);
- }
- }
-
- /* ip6 fib counters */
- msg_array = vat_json_object_add (&node, "ip6_fib_counters");
- vat_json_init_array (msg_array);
- for (i = 0; i < vec_len (vam->ip6_fib_counters); i++)
- {
- msg = vat_json_array_add (msg_array);
- vat_json_init_object (msg);
- vat_json_object_add_uint (msg, "vrf_id",
- vam->ip6_fib_counters_vrf_id_by_index[i]);
- counter_array = vat_json_object_add (msg, "c");
- vat_json_init_array (counter_array);
- for (j = 0; j < vec_len (vam->ip6_fib_counters[i]); j++)
- {
- counter = vat_json_array_add (counter_array);
- vat_json_init_object (counter);
- c6 = &vam->ip6_fib_counters[i][j];
- vat_json_object_add_ip6 (counter, "address", c6->address);
- vat_json_object_add_uint (counter, "address_length",
- c6->address_length);
- vat_json_object_add_uint (counter, "packets", c6->packets);
- vat_json_object_add_uint (counter, "bytes", c6->bytes);
- }
- }
-
- /* ip4 nbr counters */
- msg_array = vat_json_object_add (&node, "ip4_nbr_counters");
- vat_json_init_array (msg_array);
- for (i = 0; i < vec_len (vam->ip4_nbr_counters); i++)
- {
- msg = vat_json_array_add (msg_array);
- vat_json_init_object (msg);
- vat_json_object_add_uint (msg, "sw_if_index", i);
- counter_array = vat_json_object_add (msg, "c");
- vat_json_init_array (counter_array);
- for (j = 0; j < vec_len (vam->ip4_nbr_counters[i]); j++)
- {
- counter = vat_json_array_add (counter_array);
- vat_json_init_object (counter);
- n4 = &vam->ip4_nbr_counters[i][j];
- vat_json_object_add_ip4 (counter, "address", n4->address);
- vat_json_object_add_uint (counter, "link-type", n4->linkt);
- vat_json_object_add_uint (counter, "packets", n4->packets);
- vat_json_object_add_uint (counter, "bytes", n4->bytes);
- }
- }
-
- /* ip6 nbr counters */
- msg_array = vat_json_object_add (&node, "ip6_nbr_counters");
- vat_json_init_array (msg_array);
- for (i = 0; i < vec_len (vam->ip6_nbr_counters); i++)
- {
- msg = vat_json_array_add (msg_array);
- vat_json_init_object (msg);
- vat_json_object_add_uint (msg, "sw_if_index", i);
- counter_array = vat_json_object_add (msg, "c");
- vat_json_init_array (counter_array);
- for (j = 0; j < vec_len (vam->ip6_nbr_counters[i]); j++)
- {
- counter = vat_json_array_add (counter_array);
- vat_json_init_object (counter);
- n6 = &vam->ip6_nbr_counters[i][j];
- vat_json_object_add_ip6 (counter, "address", n6->address);
- vat_json_object_add_uint (counter, "packets", n6->packets);
- vat_json_object_add_uint (counter, "bytes", n6->bytes);
- }
- }
-
- vat_json_print (vam->ofp, &node);
- vat_json_free (&node);
-
- return 0;
-}
-
/*
* Pass CLI buffers directly in the CLI_INBAND API message,
* instead of an additional shared memory area.
@@ -6467,38 +5966,6 @@ api_delete_loopback (vat_main_t * vam)
}
static int
-api_want_stats (vat_main_t * vam)
-{
- unformat_input_t *i = vam->input;
- vl_api_want_stats_t *mp;
- int enable = -1;
- int ret;
-
- while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
- {
- if (unformat (i, "enable"))
- enable = 1;
- else if (unformat (i, "disable"))
- enable = 0;
- else
- break;
- }
-
- if (enable == -1)
- {
- errmsg ("missing enable|disable");
- return -99;
- }
-
- M (WANT_STATS, mp);
- mp->enable_disable = enable;
-
- S (mp);
- W (ret);
- return ret;
-}
-
-static int
api_want_interface_events (vat_main_t * vam)
{
unformat_input_t *i = vam->input;
@@ -23889,7 +23356,6 @@ _(delete_loopback,"sw_if_index <nn>") \
_(bd_ip_mac_add_del, "bd_id <bridge-domain-id> <ip4/6-addr> <mac-addr> [del]") \
_(bd_ip_mac_dump, "[bd_id] <id>") \
_(want_interface_events, "enable|disable") \
-_(want_stats,"enable|disable") \
_(get_first_msg_id, "client <name>") \
_(cop_interface_enable_disable, "<intfc> | sw_if_index <nn> [disable]") \
_(cop_whitelist_enable_disable, "<intfc> | sw_if_index <nn>\n" \
@@ -24111,7 +23577,6 @@ _(dump_interface_table, "usage: dump_interface_table") \
_(dump_sub_interface_table, "usage: dump_sub_interface_table") \
_(dump_ipv4_table, "usage: dump_ipv4_table") \
_(dump_ipv6_table, "usage: dump_ipv6_table") \
-_(dump_stats_table, "usage: dump_stats_table") \
_(dump_macro_table, "usage: dump_macro_table ") \
_(dump_node_table, "usage: dump_node_table") \
_(dump_msg_api_table, "usage: dump_msg_api_table") \
diff --git a/src/vnet/interface.api b/src/vnet/interface.api
index 84e0483df67..69de722b78b 100644
--- a/src/vnet/interface.api
+++ b/src/vnet/interface.api
@@ -274,80 +274,6 @@ define sw_interface_get_table_reply
u32 vrf_id;
};
-typeonly manual_print manual_endian define vlib_counter
-{
- u64 packets; /**< packet counter */
- u64 bytes; /**< byte counter */
-};
-
-/** \brief Combined interface counter data type for vnet_interface_combined_counters
- @param sw_if_index - interface indexes for counters
- @param rx_packets - received packet count
- @param rx_bytes - received byte count
- @param rx_unicast_packets - received unicast packet count
- @param rx_unicast_bytes - received unicast byte count
- @param rx_multicast_packets - received multicast packet count
- @param rx_multicast_bytes - received multicast byte count
- @param rx_broadcast_packets - received broadcast packet count
- @param rx_broadcast_bytes - received broadcast byte count
- @param tx_packets - transmitted packet count
- @param tx_bytes - transmitted byte count
- @param tx_unicast_packets - transmitted unicast packet count
- @param tx_unicast_bytes - transmitted unicast byte count
- @param tx_multicast_packets - transmitted multicast packet count
- @param tx_multicast_bytes - transmitted multicast byte count
- @param tx_broadcast_packets - transmitted broadcast packet count
- @param tx_broadcast_bytes - transmitted broadcast byte count
-
-*/
-typeonly manual_print manual_endian define vnet_combined_counter
-{
- u32 sw_if_index;
- u64 rx_packets; /**< packet counter */
- u64 rx_bytes; /**< byte counter */
- u64 rx_unicast_packets; /**< packet counter */
- u64 rx_unicast_bytes; /**< byte counter */
- u64 rx_multicast_packets; /**< packet counter */
- u64 rx_multicast_bytes; /**< byte counter */
- u64 rx_broadcast_packets; /**< packet counter */
- u64 rx_broadcast_bytes; /**< byte counter */
- u64 tx_packets; /**< packet counter */
- u64 tx_bytes; /**< byte counter */
- u64 tx_unicast_packets; /**< packet counter */
- u64 tx_unicast_bytes; /**< byte counter */
- u64 tx_multicast_packets; /**< packet counter */
- u64 tx_multicast_bytes; /**< byte counter */
- u64 tx_broadcast_packets; /**< packet counter */
- u64 tx_broadcast_bytes; /**< byte counter */
-};
-
-/** \brief Simple interface counter data type for vnet_interface_simple_counters
- @param sw_if_index - interface indexes for counters
- @param drop - RX or TX drops due to buffer starvation
- @param punt - used with VNET "punt" disposition
- @param rx_ip4 - received IP4 packets
- @param rx_ip6 - received IP6 packets
- @param rx_no_buffer - no RX buffers available
- @param rx_miss - receive misses
- @param rx_error - receive errors
- @param tx_error - transmit errors
- @param rx_mpls - received MPLS packet
-
-*/
-typeonly manual_print manual_endian define vnet_simple_counter
-{
- u32 sw_if_index;
- u64 drop;
- u64 punt;
- u64 rx_ip4;
- u64 rx_ip6;
- u64 rx_no_buffer;
- u64 rx_miss;
- u64 rx_error;
- u64 tx_error;
- u64 rx_mpls;
-};
-
/** \brief Set unnumbered interface add / del request
@param client_index - opaque cookie to identify the sender
@param context - sender context, to match reply w/ request
diff --git a/src/vpp/CMakeLists.txt b/src/vpp/CMakeLists.txt
index 98e98e5d7ae..7de8c7a9089 100644
--- a/src/vpp/CMakeLists.txt
+++ b/src/vpp/CMakeLists.txt
@@ -35,7 +35,6 @@ option(VPP_API_TEST_BUILTIN "Use builtin VPP API test." ON)
set(VPP_API_FILES
api/vpe.api
- stats/stats.api
oam/oam.api
)
@@ -56,7 +55,6 @@ set(VPP_SOURCES
app/version.c
oam/oam.c
oam/oam_api.c
- stats/stats_to_be_deprecated.c
stats/stat_segment.c
api/api.c
api/json_format.c
@@ -107,13 +105,6 @@ add_vpp_executable(vpp_get_metrics
##############################################################################
# stats binaries
##############################################################################
-add_vpp_executable(summary_stats_client
- SOURCES api/summary_stats_client.c
- LINK_LIBRARIES vppinfra svm vlibmemoryclient
- DEPENDS api_headers
- NO_INSTALL
-)
-
add_vpp_executable(vpp_get_stats
SOURCES app/vpp_get_stats.c
LINK_LIBRARIES vppapiclient vppinfra
diff --git a/src/vpp/api/api.c b/src/vpp/api/api.c
index 3fce77acfc8..ed769d284fc 100644
--- a/src/vpp/api/api.c
+++ b/src/vpp/api/api.c
@@ -102,9 +102,6 @@ memclnt_delete_callback (u32 client_index)
vpe_api_main_t *vam = &vpe_api_main;
vpe_client_registration_t *rp;
uword *p;
- int stats_memclnt_delete_callback (u32 client_index);
-
- stats_memclnt_delete_callback (client_index);
#define _(a) \
p = hash_get (vam->a##_registration_hash, client_index); \
diff --git a/src/vpp/api/summary_stats_client.c b/src/vpp/api/summary_stats_client.c
deleted file mode 100644
index 60a0cd921b5..00000000000
--- a/src/vpp/api/summary_stats_client.c
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- *------------------------------------------------------------------
- * summary_stats_client -
- *
- * Copyright (c) 2010 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *------------------------------------------------------------------
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <netinet/in.h>
-#include <netdb.h>
-#include <signal.h>
-#include <pthread.h>
-#include <unistd.h>
-#include <time.h>
-#include <fcntl.h>
-#include <string.h>
-#include <vppinfra/clib.h>
-#include <vppinfra/vec.h>
-#include <vppinfra/hash.h>
-#include <vppinfra/bitmap.h>
-#include <vppinfra/fifo.h>
-#include <vppinfra/time.h>
-#include <vppinfra/mheap.h>
-#include <vppinfra/heap.h>
-#include <vppinfra/pool.h>
-#include <vppinfra/format.h>
-#include <vppinfra/error.h>
-
-#include <vnet/vnet.h>
-#include <vlib/vlib.h>
-#include <vlib/unix/unix.h>
-#include <vlibapi/api.h>
-#include <vlibmemory/api.h>
-
-#include <vpp/api/vpe_msg_enum.h>
-
-#include <vnet/ip/ip.h>
-
-#define f64_endian(a)
-#define f64_print(a,b)
-
-#define vl_typedefs /* define message structures */
-#include <vpp/api/vpe_all_api_h.h>
-#undef vl_typedefs
-
-#define vl_endianfun /* define message structures */
-#include <vpp/api/vpe_all_api_h.h>
-#undef vl_endianfun
-
-/* instantiate all the print functions we know about */
-#define vl_print(handle, ...)
-#define vl_printfun
-#include <vpp/api/vpe_all_api_h.h>
-#undef vl_printfun
-
-vl_shmem_hdr_t *shmem_hdr;
-
-typedef struct
-{
- volatile int sigterm_received;
-
- struct sockaddr_in send_data_addr;
- int send_data_socket;
- u8 *display_name;
-
- /* convenience */
- svm_queue_t *vl_input_queue;
- u32 my_client_index;
-} test_main_t;
-
-test_main_t test_main;
-
-/*
- * Satisfy external references when -lvlib is not available.
- */
-vlib_main_t vlib_global_main;
-vlib_main_t **vlib_mains;
-
-void
-vlib_cli_output (struct vlib_main_t *vm, char *fmt, ...)
-{
- clib_warning ("vlib_cli_output called...");
-}
-
-
-static void
- vl_api_vnet_get_summary_stats_reply_t_handler
- (vl_api_vnet_get_summary_stats_reply_t * mp)
-{
- test_main_t *tm = &test_main;
- static u8 *sb;
- int n;
-
- printf ("total rx pkts %llu, total rx bytes %llu\n",
- (unsigned long long) mp->total_pkts[0],
- (unsigned long long) mp->total_bytes[0]);
- printf ("total tx pkts %llu, total tx bytes %llu\n",
- (unsigned long long) mp->total_pkts[1],
- (unsigned long long) mp->total_bytes[1]);
- printf ("vector rate %.2f\n", mp->vector_rate);
-
- vec_reset_length (sb);
- sb = format (sb, "%v,%.0f,%llu,%llu,%llu,%llu\n%c",
- tm->display_name, mp->vector_rate,
- (unsigned long long) mp->total_pkts[0],
- (unsigned long long) mp->total_bytes[0],
- (unsigned long long) mp->total_pkts[1],
- (unsigned long long) mp->total_bytes[1], 0);
-
- n = sendto (tm->send_data_socket, sb, vec_len (sb),
- 0, (struct sockaddr *) &tm->send_data_addr,
- sizeof (tm->send_data_addr));
-
- if (n != vec_len (sb))
- clib_unix_warning ("sendto");
-
-}
-
-#define foreach_api_msg \
-_(VNET_GET_SUMMARY_STATS_REPLY, vnet_get_summary_stats_reply)
-
-int
-connect_to_vpe (char *name)
-{
- int rv = 0;
-
- rv = vl_client_connect_to_vlib ("/vpe-api", name, 32);
-
-#define _(N,n) \
- vl_msg_api_set_handlers(VL_API_##N, #n, \
- vl_api_##n##_t_handler, \
- vl_noop_handler, \
- vl_api_##n##_t_endian, \
- vl_api_##n##_t_print, \
- sizeof(vl_api_##n##_t), 1);
- foreach_api_msg;
-#undef _
-
- shmem_hdr = api_main.shmem_hdr;
-
- return rv;
-}
-
-int
-disconnect_from_vpe (void)
-{
- vl_client_disconnect_from_vlib ();
- return 0;
-}
-
-static void
-sigterm_handler (int sig)
-{
- test_main_t *tm = &test_main;
- tm->sigterm_received = 1;
-}
-
-/* Parse an IP4 address %d.%d.%d.%d. */
-uword
-unformat_ip4_address (unformat_input_t * input, va_list * args)
-{
- u8 *result = va_arg (*args, u8 *);
- unsigned a[4];
-
- if (!unformat (input, "%d.%d.%d.%d", &a[0], &a[1], &a[2], &a[3]))
- return 0;
-
- if (a[0] >= 256 || a[1] >= 256 || a[2] >= 256 || a[3] >= 256)
- return 0;
-
- result[0] = a[0];
- result[1] = a[1];
- result[2] = a[2];
- result[3] = a[3];
-
- return 1;
-}
-
-int
-main (int argc, char **argv)
-{
- api_main_t *am = &api_main;
- test_main_t *tm = &test_main;
- vl_api_vnet_get_summary_stats_t *mp;
- unformat_input_t _input, *input = &_input;
- clib_error_t *error = 0;
- ip4_address_t collector_ip;
- u8 *display_name = 0;
- u16 collector_port = 7654;
-
- collector_ip.as_u32 = (u32) ~ 0;
-
- unformat_init_command_line (input, argv);
-
- while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
- {
- if (unformat (input, "collector-ip %U",
- unformat_ip4_address, &collector_ip))
- ;
- else if (unformat (input, "display-name %v", &display_name))
- ;
- else if (unformat (input, "collector-port %d", &collector_port))
- ;
- else
- {
- error =
- clib_error_return
- (0, "Usage: %s collector-ip <ip>\n"
- " [display-name <string>] [collector-port <num>]\n"
- " port defaults to 7654", argv[0]);
- break;
- }
- }
-
- if (error == 0 && collector_ip.as_u32 == (u32) ~ 0)
- error = clib_error_return (0, "collector-ip not set...\n");
-
-
- if (error)
- {
- clib_error_report (error);
- exit (1);
- }
-
- if (display_name == 0)
- {
- display_name = format (0, "vpe-to-%d.%d.%d.%d",
- collector_ip.as_u8[0],
- collector_ip.as_u8[1],
- collector_ip.as_u8[2], collector_ip.as_u8[3]);
- }
-
-
- connect_to_vpe ("test_client");
-
- tm->vl_input_queue = shmem_hdr->vl_input_queue;
- tm->my_client_index = am->my_client_index;
- tm->display_name = display_name;
-
- signal (SIGTERM, sigterm_handler);
- signal (SIGINT, sigterm_handler);
- signal (SIGQUIT, sigterm_handler);
-
- /* data (multicast) RX socket */
- tm->send_data_socket = socket (PF_INET, SOCK_DGRAM, IPPROTO_UDP);
- if (tm->send_data_socket < 0)
- {
- clib_unix_warning (0, "data_rx_socket");
- exit (1);
- }
-
- clib_memset (&tm->send_data_addr, 0, sizeof (tm->send_data_addr));
- tm->send_data_addr.sin_family = AF_INET;
- tm->send_data_addr.sin_addr.s_addr = collector_ip.as_u32;
- tm->send_data_addr.sin_port = htons (collector_port);
-
- fformat (stdout, "Send SIGINT or SIGTERM to quit...\n");
-
- while (1)
- {
- sleep (5);
-
- if (tm->sigterm_received)
- break;
- /* Poll for stats */
- mp = vl_msg_api_alloc (sizeof (*mp));
- clib_memset (mp, 0, sizeof (*mp));
- mp->_vl_msg_id = ntohs (VL_API_VNET_GET_SUMMARY_STATS);
- mp->client_index = tm->my_client_index;
- vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & mp);
- }
-
- fformat (stdout, "Exiting...\n");
-
- disconnect_from_vpe ();
- exit (0);
-}
-
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
diff --git a/src/vpp/api/vpe.api b/src/vpp/api/vpe.api
index 24d44bd31e7..378a3b3a5fa 100644
--- a/src/vpp/api/vpe.api
+++ b/src/vpp/api/vpe.api
@@ -50,7 +50,6 @@ option version = "1.1.0";
* DHCP APIs: see ... /src/vnet/dhcp/{dhcp.api, dhcp_api.c}
* COP APIs: see ... /src/vnet/cop/{cop.api, cop_api.c}
* POLICER APIs: see ... /src/vnet/policer/{policer.api, policer_api.c}
- * STATS APIs: see .../src/vpp/stats/{stats.api, stats.c}
* BIER APIs: see ... /src/vnet/policer/{bier.api, bier_api.c}
*/
diff --git a/src/vpp/api/vpe_all_api_h.h b/src/vpp/api/vpe_all_api_h.h
index de913668510..343efe470db 100644
--- a/src/vpp/api/vpe_all_api_h.h
+++ b/src/vpp/api/vpe_all_api_h.h
@@ -28,8 +28,7 @@
/* Include the current layer (third) vpp API definition layer */
#include <vpp/api/vpe.api.h>
-/* Include stats & OAM APIs */
-#include <vpp/stats/stats.api.h>
+/* Include OAM APIs */
#include <vpp/oam/oam.api.h>
/*
diff --git a/src/vpp/stats/stats.api b/src/vpp/stats/stats.api
deleted file mode 100644
index c94471ae93c..00000000000
--- a/src/vpp/stats/stats.api
+++ /dev/null
@@ -1,521 +0,0 @@
-/*
- * Copyright (c) 2015-2016 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** \file
-
- This file defines the stats API
-*/
-
-option version = "1.0.2";
-
-import "vnet/interface.api";
-import "vnet/bier/bier.api";
-
-service {
- rpc want_stats
- returns want_stats_reply;
- rpc want_interface_simple_stats
- returns want_interface_simple_stats_reply
- events vnet_interface_simple_counters;
- rpc want_per_interface_simple_stats
- returns want_per_interface_simple_stats_reply
- events vnet_per_interface_simple_counters;
- rpc want_interface_combined_stats
- returns want_interface_combined_stats_reply
- events vnet_interface_combined_counters;
- rpc want_per_interface_combined_stats
- returns want_per_interface_combined_stats_reply
- events vnet_per_interface_combined_counters;
- rpc want_ip4_fib_stats
- returns want_ip4_fib_stats_reply
- events vnet_ip4_fib_counters;
- rpc want_ip6_fib_stats
- returns want_ip6_fib_stats_reply
- events vnet_ip6_fib_counters;
- rpc want_ip4_mfib_stats
- returns want_ip4_mfib_stats_reply
- events vnet_ip4_mfib_counters;
- rpc want_ip6_mfib_stats
- returns want_ip6_mfib_stats_reply
- events vnet_ip6_mfib_counters;
- rpc want_ip4_nbr_stats
- returns want_ip4_nbr_stats_reply
- events vnet_ip4_nbr_counters;
- rpc want_ip6_nbr_stats
- returns want_ip6_nbr_stats_reply
- events vnet_ip6_nbr_counters;
- rpc want_udp_encap_stats
- returns want_udp_encap_stats_reply
- events vnet_udp_encap_counters;
- rpc want_bier_neighbor_stats
- returns want_bier_neighbor_stats_reply
- events vnet_bier_neighbor_counters;
-};
-
-/** \brief Want Stats, enable/disable ALL stats updates
- @param client_index - opaque cookie to identify the sender
- @param context - sender context, to match reply w/ request
- @param enable_disable - 1 = enable stats, 0 = disable
- @param pid - pid of process requesting stats updates
-*/
-autoreply define want_stats
-{
- u32 client_index;
- u32 context;
- u32 enable_disable;
- u32 pid;
-};
-
-/** \brief Want Interface Simple Stats, register for detailed interface stats
- @param client_index - opaque cookie to identify the sender
- @param context - sender context, to match reply w/ request
- @param enable_disable - 1 = enable stats, 0 = disable
- @param pid - pid of process requesting stats updates
-
- Please consider using want_per_interface_simple_stats with sw_if_index=~0
-*/
-autoreply define want_interface_simple_stats
-{
- u32 client_index;
- u32 context;
- u32 enable_disable;
- u32 pid;
-};
-
-/** \brief Want Per Interface simple Stats, register for continuous stats
- @param client_index - opaque cookie to identify the sender
- @param context - sender context, to match reply w/ request
- @param enable_disable - 1 = enable stats, 0 = disable
- @param pid - pid of process requesting stats updates
- @param num - number of sw_if_indexes
- @param sw_ifs - array of sw_if_index
-*/
-autoreply define want_per_interface_simple_stats
-{
- u32 client_index;
- u32 context;
- u32 enable_disable;
- u32 pid;
- u32 num;
- u32 sw_ifs[num];
-
-};
-
-/** \brief Want Interface Combined Stats, register for continuous stats
- @param client_index - opaque cookie to identify the sender
- @param context - sender context, to match reply w/ request
- @param enable_disable - 1 = enable stats, 0 = disable
- @param pid - pid of process requesting stats updates
-
- Please consider using want_per_interface_combined_stats with sw_if_index=~0
-
-*/
-autoreply define want_interface_combined_stats
-{
- u32 client_index;
- u32 context;
- u32 enable_disable;
- u32 pid;
-};
-
-/** \brief Want Per Interface Combined Stats, register for continuous stats
- @param client_index - opaque cookie to identify the sender
- @param context - sender context, to match reply w/ request
- @param enable_disable - 1 = enable stats, 0 = disable
- @param pid - pid of process requesting stats updates
- @param num - number of sw_if_indexes
- @param sw_ifs - array of sw_if_index
-*/
-autoreply define want_per_interface_combined_stats
-{
- u32 client_index;
- u32 context;
- u32 enable_disable;
- u32 pid;
- u32 num;
- u32 sw_ifs[num];
-
-};
-
-/** \brief Want IP4 FIB Stats, register for continuous stats
- @param client_index - opaque cookie to identify the sender
- @param context - sender context, to match reply w/ request
- @param enable_disable - 1 = enable stats, 0 = disable
- @param pid - pid of process requesting stats updates
-*/
-autoreply define want_ip4_fib_stats
-{
- u32 client_index;
- u32 context;
- u32 enable_disable;
- u32 pid;
-};
-
-/** \brief Want IP6 FIB Stats, register for continuous stats
- @param client_index - opaque cookie to identify the sender
- @param context - sender context, to match reply w/ request
- @param enable_disable - 1 = enable stats, 0 = disable
- @param pid - pid of process requesting stats updates
-*/
-autoreply define want_ip6_fib_stats
-{
- u32 client_index;
- u32 context;
- u32 enable_disable;
- u32 pid;
-};
-
-/** \brief Want IP4 multicast FIB Stats, register for continuous stats
- @param client_index - opaque cookie to identify the sender
- @param context - sender context, to match reply w/ request
- @param enable_disable - 1 = enable stats, 0 = disable
- @param pid - pid of process requesting stats updates
-*/
-autoreply define want_ip4_mfib_stats
-{
- u32 client_index;
- u32 context;
- u32 enable_disable;
- u32 pid;
-};
-
-/** \brief Want IP6 multicast FIB Stats, register for continuous stats
- @param client_index - opaque cookie to identify the sender
- @param context - sender context, to match reply w/ request
- @param enable_disable - 1 = enable stats, 0 = disable
- @param pid - pid of process requesting stats updates
-*/
-autoreply define want_ip6_mfib_stats
-{
- u32 client_index;
- u32 context;
- u32 enable_disable;
- u32 pid;
-};
-
-/** \brief Want IP4 NBR Stats, register for continuous stats
- @param client_index - opaque cookie to identify the sender
- @param context - sender context, to match reply w/ request
- @param enable_disable - 1 = enable stats, 0 = disable
- @param pid - pid of process requesting stats updates
-*/
-autoreply define want_ip4_nbr_stats
-{
- u32 client_index;
- u32 context;
- u32 enable_disable;
- u32 pid;
-};
-
-/** \brief Want IP6 NBR Stats, register for continuous stats
- @param client_index - opaque cookie to identify the sender
- @param context - sender context, to match reply w/ request
- @param enable_disable - 1 = enable stats, 0 = disable
- @param pid - pid of process requesting stats updates
-*/
-autoreply define want_ip6_nbr_stats
-{
- u32 client_index;
- u32 context;
- u32 enable_disable;
- u32 pid;
-};
-
-typeonly manual_print manual_endian define ip4_fib_counter
-{
- u32 address;
- u8 address_length;
- u64 packets;
- u64 bytes;
-};
-
-manual_print manual_endian define vnet_ip4_fib_counters
-{
- u32 vrf_id;
- u32 count;
- vl_api_ip4_fib_counter_t c[count];
-};
-
-typeonly manual_print manual_endian define ip4_mfib_counter
-{
- u8 source[4];
- u8 group[4];
- u8 group_length;
- u64 packets;
- u64 bytes;
-};
-
-manual_print manual_endian define vnet_ip4_mfib_counters
-{
- u32 vrf_id;
- u32 count;
- vl_api_ip4_mfib_counter_t c[count];
-};
-
-typeonly manual_print manual_endian define ip4_nbr_counter
-{
- u32 address;
- u8 link_type;
- u64 packets;
- u64 bytes;
-};
-
-/**
- * @brief Per-neighbour (i.e. per-adjacency) counters
- * @param count The size of the array of counters
- * @param sw_if_index The interface the adjacency is on
- * @param begin Flag to indicate this is the first set of stats for this
- * interface. If this flag is not set the it is a continuation of
- * stats for this interface
- * @param c counters
- */
-manual_print manual_endian define vnet_ip4_nbr_counters
-{
- u32 count;
- u32 sw_if_index;
- u8 begin;
- vl_api_ip4_nbr_counter_t c[count];
-};
-
-typeonly manual_print manual_endian define ip6_fib_counter
-{
- u64 address[2];
- u8 address_length;
- u64 packets;
- u64 bytes;
-};
-
-manual_print manual_endian define vnet_ip6_fib_counters
-{
- u32 vrf_id;
- u32 count;
- vl_api_ip6_fib_counter_t c[count];
-};
-
-typeonly manual_print manual_endian define ip6_mfib_counter
-{
- u8 source[16];
- u8 group[16];
- u8 group_length;
- u64 packets;
- u64 bytes;
-};
-
-manual_print manual_endian define vnet_ip6_mfib_counters
-{
- u32 vrf_id;
- u32 count;
- vl_api_ip6_mfib_counter_t c[count];
-};
-
-typeonly manual_print manual_endian define ip6_nbr_counter
-{
- u64 address[2];
- u8 link_type;
- u64 packets;
- u64 bytes;
-};
-
-manual_print manual_endian define vnet_ip6_nbr_counters
-{
- u32 count;
- u32 sw_if_index;
- u8 begin;
- vl_api_ip6_nbr_counter_t c[count];
-};
-
-/** \brief Simple stats counters structure
- @param vnet_counter_type- such as ip4, ip6, punts, etc
- @param first_sw_if_index - first sw index in block of index, counts
- @param count - number of counters, equal to the number of interfaces in
- this stats block
- @param data - contiguous block of u64 counters
-
- vnet_counter_type defined in enums - plural - in vnet/interface.h
-*/
-manual_print manual_endian define vnet_interface_simple_counters
-{
- u8 vnet_counter_type;
- u32 first_sw_if_index;
- u32 count;
- u64 data[count];
-};
-
-/** \brief Combined stats counters structure
- @param vnet_counter_type- such as ip4, ip6, punts, etc
- @param first_sw_if_index - first sw index in block of index, counts
- @param count - number of counters, equal to the number of interfaces in
- this stats block
- @param data - contiguous block of vlib_counter_t structures
-
- vnet_counter_type defined in enums - plural - in vnet/interface.h
-*/
-manual_print manual_endian define vnet_interface_combined_counters
-{
- u8 vnet_counter_type;
- u32 first_sw_if_index;
- u32 count;
- vl_api_vlib_counter_t data[count];
-};
-
-/** \brief Simple per interface stats counters structure
- @param count - number of elements in message
- @param timestamp - u32 vlib timestamp for control plane
- @param data[count] - vl_api_vnet_simple_counter_t
-
-*/
-manual_print manual_endian define vnet_per_interface_simple_counters
-{
- u32 count;
- u32 timestamp;
- vl_api_vnet_simple_counter_t data[count];
-};
-
-/** \brief Combined stats counters structure per interface
- @param count - number of elements in message
- @param timestamp - u32 vlib timestamp for control plane
- @param data[count] - vl_api_vnet_combined_counter_t
-*/
-manual_print manual_endian define vnet_per_interface_combined_counters
-{
- u32 count;
- u32 timestamp;
- vl_api_vnet_combined_counter_t data[count];
-};
-
-/** \brief Request for a single block of summary stats
- @param client_index - opaque cookie to identify the sender
- @param context - sender context, to match reply w/ request
-*/
-define vnet_get_summary_stats
-{
- u32 client_index;
- u32 context;
-};
-
-/** \brief Reply for vnet_get_summary_stats request
- @param context - sender context, to match reply w/ request
- @param retval - return code for request
- @param total_pkts - length of the array must match the length of
- the combined counter part of the enum in interface.h
- @param total_bytes - length of the array must match the length of
- the combined counter part of the enum in interface.h
- @param vector_rate -
-*/
-define vnet_get_summary_stats_reply
-{
- u32 context;
- i32 retval;
- u64 total_pkts[8];
- u64 total_bytes[8];
- f64 vector_rate;
-};
-
-/** \brief Get delay between polling statistics
- @param client_index - opaque cookie to identify the sender
- @param context - sender context, to match reply w/ request
-*/
-define stats_get_poller_delay
-{
- u32 client_index;
- u32 context;
-};
-
-/** \brief Get delay between polling statistics reply
- @param context - sender context, to match reply w/ request
- @param retval - return code for request
- @param delay - poller delay
-*/
-define stats_get_poller_delay_reply
-{
- u32 context;
- i32 retval;
- u32 delay;
-};
-
-/** \brief Want UDP encap Stats, register for continuous stats
- @param client_index - opaque cookie to identify the sender
- @param context - sender context, to match reply w/ request
- @param enable - 1 = enable stats, 0 = disable
- @param pid - pid of process requesting stats updates
-*/
-autoreply define want_udp_encap_stats
-{
- u32 client_index;
- u32 context;
- u32 enable;
- u32 pid;
-};
-
-/** \brief Stat for one UDP encap object
- @param id - The ID of the object, same as passed for the create
- @param packets - number of packets sent
- @param bytes - number of bytes sent
-*/
-typeonly manual_print manual_endian define udp_encap_counter
-{
- u32 id;
- u64 packets;
- u64 bytes;
-};
-
-manual_print manual_endian define vnet_udp_encap_counters
-{
- u32 timestamp;
- u32 count;
- vl_api_udp_encap_counter_t c[count];
-};
-
-/** \brief Want BIER neighbor Stats, register for continuous stats
- @param client_index - opaque cookie to identify the sender
- @param context - sender context, to match reply w/ request
- @param enable - 1 = enable stats, 0 = disable
- @param pid - pid of process requesting stats updates
-*/
-autoreply define want_bier_neighbor_stats
-{
- u32 client_index;
- u32 context;
- u32 enable;
- u32 pid;
-};
-
-/** \brief Stat for one BIER neighbor object
- @param tbl_id - The BIER Table ID the neighbour belongs to.
- @param path - The path describing the neighbor (this is the data
- given during a BIER route add)
- @param packets - number of packets sent
- @param bytes - number of bytes sent
-*/
-typeonly manual_print manual_endian define bier_neighbor_counter
-{
- vl_api_bier_table_id_t tbl_id;
- vl_api_fib_path_t path;
- u64 packets;
- u64 bytes;
-};
-
-manual_print manual_endian define vnet_bier_neighbor_counters
-{
- u32 timestamp;
- u32 count;
- vl_api_bier_neighbor_counter_t c[count];
-};
-
-/*
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
diff --git a/src/vpp/stats/stats_to_be_deprecated.c b/src/vpp/stats/stats_to_be_deprecated.c
deleted file mode 100644
index 08117a1b736..00000000000
--- a/src/vpp/stats/stats_to_be_deprecated.c
+++ /dev/null
@@ -1,3186 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "stats_to_be_deprecated.h"
-#include <signal.h>
-#include <vnet/fib/ip4_fib.h>
-#include <vnet/fib/fib_entry.h>
-#include <vnet/mfib/mfib_entry.h>
-#include <vnet/dpo/load_balance.h>
-#include <vnet/udp/udp_encap.h>
-#include <vnet/bier/bier_fmask.h>
-#include <vnet/bier/bier_table.h>
-#include <vnet/fib/fib_api.h>
-
-#define STATS_DEBUG 0
-
-stats_main_t stats_main;
-
-#include <vnet/ip/ip.h>
-
-#include <vpp/api/vpe_msg_enum.h>
-
-#define f64_endian(a)
-#define f64_print(a,b)
-
-#define vl_typedefs /* define message structures */
-#include <vpp/api/vpe_all_api_h.h>
-#undef vl_typedefs
-
-#define vl_endianfun /* define message structures */
-#include <vpp/api/vpe_all_api_h.h>
-#undef vl_endianfun
-
-/* instantiate all the print functions we know about */
-#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
-#define vl_printfun
-#include <vpp/api/vpe_all_api_h.h>
-#undef vl_printfun
-
-#define foreach_stats_msg \
-_(WANT_STATS, want_stats) \
-_(VNET_INTERFACE_SIMPLE_COUNTERS, vnet_interface_simple_counters) \
-_(WANT_INTERFACE_SIMPLE_STATS, want_interface_simple_stats) \
-_(VNET_INTERFACE_COMBINED_COUNTERS, vnet_interface_combined_counters) \
-_(WANT_INTERFACE_COMBINED_STATS, want_interface_combined_stats) \
-_(WANT_PER_INTERFACE_COMBINED_STATS, want_per_interface_combined_stats) \
-_(WANT_PER_INTERFACE_SIMPLE_STATS, want_per_interface_simple_stats) \
-_(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \
-_(WANT_IP4_FIB_STATS, want_ip4_fib_stats) \
-_(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \
-_(WANT_IP6_FIB_STATS, want_ip6_fib_stats) \
-_(WANT_IP4_MFIB_STATS, want_ip4_mfib_stats) \
-_(WANT_IP6_MFIB_STATS, want_ip6_mfib_stats) \
-_(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters) \
-_(WANT_IP4_NBR_STATS, want_ip4_nbr_stats) \
-_(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters) \
-_(WANT_IP6_NBR_STATS, want_ip6_nbr_stats) \
-_(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats) \
-_(STATS_GET_POLLER_DELAY, stats_get_poller_delay) \
-_(WANT_UDP_ENCAP_STATS, want_udp_encap_stats) \
-_(WANT_BIER_NEIGHBOR_STATS, want_bier_neighbor_stats)
-
-#define vl_msg_name_crc_list
-#include <vpp/stats/stats.api.h>
-#undef vl_msg_name_crc_list
-
-static void
-setup_message_id_table (api_main_t * am)
-{
-#define _(id,n,crc) \
- vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
- foreach_vl_msg_name_crc_stats;
-#undef _
-}
-
-/* These constants ensure msg sizes <= 1024, aka ring allocation */
-#define SIMPLE_COUNTER_BATCH_SIZE 126
-#define COMBINED_COUNTER_BATCH_SIZE 63
-#define IP4_FIB_COUNTER_BATCH_SIZE 48
-#define IP6_FIB_COUNTER_BATCH_SIZE 30
-#define IP4_MFIB_COUNTER_BATCH_SIZE 24
-#define IP6_MFIB_COUNTER_BATCH_SIZE 15
-#define UDP_ENCAP_COUNTER_BATCH_SIZE (1024 / sizeof(vl_api_udp_encap_counter_t))
-#define BIER_NEIGHBOR_COUNTER_BATCH_SIZE (1024 / sizeof(vl_api_bier_neighbor_counter_t))
-
-/* 5ms */
-#define STATS_RELEASE_DELAY_NS (1000 * 1000 * 5)
-/* ns/us us/ms */
-
-u8 *
-format_vnet_interface_combined_counters (u8 * s, va_list * args)
-{
- stats_main_t *sm = &stats_main;
- vl_api_vnet_interface_combined_counters_t *mp =
- va_arg (*args, vl_api_vnet_interface_combined_counters_t *);
-
- char *counter_name;
- u32 count, sw_if_index;
- int i;
- count = ntohl (mp->count);
- sw_if_index = ntohl (mp->first_sw_if_index);
-
- vlib_counter_t *vp;
- u64 packets, bytes;
- vp = (vlib_counter_t *) mp->data;
-
- switch (mp->vnet_counter_type)
- {
- case VNET_INTERFACE_COUNTER_RX:
- counter_name = "rx";
- break;
- case VNET_INTERFACE_COUNTER_TX:
- counter_name = "tx";
- break;
- default:
- counter_name = "bogus";
- break;
- }
- for (i = 0; i < count; i++)
- {
- packets = clib_mem_unaligned (&vp->packets, u64);
- packets = clib_net_to_host_u64 (packets);
- bytes = clib_mem_unaligned (&vp->bytes, u64);
- bytes = clib_net_to_host_u64 (bytes);
- vp++;
- s = format (s, "%U.%s.packets %lld\n",
- format_vnet_sw_if_index_name,
- sm->vnet_main, sw_if_index, counter_name, packets);
- s = format (s, "%U.%s.bytes %lld\n",
- format_vnet_sw_if_index_name,
- sm->vnet_main, sw_if_index, counter_name, bytes);
- sw_if_index++;
- }
- return s;
-}
-
-u8 *
-format_vnet_interface_simple_counters (u8 * s, va_list * args)
-{
- stats_main_t *sm = &stats_main;
- vl_api_vnet_interface_simple_counters_t *mp =
- va_arg (*args, vl_api_vnet_interface_simple_counters_t *);
- char *counter_name;
- u32 count, sw_if_index;
- count = ntohl (mp->count);
- sw_if_index = ntohl (mp->first_sw_if_index);
- u64 *vp, v;
- vp = (u64 *) mp->data;
- int i;
-
- switch (mp->vnet_counter_type)
- {
- case VNET_INTERFACE_COUNTER_DROP:
- counter_name = "drop";
- break;
- case VNET_INTERFACE_COUNTER_PUNT:
- counter_name = "punt";
- break;
- case VNET_INTERFACE_COUNTER_IP4:
- counter_name = "ip4";
- break;
- case VNET_INTERFACE_COUNTER_IP6:
- counter_name = "ip6";
- break;
- case VNET_INTERFACE_COUNTER_RX_NO_BUF:
- counter_name = "rx-no-buff";
- break;
- case VNET_INTERFACE_COUNTER_RX_MISS:
- counter_name = "rx-miss";
- break;
- case VNET_INTERFACE_COUNTER_RX_ERROR:
- counter_name = "rx-error (fifo-full)";
- break;
- case VNET_INTERFACE_COUNTER_TX_ERROR:
- counter_name = "tx-error (fifo-full)";
- break;
- default:
- counter_name = "bogus";
- break;
- }
- for (i = 0; i < count; i++)
- {
- v = clib_mem_unaligned (vp, u64);
- v = clib_net_to_host_u64 (v);
- vp++;
- s = format (s, "%U.%s %lld\n", format_vnet_sw_if_index_name,
- sm->vnet_main, sw_if_index, counter_name, v);
- sw_if_index++;
- }
-
- return s;
-}
-
-static void
-dslock (stats_main_t * sm, int release_hint, int tag)
-{
- u32 thread_index;
- data_structure_lock_t *l = sm->data_structure_lock;
-
- if (PREDICT_FALSE (l == 0))
- return;
-
- thread_index = vlib_get_thread_index ();
- if (l->lock && l->thread_index == thread_index)
- {
- l->count++;
- return;
- }
-
- if (release_hint)
- l->release_hint++;
-
- while (clib_atomic_test_and_set (&l->lock))
- /* zzzz */ ;
- l->tag = tag;
- l->thread_index = thread_index;
- l->count = 1;
-}
-
-void
-stats_dslock_with_hint (int hint, int tag)
-{
- stats_main_t *sm = &stats_main;
- dslock (sm, hint, tag);
-}
-
-static void
-dsunlock (stats_main_t * sm)
-{
- u32 thread_index;
- data_structure_lock_t *l = sm->data_structure_lock;
-
- if (PREDICT_FALSE (l == 0))
- return;
-
- thread_index = vlib_get_thread_index ();
- ASSERT (l->lock && l->thread_index == thread_index);
- l->count--;
- if (l->count == 0)
- {
- l->tag = -l->tag;
- l->release_hint = 0;
- CLIB_MEMORY_BARRIER ();
- l->lock = 0;
- }
-}
-
-void
-stats_dsunlock (int hint, int tag)
-{
- stats_main_t *sm = &stats_main;
- dsunlock (sm);
-}
-
-static vpe_client_registration_t *
-get_client_for_stat (u32 reg, u32 item, u32 client_index)
-{
- stats_main_t *sm = &stats_main;
- vpe_client_stats_registration_t *registration;
- uword *p;
-
- /* Is there anything listening for item in that reg */
- p = hash_get (sm->stats_registration_hash[reg], item);
-
- if (!p)
- return 0; // Fail
-
- /* If there is, is our client_index one of them */
- registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
- p = hash_get (registration->client_hash, client_index);
-
- if (!p)
- return 0; // Fail
-
- return pool_elt_at_index (registration->clients, p[0]);
-
-}
-
-static int
-set_client_for_stat (u32 reg, u32 item, vpe_client_registration_t * client)
-{
- stats_main_t *sm = &stats_main;
- vpe_client_stats_registration_t *registration;
- vpe_client_registration_t *cr;
- uword *p;
-
- /* Is there anything listening for item in that reg */
- p = hash_get (sm->stats_registration_hash[reg], item);
-
- if (!p)
- {
- pool_get (sm->stats_registrations[reg], registration);
- registration->item = item;
- registration->client_hash = NULL;
- registration->clients = NULL;
- hash_set (sm->stats_registration_hash[reg], item,
- registration - sm->stats_registrations[reg]);
- }
- else
- {
- registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
- }
-
- p = hash_get (registration->client_hash, client->client_index);
-
- if (!p)
- {
- pool_get (registration->clients, cr);
- cr->client_index = client->client_index;
- cr->client_pid = client->client_pid;
- hash_set (registration->client_hash, cr->client_index,
- cr - registration->clients);
- }
-
- return 1; //At least one client is doing something ... poll
-}
-
-static void
-clear_one_client (u32 reg_index, u32 reg, u32 item, u32 client_index)
-{
- stats_main_t *sm = &stats_main;
- vpe_client_stats_registration_t *registration;
- vpe_client_registration_t *client;
- uword *p;
-
- registration = pool_elt_at_index (sm->stats_registrations[reg], reg_index);
- p = hash_get (registration->client_hash, client_index);
-
- if (p)
- {
- client = pool_elt_at_index (registration->clients, p[0]);
- hash_unset (registration->client_hash, client->client_index);
- pool_put (registration->clients, client);
-
- /* Now check if that was the last client for that item */
- if (0 == pool_elts (registration->clients))
- {
- hash_unset (sm->stats_registration_hash[reg], item);
- hash_free (registration->client_hash);
- pool_free (registration->clients);
- pool_put (sm->stats_registrations[reg], registration);
- }
- }
-}
-
-int
-clear_client_for_stat (u32 reg, u32 item, u32 client_index)
-{
- stats_main_t *sm = &stats_main;
- uword *p;
- int i, elts;
-
- /* Clear the client first */
- /* Is there anything listening for item in that reg */
- p = hash_get (sm->stats_registration_hash[reg], item);
-
- if (!p)
- goto exit;
-
- /* If there is, is our client_index one of them */
- clear_one_client (p[0], reg, item, client_index);
-
-exit:
- elts = 0;
- /* Now check if that was the last item in any of the listened to stats */
- for (i = 0; i < STATS_REG_N_IDX; i++)
- {
- elts += pool_elts (sm->stats_registrations[i]);
- }
- return elts;
-}
-
-static int
-clear_client_for_all_stats (u32 client_index)
-{
- stats_main_t *sm = &stats_main;
- u32 reg_index, item, reg;
- int i, elts;
-
- /* *INDENT-OFF* */
- vec_foreach_index(reg, sm->stats_registration_hash)
- {
- hash_foreach(item, reg_index, sm->stats_registration_hash[reg],
- ({
- clear_one_client(reg_index, reg, item, client_index);
- }));
- }
- /* *INDENT-ON* */
-
- elts = 0;
- /* Now check if that was the last item in any of the listened to stats */
- for (i = 0; i < STATS_REG_N_IDX; i++)
- {
- elts += pool_elts (sm->stats_registrations[i]);
- }
- return elts;
-}
-
-static clib_error_t *
-want_stats_reaper (u32 client_index)
-{
- stats_main_t *sm = &stats_main;
-
- sm->enable_poller = clear_client_for_all_stats (client_index);
-
- return (NULL);
-}
-
-VL_MSG_API_REAPER_FUNCTION (want_stats_reaper);
-
-
-/*
- * Return a copy of the clients list.
- */
-vpe_client_registration_t *
-get_clients_for_stat (u32 reg, u32 item)
-{
- stats_main_t *sm = &stats_main;
- vpe_client_registration_t *client, *clients = 0;
- vpe_client_stats_registration_t *registration;
- uword *p;
-
- /* Is there anything listening for item in that reg */
- p = hash_get (sm->stats_registration_hash[reg], item);
-
- if (!p)
- return 0; // Fail
-
- /* If there is, take a copy of every client registered for it */
- registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
-
- vec_reset_length (clients);
-
- /* *INDENT-OFF* */
- pool_foreach (client, registration->clients,
- ({
-   vec_add1 (clients, *client);
- }));
- /* *INDENT-ON* */
- return clients;
-}
-
-
-static void
-clear_client_reg (u32 ** registrations)
-{
- /*
-  When registrations[x] is a vector of pool indices, here
-  is a good place to clean up the pools.
-  */
-#define stats_reg(n) vec_free(registrations[IDX_##n]);
-#include <vpp/stats/stats.reg>
-#undef stats_reg
-
- vec_free (registrations);
-}
-
-u32 **
-init_client_reg (u32 ** registrations)
-{
-
- /*
- Initialise the stats registrations for each
- type of stat a client can register for, as well as
- a vector of "interested" indexes.
- Initially this is a u32 of either sw_if_index or fib_index,
- but eventually this should migrate to a pool_index (u32)
- with a type-specific pool that can include more complex things
- such as timing and structured events.
- */
- vec_validate (registrations, STATS_REG_N_IDX);
-#define stats_reg(n) \
- vec_reset_length(registrations[IDX_##n]);
-#include <vpp/stats/stats.reg>
-#undef stats_reg
-
- /*
- When registrations[x] is a vector of pool indices, here
- is a good place to init the pools.
- */
- return registrations;
-}
-
-u32 **
-enable_all_client_reg (u32 ** registrations)
-{
-
- /*
- Enable all known stats by adding
- ~0 to the index vector. Eventually this
- should be deprecated.
- */
-#define stats_reg(n) \
- vec_add1(registrations[IDX_##n], ~0);
-#include <vpp/stats/stats.reg>
-#undef stats_reg
- return registrations;
-}
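-
-/*
- * A minimal sketch of the stats.reg X-macro pattern used above, assuming
- * stats.reg holds entries of the form
- *
- *   stats_reg (IP4_FIB_COUNTERS)
- *   stats_reg (IP6_FIB_COUNTERS)
- *
- * With the #define in init_client_reg(), the #include then expands to one
- * statement per stat type, roughly:
- *
- *   vec_reset_length (registrations[IDX_IP4_FIB_COUNTERS]);
- *   vec_reset_length (registrations[IDX_IP6_FIB_COUNTERS]);
- *
- * and enable_all_client_reg() expands the same list into vec_add1() calls
- * that append the ~0 "all items" wildcard to each index vector.
- */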
-
-static void
-do_simple_interface_counters (stats_main_t * sm)
-{
- vl_api_vnet_interface_simple_counters_t *mp = 0;
- vnet_interface_main_t *im = sm->interface_main;
- api_main_t *am = sm->api_main;
- vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
- svm_queue_t *q = shmem_hdr->vl_input_queue;
- vlib_simple_counter_main_t *cm;
- u32 items_this_message = 0;
- u64 v, *vp = 0;
- int i, n_counts;
-
- /*
- * Prevent interface registration from expanding / moving the vectors...
- * That tends never to happen, so we can hold this lock for a while.
- */
- vnet_interface_counter_lock (im);
-
- vec_foreach (cm, im->sw_if_counters)
- {
- n_counts = vlib_simple_counter_n_counters (cm);
- for (i = 0; i < n_counts; i++)
- {
- if (mp == 0)
- {
- items_this_message = clib_min (SIMPLE_COUNTER_BATCH_SIZE,
- n_counts - i);
-
- mp = vl_msg_api_alloc_as_if_client
- (sizeof (*mp) + items_this_message * sizeof (v));
- mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_SIMPLE_COUNTERS);
- mp->vnet_counter_type = cm - im->sw_if_counters;
- mp->first_sw_if_index = htonl (i);
- mp->count = 0;
- vp = (u64 *) mp->data;
- }
- v = vlib_get_simple_counter (cm, i);
- clib_mem_unaligned (vp, u64) = clib_host_to_net_u64 (v);
- vp++;
- mp->count++;
- if (mp->count == items_this_message)
- {
- mp->count = htonl (items_this_message);
- /* Send to the main thread... */
- vl_msg_api_send_shmem (q, (u8 *) & mp);
- mp = 0;
- }
- }
- ASSERT (mp == 0);
- }
- vnet_interface_counter_unlock (im);
-}
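-
-/*
- * Batching sketch for the loop above: if SIMPLE_COUNTER_BATCH_SIZE were,
- * say, 128 (illustrative value only) and a counter vector held 300
- * entries, three messages would be sent carrying 128, 128 and 44 counters,
- * with first_sw_if_index set to 0, 128 and 256 respectively.
- */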
-
-void
-handle_client_registration (vpe_client_registration_t * client, u32 stat,
- u32 item, int enable_disable)
-{
- stats_main_t *sm = &stats_main;
- vpe_client_registration_t *rp, _rp;
-
- rp = get_client_for_stat (stat, item, client->client_index);
-
- /* Disable case */
- if (enable_disable == 0)
- {
- if (!rp) // No client to disable
- {
- clib_warning ("pid %d: already disabled for stats...",
- client->client_pid);
- return;
- }
- sm->enable_poller =
- clear_client_for_stat (stat, item, client->client_index);
- return;
- }
- /* Enable case */
- if (!rp)
- {
- rp = &_rp;
- rp->client_index = client->client_index;
- rp->client_pid = client->client_pid;
- sm->enable_poller = set_client_for_stat (stat, item, rp);
- }
-}
-
-
-/**********************************
- * ALL Interface Combined stats - to be deprecated
- **********************************/
-
-/*
- * This API should be deprecated as _per_interface_ works with ~0 as sw_if_index.
- */
-static void
- vl_api_want_interface_combined_stats_t_handler
- (vl_api_want_interface_combined_stats_t * mp)
-{
- stats_main_t *sm = &stats_main;
- vpe_client_registration_t rp;
- vl_api_want_interface_combined_stats_reply_t *rmp;
- uword *p;
- i32 retval = 0;
- vl_api_registration_t *reg;
- u32 swif;
-
- swif = ~0; //Using same mechanism as _per_interface_
- rp.client_index = mp->client_index;
- rp.client_pid = mp->pid;
-
- handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
- mp->enable_disable);
-
-reply:
- reg = vl_api_client_index_to_registration (mp->client_index);
- if (!reg)
- {
- sm->enable_poller =
- clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
- mp->client_index);
- return;
- }
-
- rmp = vl_msg_api_alloc (sizeof (*rmp));
- rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_COMBINED_STATS_REPLY);
- rmp->context = mp->context;
- rmp->retval = retval;
-
- vl_api_send_msg (reg, (u8 *) rmp);
-}
-
-static void
- vl_api_vnet_interface_combined_counters_t_handler
- (vl_api_vnet_interface_combined_counters_t * mp)
-{
- vpe_client_registration_t *clients, client;
- stats_main_t *sm = &stats_main;
- vl_api_registration_t *reg, *reg_prev = NULL;
- vl_api_vnet_interface_combined_counters_t *mp_copy = NULL;
- u32 mp_size;
- int i;
-
- mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (vlib_counter_t));
-
- clients =
- get_clients_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
- ~0 /*flag for all */ );
-
- for (i = 0; i < vec_len (clients); i++)
- {
- client = clients[i];
- reg = vl_api_client_index_to_registration (client.client_index);
- if (reg)
- {
- if (reg_prev && vl_api_can_send_msg (reg_prev))
- {
- mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
- clib_memcpy (mp_copy, mp, mp_size);
- vl_api_send_msg (reg_prev, (u8 *) mp);
- mp = mp_copy;
- }
- reg_prev = reg;
- }
- }
- vec_free (clients);
-#if STATS_DEBUG > 0
- fformat (stdout, "%U\n", format_vnet_combined_counters, mp);
-#endif
-
- if (reg_prev && vl_api_can_send_msg (reg_prev))
- {
- vl_api_send_msg (reg_prev, (u8 *) mp);
- }
- else
- {
- vl_msg_api_free (mp);
- }
-}
-
-static void
-do_combined_interface_counters (stats_main_t * sm)
-{
- vl_api_vnet_interface_combined_counters_t *mp = 0;
- vnet_interface_main_t *im = sm->interface_main;
- api_main_t *am = sm->api_main;
- vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
- svm_queue_t *q = shmem_hdr->vl_input_queue;
- vlib_combined_counter_main_t *cm;
- u32 items_this_message = 0;
- vlib_counter_t v, *vp = 0;
- int i, n_counts;
-
- vnet_interface_counter_lock (im);
-
- vec_foreach (cm, im->combined_sw_if_counters)
- {
- n_counts = vlib_combined_counter_n_counters (cm);
- for (i = 0; i < n_counts; i++)
- {
- if (mp == 0)
- {
- items_this_message = clib_min (COMBINED_COUNTER_BATCH_SIZE,
- n_counts - i);
-
- mp = vl_msg_api_alloc_as_if_client
- (sizeof (*mp) + items_this_message * sizeof (v));
- mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COMBINED_COUNTERS);
- mp->vnet_counter_type = cm - im->combined_sw_if_counters;
- mp->first_sw_if_index = htonl (i);
- mp->count = 0;
- vp = (vlib_counter_t *) mp->data;
- }
- vlib_get_combined_counter (cm, i, &v);
- clib_mem_unaligned (&vp->packets, u64)
- = clib_host_to_net_u64 (v.packets);
- clib_mem_unaligned (&vp->bytes, u64) = clib_host_to_net_u64 (v.bytes);
- vp++;
- mp->count++;
- if (mp->count == items_this_message)
- {
- mp->count = htonl (items_this_message);
- /* Send to the main thread... */
- vl_msg_api_send_shmem (q, (u8 *) & mp);
- mp = 0;
- }
- }
- ASSERT (mp == 0);
- }
- vnet_interface_counter_unlock (im);
-}
-
-/**********************************
- * Per Interface Combined stats
- **********************************/
-
-/* Request from client registering interfaces it wants */
-static void
- vl_api_want_per_interface_combined_stats_t_handler
- (vl_api_want_per_interface_combined_stats_t * mp)
-{
- stats_main_t *sm = &stats_main;
- vpe_client_registration_t rp;
- vl_api_want_per_interface_combined_stats_reply_t *rmp;
- vlib_combined_counter_main_t *cm;
- uword *p;
- i32 retval = 0;
- vl_api_registration_t *reg;
- u32 i, swif, num = 0;
-
- num = ntohl (mp->num);
-
- /*
- * Validate sw_if_indexes before registering
- */
- for (i = 0; i < num; i++)
- {
- swif = ntohl (mp->sw_ifs[i]);
-
- /*
- * Check it's a real sw_if_index that the client is allowed to see
- */
- if (swif != ~0)
- {
- if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
- {
- retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
- goto reply;
- }
- }
- }
-
- for (i = 0; i < num; i++)
- {
- swif = ntohl (mp->sw_ifs[i]);
-
- rp.client_index = mp->client_index;
- rp.client_pid = mp->pid;
- handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
- swif, ntohl (mp->enable_disable));
- }
-
-reply:
- reg = vl_api_client_index_to_registration (mp->client_index);
- if (!reg)
- {
- for (i = 0; i < num; i++)
- {
- swif = ntohl (mp->sw_ifs[i]);
-
- sm->enable_poller =
- clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
- mp->client_index);
- }
- return;
- }
-
- rmp = vl_msg_api_alloc (sizeof (*rmp));
- rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_COMBINED_STATS_REPLY);
- rmp->context = mp->context;
- rmp->retval = retval;
-
- vl_api_send_msg (reg, (u8 *) rmp);
-}
-
-/* Per Interface Combined distribution to client */
-static void
-do_combined_per_interface_counters (stats_main_t * sm)
-{
- vl_api_vnet_per_interface_combined_counters_t *mp = 0;
- vnet_interface_main_t *im = sm->interface_main;
- api_main_t *am = sm->api_main;
- vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
- vl_api_registration_t *vl_reg;
- vlib_combined_counter_main_t *cm;
- vl_api_vnet_combined_counter_t *vp = 0;
- vlib_counter_t v;
- u32 i, j;
- vpe_client_stats_registration_t *reg;
- vpe_client_registration_t *client;
- u32 *sw_if_index = 0;
-
- vnet_interface_counter_lock (im);
-
- vec_reset_length (sm->regs_tmp);
-
- /* *INDENT-OFF* */
- pool_foreach (reg,
- sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS],
- ({ vec_add1 (sm->regs_tmp, reg); }));
- /* *INDENT-ON* */
-
- for (i = 0; i < vec_len (sm->regs_tmp); i++)
- {
- reg = sm->regs_tmp[i];
- if (reg->item == ~0)
- {
- vnet_interface_counter_unlock (im);
- do_combined_interface_counters (sm);
- vnet_interface_counter_lock (im);
- continue;
- }
- vec_reset_length (sm->clients_tmp);
-
- /* *INDENT-OFF* */
- pool_foreach (client, reg->clients,
- ({ vec_add1 (sm->clients_tmp, client); }));
- /* *INDENT-ON* */
-
- for (j = 0; j < vec_len (sm->clients_tmp); j++)
- {
- client = sm->clients_tmp[j];
-
- vl_reg = vl_api_client_index_to_registration (client->client_index);
-
- // Client may have disconnected abruptly; clean up so we don't poll for nothing.
- if (!vl_reg)
- {
- sm->enable_poller =
- clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
- reg->item, client->client_index);
- continue;
- }
- mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + sizeof (*vp));
- clib_memset (mp, 0, sizeof (*mp));
-
- mp->_vl_msg_id =
- ntohs (VL_API_VNET_PER_INTERFACE_COMBINED_COUNTERS);
-
- /*
- * count will eventually be used to optimise the batching of per-client
- * messages for each stat. For now we set it to 1 and iterate; this does
- * not affect the API.
- *
- * FIXME: instead of enqueueing here, this should be handed to a batch
- * storer for per-client transmission. Each "mp" sent would be a single
- * entry, and if a client is listening to other sw_if_indexes for the same
- * stat, it would be appended to that *mp.
- *
- * FIXME(s):
- * - capturing the timestamp of the counters "when VPP knew them" is
- *   important. It matters less that delivery to the control plane happens
- *   on the same timescale.
- *
- *   i.e. as long as the control plane can delta messages from VPP and work
- *   out velocity etc. based on the timestamp, it can do so in a more
- *   "batch mode".
- *
- *   It would be beneficial to keep a per-client message queue, and then
- *   batch all the stat messages for a client into one message, with
- *   discrete timestamps.
- *
- *   Given this particular API is per-interface, one assumes that the scale
- *   is less than the ~0 case, which the prior API is suited for.
- */
-
- /*
- * 1 message per api call for now
- */
- mp->count = htonl (1);
- mp->timestamp = htonl (vlib_time_now (sm->vlib_main));
-
- vp = (vl_api_vnet_combined_counter_t *) mp->data;
- vp->sw_if_index = htonl (reg->item);
-
- im = &vnet_get_main ()->interface_main;
-
-#define _(X, x) \
- cm = im->combined_sw_if_counters + X; \
- vlib_get_combined_counter (cm, reg->item, &v); \
- clib_mem_unaligned (&vp->x##_packets, u64) = \
- clib_host_to_net_u64 (v.packets); \
- clib_mem_unaligned (&vp->x##_bytes, u64) = \
- clib_host_to_net_u64 (v.bytes);
-
-
- _(VNET_INTERFACE_COUNTER_RX, rx);
- _(VNET_INTERFACE_COUNTER_TX, tx);
- _(VNET_INTERFACE_COUNTER_RX_UNICAST, rx_unicast);
- _(VNET_INTERFACE_COUNTER_TX_UNICAST, tx_unicast);
- _(VNET_INTERFACE_COUNTER_RX_MULTICAST, rx_multicast);
- _(VNET_INTERFACE_COUNTER_TX_MULTICAST, tx_multicast);
- _(VNET_INTERFACE_COUNTER_RX_BROADCAST, rx_broadcast);
- _(VNET_INTERFACE_COUNTER_TX_BROADCAST, tx_broadcast);
-
-#undef _
-
- vl_api_send_msg (vl_reg, (u8 *) mp);
- }
- }
-
- vnet_interface_counter_unlock (im);
-}
-
-/**********************************
- * Per Interface simple stats
- **********************************/
-
-/* Request from client registering interfaces it wants */
-static void
- vl_api_want_per_interface_simple_stats_t_handler
- (vl_api_want_per_interface_simple_stats_t * mp)
-{
- stats_main_t *sm = &stats_main;
- vpe_client_registration_t rp;
- vl_api_want_per_interface_simple_stats_reply_t *rmp;
- vlib_simple_counter_main_t *cm;
- uword *p;
- i32 retval = 0;
- vl_api_registration_t *reg;
- u32 i, swif, num = 0;
-
- num = ntohl (mp->num);
-
- for (i = 0; i < num; i++)
- {
- swif = ntohl (mp->sw_ifs[i]);
-
- /* Check it's a real sw_if_index that the client is allowed to see */
- if (swif != ~0)
- {
- if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
- {
- retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
- goto reply;
- }
- }
- }
-
- for (i = 0; i < num; i++)
- {
- swif = ntohl (mp->sw_ifs[i]);
-
- rp.client_index = mp->client_index;
- rp.client_pid = mp->pid;
- handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
- swif, ntohl (mp->enable_disable));
- }
-
-reply:
- reg = vl_api_client_index_to_registration (mp->client_index);
-
- /* Client may have disconnected abruptly, clean up */
- if (!reg)
- {
- for (i = 0; i < num; i++)
- {
- swif = ntohl (mp->sw_ifs[i]);
- sm->enable_poller =
- clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
- mp->client_index);
- }
-
- return;
- }
-
-
- rmp = vl_msg_api_alloc (sizeof (*rmp));
- rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_SIMPLE_STATS_REPLY);
- rmp->context = mp->context;
- rmp->retval = retval;
-
- vl_api_send_msg (reg, (u8 *) rmp);
-}
-
-/* Per Interface Simple distribution to client */
-static void
-do_simple_per_interface_counters (stats_main_t * sm)
-{
- vl_api_vnet_per_interface_simple_counters_t *mp = 0;
- vnet_interface_main_t *im = sm->interface_main;
- api_main_t *am = sm->api_main;
- vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
- vl_api_registration_t *vl_reg;
- vlib_simple_counter_main_t *cm;
- u32 i, j, size;
- vpe_client_stats_registration_t *reg;
- vpe_client_registration_t *client;
- u32 timestamp, count;
- vl_api_vnet_simple_counter_t *vp = 0;
- counter_t v;
-
- vnet_interface_counter_lock (im);
-
- vec_reset_length (sm->regs_tmp);
-
- /* *INDENT-OFF* */
- pool_foreach (reg,
- sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS],
- ({ vec_add1 (sm->regs_tmp, reg); }));
- /* *INDENT-ON* */
-
- for (i = 0; i < vec_len (sm->regs_tmp); i++)
- {
- reg = sm->regs_tmp[i];
- if (reg->item == ~0)
- {
- vnet_interface_counter_unlock (im);
- do_simple_interface_counters (sm);
- vnet_interface_counter_lock (im);
- continue;
- }
- vec_reset_length (sm->clients_tmp);
-
- /* *INDENT-OFF* */
- pool_foreach (client, reg->clients,
- ({ vec_add1 (sm->clients_tmp, client); }));
- /* *INDENT-ON* */
-
- for (j = 0; j < vec_len (sm->clients_tmp); j++)
- {
- client = sm->clients_tmp[j];
- vl_reg = vl_api_client_index_to_registration (client->client_index);
-
- /* Client may have disconnected abruptly, clean up */
- if (!vl_reg)
- {
- sm->enable_poller =
- clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
- reg->item, client->client_index);
- continue;
- }
-
- mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + sizeof (*vp));
- clib_memset (mp, 0, sizeof (*mp));
- mp->_vl_msg_id = ntohs (VL_API_VNET_PER_INTERFACE_SIMPLE_COUNTERS);
-
- /*
- * count will eventually be used to optimise the batching of per-client
- * messages for each stat. For now we set it to 1 and iterate; this does
- * not affect the API.
- *
- * FIXME: instead of enqueueing here, this should be handed to a batch
- * storer for per-client transmission. Each "mp" sent would be a single
- * entry, and if a client is listening to other sw_if_indexes for the same
- * stat, it would be appended to that *mp.
- *
- * FIXME(s):
- * - capturing the timestamp of the counters "when VPP knew them" is
- *   important. It matters less that delivery to the control plane happens
- *   on the same timescale.
- *
- *   i.e. as long as the control plane can delta messages from VPP and work
- *   out velocity etc. based on the timestamp, it can do so in a more
- *   "batch mode".
- *
- *   It would be beneficial to keep a per-client message queue, and then
- *   batch all the stat messages for a client into one message, with
- *   discrete timestamps.
- *
- *   Given this particular API is per-interface, one assumes that the scale
- *   is less than the ~0 case, which the prior API is suited for.
- */
-
- /*
- * 1 message per api call for now
- */
- mp->count = htonl (1);
- mp->timestamp = htonl (vlib_time_now (sm->vlib_main));
- vp = (vl_api_vnet_simple_counter_t *) mp->data;
-
- vp->sw_if_index = htonl (reg->item);
-
- // VNET_INTERFACE_COUNTER_DROP
- cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_DROP;
- v = vlib_get_simple_counter (cm, reg->item);
- clib_mem_unaligned (&vp->drop, u64) = clib_host_to_net_u64 (v);
-
- // VNET_INTERFACE_COUNTER_PUNT
- cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_PUNT;
- v = vlib_get_simple_counter (cm, reg->item);
- clib_mem_unaligned (&vp->punt, u64) = clib_host_to_net_u64 (v);
-
- // VNET_INTERFACE_COUNTER_IP4
- cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_IP4;
- v = vlib_get_simple_counter (cm, reg->item);
- clib_mem_unaligned (&vp->rx_ip4, u64) = clib_host_to_net_u64 (v);
-
- //VNET_INTERFACE_COUNTER_IP6
- cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_IP6;
- v = vlib_get_simple_counter (cm, reg->item);
- clib_mem_unaligned (&vp->rx_ip6, u64) = clib_host_to_net_u64 (v);
-
- //VNET_INTERFACE_COUNTER_RX_NO_BUF
- cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_NO_BUF;
- v = vlib_get_simple_counter (cm, reg->item);
- clib_mem_unaligned (&vp->rx_no_buffer, u64) =
- clib_host_to_net_u64 (v);
-
- //VNET_INTERFACE_COUNTER_RX_MISS
- cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_MISS;
- v = vlib_get_simple_counter (cm, reg->item);
- clib_mem_unaligned (&vp->rx_miss, u64) = clib_host_to_net_u64 (v);
-
- //VNET_INTERFACE_COUNTER_RX_ERROR
- cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_ERROR;
- v = vlib_get_simple_counter (cm, reg->item);
- clib_mem_unaligned (&vp->rx_error, u64) = clib_host_to_net_u64 (v);
-
- //VNET_INTERFACE_COUNTER_TX_ERROR
- cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_TX_ERROR;
- v = vlib_get_simple_counter (cm, reg->item);
- clib_mem_unaligned (&vp->tx_error, u64) = clib_host_to_net_u64 (v);
-
- //VNET_INTERFACE_COUNTER_MPLS
- cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_MPLS;
- v = vlib_get_simple_counter (cm, reg->item);
- clib_mem_unaligned (&vp->rx_mpls, u64) = clib_host_to_net_u64 (v);
-
- vl_api_send_msg (vl_reg, (u8 *) mp);
- }
- }
-
- vnet_interface_counter_unlock (im);
-}
-
-/**********************************
- * Per FIB IP4 stats
- **********************************/
-
-static void
-ip46_fib_stats_delay (stats_main_t * sm, u32 sec, u32 nsec)
-{
- struct timespec _req, *req = &_req;
- struct timespec _rem, *rem = &_rem;
-
- req->tv_sec = sec;
- req->tv_nsec = nsec;
- while (1)
- {
- if (nanosleep (req, rem) == 0)
- break;
- *req = *rem;
- if (errno == EINTR)
- continue;
- clib_unix_warning ("nanosleep");
- break;
- }
-}
-
-/**
- * @brief The context passed when collecting adjacency counters
- */
-typedef struct ip4_nbr_stats_ctx_t_
-{
- /**
- * The SW IF index all these adjs belong to
- */
- u32 sw_if_index;
-
- /**
- * A vector of ip4 nbr counters
- */
- vl_api_ip4_nbr_counter_t *counters;
-} ip4_nbr_stats_ctx_t;
-
-static adj_walk_rc_t
-ip4_nbr_stats_cb (adj_index_t ai, void *arg)
-{
- vl_api_ip4_nbr_counter_t *vl_counter;
- vlib_counter_t adj_counter;
- ip4_nbr_stats_ctx_t *ctx;
- ip_adjacency_t *adj;
-
- ctx = arg;
- vlib_get_combined_counter (&adjacency_counters, ai, &adj_counter);
-
- if (0 != adj_counter.packets)
- {
- vec_add2 (ctx->counters, vl_counter, 1);
- adj = adj_get (ai);
-
- vl_counter->packets = clib_host_to_net_u64 (adj_counter.packets);
- vl_counter->bytes = clib_host_to_net_u64 (adj_counter.bytes);
- vl_counter->address = adj->sub_type.nbr.next_hop.ip4.as_u32;
- vl_counter->link_type = adj->ia_link;
- }
- return (ADJ_WALK_RC_CONTINUE);
-}
-
-#define MIN(x,y) (((x)<(y))?(x):(y))
-
-static void
-send_and_pause (stats_main_t * sm, svm_queue_t * q, u8 * mp)
-{
- u8 pause = 0;
-
- svm_queue_lock (q);
- pause = svm_queue_is_full (q);
-
- vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
- svm_queue_unlock (q);
- dsunlock (sm);
-
- if (pause)
- ip46_fib_stats_delay (sm, 0 /* sec */ ,
- STATS_RELEASE_DELAY_NS);
-}
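-
-/*
- * Note the ordering above: the queue-full state is sampled under the queue
- * lock, the message is enqueued, the locks are dropped, and only then do we
- * sleep. The main thread can therefore drain the queue and take the
- * data-structure lock while the stats thread pauses.
- */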
-
-static void
-ip4_nbr_ship (stats_main_t * sm, ip4_nbr_stats_ctx_t * ctx)
-{
- api_main_t *am = sm->api_main;
- vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
- svm_queue_t *q = shmem_hdr->vl_input_queue;
- vl_api_vnet_ip4_nbr_counters_t *mp = 0;
- int first = 0;
-
- /*
- * If the walk context has counters, which may be left over from the last
- * suspend, then we continue from there.
- */
- while (0 != vec_len (ctx->counters))
- {
- u32 n_items = MIN (vec_len (ctx->counters),
- IP4_FIB_COUNTER_BATCH_SIZE);
- u8 pause = 0;
-
- dslock (sm, 0 /* release hint */ , 1 /* tag */ );
-
- mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
- (n_items *
- sizeof
- (vl_api_ip4_nbr_counter_t)));
- mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_NBR_COUNTERS);
- mp->count = ntohl (n_items);
- mp->sw_if_index = ntohl (ctx->sw_if_index);
- mp->begin = first;
- first = 0;
-
- /*
- * copy the counters from the back of the context, then we can easily
- * 'erase' them by resetting the vector length.
- * The order we push the stats to the caller is not important.
- */
- clib_memcpy (mp->c,
- &ctx->counters[vec_len (ctx->counters) - n_items],
- n_items * sizeof (*ctx->counters));
-
- _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
-
- /*
- * send to the shm q
- */
- send_and_pause (sm, q, (u8 *) & mp);
- }
-}
-
-static void
-do_ip4_nbr_counters (stats_main_t * sm)
-{
- vnet_main_t *vnm = vnet_get_main ();
- vnet_interface_main_t *im = &vnm->interface_main;
- vnet_sw_interface_t *si;
-
- ip4_nbr_stats_ctx_t ctx = {
- .sw_if_index = 0,
- .counters = NULL,
- };
-
- /* *INDENT-OFF* */
- pool_foreach (si, im->sw_interfaces,
- ({
- /*
- * update the interface we are now concerned with
- */
- ctx.sw_if_index = si->sw_if_index;
-
- /*
- * we are about to walk another interface, so we shouldn't have any pending
- * stats to export.
- */
- ASSERT(ctx.counters == NULL);
-
- /*
- * visit each neighbour adjacency on the interface and collect
- * its current stats.
- * Because we hold the lock the walk is synchronous, so it is safe with
- * respect to routing updates. Its work is bounded by the number of
- * adjacencies on an interface, which is typically not huge.
- */
- dslock (sm, 0 /* release hint */ , 1 /* tag */ );
- adj_nbr_walk (si->sw_if_index,
- FIB_PROTOCOL_IP4,
- ip4_nbr_stats_cb,
- &ctx);
- dsunlock (sm);
-
- /*
- * if this interface has some adjacencies with counters then ship them,
- * else continue to the next interface.
- */
- if (NULL != ctx.counters)
- {
- ip4_nbr_ship(sm, &ctx);
- }
- }));
- /* *INDENT-ON* */
-}
-
-/**
- * @brief The context passed when collecting adjacency counters
- */
-typedef struct ip6_nbr_stats_ctx_t_
-{
- /**
- * The SW IF index all these adjs belong to
- */
- u32 sw_if_index;
-
- /**
- * A vector of ip6 nbr counters
- */
- vl_api_ip6_nbr_counter_t *counters;
-} ip6_nbr_stats_ctx_t;
-
-static adj_walk_rc_t
-ip6_nbr_stats_cb (adj_index_t ai,
- void *arg)
-{
- vl_api_ip6_nbr_counter_t *vl_counter;
- vlib_counter_t adj_counter;
- ip6_nbr_stats_ctx_t *ctx;
- ip_adjacency_t *adj;
-
- ctx = arg;
- vlib_get_combined_counter(&adjacency_counters, ai, &adj_counter);
-
- if (0 != adj_counter.packets)
- {
- vec_add2(ctx->counters, vl_counter, 1);
- adj = adj_get(ai);
-
- vl_counter->packets = clib_host_to_net_u64(adj_counter.packets);
- vl_counter->bytes = clib_host_to_net_u64(adj_counter.bytes);
- vl_counter->address[0] = adj->sub_type.nbr.next_hop.ip6.as_u64[0];
- vl_counter->address[1] = adj->sub_type.nbr.next_hop.ip6.as_u64[1];
- vl_counter->link_type = adj->ia_link;
- }
- return (ADJ_WALK_RC_CONTINUE);
-}
-
-#define MIN(x,y) (((x)<(y))?(x):(y))
-
-static void
-ip6_nbr_ship (stats_main_t * sm,
- ip6_nbr_stats_ctx_t *ctx)
-{
- api_main_t *am = sm->api_main;
- vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
- svm_queue_t *q = shmem_hdr->vl_input_queue;
- vl_api_vnet_ip6_nbr_counters_t *mp = 0;
- int first = 0;
-
- /*
- * If the walk context has counters, which may be left over from the last
- * suspend, then we continue from there.
- */
- while (0 != vec_len(ctx->counters))
- {
- u32 n_items = MIN (vec_len (ctx->counters),
- IP6_FIB_COUNTER_BATCH_SIZE);
- u8 pause = 0;
-
- dslock (sm, 0 /* release hint */ , 1 /* tag */ );
-
- mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
- (n_items *
- sizeof
- (vl_api_ip6_nbr_counter_t)));
- mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_NBR_COUNTERS);
- mp->count = ntohl (n_items);
- mp->sw_if_index = ntohl (ctx->sw_if_index);
- mp->begin = first;
- first = 0;
-
- /*
- * copy the counters from the back of the context, then we can easily
- * 'erase' them by resetting the vector length.
- * The order we push the stats to the caller is not important.
- */
- clib_memcpy (mp->c,
- &ctx->counters[vec_len (ctx->counters) - n_items],
- n_items * sizeof (*ctx->counters));
-
- _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
-
- /*
- * send to the shm q
- */
- send_and_pause(sm, q, (u8 *) & mp);
- }
-}
-
-static void
-do_ip6_nbr_counters (stats_main_t * sm)
-{
- vnet_main_t *vnm = vnet_get_main ();
- vnet_interface_main_t *im = &vnm->interface_main;
- vnet_sw_interface_t *si;
-
- ip6_nbr_stats_ctx_t ctx = {
- .sw_if_index = 0,
- .counters = NULL,
- };
-
- /* *INDENT-OFF* */
- pool_foreach (si, im->sw_interfaces,
- ({
- /*
- * update the interface we are now concerned with
- */
- ctx.sw_if_index = si->sw_if_index;
-
- /*
- * we are about to walk another interface, so we shouldn't have any pending
- * stats to export.
- */
- ASSERT(ctx.counters == NULL);
-
- /*
- * visit each neighbour adjacency on the interface and collect
- * its current stats.
- * Because we hold the lock the walk is synchronous, so it is safe with
- * respect to routing updates. Its work is bounded by the number of
- * adjacencies on an interface, which is typically not huge.
- */
- dslock (sm, 0 /* release hint */ , 1 /* tag */ );
- adj_nbr_walk (si->sw_if_index,
- FIB_PROTOCOL_IP6,
- ip6_nbr_stats_cb,
- &ctx);
- dsunlock (sm);
-
- /*
- * if this interface has some adjacencies with counters then ship them,
- * else continue to the next interface.
- */
- if (NULL != ctx.counters)
- {
- ip6_nbr_ship(sm, &ctx);
- }
- }));
- /* *INDENT-ON* */
-}
-
-static void
-do_ip4_fib_counters (stats_main_t * sm)
-{
- ip4_main_t *im4 = &ip4_main;
- api_main_t *am = sm->api_main;
- vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
- svm_queue_t *q = shmem_hdr->vl_input_queue;
- ip4_route_t *r;
- fib_table_t *fib;
- ip4_fib_t *v4_fib;
- do_ip46_fibs_t *do_fibs;
- vl_api_vnet_ip4_fib_counters_t *mp = 0;
- u32 items_this_message;
- vl_api_ip4_fib_counter_t *ctrp = 0;
- u32 start_at_fib_index = 0;
- int i, j, k;
-
- do_fibs = &sm->do_ip46_fibs;
-
-again:
- vec_reset_length (do_fibs->fibs);
- /* *INDENT-OFF* */
- pool_foreach (fib, im4->fibs,
- ({vec_add1(do_fibs->fibs,fib);}));
-
- /* *INDENT-ON* */
-
- for (j = 0; j < vec_len (do_fibs->fibs); j++)
- {
- fib = do_fibs->fibs[j];
- /* We may have bailed out due to control-plane activity */
- if ((fib - im4->fibs) < start_at_fib_index)
- continue;
-
- v4_fib = pool_elt_at_index (im4->v4_fibs, fib->ft_index);
-
- if (mp == 0)
- {
- items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
- mp = vl_msg_api_alloc_as_if_client
- (sizeof (*mp) +
- items_this_message * sizeof (vl_api_ip4_fib_counter_t));
- mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
- mp->count = 0;
- mp->vrf_id = ntohl (fib->ft_table_id);
- ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
- }
- else
- {
- /* happens if the last FIB was empty... */
- ASSERT (mp->count == 0);
- mp->vrf_id = ntohl (fib->ft_table_id);
- }
-
- dslock (sm, 0 /* release hint */ , 1 /* tag */ );
-
- vec_reset_length (do_fibs->ip4routes);
- vec_reset_length (do_fibs->results);
-
- for (i = 0; i < ARRAY_LEN (v4_fib->fib_entry_by_dst_address); i++)
- {
- uword *hash = v4_fib->fib_entry_by_dst_address[i];
- hash_pair_t *p;
- ip4_route_t x;
-
- vec_reset_length (do_fibs->pvec);
-
- x.address_length = i;
-
- hash_foreach_pair (p, hash,
- ({
-   vec_add1 (do_fibs->pvec, p);
- }));
- for (k = 0; k < vec_len (do_fibs->pvec); k++)
- {
- p = do_fibs->pvec[k];
- x.address.data_u32 = p->key;
- x.index = p->value[0];
-
- vec_add1 (do_fibs->ip4routes, x);
- if (sm->data_structure_lock->release_hint)
- {
- start_at_fib_index = fib - im4->fibs;
- dsunlock (sm);
- ip46_fib_stats_delay (sm, 0 /* sec */ ,
- STATS_RELEASE_DELAY_NS);
- mp->count = 0;
- ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
- goto again;
- }
- }
- }
-
- vec_foreach (r, do_fibs->ip4routes)
- {
- vlib_counter_t c;
- const dpo_id_t *dpo_id;
- u32 index;
-
- dpo_id = fib_entry_contribute_ip_forwarding (r->index);
- index = (u32) dpo_id->dpoi_index;
-
- vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
- index, &c);
- /*
- * If it has actually
- * seen at least one packet, send it.
- */
- if (c.packets > 0)
- {
-
- /* already in net byte order */
- ctrp->address = r->address.as_u32;
- ctrp->address_length = r->address_length;
- ctrp->packets = clib_host_to_net_u64 (c.packets);
- ctrp->bytes = clib_host_to_net_u64 (c.bytes);
- mp->count++;
- ctrp++;
-
- if (mp->count == items_this_message)
- {
- mp->count = htonl (items_this_message);
- /*
- * If the main thread's input queue is stuffed,
- * drop the data structure lock (which the main thread
- * may want), and take a pause.
- */
- svm_queue_lock (q);
- if (svm_queue_is_full (q))
- {
- dsunlock (sm);
- vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
- svm_queue_unlock (q);
- mp = 0;
- ip46_fib_stats_delay (sm, 0 /* sec */ ,
- STATS_RELEASE_DELAY_NS);
- goto again;
- }
- vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
- svm_queue_unlock (q);
-
- items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
- mp = vl_msg_api_alloc_as_if_client
- (sizeof (*mp) +
- items_this_message * sizeof (vl_api_ip4_fib_counter_t));
- mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
- mp->count = 0;
- mp->vrf_id = ntohl (fib->ft_table_id);
- ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
- }
- } /* for each (mp or single) adj */
- if (sm->data_structure_lock->release_hint)
- {
- start_at_fib_index = fib - im4->fibs;
- dsunlock (sm);
- ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
- mp->count = 0;
- ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
- goto again;
- }
- } /* vec_foreach (routes) */
-
- dsunlock (sm);
-
- /* Flush any data from this fib */
- if (mp->count)
- {
- mp->count = htonl (mp->count);
- vl_msg_api_send_shmem (q, (u8 *) & mp);
- mp = 0;
- }
- }
-
- /* If e.g. the last FIB had no reportable routes, free the buffer */
- if (mp)
- vl_msg_api_free (mp);
-}
-
-static int
-mfib_table_stats_walk_cb (fib_node_index_t fei, void *ctx)
-{
- stats_main_t *sm = ctx;
- do_ip46_fibs_t *do_fibs;
- mfib_entry_t *entry;
-
- do_fibs = &sm->do_ip46_fibs;
- entry = mfib_entry_get (fei);
-
- vec_add1 (do_fibs->mroutes, entry->mfe_prefix);
-
- return (1);
-}
-
-static void
-do_ip4_mfib_counters (stats_main_t * sm)
-{
- ip4_main_t *im4 = &ip4_main;
- api_main_t *am = sm->api_main;
- vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
- svm_queue_t *q = shmem_hdr->vl_input_queue;
- mfib_prefix_t *pfx;
- mfib_table_t *mfib;
- do_ip46_fibs_t *do_fibs;
- vl_api_vnet_ip4_mfib_counters_t *mp = 0;
- u32 items_this_message;
- vl_api_ip4_mfib_counter_t *ctrp = 0;
- u32 start_at_mfib_index = 0;
- int i, j, k;
-
- do_fibs = &sm->do_ip46_fibs;
-
- vec_reset_length (do_fibs->mfibs);
- /* *INDENT-OFF* */
- pool_foreach (mfib, im4->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
- /* *INDENT-ON* */
-
- for (j = 0; j < vec_len (do_fibs->mfibs); j++)
- {
- mfib = do_fibs->mfibs[j];
- /* We may have bailed out due to control-plane activity */
- if ((mfib - im4->mfibs) < start_at_mfib_index)
- continue;
-
- if (mp == 0)
- {
- items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
- mp = vl_msg_api_alloc_as_if_client
- (sizeof (*mp) +
- items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
- mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
- mp->count = 0;
- mp->vrf_id = ntohl (mfib->mft_table_id);
- ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
- }
- else
- {
- /* happens if the last MFIB was empty... */
- ASSERT (mp->count == 0);
- mp->vrf_id = ntohl (mfib->mft_table_id);
- }
-
- vec_reset_length (do_fibs->mroutes);
-
- /*
- * walk the table with table updates blocked
- */
- dslock (sm, 0 /* release hint */ , 1 /* tag */ );
-
- mfib_table_walk (mfib->mft_index,
- FIB_PROTOCOL_IP4, mfib_table_stats_walk_cb, sm);
- dsunlock (sm);
-
- vec_foreach (pfx, do_fibs->mroutes)
- {
- const dpo_id_t *dpo_id;
- fib_node_index_t mfei;
- vlib_counter_t c;
- u32 index;
-
- /*
- * re-lookup the entry, since we suspend during the collection
- */
- mfei = mfib_table_lookup (mfib->mft_index, pfx);
-
- if (FIB_NODE_INDEX_INVALID == mfei)
- continue;
-
- dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
- index = (u32) dpo_id->dpoi_index;
-
- vlib_get_combined_counter (&replicate_main.repm_counters,
- dpo_id->dpoi_index, &c);
- /*
- * If it has seen at least one packet, send it.
- */
- if (c.packets > 0)
- {
- /* already in net byte order */
- memcpy (ctrp->group, &pfx->fp_grp_addr.ip4, 4);
- memcpy (ctrp->source, &pfx->fp_src_addr.ip4, 4);
- ctrp->group_length = pfx->fp_len;
- ctrp->packets = clib_host_to_net_u64 (c.packets);
- ctrp->bytes = clib_host_to_net_u64 (c.bytes);
- mp->count++;
- ctrp++;
-
- if (mp->count == items_this_message)
- {
- mp->count = htonl (items_this_message);
- /*
- * If the main thread's input queue is stuffed,
- * drop the data structure lock (which the main thread
- * may want), and take a pause.
- */
- svm_queue_lock (q);
-
- while (svm_queue_is_full (q))
- {
- svm_queue_unlock (q);
- ip46_fib_stats_delay (sm, 0 /* sec */ ,
- STATS_RELEASE_DELAY_NS);
- svm_queue_lock (q);
- }
- vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
- svm_queue_unlock (q);
-
- items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
- mp = vl_msg_api_alloc_as_if_client
- (sizeof (*mp) +
- items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
- mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
- mp->count = 0;
- mp->vrf_id = ntohl (mfib->mft_table_id);
- ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
- }
- }
- }
-
- /* Flush any data from this mfib */
- if (mp->count)
- {
- mp->count = htonl (mp->count);
- vl_msg_api_send_shmem (q, (u8 *) & mp);
- mp = 0;
- }
- }
-
- /* If e.g. the last FIB had no reportable routes, free the buffer */
- if (mp)
- vl_msg_api_free (mp);
-}
-
-static void
-do_ip6_mfib_counters (stats_main_t * sm)
-{
- ip6_main_t *im6 = &ip6_main;
- api_main_t *am = sm->api_main;
- vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
- svm_queue_t *q = shmem_hdr->vl_input_queue;
- mfib_prefix_t *pfx;
- mfib_table_t *mfib;
- do_ip46_fibs_t *do_fibs;
- vl_api_vnet_ip6_mfib_counters_t *mp = 0;
- u32 items_this_message;
- vl_api_ip6_mfib_counter_t *ctrp = 0;
- u32 start_at_mfib_index = 0;
- int i, j, k;
-
- do_fibs = &sm->do_ip46_fibs;
-
- vec_reset_length (do_fibs->mfibs);
- /* *INDENT-OFF* */
- pool_foreach (mfib, im6->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
- /* *INDENT-ON* */
-
- for (j = 0; j < vec_len (do_fibs->mfibs); j++)
- {
- mfib = do_fibs->mfibs[j];
- /* We may have bailed out due to control-plane activity */
- if ((mfib - im6->mfibs) < start_at_mfib_index)
- continue;
-
- if (mp == 0)
- {
- items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
- mp = vl_msg_api_alloc_as_if_client
- (sizeof (*mp) +
- items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
- mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
- mp->count = 0;
- mp->vrf_id = ntohl (mfib->mft_table_id);
- ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
- }
- else
- {
- /* happens if the last MFIB was empty... */
- ASSERT (mp->count == 0);
- mp->vrf_id = ntohl (mfib->mft_table_id);
- }
-
- vec_reset_length (do_fibs->mroutes);
-
- /*
- * walk the table with table updates blocked
- */
- dslock (sm, 0 /* release hint */ , 1 /* tag */ );
-
- mfib_table_walk (mfib->mft_index,
- FIB_PROTOCOL_IP6, mfib_table_stats_walk_cb, sm);
- dsunlock (sm);
-
- vec_foreach (pfx, do_fibs->mroutes)
- {
- const dpo_id_t *dpo_id;
- fib_node_index_t mfei;
- vlib_counter_t c;
- u32 index;
-
- /*
- * re-lookup the entry, since we suspend during the collection
- */
- mfei = mfib_table_lookup (mfib->mft_index, pfx);
-
- if (FIB_NODE_INDEX_INVALID == mfei)
- continue;
-
- dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
- index = (u32) dpo_id->dpoi_index;
-
- vlib_get_combined_counter (&replicate_main.repm_counters,
- dpo_id->dpoi_index, &c);
- /*
- * If it has seen at least one packet, send it.
- */
- if (c.packets > 0)
- {
- /* already in net byte order */
- memcpy (ctrp->group, &pfx->fp_grp_addr.ip6, 16);
- memcpy (ctrp->source, &pfx->fp_src_addr.ip6, 16);
- ctrp->group_length = pfx->fp_len;
- ctrp->packets = clib_host_to_net_u64 (c.packets);
- ctrp->bytes = clib_host_to_net_u64 (c.bytes);
- mp->count++;
- ctrp++;
-
- if (mp->count == items_this_message)
- {
- mp->count = htonl (items_this_message);
- /*
- * If the main thread's input queue is stuffed,
- * drop the data structure lock (which the main thread
- * may want), and take a pause.
- */
- svm_queue_lock (q);
-
- while (svm_queue_is_full (q))
- {
- svm_queue_unlock (q);
- ip46_fib_stats_delay (sm, 0 /* sec */ ,
- STATS_RELEASE_DELAY_NS);
- svm_queue_lock (q);
- }
- vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
- svm_queue_unlock (q);
-
- items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
- mp = vl_msg_api_alloc_as_if_client
- (sizeof (*mp) +
- items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
- mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
- mp->count = 0;
- mp->vrf_id = ntohl (mfib->mft_table_id);
- ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
- }
- }
- }
-
- /* Flush any data from this mfib */
- if (mp->count)
- {
- mp->count = htonl (mp->count);
- vl_msg_api_send_shmem (q, (u8 *) & mp);
- mp = 0;
- }
- }
-
- /* If e.g. the last FIB had no reportable routes, free the buffer */
- if (mp)
- vl_msg_api_free (mp);
-}
-
-typedef struct
-{
- u32 fib_index;
- ip6_route_t **routep;
- stats_main_t *sm;
-} add_routes_in_fib_arg_t;
-
-static void
-add_routes_in_fib (BVT (clib_bihash_kv) * kvp, void *arg)
-{
- add_routes_in_fib_arg_t *ap = arg;
- stats_main_t *sm = ap->sm;
-
- if (sm->data_structure_lock->release_hint)
- clib_longjmp (&sm->jmp_buf, 1);
-
- if (kvp->key[2] >> 32 == ap->fib_index)
- {
- ip6_address_t *addr;
- ip6_route_t *r;
- addr = (ip6_address_t *) kvp;
- vec_add2 (*ap->routep, r, 1);
- r->address = addr[0];
- r->address_length = kvp->key[2] & 0xFF;
- r->index = kvp->value;
- }
-}
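-
-/*
- * As the checks above suggest, each 24_8 bihash entry appears to pack the
- * ip6 destination address in key[0]/key[1], the fib_index in the upper 32
- * bits of key[2] and the prefix length in its low byte, while kvp->value
- * carries the load-balance index later fed to the counter lookup.
- */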
-
-static void
-do_ip6_fib_counters (stats_main_t * sm)
-{
- ip6_main_t *im6 = &ip6_main;
- api_main_t *am = sm->api_main;
- vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
- svm_queue_t *q = shmem_hdr->vl_input_queue;
- ip6_route_t *r;
- fib_table_t *fib;
- do_ip46_fibs_t *do_fibs;
- vl_api_vnet_ip6_fib_counters_t *mp = 0;
- u32 items_this_message;
- vl_api_ip6_fib_counter_t *ctrp = 0;
- u32 start_at_fib_index = 0;
- clib_bihash_24_8_t *h = &im6->ip6_table[IP6_FIB_TABLE_FWDING].ip6_hash;
- add_routes_in_fib_arg_t _a, *a = &_a;
- int i;
-
- do_fibs = &sm->do_ip46_fibs;
-again:
- vec_reset_length (do_fibs->fibs);
- /* *INDENT-OFF* */
- pool_foreach (fib, im6->fibs,
- ({vec_add1(do_fibs->fibs,fib);}));
- /* *INDENT-ON* */
-
-
- for (i = 0; i < vec_len (do_fibs->fibs); i++)
- {
- fib = do_fibs->fibs[i];
- /* We may have bailed out due to control-plane activity */
- if ((fib - im6->fibs) < start_at_fib_index)
- continue;
-
- if (mp == 0)
- {
- items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
- mp = vl_msg_api_alloc_as_if_client
- (sizeof (*mp) +
- items_this_message * sizeof (vl_api_ip6_fib_counter_t));
- mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
- mp->count = 0;
- mp->vrf_id = ntohl (fib->ft_table_id);
- ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
- }
-
- dslock (sm, 0 /* release hint */ , 1 /* tag */ );
-
- vec_reset_length (do_fibs->ip6routes);
- vec_reset_length (do_fibs->results);
-
- a->fib_index = fib - im6->fibs;
- a->routep = &do_fibs->ip6routes;
- a->sm = sm;
-
- if (clib_setjmp (&sm->jmp_buf, 0) == 0)
- {
- start_at_fib_index = fib - im6->fibs;
- clib_bihash_foreach_key_value_pair_24_8 (h, add_routes_in_fib, a);
- }
- else
- {
- dsunlock (sm);
- ip46_fib_stats_delay (sm, 0 /* sec */ ,
- STATS_RELEASE_DELAY_NS);
- mp->count = 0;
- ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
- goto again;
- }
-
- vec_foreach (r, do_fibs->ip6routes)
- {
- vlib_counter_t c;
-
- vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
- r->index, &c);
- /*
- * If it has actually
- * seen at least one packet, send it.
- */
- if (c.packets > 0)
- {
- /* already in net byte order */
- ctrp->address[0] = r->address.as_u64[0];
- ctrp->address[1] = r->address.as_u64[1];
- ctrp->address_length = (u8) r->address_length;
- ctrp->packets = clib_host_to_net_u64 (c.packets);
- ctrp->bytes = clib_host_to_net_u64 (c.bytes);
- mp->count++;
- ctrp++;
-
- if (mp->count == items_this_message)
- {
- mp->count = htonl (items_this_message);
- /*
- * If the main thread's input queue is stuffed,
- * drop the data structure lock (which the main thread
- * may want), and take a pause.
- */
- svm_queue_lock (q);
- if (svm_queue_is_full (q))
- {
- dsunlock (sm);
- vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
- svm_queue_unlock (q);
- mp = 0;
- ip46_fib_stats_delay (sm, 0 /* sec */ ,
- STATS_RELEASE_DELAY_NS);
- goto again;
- }
- vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
- svm_queue_unlock (q);
-
- items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
- mp = vl_msg_api_alloc_as_if_client
- (sizeof (*mp) +
- items_this_message * sizeof (vl_api_ip6_fib_counter_t));
- mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
- mp->count = 0;
- mp->vrf_id = ntohl (fib->ft_table_id);
- ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
- }
- }
-
- if (sm->data_structure_lock->release_hint)
- {
- start_at_fib_index = fib - im6->fibs;
- dsunlock (sm);
- ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
- mp->count = 0;
- ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
- goto again;
- }
- } /* vec_foreach (routes) */
-
- dsunlock (sm);
-
- /* Flush any data from this fib */
- if (mp->count)
- {
- mp->count = htonl (mp->count);
- vl_msg_api_send_shmem (q, (u8 *) & mp);
- mp = 0;
- }
- }
-
- /* If e.g. the last FIB had no reportable routes, free the buffer */
- if (mp)
- vl_msg_api_free (mp);
-}
-
-typedef struct udp_encap_stats_walk_t_
-{
- vl_api_udp_encap_counter_t *stats;
-} udp_encap_stats_walk_t;
-
-static walk_rc_t
-udp_encap_stats_walk_cb (index_t uei, void *arg)
-{
- udp_encap_stats_walk_t *ctx = arg;
- vl_api_udp_encap_counter_t *stat;
- udp_encap_t *ue;
-
- vec_add2 (ctx->stats, stat, 1);
-
- udp_encap_get_stats (uei, &stat->packets, &stat->bytes);
-
- return (WALK_CONTINUE);
-}
-
-static void
-udp_encap_ship (udp_encap_stats_walk_t * ctx)
-{
- vl_api_vnet_udp_encap_counters_t *mp;
- vl_shmem_hdr_t *shmem_hdr;
- stats_main_t *sm;
- api_main_t *am;
- svm_queue_t *q;
-
- mp = NULL;
- sm = &stats_main;
- am = sm->api_main;
- shmem_hdr = am->shmem_hdr;
- q = shmem_hdr->vl_input_queue;
-
- /*
- * If the walk context has counters, which may be left over from the last
- * suspend, then we continue from there.
- */
- while (0 != vec_len (ctx->stats))
- {
- u32 n_items = MIN (vec_len (ctx->stats),
- UDP_ENCAP_COUNTER_BATCH_SIZE);
- u8 pause = 0;
-
- dslock (sm, 0 /* release hint */ , 1 /* tag */ );
-
- mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
- (n_items *
- sizeof
- (vl_api_udp_encap_counter_t)));
- mp->_vl_msg_id = ntohs (VL_API_VNET_UDP_ENCAP_COUNTERS);
- mp->count = ntohl (n_items);
-
- /*
- * copy the counters from the back of the context, then we can easily
- * 'erase' them by resetting the vector length.
- * The order we push the stats to the caller is not important.
- */
- clib_memcpy (mp->c,
- &ctx->stats[vec_len (ctx->stats) - n_items],
- n_items * sizeof (*ctx->stats));
-
- _vec_len (ctx->stats) = vec_len (ctx->stats) - n_items;
-
- /*
- * send to the shm q
- */
- send_and_pause (sm, q, (u8 *) & mp);
- }
-}
-
-static void
-do_udp_encap_counters (stats_main_t * sm)
-{
- vl_api_udp_encap_counter_t *stat;
-
- udp_encap_stats_walk_t ctx = {
- .stats = NULL,
- };
-
- dslock (sm, 0 /* release hint */ , 1 /* tag */ );
- udp_encap_walk (udp_encap_stats_walk_cb, &ctx);
- dsunlock (sm);
-
- udp_encap_ship (&ctx);
-}
-
-typedef struct bier_neighbor_stats_walk_t_
-{
- vl_api_bier_neighbor_counter_t *stats;
-} bier_neighbor_stats_walk_t;
-
-static walk_rc_t
-bier_neighbor_stats_walk_cb (index_t bfmi, void *arg)
-{
- bier_neighbor_stats_walk_t *ctx = arg;
- vl_api_bier_neighbor_counter_t *stat;
- fib_route_path_encode_t rpath;
- bier_table_id_t btid;
-
- vec_add2 (ctx->stats, stat, 1);
-
- bier_fmask_encode (bfmi, &btid, &rpath);
-
- stat->tbl_id.bt_set = btid.bti_set;
- stat->tbl_id.bt_sub_domain = btid.bti_sub_domain;
- stat->tbl_id.bt_hdr_len_id = btid.bti_hdr_len;
- fib_api_path_encode (&rpath, &stat->path);
- bier_fmask_get_stats (bfmi, &stat->packets, &stat->bytes);
-
- return (WALK_CONTINUE);
-}
-
-static void
-bier_neighbor_ship (bier_neighbor_stats_walk_t * ctx)
-{
- vl_api_vnet_bier_neighbor_counters_t *mp;
- vl_shmem_hdr_t *shmem_hdr;
- stats_main_t *sm;
- api_main_t *am;
- svm_queue_t *q;
-
- mp = NULL;
- sm = &stats_main;
- am = sm->api_main;
- shmem_hdr = am->shmem_hdr;
- q = shmem_hdr->vl_input_queue;
-
- /*
- * If the walk context has counters, which may be left over from the last
- * suspend, then we continue from there.
- */
- while (0 != vec_len (ctx->stats))
- {
- u32 n_items = MIN (vec_len (ctx->stats),
- BIER_NEIGHBOR_COUNTER_BATCH_SIZE);
- u8 pause = 0;
-
- dslock (sm, 0 /* release hint */ , 1 /* tag */ );
-
- mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
- (n_items *
- sizeof
- (vl_api_bier_neighbor_counter_t)));
- mp->_vl_msg_id = ntohs (VL_API_VNET_BIER_NEIGHBOR_COUNTERS);
- mp->count = ntohl (n_items);
-
- /*
- * copy the counters from the back of the context, then we can easily
- * 'erase' them by resetting the vector length.
- * The order we push the stats to the caller is not important.
- */
- clib_memcpy (mp->c,
- &ctx->stats[vec_len (ctx->stats) - n_items],
- n_items * sizeof (*ctx->stats));
-
- _vec_len (ctx->stats) = vec_len (ctx->stats) - n_items;
-
- /*
- * send to the shm q
- */
- send_and_pause (sm, q, (u8 *) & mp);
- }
-}
-
-static void
-do_bier_neighbor_counters (stats_main_t * sm)
-{
- vl_api_bier_neighbor_counter_t *stat;
-
- bier_neighbor_stats_walk_t ctx = {
- .stats = NULL,
- };
-
- dslock (sm, 0 /* release hint */ , 1 /* tag */ );
- bier_fmask_db_walk (bier_neighbor_stats_walk_cb, &ctx);
- dsunlock (sm);
-
- bier_neighbor_ship (&ctx);
-}
-
-int
-stats_set_poller_delay (u32 poller_delay_sec)
-{
- stats_main_t *sm = &stats_main;
- if (!poller_delay_sec)
- {
- return VNET_API_ERROR_INVALID_ARGUMENT;
- }
- else
- {
- sm->stats_poll_interval_in_seconds = poller_delay_sec;
- return 0;
- }
-}
-
-static clib_error_t *
-stats_config (vlib_main_t * vm, unformat_input_t * input)
-{
- stats_main_t *sm = &stats_main;
- u32 sec;
-
- while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
- {
- if (unformat (input, "interval %u", &sec))
- {
- int rv = stats_set_poller_delay (sec);
- if (rv)
- {
- return clib_error_return (0,
- "`stats_set_poller_delay' API call failed, rv=%d:%U",
- (int) rv, format_vnet_api_errno, rv);
- }
- }
- else
- {
- return clib_error_return (0, "unknown input '%U'",
- format_unformat_error, input);
- }
- }
-
- return 0;
-}
-
-/* stats { ... } configuration. */
-/*?
- *
- * @cfgcmd{interval, &lt;seconds&gt;}
- * Configure stats poller delay to be @c seconds.
- *
-?*/
-VLIB_CONFIG_FUNCTION (stats_config, "stats");
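-
-/*
- * For example, a startup.conf stanza such as
- *
- *   stats {
- *     interval 10
- *   }
- *
- * is parsed by stats_config() above and sets the poller delay to 10
- * seconds via stats_set_poller_delay().
- */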
-
-static void
- vl_api_stats_get_poller_delay_t_handler
- (vl_api_stats_get_poller_delay_t * mp)
-{
- stats_main_t *sm = &stats_main;
- vl_api_registration_t *reg;
- reg = vl_api_client_index_to_registration (mp->client_index);
- if (!reg)
- return;
- vl_api_stats_get_poller_delay_reply_t *rmp;
-
- rmp = vl_msg_api_alloc (sizeof (*rmp));
- rmp->_vl_msg_id = ntohs (VL_API_STATS_GET_POLLER_DELAY_REPLY);
- rmp->context = mp->context;
- rmp->retval = 0;
- rmp->delay = clib_host_to_net_u32 (sm->stats_poll_interval_in_seconds);
-
- vl_api_send_msg (reg, (u8 *) rmp);
-
-}
-
-static void
-stats_thread_fn (void *arg)
-{
- stats_main_t *sm = &stats_main;
- vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
- vlib_thread_main_t *tm = vlib_get_thread_main ();
-
- /* stats thread wants no signals. */
- {
- sigset_t s;
- sigfillset (&s);
- pthread_sigmask (SIG_SETMASK, &s, 0);
- }
-
- clib_mem_set_heap (w->thread_mheap);
-
- if (vec_len (tm->thread_prefix))
- vlib_set_thread_name ((char *)
- format (0, "%v_stats%c", tm->thread_prefix, '\0'));
-
- while (1)
- {
- ip46_fib_stats_delay (sm, sm->stats_poll_interval_in_seconds,
- 0 /* nsec */ );
-
- if (!(sm->enable_poller))
- continue;
-
- if (pool_elts
- (sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS]))
- do_combined_per_interface_counters (sm);
-
- if (pool_elts
- (sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS]))
- do_simple_per_interface_counters (sm);
-
- if (pool_elts (sm->stats_registrations[IDX_IP4_FIB_COUNTERS]))
- do_ip4_fib_counters (sm);
-
- if (pool_elts (sm->stats_registrations[IDX_IP6_FIB_COUNTERS]))
- do_ip6_fib_counters (sm);
-
- if (pool_elts (sm->stats_registrations[IDX_IP4_MFIB_COUNTERS]))
- do_ip4_mfib_counters (sm);
-
- if (pool_elts (sm->stats_registrations[IDX_IP6_MFIB_COUNTERS]))
- do_ip6_mfib_counters (sm);
-
- if (pool_elts (sm->stats_registrations[IDX_IP4_NBR_COUNTERS]))
- do_ip4_nbr_counters (sm);
-
- if (pool_elts (sm->stats_registrations[IDX_IP6_NBR_COUNTERS]))
- do_ip6_nbr_counters (sm);
-
- if (pool_elts (sm->stats_registrations[IDX_BIER_NEIGHBOR_COUNTERS]))
- do_bier_neighbor_counters (sm);
- }
-}
-
-static void
- vl_api_vnet_interface_simple_counters_t_handler
- (vl_api_vnet_interface_simple_counters_t * mp)
-{
- vpe_client_registration_t *clients, client;
- stats_main_t *sm = &stats_main;
- vl_api_registration_t *reg, *reg_prev = NULL;
- vl_api_vnet_interface_simple_counters_t *mp_copy = NULL;
- u32 mp_size;
- int i;
-
- mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (u64));
-
- clients =
- get_clients_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
- ~0 /*flag for all */ );
-
- for (i = 0; i < vec_len (clients); i++)
- {
- client = clients[i];
- reg = vl_api_client_index_to_registration (client.client_index);
- if (reg)
- {
- if (reg_prev && vl_api_can_send_msg (reg_prev))
- {
- mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
- clib_memcpy (mp_copy, mp, mp_size);
- vl_api_send_msg (reg_prev, (u8 *) mp);
- mp = mp_copy;
- }
- reg_prev = reg;
- }
- else
- {
- sm->enable_poller =
- clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, ~0,
- client.client_index);
- continue;
- }
- }
- vec_free (clients);
-
-#if STATS_DEBUG > 0
- fformat (stdout, "%U\n", format_vnet_simple_counters, mp);
-#endif
-
- if (reg_prev && vl_api_can_send_msg (reg_prev))
- {
- vl_api_send_msg (reg_prev, (u8 *) mp);
- }
- else
- {
- vl_msg_api_free (mp);
- }
-}
-
-static void
-vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
-{
- stats_main_t *sm = &stats_main;
- vl_api_registration_t *reg, *reg_prev = NULL;
- vl_api_vnet_ip4_fib_counters_t *mp_copy = NULL;
- u32 mp_size;
- vpe_client_registration_t *clients, client;
- int i;
-
- mp_size = sizeof (*mp_copy) +
- ntohl (mp->count) * sizeof (vl_api_ip4_fib_counter_t);
-
- clients =
- get_clients_for_stat (IDX_IP4_FIB_COUNTERS, ~0 /*flag for all */ );
-
- for (i = 0; i < vec_len (clients); i++)
- {
- client = clients[i];
- reg = vl_api_client_index_to_registration (client.client_index);
- if (reg)
- {
- if (reg_prev && vl_api_can_send_msg (reg_prev))
- {
- mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
- clib_memcpy (mp_copy, mp, mp_size);
- vl_api_send_msg (reg_prev, (u8 *) mp);
- mp = mp_copy;
- }
- reg_prev = reg;
- }
- else
- {
- sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
- ~0, client.client_index);
- continue;
- }
- }
- vec_free (clients);
-
- if (reg_prev && vl_api_can_send_msg (reg_prev))
- {
- vl_api_send_msg (reg_prev, (u8 *) mp);
- }
- else
- {
- vl_msg_api_free (mp);
- }
-}
-
-static void
-vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp)
-{
- stats_main_t *sm = &stats_main;
- vl_api_registration_t *reg, *reg_prev = NULL;
- vl_api_vnet_ip4_nbr_counters_t *mp_copy = NULL;
- u32 mp_size;
- vpe_client_registration_t *clients, client;
- int i;
-
- mp_size = sizeof (*mp_copy) +
- ntohl (mp->count) * sizeof (vl_api_ip4_nbr_counter_t);
-
- clients =
- get_clients_for_stat (IDX_IP4_NBR_COUNTERS, ~0 /*flag for all */ );
-
- for (i = 0; i < vec_len (clients); i++)
- {
- client = clients[i];
- reg = vl_api_client_index_to_registration (client.client_index);
- if (reg)
- {
- if (reg_prev && vl_api_can_send_msg (reg_prev))
- {
- mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
- clib_memcpy (mp_copy, mp, mp_size);
- vl_api_send_msg (reg_prev, (u8 *) mp);
- mp = mp_copy;
- }
- reg_prev = reg;
- }
- else
- {
- sm->enable_poller = clear_client_for_stat (IDX_IP4_NBR_COUNTERS,
- ~0, client.client_index);
- continue;
- }
- }
- vec_free (clients);
-
- /* *INDENT-ON* */
- if (reg_prev && vl_api_can_send_msg (reg_prev))
- {
- vl_api_send_msg (reg_prev, (u8 *) mp);
- }
- else
- {
- vl_msg_api_free (mp);
- }
-}
-
-static void
-vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
-{
- stats_main_t *sm = &stats_main;
- vl_api_registration_t *reg, *reg_prev = NULL;
- vl_api_vnet_ip6_fib_counters_t *mp_copy = NULL;
- u32 mp_size;
- vpe_client_registration_t *clients, client;
- int i;
-
- mp_size = sizeof (*mp_copy) +
- ntohl (mp->count) * sizeof (vl_api_ip6_fib_counter_t);
-
- clients =
- get_clients_for_stat (IDX_IP6_FIB_COUNTERS, ~0 /*flag for all */ );
-
- for (i = 0; i < vec_len (clients); i++)
- {
- client = clients[i];
- reg = vl_api_client_index_to_registration (client.client_index);
- if (reg)
- {
- if (reg_prev && vl_api_can_send_msg (reg_prev))
- {
- mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
- clib_memcpy (mp_copy, mp, mp_size);
- vl_api_send_msg (reg_prev, (u8 *) mp);
- mp = mp_copy;
- }
- reg_prev = reg;
- }
- else
- {
- sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
- ~0, client.client_index);
- continue;
- }
- }
- vec_free (clients);
-
- /* *INDENT-ON* */
- if (reg_prev && vl_api_can_send_msg (reg_prev))
- {
- vl_api_send_msg (reg_prev, (u8 *) mp);
- }
- else
- {
- vl_msg_api_free (mp);
- }
-}
-
-static void
-vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp)
-{
- stats_main_t *sm = &stats_main;
- vl_api_registration_t *reg, *reg_prev = NULL;
- vl_api_vnet_ip6_nbr_counters_t *mp_copy = NULL;
- u32 mp_size;
- vpe_client_registration_t *clients, client;
- int i;
-
- mp_size = sizeof (*mp_copy) +
- ntohl (mp->count) * sizeof (vl_api_ip6_nbr_counter_t);
-
- clients =
- get_clients_for_stat (IDX_IP6_NBR_COUNTERS, ~0 /*flag for all */ );
-
- for (i = 0; i < vec_len (clients); i++)
- {
- client = clients[i];
- reg = vl_api_client_index_to_registration (client.client_index);
- if (reg)
- {
- if (reg_prev && vl_api_can_send_msg (reg_prev))
- {
- mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
- clib_memcpy (mp_copy, mp, mp_size);
- vl_api_send_msg (reg_prev, (u8 *) mp);
- mp = mp_copy;
- }
- reg_prev = reg;
- }
- else
- {
- sm->enable_poller = clear_client_for_stat (IDX_IP6_NBR_COUNTERS,
- ~0, client.client_index);
- continue;
- }
- }
- vec_free (clients);
-
- /* *INDENT-ON* */
- if (reg_prev && vl_api_can_send_msg (reg_prev))
- {
- vl_api_send_msg (reg_prev, (u8 *) mp);
- }
- else
- {
- vl_msg_api_free (mp);
- }
-}
-
-static void
-vl_api_want_udp_encap_stats_t_handler (vl_api_want_udp_encap_stats_t * mp)
-{
- stats_main_t *sm = &stats_main;
- vpe_client_registration_t rp;
- vl_api_want_udp_encap_stats_reply_t *rmp;
- uword *p;
- i32 retval = 0;
- vl_api_registration_t *reg;
- u32 fib;
-
- fib = ~0; //Using same mechanism as _per_interface_
- rp.client_index = mp->client_index;
- rp.client_pid = mp->pid;
-
- handle_client_registration (&rp, IDX_UDP_ENCAP_COUNTERS, fib, mp->enable);
-
-reply:
- reg = vl_api_client_index_to_registration (mp->client_index);
-
- if (!reg)
- {
- sm->enable_poller = clear_client_for_stat (IDX_UDP_ENCAP_COUNTERS,
- fib, mp->client_index);
- return;
- }
-
- rmp = vl_msg_api_alloc (sizeof (*rmp));
- rmp->_vl_msg_id = ntohs (VL_API_WANT_UDP_ENCAP_STATS_REPLY);
- rmp->context = mp->context;
- rmp->retval = retval;
-
- vl_api_send_msg (reg, (u8 *) rmp);
-}
-
-static void
-vl_api_want_bier_neighbor_stats_t_handler (vl_api_want_bier_neighbor_stats_t *
- mp)
-{
- stats_main_t *sm = &stats_main;
- vpe_client_registration_t rp;
- vl_api_want_bier_neighbor_stats_reply_t *rmp;
- uword *p;
- i32 retval = 0;
- vl_api_registration_t *reg;
- u32 fib;
-
- fib = ~0; //Using same mechanism as _per_interface_
- rp.client_index = mp->client_index;
- rp.client_pid = mp->pid;
-
- handle_client_registration (&rp, IDX_BIER_NEIGHBOR_COUNTERS, fib,
- mp->enable);
-
-reply:
- reg = vl_api_client_index_to_registration (mp->client_index);
-
- if (!reg)
- {
- sm->enable_poller = clear_client_for_stat (IDX_BIER_NEIGHBOR_COUNTERS,
- fib, mp->client_index);
- return;
- }
-
- rmp = vl_msg_api_alloc (sizeof (*rmp));
- rmp->_vl_msg_id = ntohs (VL_API_WANT_BIER_NEIGHBOR_STATS_REPLY);
- rmp->context = mp->context;
- rmp->retval = retval;
-
- vl_api_send_msg (reg, (u8 *) rmp);
-}
-
-static void
-vl_api_want_stats_t_handler (vl_api_want_stats_t * mp)
-{
- stats_main_t *sm = &stats_main;
- vpe_client_registration_t rp;
- vl_api_want_stats_reply_t *rmp;
- uword *p;
- i32 retval = 0;
- u32 item;
- vl_api_registration_t *reg;
-
- item = ~0; //~0 => register for ALL interfaces and FIBs
- rp.client_index = mp->client_index;
- rp.client_pid = mp->pid;
-
- handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
- item, mp->enable_disable);
-
- handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
- item, mp->enable_disable);
-
- handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS,
- item, mp->enable_disable);
-
- handle_client_registration (&rp, IDX_IP4_NBR_COUNTERS,
- item, mp->enable_disable);
-
- handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS,
- item, mp->enable_disable);
-
- handle_client_registration (&rp, IDX_IP6_NBR_COUNTERS,
- item, mp->enable_disable);
-
-reply:
- reg = vl_api_client_index_to_registration (mp->client_index);
- if (!reg)
- return;
-
- rmp = vl_msg_api_alloc (sizeof (*rmp));
- rmp->_vl_msg_id = ntohs (VL_API_WANT_STATS_REPLY);
- rmp->context = mp->context;
- rmp->retval = retval;
-
- vl_api_send_msg (reg, (u8 *) rmp);
-}
-
-static void
- vl_api_want_interface_simple_stats_t_handler
- (vl_api_want_interface_simple_stats_t * mp)
-{
- stats_main_t *sm = &stats_main;
- vpe_client_registration_t rp;
- vl_api_want_interface_simple_stats_reply_t *rmp;
- uword *p;
- i32 retval = 0;
- u32 swif;
- vl_api_registration_t *reg;
-
- swif = ~0; //Using same mechanism as _per_interface_
- rp.client_index = mp->client_index;
- rp.client_pid = mp->pid;
-
- handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
- mp->enable_disable);
-
-reply:
- reg = vl_api_client_index_to_registration (mp->client_index);
-
- if (!reg)
- {
- sm->enable_poller =
- clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
- mp->client_index);
- return;
- }
-
- rmp = vl_msg_api_alloc (sizeof (*rmp));
- rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_SIMPLE_STATS_REPLY);
- rmp->context = mp->context;
- rmp->retval = retval;
-
- vl_api_send_msg (reg, (u8 *) rmp);
-}
-
-
-static void
-vl_api_want_ip4_fib_stats_t_handler (vl_api_want_ip4_fib_stats_t * mp)
-{
- stats_main_t *sm = &stats_main;
- vpe_client_registration_t rp;
- vl_api_want_ip4_fib_stats_reply_t *rmp;
- uword *p;
- i32 retval = 0;
- vl_api_registration_t *reg;
- u32 fib;
-
- fib = ~0; //Using same mechanism as _per_interface_
- rp.client_index = mp->client_index;
- rp.client_pid = mp->pid;
-
- handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS, fib,
- mp->enable_disable);
-
-reply:
- reg = vl_api_client_index_to_registration (mp->client_index);
-
- if (!reg)
- {
- sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
- fib, mp->client_index);
- return;
- }
-
- rmp = vl_msg_api_alloc (sizeof (*rmp));
- rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_FIB_STATS_REPLY);
- rmp->context = mp->context;
- rmp->retval = retval;
-
- vl_api_send_msg (reg, (u8 *) rmp);
-}
-
-static void
-vl_api_want_ip4_mfib_stats_t_handler (vl_api_want_ip4_mfib_stats_t * mp)
-{
- stats_main_t *sm = &stats_main;
- vpe_client_registration_t rp;
- vl_api_want_ip4_mfib_stats_reply_t *rmp;
- uword *p;
- i32 retval = 0;
- vl_api_registration_t *reg;
- u32 mfib;
-
- mfib = ~0; //Using same mechanism as _per_interface_
- rp.client_index = mp->client_index;
- rp.client_pid = mp->pid;
-
- handle_client_registration (&rp, IDX_IP4_MFIB_COUNTERS, mfib,
- mp->enable_disable);
-
-reply:
- reg = vl_api_client_index_to_registration (mp->client_index);
- if (!reg)
- {
- sm->enable_poller = clear_client_for_stat (IDX_IP4_MFIB_COUNTERS,
- mfib, mp->client_index);
- return;
- }
-
- rmp = vl_msg_api_alloc (sizeof (*rmp));
- rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_MFIB_STATS_REPLY);
- rmp->context = mp->context;
- rmp->retval = retval;
-
- vl_api_send_msg (reg, (u8 *) rmp);
-}
-
-static void
-vl_api_want_ip6_fib_stats_t_handler (vl_api_want_ip6_fib_stats_t * mp)
-{
- stats_main_t *sm = &stats_main;
- vpe_client_registration_t rp;
- vl_api_want_ip6_fib_stats_reply_t *rmp;
- uword *p;
- i32 retval = 0;
- vl_api_registration_t *reg;
- u32 fib;
-
- fib = ~0; //Using same mechanism as _per_interface_
- rp.client_index = mp->client_index;
- rp.client_pid = mp->pid;
-
- handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS, fib,
- mp->enable_disable);
-
-reply:
- reg = vl_api_client_index_to_registration (mp->client_index);
- if (!reg)
- {
- sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
- fib, mp->client_index);
- return;
- }
-
- rmp = vl_msg_api_alloc (sizeof (*rmp));
- rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_FIB_STATS_REPLY);
- rmp->context = mp->context;
- rmp->retval = retval;
-
- vl_api_send_msg (reg, (u8 *) rmp);
-}
-
-static void
-vl_api_want_ip6_mfib_stats_t_handler (vl_api_want_ip6_mfib_stats_t * mp)
-{
- stats_main_t *sm = &stats_main;
- vpe_client_registration_t rp;
- vl_api_want_ip6_mfib_stats_reply_t *rmp;
- uword *p;
- i32 retval = 0;
- vl_api_registration_t *reg;
- u32 mfib;
-
- mfib = ~0; //Using same mechanism as _per_interface_
- rp.client_index = mp->client_index;
- rp.client_pid = mp->pid;
-
- handle_client_registration (&rp, IDX_IP6_MFIB_COUNTERS, mfib,
- mp->enable_disable);
-
-reply:
- reg = vl_api_client_index_to_registration (mp->client_index);
- if (!reg)
- {
- sm->enable_poller = clear_client_for_stat (IDX_IP6_MFIB_COUNTERS,
- mfib, mp->client_index);
- return;
- }
-
- rmp = vl_msg_api_alloc (sizeof (*rmp));
- rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_MFIB_STATS_REPLY);
- rmp->context = mp->context;
- rmp->retval = retval;
-
- vl_api_send_msg (reg, (u8 *) rmp);
-}
-
-/* FIXME - NBR stats broken - this will be fixed in subsequent patch */
-static void
-vl_api_want_ip4_nbr_stats_t_handler (vl_api_want_ip4_nbr_stats_t * mp)
-{
-}
-
-static void
-vl_api_want_ip6_nbr_stats_t_handler (vl_api_want_ip6_nbr_stats_t * mp)
-{
-}
-
-static void
-vl_api_vnet_get_summary_stats_t_handler (vl_api_vnet_get_summary_stats_t * mp)
-{
- stats_main_t *sm = &stats_main;
- vnet_interface_main_t *im = sm->interface_main;
- vl_api_vnet_get_summary_stats_reply_t *rmp;
- vlib_combined_counter_main_t *cm;
- vlib_counter_t v;
- vnet_interface_counter_type_t ct;
- int i, which;
- u64 total_pkts[VNET_N_COMBINED_INTERFACE_COUNTER];
- u64 total_bytes[VNET_N_COMBINED_INTERFACE_COUNTER];
- vl_api_registration_t *reg;
-
- reg = vl_api_client_index_to_registration (mp->client_index);
- if (!reg)
- return;
-
- rmp = vl_msg_api_alloc (sizeof (*rmp));
- rmp->_vl_msg_id = ntohs (VL_API_VNET_GET_SUMMARY_STATS_REPLY);
- rmp->context = mp->context;
- rmp->retval = 0;
-
- clib_memset (total_pkts, 0, sizeof (total_pkts));
- clib_memset (total_bytes, 0, sizeof (total_bytes));
-
- vnet_interface_counter_lock (im);
-
- vec_foreach (cm, im->combined_sw_if_counters)
- {
- which = cm - im->combined_sw_if_counters;
-
- for (i = 0; i < vlib_combined_counter_n_counters (cm); i++)
- {
- vlib_get_combined_counter (cm, i, &v);
- total_pkts[which] += v.packets;
- total_bytes[which] += v.bytes;
- }
- }
- vnet_interface_counter_unlock (im);
-
- foreach_rx_combined_interface_counter (ct)
- {
- rmp->total_pkts[ct] = clib_host_to_net_u64 (total_pkts[ct]);
- rmp->total_bytes[ct] = clib_host_to_net_u64 (total_bytes[ct]);
- }
-
- foreach_tx_combined_interface_counter (ct)
- {
- rmp->total_pkts[ct] = clib_host_to_net_u64 (total_pkts[ct]);
- rmp->total_bytes[ct] = clib_host_to_net_u64 (total_bytes[ct]);
- }
- rmp->vector_rate =
- clib_host_to_net_u64 (vlib_last_vector_length_per_node (sm->vlib_main));
-
- vl_api_send_msg (reg, (u8 *) rmp);
-}
-
-int
-stats_memclnt_delete_callback (u32 client_index)
-{
- vpe_client_stats_registration_t *rp;
- stats_main_t *sm = &stats_main;
- uword *p;
-
- // FIXME
- /* p = hash_get (sm->stats_registration_hash, client_index); */
- /* if (p) */
- /* { */
- /* rp = pool_elt_at_index (sm->stats_registrations, p[0]); */
- /* pool_put (sm->stats_registrations, rp); */
- /* hash_unset (sm->stats_registration_hash, client_index); */
- /* } */
-
- return 0;
-}
-
-#define vl_api_vnet_interface_simple_counters_t_endian vl_noop_handler
-#define vl_api_vnet_interface_simple_counters_t_print vl_noop_handler
-#define vl_api_vnet_interface_combined_counters_t_endian vl_noop_handler
-#define vl_api_vnet_interface_combined_counters_t_print vl_noop_handler
-#define vl_api_vnet_ip4_fib_counters_t_endian vl_noop_handler
-#define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler
-#define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler
-#define vl_api_vnet_ip6_fib_counters_t_print vl_noop_handler
-#define vl_api_vnet_ip4_nbr_counters_t_endian vl_noop_handler
-#define vl_api_vnet_ip4_nbr_counters_t_print vl_noop_handler
-#define vl_api_vnet_ip6_nbr_counters_t_endian vl_noop_handler
-#define vl_api_vnet_ip6_nbr_counters_t_print vl_noop_handler
-
-static clib_error_t *
-stats_init (vlib_main_t * vm)
-{
- stats_main_t *sm = &stats_main;
- api_main_t *am = &api_main;
- void *vlib_worker_thread_bootstrap_fn (void *arg);
-
- sm->vlib_main = vm;
- sm->vnet_main = vnet_get_main ();
- sm->interface_main = &vnet_get_main ()->interface_main;
- sm->api_main = am;
- sm->stats_poll_interval_in_seconds = 10;
- sm->data_structure_lock =
- clib_mem_alloc_aligned (sizeof (data_structure_lock_t),
- CLIB_CACHE_LINE_BYTES);
- clib_memset (sm->data_structure_lock, 0, sizeof (*sm->data_structure_lock));
-
-#define _(N,n) \
- vl_msg_api_set_handlers(VL_API_##N, #n, \
- vl_api_##n##_t_handler, \
- vl_noop_handler, \
- vl_api_##n##_t_endian, \
- vl_api_##n##_t_print, \
- sizeof(vl_api_##n##_t), 0 /* do NOT trace! */);
- foreach_stats_msg;
-#undef _
-
- /* tell the msg infra not to free these messages... */
- am->message_bounce[VL_API_VNET_INTERFACE_SIMPLE_COUNTERS] = 1;
- am->message_bounce[VL_API_VNET_INTERFACE_COMBINED_COUNTERS] = 1;
- am->message_bounce[VL_API_VNET_IP4_FIB_COUNTERS] = 1;
- am->message_bounce[VL_API_VNET_IP6_FIB_COUNTERS] = 1;
- am->message_bounce[VL_API_VNET_IP4_NBR_COUNTERS] = 1;
- am->message_bounce[VL_API_VNET_IP6_NBR_COUNTERS] = 1;
-
- /*
- * Set up the (msg_name, crc, message-id) table
- */
- setup_message_id_table (am);
-
- vec_validate (sm->stats_registrations, STATS_REG_N_IDX);
- vec_validate (sm->stats_registration_hash, STATS_REG_N_IDX);
-#define stats_reg(n) \
- sm->stats_registrations[IDX_##n] = 0; \
- sm->stats_registration_hash[IDX_##n] = 0;
-#include <vpp/stats/stats.reg>
-#undef stats_reg
-
- return 0;
-}
-
-VLIB_INIT_FUNCTION (stats_init);
-
-/* *INDENT-OFF* */
-VLIB_REGISTER_THREAD (stats_thread_reg, static) = {
- .name = "stats",
- .function = stats_thread_fn,
- .fixed_count = 1,
- .count = 1,
- .no_data_structure_clone = 1,
- .use_pthreads = 1,
-};
-/* *INDENT-ON* */
-
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
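
The deleted interface, FIB and neighbour counter handlers above all share one fan-out pattern: walk the clients registered for the stat, unregister (via clear_client_for_stat) any client whose registration has gone away, and for every remaining client except the last one allocate a copy of the message (vl_msg_api_alloc_as_if_client + clib_memcpy), hand the current buffer to the previous client with vl_api_send_msg, and carry on with the copy. The last reachable client receives the remaining buffer, so N reachable clients cost N-1 allocations and the common single-client case costs none. The stand-alone sketch below models only that loop; client_t, msg_t, client_can_send, client_send, msg_clone and fan_out are hypothetical stand-ins written for illustration and are not VPP APIs.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { int id; int up; } client_t;
typedef struct { int seq; } msg_t;

static int
client_can_send (client_t * c)
{
  return c->up;
}

static void
client_send (client_t * c, msg_t * m)
{
  printf ("client %d receives msg %d\n", c->id, m->seq);
  free (m);			/* the receiver owns and frees the buffer */
}

static msg_t *
msg_clone (msg_t * m)
{
  msg_t *copy = malloc (sizeof (*copy));
  memcpy (copy, m, sizeof (*copy));
  return copy;
}

/* Fan one message out to every live client.  Each client except the last
 * reachable one gets the current buffer and the loop continues with a fresh
 * copy, so N reachable clients cost N-1 allocations. */
static void
fan_out (client_t ** clients, int n_clients, msg_t * mp)
{
  client_t *prev = NULL;
  int i;

  for (i = 0; i < n_clients; i++)
    {
      if (!clients[i])
	continue;		/* registration gone; the real handlers also
				   unregister the client at this point */
      if (prev && client_can_send (prev))
	{
	  msg_t *copy = msg_clone (mp);	/* duplicate the current buffer  */
	  client_send (prev, mp);	/* previous client consumes it   */
	  mp = copy;			/* carry on with the fresh copy  */
	}
      prev = clients[i];
    }

  if (prev && client_can_send (prev))
    client_send (prev, mp);	/* last client gets the remaining buffer */
  else
    free (mp);			/* nobody reachable: drop it */
}

int
main (void)
{
  client_t a = { 1, 1 }, b = { 2, 1 };
  client_t *clients[] = { &a, NULL, &b };
  msg_t *mp = msg_clone (&(msg_t) { 42 });

  fan_out (clients, 3, mp);
  return 0;
}

In the real handlers the incoming buffer is not owned by the API infrastructure at all: stats_init (above) sets am->message_bounce[...] = 1 for these message IDs precisely so the handler can forward or free the buffer itself, which is what makes this zero-extra-copy hand-off safe.
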
diff --git a/src/vpp/stats/stats_to_be_deprecated.h b/src/vpp/stats/stats_to_be_deprecated.h
deleted file mode 100644
index 9259527661b..00000000000
--- a/src/vpp/stats/stats_to_be_deprecated.h
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef __included_stats_h__
-#define __included_stats_h__
-
-#include <time.h>
-#include <vlib/vlib.h>
-#include <vnet/vnet.h>
-#include <vnet/interface.h>
-#include <pthread.h>
-#include <vlib/threads.h>
-#include <vnet/fib/fib_table.h>
-#include <vnet/mfib/mfib_table.h>
-#include <vlib/unix/unix.h>
-#include <vlibmemory/api.h>
-#include <vlibapi/api_helper_macros.h>
-
-typedef struct
-{
- volatile u32 lock;
- volatile u32 release_hint;
- u32 thread_index;
- u32 count;
- int tag;
-} data_structure_lock_t;
-
-/**
- * @brief stats request registration indexes
- *
- */
-/* from .../vnet/vnet/ip/lookup.c. Yuck */
-/* *INDENT-OFF* */
-typedef CLIB_PACKED (struct
-{
- ip4_address_t address;
- u32 address_length: 6;
- u32 index: 26;
-}) ip4_route_t;
-/* *INDENT-ON* */
-
-typedef struct
-{
- ip6_address_t address;
- u32 address_length;
- u32 index;
-} ip6_route_t;
-
-typedef struct
-{
- ip4_route_t *ip4routes;
- ip6_route_t *ip6routes;
- mfib_prefix_t *mroutes;
- fib_table_t **fibs;
- mfib_table_t **mfibs;
- hash_pair_t **pvec;
- uword *results;
-} do_ip46_fibs_t;
-
-typedef struct
-{
- u16 msg_id;
- u32 size;
- u32 client_index;
- u32 context;
- i32 retval;
-} client_registration_reply_t;
-
-typedef enum
-{
-#define stats_reg(n) IDX_##n,
-#include <vpp/stats/stats.reg>
-#undef stats_reg
- STATS_REG_N_IDX,
-} stats_reg_index_t;
-
-typedef struct
-{
- //Standard client information
- uword *client_hash;
- vpe_client_registration_t *clients;
- u32 item;
-
-} vpe_client_stats_registration_t;
-
-typedef struct
-{
- void *mheap;
- pthread_t thread_self;
- pthread_t thread_handle;
-
- u32 stats_poll_interval_in_seconds;
- u32 enable_poller;
-
- /*
- * stats_registrations is a vector, indexed by
- * IDX_xxxx_COUNTER generated for each streaming
- * stat a client can register for. (see stats.reg)
- *
- * The values in the vector refer to pools.
- *
- * The pool is of type vpe_client_stats_registration_t
- *
- * This typedef consists of:
- *
- * u32 item: This is the instance of the IDX_xxxx_COUNTER a
- * client is interested in.
- * vpe_client_registration_t *clients: The list of clients interested.
- *
- * e.g.
- * stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS] refers to a pool
- * containing elements such as:
- *
- * u32 item = sw_if_index1
- * clients = ["clienta", "clientb"]
- *
- * When clients == NULL the pool element is freed.  When the pool is empty,
- * i.e.
- * 0 == pool_elts (stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS]),
- * there is no need to process PER_INTERFACE_SIMPLE_COUNTERS at all.
- *
- * Note that u32 item = ~0 is the simple case for ALL interfaces or FIBs.
- *
- */
-
- uword **stats_registration_hash;
- vpe_client_stats_registration_t **stats_registrations;
-
- /* control-plane data structure lock */
- data_structure_lock_t *data_structure_lock;
-
- /* bail out of FIB walk if set */
- clib_longjmp_t jmp_buf;
-
- /* Vectors for Distribution funcs: do_ip4_fibs and do_ip6_fibs. */
- do_ip46_fibs_t do_ip46_fibs;
-
- /*
-   Working vectors, kept here so we do not thrash the memory allocator;
-   keeping them in stats_main_t effectively makes them "static".
- */
- vpe_client_stats_registration_t **regs_tmp;
- vpe_client_registration_t **clients_tmp;
-
- /* convenience */
- vlib_main_t *vlib_main;
- vnet_main_t *vnet_main;
- vnet_interface_main_t *interface_main;
- api_main_t *api_main;
-} stats_main_t;
-
-extern stats_main_t stats_main;
-
-#endif /* __included_stats_h__ */
-
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
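
The registration bookkeeping described in the big comment of the deleted header (stats_registrations is a per-counter-type vector; each entry holds {item, clients} registrations, with item == ~0 meaning "all interfaces/FIBs", and an empty entry means the poller can skip that counter type entirely) can be modelled with the small stand-alone sketch below. It deliberately replaces VPP's pool/hash/vec machinery with plain arrays, and everything in it (registration_t, stat_registrations_t, stat_needs_polling, the local IDX_ enum) is an illustrative stand-in rather than the deprecated code itself.

#include <stdio.h>

#define ALL_ITEMS (~0u)

enum
{
  IDX_PER_INTERFACE_SIMPLE_COUNTERS,
  IDX_IP4_FIB_COUNTERS,
  N_STAT_IDX
};

typedef struct
{
  unsigned item;		/* sw_if_index / fib index, or ALL_ITEMS     */
  unsigned clients[4];		/* client indices registered for this item   */
  int n_clients;
} registration_t;

typedef struct
{
  registration_t regs[8];	/* stands in for the per-stat pool           */
  int n_regs;
} stat_registrations_t;

/* stands in for sm->stats_registrations: one slot per IDX_xxx counter type */
static stat_registrations_t stats_registrations[N_STAT_IDX];

/* The poller only has to visit a counter type if at least one registration
 * (for a specific item or for ALL_ITEMS) is still present. */
static int
stat_needs_polling (int stat_idx)
{
  return stats_registrations[stat_idx].n_regs > 0;
}

int
main (void)
{
  stat_registrations_t *sr = &stats_registrations[IDX_IP4_FIB_COUNTERS];

  /* client index 7 registers for all IP4 FIB counters */
  sr->regs[sr->n_regs++] = (registration_t) {
    .item = ALL_ITEMS, .clients = { 7 }, .n_clients = 1
  };

  printf ("poll ip4 fib counters?    %s\n",
	  stat_needs_polling (IDX_IP4_FIB_COUNTERS) ? "yes" : "no");
  printf ("poll simple if counters?  %s\n",
	  stat_needs_polling (IDX_PER_INTERFACE_SIMPLE_COUNTERS) ? "yes" : "no");
  return 0;
}

In the deprecated code the same decision is cached in sm->enable_poller, which the want_* handlers above recompute whenever a client registers or is cleared; the ~0 "all items" registration is the common case used by want_stats and the per-FIB/per-interface variants.
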