author     Neale Ranns <nranns@cisco.com>            2017-01-24 01:34:25 -0800
committer  Damjan Marion <dmarion.lists@gmail.com>   2017-01-25 20:53:26 +0000
commit     044183faacc5eb8a055658f9deebefb56b254adc (patch)
tree       5d9af96930941b5344b1653e057b7008a75f081e /src
parent     f3dc11a9d71a30ac89e7ceb523abdb1ff13f0ac3 (diff)
[re]Enable per-Adjacency/neighbour counters
Change-Id: I953b3888bbc6d8a5f53f684a5edc8742b382f323
Signed-off-by: Neale Ranns <nranns@cisco.com>
Diffstat (limited to 'src')
-rw-r--r--  src/scripts/vnet/ip6         |  18
-rw-r--r--  src/vat/api_format.c         | 124
-rw-r--r--  src/vat/vat.h                |  18
-rw-r--r--  src/vlib/counter.h           |  14
-rw-r--r--  src/vnet/adj/adj.c           |   4
-rw-r--r--  src/vnet/adj/adj_l2.c        |  10
-rw-r--r--  src/vnet/ip/ip4_forward.c    |  42
-rw-r--r--  src/vnet/ip/ip6_forward.c    |  24
-rw-r--r--  src/vnet/mpls/mpls_output.c  |  37
-rw-r--r--  src/vpp/api/vpe.api          |  41
-rw-r--r--  src/vpp/stats/stats.c        | 405
11 files changed, 675 insertions(+), 62 deletions(-)
diff --git a/src/scripts/vnet/ip6 b/src/scripts/vnet/ip6
index 4f9f3ee5474..adb27225fed 100644
--- a/src/scripts/vnet/ip6
+++ b/src/scripts/vnet/ip6
@@ -6,10 +6,24 @@ packet-generator new {
no-recycle
data {
IP6: 1.2.3 -> 4.5.6
- ICMP: ::1 -> ::2
+ ICMP: 3002::2 -> 3001::2
ICMP echo_request
incrementing 100
}
}
-tr add pg-input 100
+
+loop create
+loop create
+set int state loop0 up
+set int state loop1 up
+
+set int ip address loop0 2001:1::1/64
+set int ip address loop1 2001:2::1/64
+
+set ip6 neighbor loop0 2001:1::2 00:00:DD:EE:AA:DD
+set ip6 neighbor loop1 2001:2::2 00:00:DD:EE:AA:EE
+
+ip route add 3001::/64 via 2001:2::2 loop1
+
+trace add pg-input 100
diff --git a/src/vat/api_format.c b/src/vat/api_format.c
index 653cf79f3db..839bcdaab1d 100644
--- a/src/vat/api_format.c
+++ b/src/vat/api_format.c
@@ -2042,6 +2042,42 @@ static void vl_api_vnet_ip4_fib_counters_t_handler_json
}
}
+static void vl_api_vnet_ip4_nbr_counters_t_handler
+ (vl_api_vnet_ip4_nbr_counters_t * mp)
+{
+ /* not supported */
+}
+
+static void vl_api_vnet_ip4_nbr_counters_t_handler_json
+ (vl_api_vnet_ip4_nbr_counters_t * mp)
+{
+ vat_main_t *vam = &vat_main;
+ vl_api_ip4_nbr_counter_t *v;
+ ip4_nbr_counter_t *counter;
+ u32 sw_if_index;
+ u32 count;
+ int i;
+
+ sw_if_index = ntohl (mp->sw_if_index);
+ count = ntohl (mp->count);
+ vec_validate (vam->ip4_nbr_counters, sw_if_index);
+
+ if (mp->begin)
+ vec_free (vam->ip4_nbr_counters[sw_if_index]);
+
+ v = (vl_api_ip4_nbr_counter_t *) & mp->c;
+ for (i = 0; i < count; i++)
+ {
+ vec_validate (vam->ip4_nbr_counters[sw_if_index], i);
+ counter = &vam->ip4_nbr_counters[sw_if_index][i];
+ counter->address.s_addr = v->address;
+ counter->packets = clib_net_to_host_u64 (v->packets);
+ counter->bytes = clib_net_to_host_u64 (v->bytes);
+ counter->linkt = v->link_type;
+ v++;
+ }
+}
+
static void vl_api_vnet_ip6_fib_counters_t_handler
(vl_api_vnet_ip6_fib_counters_t * mp)
{
@@ -2087,6 +2123,43 @@ static void vl_api_vnet_ip6_fib_counters_t_handler_json
}
}
+static void vl_api_vnet_ip6_nbr_counters_t_handler
+ (vl_api_vnet_ip6_nbr_counters_t * mp)
+{
+ /* not supported */
+}
+
+static void vl_api_vnet_ip6_nbr_counters_t_handler_json
+ (vl_api_vnet_ip6_nbr_counters_t * mp)
+{
+ vat_main_t *vam = &vat_main;
+ vl_api_ip6_nbr_counter_t *v;
+ ip6_nbr_counter_t *counter;
+ struct in6_addr ip6;
+ u32 sw_if_index;
+ u32 count;
+ int i;
+
+ sw_if_index = ntohl (mp->sw_if_index);
+ count = ntohl (mp->count);
+ vec_validate (vam->ip6_nbr_counters, sw_if_index);
+
+ if (mp->begin)
+ vec_free (vam->ip6_nbr_counters[sw_if_index]);
+
+ v = (vl_api_ip6_nbr_counter_t *) & mp->c;
+ for (i = 0; i < count; i++)
+ {
+ vec_validate (vam->ip6_nbr_counters[sw_if_index], i);
+ counter = &vam->ip6_nbr_counters[sw_if_index][i];
+ clib_memcpy (&ip6, &v->address, sizeof (ip6));
+ counter->address = ip6;
+ counter->packets = clib_net_to_host_u64 (v->packets);
+ counter->bytes = clib_net_to_host_u64 (v->bytes);
+ v++;
+ }
+}
+
static void vl_api_get_first_msg_id_reply_t_handler
(vl_api_get_first_msg_id_reply_t * mp)
{
@@ -3490,6 +3563,10 @@ static void vl_api_flow_classify_details_t_handler_json
#define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler
#define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler
#define vl_api_vnet_ip6_fib_counters_t_print vl_noop_handler
+#define vl_api_vnet_ip4_nbr_counters_t_endian vl_noop_handler
+#define vl_api_vnet_ip4_nbr_counters_t_print vl_noop_handler
+#define vl_api_vnet_ip6_nbr_counters_t_endian vl_noop_handler
+#define vl_api_vnet_ip6_nbr_counters_t_print vl_noop_handler
#define vl_api_lisp_adjacencies_get_reply_t_endian vl_noop_handler
#define vl_api_lisp_adjacencies_get_reply_t_print vl_noop_handler
@@ -3799,6 +3876,8 @@ _(DHCP_COMPL_EVENT, dhcp_compl_event) \
_(VNET_INTERFACE_COUNTERS, vnet_interface_counters) \
_(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \
_(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \
+_(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters) \
+_(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters) \
_(MAP_ADD_DOMAIN_REPLY, map_add_domain_reply) \
_(MAP_DEL_DOMAIN_REPLY, map_del_domain_reply) \
_(MAP_ADD_DEL_RULE_REPLY, map_add_del_rule_reply) \
@@ -4131,6 +4210,8 @@ dump_stats_table (vat_main_t * vam)
u64 packets;
ip4_fib_counter_t *c4;
ip6_fib_counter_t *c6;
+ ip4_nbr_counter_t *n4;
+ ip6_nbr_counter_t *n6;
int i, j;
if (!vam->json_output)
@@ -4226,6 +4307,49 @@ dump_stats_table (vat_main_t * vam)
}
}
+ /* ip4 nbr counters */
+ msg_array = vat_json_object_add (&node, "ip4_nbr_counters");
+ vat_json_init_array (msg_array);
+ for (i = 0; i < vec_len (vam->ip4_nbr_counters); i++)
+ {
+ msg = vat_json_array_add (msg_array);
+ vat_json_init_object (msg);
+ vat_json_object_add_uint (msg, "sw_if_index", i);
+ counter_array = vat_json_object_add (msg, "c");
+ vat_json_init_array (counter_array);
+ for (j = 0; j < vec_len (vam->ip4_nbr_counters[i]); j++)
+ {
+ counter = vat_json_array_add (counter_array);
+ vat_json_init_object (counter);
+ n4 = &vam->ip4_nbr_counters[i][j];
+ vat_json_object_add_ip4 (counter, "address", n4->address);
+ vat_json_object_add_uint (counter, "link-type", n4->linkt);
+ vat_json_object_add_uint (counter, "packets", n4->packets);
+ vat_json_object_add_uint (counter, "bytes", n4->bytes);
+ }
+ }
+
+ /* ip6 nbr counters */
+ msg_array = vat_json_object_add (&node, "ip6_nbr_counters");
+ vat_json_init_array (msg_array);
+ for (i = 0; i < vec_len (vam->ip6_nbr_counters); i++)
+ {
+ msg = vat_json_array_add (msg_array);
+ vat_json_init_object (msg);
+ vat_json_object_add_uint (msg, "sw_if_index", i);
+ counter_array = vat_json_object_add (msg, "c");
+ vat_json_init_array (counter_array);
+ for (j = 0; j < vec_len (vam->ip6_nbr_counters[i]); j++)
+ {
+ counter = vat_json_array_add (counter_array);
+ vat_json_init_object (counter);
+ n6 = &vam->ip6_nbr_counters[i][j];
+ vat_json_object_add_ip6 (counter, "address", n6->address);
+ vat_json_object_add_uint (counter, "packets", n6->packets);
+ vat_json_object_add_uint (counter, "bytes", n6->bytes);
+ }
+ }
+
vat_json_print (vam->ofp, &node);
vat_json_free (&node);
diff --git a/src/vat/vat.h b/src/vat/vat.h
index 3d7d96ae566..831bdf50d9d 100644
--- a/src/vat/vat.h
+++ b/src/vat/vat.h
@@ -97,6 +97,22 @@ typedef struct
typedef struct
{
+ struct in_addr address;
+ vnet_link_t linkt;
+ u64 packets;
+ u64 bytes;
+} ip4_nbr_counter_t;
+
+typedef struct
+{
+ struct in6_addr address;
+ vnet_link_t linkt;
+ u64 packets;
+ u64 bytes;
+} ip6_nbr_counter_t;
+
+typedef struct
+{
/* vpe input queue */
unix_shared_memory_queue_t *vl_input_queue;
@@ -185,6 +201,8 @@ typedef struct
u32 *ip4_fib_counters_vrf_id_by_index;
ip6_fib_counter_t **ip6_fib_counters;
u32 *ip6_fib_counters_vrf_id_by_index;
+ ip4_nbr_counter_t **ip4_nbr_counters;
+ ip6_nbr_counter_t **ip6_nbr_counters;
/* Convenience */
vlib_main_t *vlib_main;
diff --git a/src/vlib/counter.h b/src/vlib/counter.h
index a79032065d9..abfa89eea6d 100644
--- a/src/vlib/counter.h
+++ b/src/vlib/counter.h
@@ -273,6 +273,20 @@ vlib_increment_combined_counter (vlib_combined_counter_main_t * cm,
}
}
+#define vlib_prefetch_combined_counter(_cm, _cpu_index, _index) \
+{ \
+ vlib_mini_counter_t *_cpu_minis; \
+ \
+ /* \
+ * This CPU's mini index is assumed to already be in cache \
+ */ \
+ _cpu_minis = (_cm)->minis[(_cpu_index)]; \
+ CLIB_PREFETCH(_cpu_minis + (_index), \
+ sizeof(*_cpu_minis), \
+ STORE); \
+}
+
+
/** Get the value of a combined counter, never called in the speed path
Scrapes the entire set of mini counters. Inaccurate unless
worker threads which might increment the counter are
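
The macro above only warms this thread's cache line for the adjacency's mini counter; the accounting itself still goes through vlib_increment_combined_counter. A minimal sketch of how the rewrite nodes in this patch pair the two calls (names follow the hunks below; cpu_index, adj_index0, rw_len0 and p0 are per-packet values the node has already computed, and the surrounding dual-loop boilerplate is omitted):

    /* Sketch only: prefetch the per-adjacency combined counter early,
     * so it is warm by the time the header rewrite is done ... */
    vlib_prefetch_combined_counter (&adjacency_counters, cpu_index, adj_index0);

    /* ... rewrite the header ... */

    /* ... then bump it: one packet, and bytes = what remains in the buffer
     * chain plus the rewrite string that was just prepended. */
    vlib_increment_combined_counter (&adjacency_counters,
                                     cpu_index, adj_index0,
                                     1 /* packets */ ,
                                     vlib_buffer_length_in_chain (vm, p0) + rw_len0);
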
diff --git a/src/vnet/adj/adj.c b/src/vnet/adj/adj.c
index e740c4cb79b..d0be0f0eaff 100644
--- a/src/vnet/adj/adj.c
+++ b/src/vnet/adj/adj.c
@@ -122,6 +122,10 @@ format_ip_adjacency (u8 * s, va_list * args)
if (fiaf & FORMAT_IP_ADJACENCY_DETAIL)
{
+ vlib_counter_t counts;
+
+ vlib_get_combined_counter(&adjacency_counters, adj_index, &counts);
+ s = format (s, "\n counts:[%Ld:%Ld]", counts.packets, counts.bytes);
s = format (s, "\n locks:%d", adj->ia_node.fn_locks);
s = format (s, " node:[%d]:%U",
adj->rewrite_header.node_index,
diff --git a/src/vnet/adj/adj_l2.c b/src/vnet/adj/adj_l2.c
index 4d2dd7082f1..5a083643302 100644
--- a/src/vnet/adj/adj_l2.c
+++ b/src/vnet/adj/adj_l2.c
@@ -94,11 +94,11 @@ adj_l2_rewrite_inline (vlib_main_t * vm,
rw_len0 = adj0[0].rewrite_header.data_bytes;
vnet_buffer(p0)->ip.save_rewrite_length = rw_len0;
- vlib_increment_combined_counter
- (&adjacency_counters,
- cpu_index, adj_index0,
- /* packet increment */ 0,
- /* byte increment */ rw_len0-sizeof(ethernet_header_t));
+ vlib_increment_combined_counter(&adjacency_counters,
+ cpu_index,
+ adj_index0,
+ /* packet increment */ 0,
+ /* byte increment */ rw_len0);
/* Check MTU of outgoing interface. */
if (PREDICT_TRUE((vlib_buffer_length_in_chain (vm, p0) <=
diff --git a/src/vnet/ip/ip4_forward.c b/src/vnet/ip/ip4_forward.c
index 6e91b9e91e1..87b345bd3f5 100644
--- a/src/vnet/ip/ip4_forward.c
+++ b/src/vnet/ip/ip4_forward.c
@@ -2402,19 +2402,12 @@ ip4_rewrite_inline (vlib_main_t * vm,
error1);
/*
- * We've already accounted for an ethernet_header_t elsewhere
+ * pre-fetch the per-adjacency counters
*/
- if (PREDICT_FALSE (rw_len0 > sizeof (ethernet_header_t)))
- vlib_increment_combined_counter
- (&adjacency_counters, cpu_index, adj_index0,
- /* packet increment */ 0,
- /* byte increment */ rw_len0 - sizeof (ethernet_header_t));
-
- if (PREDICT_FALSE (rw_len1 > sizeof (ethernet_header_t)))
- vlib_increment_combined_counter
- (&adjacency_counters, cpu_index, adj_index1,
- /* packet increment */ 0,
- /* byte increment */ rw_len1 - sizeof (ethernet_header_t));
+ vlib_prefetch_combined_counter (&adjacency_counters,
+ cpu_index, adj_index0);
+ vlib_prefetch_combined_counter (&adjacency_counters,
+ cpu_index, adj_index1);
/* Don't adjust the buffer for ttl issue; icmp-error node wants
* to see the IP header */
@@ -2446,6 +2439,19 @@ ip4_rewrite_inline (vlib_main_t * vm,
vnet_rewrite_two_headers (adj0[0], adj1[0],
ip0, ip1, sizeof (ethernet_header_t));
+ /*
+ * Bump the per-adjacency counters
+ */
+ vlib_increment_combined_counter
+ (&adjacency_counters,
+ cpu_index,
+ adj_index0, 1, vlib_buffer_length_in_chain (vm, p0) + rw_len0);
+
+ vlib_increment_combined_counter
+ (&adjacency_counters,
+ cpu_index,
+ adj_index1, 1, vlib_buffer_length_in_chain (vm, p1) + rw_len1);
+
if (is_midchain)
{
adj0->sub_type.midchain.fixup_func (vm, adj0, p0);
@@ -2519,6 +2525,9 @@ ip4_rewrite_inline (vlib_main_t * vm,
p0->flags &= ~VNET_BUFFER_LOCALLY_ORIGINATED;
}
+ vlib_prefetch_combined_counter (&adjacency_counters,
+ cpu_index, adj_index0);
+
/* Guess we are only writing on simple Ethernet header. */
vnet_rewrite_one_header (adj0[0], ip0, sizeof (ethernet_header_t));
@@ -2526,11 +2535,10 @@ ip4_rewrite_inline (vlib_main_t * vm,
rw_len0 = adj0[0].rewrite_header.data_bytes;
vnet_buffer (p0)->ip.save_rewrite_length = rw_len0;
- if (PREDICT_FALSE (rw_len0 > sizeof (ethernet_header_t)))
- vlib_increment_combined_counter
- (&adjacency_counters, cpu_index, adj_index0,
- /* packet increment */ 0,
- /* byte increment */ rw_len0 - sizeof (ethernet_header_t));
+ vlib_increment_combined_counter
+ (&adjacency_counters,
+ cpu_index,
+ adj_index0, 1, vlib_buffer_length_in_chain (vm, p0) + rw_len0);
/* Check MTU of outgoing interface. */
error0 = (vlib_buffer_length_in_chain (vm, p0)
diff --git a/src/vnet/ip/ip6_forward.c b/src/vnet/ip/ip6_forward.c
index 197a9b7925d..232f72833fd 100644
--- a/src/vnet/ip/ip6_forward.c
+++ b/src/vnet/ip/ip6_forward.c
@@ -2108,14 +2108,14 @@ ip6_rewrite_inline (vlib_main_t * vm,
vnet_buffer (p0)->ip.save_rewrite_length = rw_len0;
vnet_buffer (p1)->ip.save_rewrite_length = rw_len1;
- vlib_increment_combined_counter (&adjacency_counters,
- cpu_index, adj_index0,
- /* packet increment */ 0,
- /* byte increment */ rw_len0);
- vlib_increment_combined_counter (&adjacency_counters,
- cpu_index, adj_index1,
- /* packet increment */ 0,
- /* byte increment */ rw_len1);
+ vlib_increment_combined_counter
+ (&adjacency_counters,
+ cpu_index,
+ adj_index0, 1, vlib_buffer_length_in_chain (vm, p0) + rw_len0);
+ vlib_increment_combined_counter
+ (&adjacency_counters,
+ cpu_index, adj_index1,
+ 1, vlib_buffer_length_in_chain (vm, p1) + rw_len1);
/* Check MTU of outgoing interface. */
error0 =
@@ -2233,10 +2233,10 @@ ip6_rewrite_inline (vlib_main_t * vm,
rw_len0 = adj0[0].rewrite_header.data_bytes;
vnet_buffer (p0)->ip.save_rewrite_length = rw_len0;
- vlib_increment_combined_counter (&adjacency_counters,
- cpu_index, adj_index0,
- /* packet increment */ 0,
- /* byte increment */ rw_len0);
+ vlib_increment_combined_counter
+ (&adjacency_counters,
+ cpu_index,
+ adj_index0, 1, vlib_buffer_length_in_chain (vm, p0) + rw_len0);
/* Check MTU of outgoing interface. */
error0 =
diff --git a/src/vnet/mpls/mpls_output.c b/src/vnet/mpls/mpls_output.c
index 8292a0cb3d2..c06cf917b88 100644
--- a/src/vnet/mpls/mpls_output.c
+++ b/src/vnet/mpls/mpls_output.c
@@ -128,18 +128,19 @@ mpls_output_inline (vlib_main_t * vm,
rw_len0 = adj0[0].rewrite_header.data_bytes;
rw_len1 = adj1[0].rewrite_header.data_bytes;
- if (PREDICT_FALSE (rw_len0 > sizeof(ethernet_header_t)))
- vlib_increment_combined_counter
- (&adjacency_counters,
- cpu_index, adj_index0,
- /* packet increment */ 0,
- /* byte increment */ rw_len0-sizeof(ethernet_header_t));
- if (PREDICT_FALSE (rw_len1 > sizeof(ethernet_header_t)))
- vlib_increment_combined_counter
- (&adjacency_counters,
- cpu_index, adj_index1,
- /* packet increment */ 0,
- /* byte increment */ rw_len1-sizeof(ethernet_header_t));
+ /* Bump the adj counters for packet and bytes */
+ vlib_increment_combined_counter
+ (&adjacency_counters,
+ cpu_index,
+ adj_index0,
+ 1,
+ vlib_buffer_length_in_chain (vm, p0) + rw_len0);
+ vlib_increment_combined_counter
+ (&adjacency_counters,
+ cpu_index,
+ adj_index1,
+ 1,
+ vlib_buffer_length_in_chain (vm, p1) + rw_len1);
/* Check MTU of outgoing interface. */
if (PREDICT_TRUE(vlib_buffer_length_in_chain (vm, p0) <=
@@ -234,12 +235,12 @@ mpls_output_inline (vlib_main_t * vm,
/* Update packet buffer attributes/set output interface. */
rw_len0 = adj0[0].rewrite_header.data_bytes;
- if (PREDICT_FALSE (rw_len0 > sizeof(ethernet_header_t)))
- vlib_increment_combined_counter
- (&adjacency_counters,
- cpu_index, adj_index0,
- /* packet increment */ 0,
- /* byte increment */ rw_len0-sizeof(ethernet_header_t));
+ vlib_increment_combined_counter
+ (&adjacency_counters,
+ cpu_index,
+ adj_index0,
+ 1,
+ vlib_buffer_length_in_chain (vm, p0) + rw_len0);
/* Check MTU of outgoing interface. */
if (PREDICT_TRUE(vlib_buffer_length_in_chain (vm, p0) <=
diff --git a/src/vpp/api/vpe.api b/src/vpp/api/vpe.api
index f32ba670cfd..a00033c58b9 100644
--- a/src/vpp/api/vpe.api
+++ b/src/vpp/api/vpe.api
@@ -230,6 +230,31 @@ manual_print manual_endian define vnet_ip4_fib_counters
vl_api_ip4_fib_counter_t c[count];
};
+typeonly manual_print manual_endian define ip4_nbr_counter
+{
+ u32 address;
+ u8 link_type;
+ u64 packets;
+ u64 bytes;
+};
+
+/**
+ * @brief Per-neighbour (i.e. per-adjacency) counters
+ * @param count The size of the array of counters
+ * @param sw_if_index The interface the adjacency is on
+ * @param begin Flag to indicate this is the first set of stats for this
+ * interface. If this flag is not set then it is a continuation of
+ * stats for this interface
+ * @param c counters
+ */
+manual_print manual_endian define vnet_ip4_nbr_counters
+{
+ u32 count;
+ u32 sw_if_index;
+ u8 begin;
+ vl_api_ip4_nbr_counter_t c[count];
+};
+
typeonly manual_print manual_endian define ip6_fib_counter
{
u64 address[2];
@@ -245,6 +270,22 @@ manual_print manual_endian define vnet_ip6_fib_counters
vl_api_ip6_fib_counter_t c[count];
};
+typeonly manual_print manual_endian define ip6_nbr_counter
+{
+ u64 address[2];
+ u8 link_type;
+ u64 packets;
+ u64 bytes;
+};
+
+manual_print manual_endian define vnet_ip6_nbr_counters
+{
+ u32 count;
+ u32 sw_if_index;
+ u8 begin;
+ vl_api_ip6_nbr_counter_t c[count];
+};
+
/** \brief Request for a single block of summary stats
@param client_index - opaque cookie to identify the sender
@param context - sender context, to match reply w/ request
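
The begin flag documented above is what allows the stats process to split one interface's neighbours across several messages when a batch would not fit in the shared-memory ring. A minimal client-side sketch of how a receiver can use it, mirroring the VAT JSON handler added earlier in this patch; the record type and per-interface vector below are illustrative, not part of this patch:

    /* Illustrative client-side storage; not defined by this patch. */
    typedef struct
    {
      u32 address;              /* IPv4 address, network byte order */
      u8 link_type;
      u64 packets;
      u64 bytes;
    } client_nbr_counter_t;

    static client_nbr_counter_t **nbr_counters_by_sw_if_index;

    static void
    client_handle_ip4_nbr_counters (vl_api_vnet_ip4_nbr_counters_t * mp)
    {
      u32 sw_if_index = ntohl (mp->sw_if_index);
      u32 count = ntohl (mp->count);
      vl_api_ip4_nbr_counter_t *v = mp->c;
      u32 i;

      vec_validate (nbr_counters_by_sw_if_index, sw_if_index);

      /* begin marks the first batch for this interface: discard old data */
      if (mp->begin)
        vec_reset_length (nbr_counters_by_sw_if_index[sw_if_index]);

      for (i = 0; i < count; i++, v++)
        {
          client_nbr_counter_t c = {
            .address = v->address,
            .link_type = v->link_type,
            .packets = clib_net_to_host_u64 (v->packets),
            .bytes = clib_net_to_host_u64 (v->bytes),
          };
          vec_add1 (nbr_counters_by_sw_if_index[sw_if_index], c);
        }
    }
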
diff --git a/src/vpp/stats/stats.c b/src/vpp/stats/stats.c
index 391e02f61fc..5e9b0d6911e 100644
--- a/src/vpp/stats/stats.c
+++ b/src/vpp/stats/stats.c
@@ -49,7 +49,9 @@ _(WANT_STATS, want_stats) \
_(WANT_STATS_REPLY, want_stats_reply) \
_(VNET_INTERFACE_COUNTERS, vnet_interface_counters) \
_(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \
-_(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters)
+_(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \
+_(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters) \
+_(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters)
/* These constants ensure msg sizes <= 1024, aka ring allocation */
#define SIMPLE_COUNTER_BATCH_SIZE 126
@@ -258,6 +260,313 @@ ip46_fib_stats_delay (stats_main_t * sm, u32 sec, u32 nsec)
}
}
+/**
+ * @brief The context passed when collecting adjacency counters
+ */
+typedef struct ip4_nbr_stats_ctx_t_
+{
+ /**
+ * The SW IF index all these adjs belong to
+ */
+ u32 sw_if_index;
+
+ /**
+ * A vector of ip4 nbr counters
+ */
+ vl_api_ip4_nbr_counter_t *counters;
+} ip4_nbr_stats_ctx_t;
+
+static adj_walk_rc_t
+ip4_nbr_stats_cb (adj_index_t ai, void *arg)
+{
+ vl_api_ip4_nbr_counter_t *vl_counter;
+ vlib_counter_t adj_counter;
+ ip4_nbr_stats_ctx_t *ctx;
+ ip_adjacency_t *adj;
+
+ ctx = arg;
+ vlib_get_combined_counter (&adjacency_counters, ai, &adj_counter);
+
+ if (0 != adj_counter.packets)
+ {
+ vec_add2 (ctx->counters, vl_counter, 1);
+ adj = adj_get (ai);
+
+ vl_counter->packets = clib_host_to_net_u64 (adj_counter.packets);
+ vl_counter->bytes = clib_host_to_net_u64 (adj_counter.bytes);
+ vl_counter->address = adj->sub_type.nbr.next_hop.ip4.as_u32;
+ vl_counter->link_type = adj->ia_link;
+ }
+ return (ADJ_WALK_RC_CONTINUE);
+}
+
+#define MIN(x,y) (((x)<(y))?(x):(y))
+
+static void
+ip4_nbr_ship (stats_main_t * sm, ip4_nbr_stats_ctx_t * ctx)
+{
+ api_main_t *am = sm->api_main;
+ vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
+ unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
+ vl_api_vnet_ip4_nbr_counters_t *mp = 0;
+ int first = 0;
+
+ /*
+ * If the walk context has counters, which may be left over from the last
+ * suspend, then we continue from there.
+ */
+ while (0 != vec_len (ctx->counters))
+ {
+ u32 n_items = MIN (vec_len (ctx->counters),
+ IP4_FIB_COUNTER_BATCH_SIZE);
+ u8 pause = 0;
+
+ dslock (sm, 0 /* release hint */ , 1 /* tag */ );
+
+ mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
+ (n_items *
+ sizeof
+ (vl_api_ip4_nbr_counter_t)));
+ mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_NBR_COUNTERS);
+ mp->count = ntohl (n_items);
+ mp->sw_if_index = ntohl (ctx->sw_if_index);
+ mp->begin = first;
+ first = 0;
+
+ /*
+ * copy the counters from the back of the context, then we can easily
+ * 'erase' them by resetting the vector length.
+ * The order we push the stats to the caller is not important.
+ */
+ clib_memcpy (mp->c,
+ &ctx->counters[vec_len (ctx->counters) - n_items],
+ n_items * sizeof (*ctx->counters));
+
+ _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
+
+ /*
+ * send to the shm q
+ */
+ unix_shared_memory_queue_lock (q);
+ pause = unix_shared_memory_queue_is_full (q);
+
+ vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
+ unix_shared_memory_queue_unlock (q);
+ dsunlock (sm);
+
+ if (pause)
+ ip46_fib_stats_delay (sm, 0 /* sec */ ,
+ STATS_RELEASE_DELAY_NS);
+ }
+}
+
+static void
+do_ip4_nbrs (stats_main_t * sm)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_interface_main_t *im = &vnm->interface_main;
+ vnet_sw_interface_t *si;
+
+ ip4_nbr_stats_ctx_t ctx = {
+ .sw_if_index = 0,
+ .counters = NULL,
+ };
+
+ /* *INDENT-OFF* */
+ pool_foreach (si, im->sw_interfaces,
+ ({
+ /*
+ * update the interface we are now concerned with
+ */
+ ctx.sw_if_index = si->sw_if_index;
+
+ /*
+ * we are about to walk another interface, so we shouldn't have any pending
+ * stats to export.
+ */
+ ASSERT(ctx.counters == NULL);
+
+ /*
+ * visit each neighbour adjacency on the interface and collect
+ * its current stats.
+ * Because we hold the lock, the walk is synchronous and therefore safe
+ * with respect to routing updates. The work is bounded by the number of
+ * adjacencies on an interface, which is typically not huge.
+ */
+ dslock (sm, 0 /* release hint */ , 1 /* tag */ );
+ adj_nbr_walk (si->sw_if_index,
+ FIB_PROTOCOL_IP4,
+ ip4_nbr_stats_cb,
+ &ctx);
+ dsunlock (sm);
+
+ /*
+ * if this interface has some adjacencies with counters then ship them,
+ * else continue to the next interface.
+ */
+ if (NULL != ctx.counters)
+ {
+ ip4_nbr_ship(sm, &ctx);
+ }
+ }));
+ /* *INDENT-OFF* */
+}
+
+/**
+ * @brief The context passed when collecting adjacency counters
+ */
+typedef struct ip6_nbr_stats_ctx_t_
+{
+ /**
+ * The SW IF index all these adjs belong to
+ */
+ u32 sw_if_index;
+
+ /**
+ * A vector of ip6 nbr counters
+ */
+ vl_api_ip6_nbr_counter_t *counters;
+} ip6_nbr_stats_ctx_t;
+
+static adj_walk_rc_t
+ip6_nbr_stats_cb (adj_index_t ai,
+ void *arg)
+{
+ vl_api_ip6_nbr_counter_t *vl_counter;
+ vlib_counter_t adj_counter;
+ ip6_nbr_stats_ctx_t *ctx;
+ ip_adjacency_t *adj;
+
+ ctx = arg;
+ vlib_get_combined_counter(&adjacency_counters, ai, &adj_counter);
+
+ if (0 != adj_counter.packets)
+ {
+ vec_add2(ctx->counters, vl_counter, 1);
+ adj = adj_get(ai);
+
+ vl_counter->packets = clib_host_to_net_u64(adj_counter.packets);
+ vl_counter->bytes = clib_host_to_net_u64(adj_counter.bytes);
+ vl_counter->address[0] = adj->sub_type.nbr.next_hop.ip6.as_u64[0];
+ vl_counter->address[1] = adj->sub_type.nbr.next_hop.ip6.as_u64[1];
+ vl_counter->link_type = adj->ia_link;
+ }
+ return (ADJ_WALK_RC_CONTINUE);
+}
+
+#define MIN(x,y) (((x)<(y))?(x):(y))
+
+static void
+ip6_nbr_ship (stats_main_t * sm,
+ ip6_nbr_stats_ctx_t *ctx)
+{
+ api_main_t *am = sm->api_main;
+ vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
+ unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
+ vl_api_vnet_ip6_nbr_counters_t *mp = 0;
+ int first = 0;
+
+ /*
+ * If the walk context has counters, which may be left over from the last
+ * suspend, then we continue from there.
+ */
+ while (0 != vec_len(ctx->counters))
+ {
+ u32 n_items = MIN (vec_len (ctx->counters),
+ IP6_FIB_COUNTER_BATCH_SIZE);
+ u8 pause = 0;
+
+ dslock (sm, 0 /* release hint */ , 1 /* tag */ );
+
+ mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
+ (n_items *
+ sizeof
+ (vl_api_ip6_nbr_counter_t)));
+ mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_NBR_COUNTERS);
+ mp->count = ntohl (n_items);
+ mp->sw_if_index = ntohl (ctx->sw_if_index);
+ mp->begin = first;
+ first = 0;
+
+ /*
+ * copy the counters from the back of the context, then we can easily
+ * 'erase' them by resetting the vector length.
+ * The order we push the stats to the caller is not important.
+ */
+ clib_memcpy (mp->c,
+ &ctx->counters[vec_len (ctx->counters) - n_items],
+ n_items * sizeof (*ctx->counters));
+
+ _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
+
+ /*
+ * send to the shm q
+ */
+ unix_shared_memory_queue_lock (q);
+ pause = unix_shared_memory_queue_is_full (q);
+
+ vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
+ unix_shared_memory_queue_unlock (q);
+ dsunlock (sm);
+
+ if (pause)
+ ip46_fib_stats_delay (sm, 0 /* sec */ ,
+ STATS_RELEASE_DELAY_NS);
+ }
+}
+
+static void
+do_ip6_nbrs (stats_main_t * sm)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_interface_main_t *im = &vnm->interface_main;
+ vnet_sw_interface_t *si;
+
+ ip6_nbr_stats_ctx_t ctx = {
+ .sw_if_index = 0,
+ .counters = NULL,
+ };
+
+ /* *INDENT-OFF* */
+ pool_foreach (si, im->sw_interfaces,
+ ({
+ /*
+ * update the interface we are now concerned with
+ */
+ ctx.sw_if_index = si->sw_if_index;
+
+ /*
+ * we are about to walk another interface, so we shouldn't have any pending
+ * stats to export.
+ */
+ ASSERT(ctx.counters == NULL);
+
+ /*
+ * visit each neighbour adjacency on the interface and collect
+ * its current stats.
+ * Because we hold the lock, the walk is synchronous and therefore safe
+ * with respect to routing updates. The work is bounded by the number of
+ * adjacencies on an interface, which is typically not huge.
+ */
+ dslock (sm, 0 /* release hint */ , 1 /* tag */ );
+ adj_nbr_walk (si->sw_if_index,
+ FIB_PROTOCOL_IP6,
+ ip6_nbr_stats_cb,
+ &ctx);
+ dsunlock (sm);
+
+ /*
+ * if this interface has some adjacencies with counters then ship them,
+ * else continue to the next interface.
+ */
+ if (NULL != ctx.counters)
+ {
+ ip6_nbr_ship(sm, &ctx);
+ }
+ }));
+ /* *INDENT-OFF* */
+}
+
static void
do_ip4_fibs (stats_main_t * sm)
{
@@ -318,13 +627,7 @@ again:
hash_foreach_pair (p, hash,
({
x.address.data_u32 = p->key;
- if (lm->fib_result_n_words > 1)
- {
- x.index = vec_len (results);
- vec_add (results, p->value, lm->fib_result_n_words);
- }
- else
- x.index = p->value[0];
+ x.index = p->value[0];
vec_add1 (routes, x);
if (sm->data_structure_lock->release_hint)
@@ -631,6 +934,8 @@ stats_thread_fn (void *arg)
do_combined_interface_counters (sm);
do_ip4_fibs (sm);
do_ip6_fibs (sm);
+ do_ip4_nbrs (sm);
+ do_ip6_nbrs (sm);
}
}
@@ -805,6 +1110,45 @@ vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
}
static void
+vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp)
+{
+ vpe_client_registration_t *reg;
+ stats_main_t *sm = &stats_main;
+ unix_shared_memory_queue_t *q, *q_prev = NULL;
+ vl_api_vnet_ip4_nbr_counters_t *mp_copy = NULL;
+ u32 mp_size;
+
+ mp_size = sizeof (*mp_copy) +
+ ntohl (mp->count) * sizeof (vl_api_ip4_nbr_counter_t);
+
+ /* *INDENT-OFF* */
+ pool_foreach(reg, sm->stats_registrations,
+ ({
+ q = vl_api_client_index_to_input_queue (reg->client_index);
+ if (q)
+ {
+ if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ {
+ mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
+ clib_memcpy(mp_copy, mp, mp_size);
+ vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
+ mp = mp_copy;
+ }
+ q_prev = q;
+ }
+ }));
+ /* *INDENT-ON* */
+ if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ {
+ vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+ }
+ else
+ {
+ vl_msg_api_free (mp);
+ }
+}
+
+static void
vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
{
vpe_client_registration_t *reg;
@@ -844,6 +1188,45 @@ vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
}
static void
+vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp)
+{
+ vpe_client_registration_t *reg;
+ stats_main_t *sm = &stats_main;
+ unix_shared_memory_queue_t *q, *q_prev = NULL;
+ vl_api_vnet_ip6_nbr_counters_t *mp_copy = NULL;
+ u32 mp_size;
+
+ mp_size = sizeof (*mp_copy) +
+ ntohl (mp->count) * sizeof (vl_api_ip6_nbr_counter_t);
+
+ /* *INDENT-OFF* */
+ pool_foreach(reg, sm->stats_registrations,
+ ({
+ q = vl_api_client_index_to_input_queue (reg->client_index);
+ if (q)
+ {
+ if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ {
+ mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
+ clib_memcpy(mp_copy, mp, mp_size);
+ vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
+ mp = mp_copy;
+ }
+ q_prev = q;
+ }
+ }));
+ /* *INDENT-ON* */
+ if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ {
+ vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+ }
+ else
+ {
+ vl_msg_api_free (mp);
+ }
+}
+
+static void
vl_api_want_stats_reply_t_handler (vl_api_want_stats_reply_t * mp)
{
clib_warning ("BUG");
@@ -929,6 +1312,10 @@ stats_memclnt_delete_callback (u32 client_index)
#define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler
#define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler
#define vl_api_vnet_ip6_fib_counters_t_print vl_noop_handler
+#define vl_api_vnet_ip4_nbr_counters_t_endian vl_noop_handler
+#define vl_api_vnet_ip4_nbr_counters_t_print vl_noop_handler
+#define vl_api_vnet_ip6_nbr_counters_t_endian vl_noop_handler
+#define vl_api_vnet_ip6_nbr_counters_t_print vl_noop_handler
static clib_error_t *
stats_init (vlib_main_t * vm)
@@ -961,6 +1348,8 @@ stats_init (vlib_main_t * vm)
am->message_bounce[VL_API_VNET_INTERFACE_COUNTERS] = 1;
am->message_bounce[VL_API_VNET_IP4_FIB_COUNTERS] = 1;
am->message_bounce[VL_API_VNET_IP6_FIB_COUNTERS] = 1;
+ am->message_bounce[VL_API_VNET_IP4_NBR_COUNTERS] = 1;
+ am->message_bounce[VL_API_VNET_IP6_NBR_COUNTERS] = 1;
return 0;
}
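
One design note on ip4_nbr_ship()/ip6_nbr_ship() above: the walk first collects everything for an interface into the context vector, and the ship loop then drains that vector from the tail, so that if the shared-memory queue fills and the thread pauses, the remaining entries can be sent on resume without any memmove. Stripped of the locking, allocation and byte swapping, the core batching step is a sketch like the following (BATCH_SIZE stands in for the IP4/IP6_FIB_COUNTER_BATCH_SIZE limits the code reuses):

    /* Copy up to BATCH_SIZE entries from the end of the counter vector
     * into the API message, then shrink the vector; the untouched head
     * of the vector is what the next iteration (or a later resume) sends. */
    u32 n_items = MIN (vec_len (ctx->counters), BATCH_SIZE);

    clib_memcpy (mp->c,
                 &ctx->counters[vec_len (ctx->counters) - n_items],
                 n_items * sizeof (*ctx->counters));

    _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;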