author      Neale Ranns <nranns@cisco.com>    2018-09-07 09:32:36 -0700
committer   Damjan Marion <dmarion@me.com>    2018-09-20 10:05:11 +0000
commit      008dbe109ce2714be69ffb6549a0c0198a07f7d0 (patch)
tree        6a4a4d7fb7c884036a6c4a1cda6e866d7087eb4c
parent      ee2e58f6ea802437ed52cc3e4d59b89d92757135 (diff)
Route counters in the stats segment
The route ADD API is changed to return the stats segment index to use to read the counters.

Change-Id: I2ef41e01eaa2f9cfaa49d9c88968897793825925
Signed-off-by: Neale Ranns <nranns@cisco.com>
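The new reply field closes the loop between programming a route and reading its counters. A minimal sketch of the intended flow, in the style of this patch's Python tests (a VppTestCase-style environment is assumed; route arguments are abbreviated for illustration):

    # Sketch only: add a route, keep the returned stats index, then
    # read the per-route combined counters from the stats segment.
    r = self.vapi.ip_add_del_route(dst_addr, dst_addr_len, next_hop_addr)
    self.assertEqual(r.retval, 0)
    idx = r.stats_index          # ~0 when no index (delete or error)

    # counters for traffic forwarded to this prefix
    to_counters = self.statistics.get_counter("/net/route/to")
    self.logger.info(to_counters[0][idx]['packets'])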
-rw-r--r--   src/vat/api_format.c          8
-rw-r--r--   src/vnet/dpo/load_balance.c   11
-rw-r--r--   src/vnet/fib/fib_entry.c      10
-rw-r--r--   src/vnet/fib/fib_entry.h      2
-rw-r--r--   src/vnet/fib/fib_table.c      8
-rw-r--r--   src/vnet/fib/fib_table.h      11
-rw-r--r--   src/vnet/ip/ip.api            14
-rw-r--r--   src/vnet/ip/ip_api.c          118
-rw-r--r--   src/vnet/mpls/mpls.api        9
-rw-r--r--   src/vnet/mpls/mpls_api.c      68
-rw-r--r--   test/test_mpls.py             21
-rw-r--r--   test/vpp_ip_route.py          24
12 files changed, 216 insertions(+), 88 deletions(-)
diff --git a/src/vat/api_format.c b/src/vat/api_format.c
index 5e1114fdfbd..61b34973d6f 100644
--- a/src/vat/api_format.c
+++ b/src/vat/api_format.c
@@ -20440,9 +20440,9 @@ vl_api_ip_fib_details_t_handler (vl_api_ip_fib_details_t * mp)
int i;
print (vam->ofp,
- "table-id %d, prefix %U/%d",
+ "table-id %d, prefix %U/%d stats-index %d",
ntohl (mp->table_id), format_ip4_address, mp->address,
- mp->address_length);
+ mp->address_length, ntohl (mp->stats_index));
fp = mp->path;
for (i = 0; i < count; i++)
{
@@ -20652,9 +20652,9 @@ vl_api_ip6_fib_details_t_handler (vl_api_ip6_fib_details_t * mp)
int i;
print (vam->ofp,
- "table-id %d, prefix %U/%d",
+ "table-id %d, prefix %U/%d stats-index %d",
ntohl (mp->table_id), format_ip6_address, mp->address,
- mp->address_length);
+ mp->address_length, ntohl (mp->stats_index));
fp = mp->path;
for (i = 0; i < count; i++)
{
diff --git a/src/vnet/dpo/load_balance.c b/src/vnet/dpo/load_balance.c
index ae95b6e1b3e..37f8ca1d89d 100644
--- a/src/vnet/dpo/load_balance.c
+++ b/src/vnet/dpo/load_balance.c
@@ -53,7 +53,16 @@ load_balance_t *load_balance_pool;
/**
* The one instance of load-balance main
*/
-load_balance_main_t load_balance_main;
+load_balance_main_t load_balance_main = {
+ .lbm_to_counters = {
+ .name = "route-to",
+ .stat_segment_name = "/net/route/to",
+ },
+ .lbm_via_counters = {
+ .name = "route-via",
+ .stat_segment_name = "/net/route/via",
+ }
+};
f64
load_balance_get_multipath_tolerance (void)
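With the counter vectors named as above, readers can locate them in the stats segment by path. A hedged sketch using the same accessor the tests below rely on, where c[worker][stats_index] yields a packets/bytes dict:

    # Sketch: read both per-route combined counter vectors by path.
    to_c = self.statistics.get_counter("/net/route/to")
    via_c = self.statistics.get_counter("/net/route/via")
    pkts_to = to_c[0][stats_index]['packets']
    pkts_via = via_c[0][stats_index]['packets']

As the MPLS test changes below suggest, "to" counts traffic forwarded to the route's prefix, while "via" counts traffic resolving through the route (e.g. a recursive path).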
diff --git a/src/vnet/fib/fib_entry.c b/src/vnet/fib/fib_entry.c
index 655526586cb..8d7ce00b9c0 100644
--- a/src/vnet/fib/fib_entry.c
+++ b/src/vnet/fib/fib_entry.c
@@ -1495,6 +1495,16 @@ fib_entry_set_flow_hash_config (fib_node_index_t fib_entry_index,
}
}
+u32
+fib_entry_get_stats_index (fib_node_index_t fib_entry_index)
+{
+ fib_entry_t *fib_entry;
+
+ fib_entry = fib_entry_get(fib_entry_index);
+
+ return (fib_entry->fe_lb.dpoi_index);
+}
+
static int
fib_ip4_address_compare (const ip4_address_t * a1,
const ip4_address_t * a2)
diff --git a/src/vnet/fib/fib_entry.h b/src/vnet/fib/fib_entry.h
index 61b81493756..9175a571032 100644
--- a/src/vnet/fib/fib_entry.h
+++ b/src/vnet/fib/fib_entry.h
@@ -623,6 +623,8 @@ extern void fib_entry_set_flow_hash_config(fib_node_index_t fib_entry_index,
extern void fib_entry_module_init(void);
+extern u32 fib_entry_get_stats_index(fib_node_index_t fib_entry_index);
+
/*
* unsafe... beware the raw pointer.
*/
diff --git a/src/vnet/fib/fib_table.c b/src/vnet/fib/fib_table.c
index f63edaa76f0..d8e8d63b9ae 100644
--- a/src/vnet/fib/fib_table.c
+++ b/src/vnet/fib/fib_table.c
@@ -885,6 +885,14 @@ fib_table_entry_delete_index (fib_node_index_t fib_entry_index,
fib_entry_index, prefix, source);
}
+u32
+fib_table_entry_get_stats_index(u32 fib_index,
+ const fib_prefix_t *prefix)
+{
+ return (fib_entry_get_stats_index(
+ fib_table_lookup_exact_match(fib_index, prefix)));
+}
+
fib_node_index_t
fib_table_entry_local_label_add (u32 fib_index,
const fib_prefix_t *prefix,
diff --git a/src/vnet/fib/fib_table.h b/src/vnet/fib/fib_table.h
index 8b86f8d6dd9..f13dd77c8b4 100644
--- a/src/vnet/fib/fib_table.h
+++ b/src/vnet/fib/fib_table.h
@@ -596,6 +596,17 @@ extern void fib_table_entry_delete_index(fib_node_index_t entry_index,
/**
* @brief
+ * Return the stats index for a FIB entry
+ * @param fib_index
+ * The table's FIB index
+ * @param prefix
+ * The entry's prefix
+ */
+extern u32 fib_table_entry_get_stats_index(u32 fib_index,
+ const fib_prefix_t *prefix);
+
+/**
+ * @brief
* Flush all entries from a table for the source
*
* @param fib_index
diff --git a/src/vnet/ip/ip.api b/src/vnet/ip/ip.api
index 616d621577d..7c7f656d2e6 100644
--- a/src/vnet/ip/ip.api
+++ b/src/vnet/ip/ip.api
@@ -1,5 +1,6 @@
+/* Hey Emacs use -*- mode: C -*- */
/*
- * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Copyright (c) 2018 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -70,6 +71,7 @@ manual_endian manual_print define ip_fib_details
u8 address_length;
u8 address[4];
u32 count;
+ u32 stats_index;
vl_api_fib_path_t path[count];
};
@@ -97,6 +99,7 @@ manual_endian manual_print define ip6_fib_details
u8 address_length;
u8 address[16];
u32 count;
+ u32 stats_index;
vl_api_fib_path_t path[count];
};
@@ -389,7 +392,7 @@ autoreply define sw_interface_ip6_set_link_local_address
@param next_hop_out_label_stack - the next-hop output label stack, outermost first
@param next_hop_via_label - The next-hop is resolved via a local label
*/
-autoreply define ip_add_del_route
+define ip_add_del_route
{
u32 client_index;
u32 context;
@@ -422,6 +425,13 @@ autoreply define ip_add_del_route
vl_api_fib_mpls_label_t next_hop_out_label_stack[next_hop_n_out_labels];
};
+define ip_add_del_route_reply
+{
+ u32 context;
+ i32 retval;
+ u32 stats_index;
+};
+
/** \brief Add / del route request
@param client_index - opaque cookie to identify the sender
@param context - sender context, to match reply w/ request
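Because ip_add_del_route is no longer autoreply, clients consume the explicit reply defined above. A sketch of the client side (arguments abbreviated; 0xffffffff is ~0 on the wire):

    # Sketch: only a successful add yields a usable stats index.
    r = self.vapi.ip_add_del_route(dst_addr, dst_addr_len, next_hop_addr,
                                   is_add=1)
    if r.retval == 0 and r.stats_index != 0xffffffff:
        self.stats_index = r.stats_index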
diff --git a/src/vnet/ip/ip_api.c b/src/vnet/ip/ip_api.c
index 431a777442f..bed5889b24a 100644
--- a/src/vnet/ip/ip_api.c
+++ b/src/vnet/ip/ip_api.c
@@ -212,6 +212,8 @@ send_ip_fib_details (vpe_api_main_t * am,
clib_min (vec_len (table->ft_desc), sizeof (mp->table_name)));
mp->address_length = pfx->fp_len;
memcpy (mp->address, &pfx->fp_addr.ip4, sizeof (pfx->fp_addr.ip4));
+ mp->stats_index =
+ htonl (fib_table_entry_get_stats_index (table->ft_index, pfx));
mp->count = htonl (path_count);
fp = mp->path;
@@ -309,6 +311,8 @@ send_ip6_fib_details (vpe_api_main_t * am,
memcpy (mp->address, &pfx->fp_addr.ip6, sizeof (pfx->fp_addr.ip6));
memcpy (mp->table_name, table->ft_desc,
clib_min (vec_len (table->ft_desc), sizeof (mp->table_name)));
+ mp->stats_index =
+ htonl (fib_table_entry_get_stats_index (table->ft_index, pfx));
mp->count = htonl (path_count);
fp = mp->path;
@@ -962,7 +966,8 @@ add_del_route_check (fib_protocol_t table_proto,
}
static int
-ip4_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp)
+ip4_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp,
+ u32 * stats_index)
{
u32 fib_index, next_hop_fib_index;
fib_mpls_label_t *label_stack = NULL;
@@ -1006,32 +1011,37 @@ ip4_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp)
}
}
- return (add_del_route_t_handler (mp->is_multipath,
- mp->is_add,
- mp->is_drop,
- mp->is_unreach,
- mp->is_prohibit,
- mp->is_local, 0,
- mp->is_classify,
- mp->classify_table_index,
- mp->is_resolve_host,
- mp->is_resolve_attached, 0, 0,
- mp->is_dvr,
- mp->is_source_lookup,
- mp->is_udp_encap,
- fib_index, &pfx, DPO_PROTO_IP4,
- &nh,
- ntohl (mp->next_hop_id),
- ntohl (mp->next_hop_sw_if_index),
- next_hop_fib_index,
- mp->next_hop_weight,
- mp->next_hop_preference,
- ntohl (mp->next_hop_via_label),
- label_stack));
+ rv = add_del_route_t_handler (mp->is_multipath,
+ mp->is_add,
+ mp->is_drop,
+ mp->is_unreach,
+ mp->is_prohibit,
+ mp->is_local, 0,
+ mp->is_classify,
+ mp->classify_table_index,
+ mp->is_resolve_host,
+ mp->is_resolve_attached, 0, 0,
+ mp->is_dvr,
+ mp->is_source_lookup,
+ mp->is_udp_encap,
+ fib_index, &pfx, DPO_PROTO_IP4,
+ &nh,
+ ntohl (mp->next_hop_id),
+ ntohl (mp->next_hop_sw_if_index),
+ next_hop_fib_index,
+ mp->next_hop_weight,
+ mp->next_hop_preference,
+ ntohl (mp->next_hop_via_label), label_stack);
+
+ if (mp->is_add && 0 == rv)
+ *stats_index = fib_table_entry_get_stats_index (fib_index, &pfx);
+
+ return (rv);
}
static int
-ip6_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp)
+ip6_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp,
+ u32 * stats_index)
{
fib_mpls_label_t *label_stack = NULL;
u32 fib_index, next_hop_fib_index;
@@ -1075,46 +1085,57 @@ ip6_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp)
}
}
- return (add_del_route_t_handler (mp->is_multipath,
- mp->is_add,
- mp->is_drop,
- mp->is_unreach,
- mp->is_prohibit,
- mp->is_local, 0,
- mp->is_classify,
- mp->classify_table_index,
- mp->is_resolve_host,
- mp->is_resolve_attached, 0, 0,
- mp->is_dvr,
- mp->is_source_lookup,
- mp->is_udp_encap,
- fib_index, &pfx, DPO_PROTO_IP6,
- &nh, ntohl (mp->next_hop_id),
- ntohl (mp->next_hop_sw_if_index),
- next_hop_fib_index,
- mp->next_hop_weight,
- mp->next_hop_preference,
- ntohl (mp->next_hop_via_label),
- label_stack));
+ rv = add_del_route_t_handler (mp->is_multipath,
+ mp->is_add,
+ mp->is_drop,
+ mp->is_unreach,
+ mp->is_prohibit,
+ mp->is_local, 0,
+ mp->is_classify,
+ mp->classify_table_index,
+ mp->is_resolve_host,
+ mp->is_resolve_attached, 0, 0,
+ mp->is_dvr,
+ mp->is_source_lookup,
+ mp->is_udp_encap,
+ fib_index, &pfx, DPO_PROTO_IP6,
+ &nh, ntohl (mp->next_hop_id),
+ ntohl (mp->next_hop_sw_if_index),
+ next_hop_fib_index,
+ mp->next_hop_weight,
+ mp->next_hop_preference,
+ ntohl (mp->next_hop_via_label), label_stack);
+
+ if (mp->is_add && 0 == rv)
+ *stats_index = fib_table_entry_get_stats_index (fib_index, &pfx);
+
+ return (rv);
}
void
vl_api_ip_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp)
{
vl_api_ip_add_del_route_reply_t *rmp;
+ u32 stats_index;
int rv;
vnet_main_t *vnm = vnet_get_main ();
vnm->api_errno = 0;
+ stats_index = ~0;
if (mp->is_ipv6)
- rv = ip6_add_del_route_t_handler (mp);
+ rv = ip6_add_del_route_t_handler (mp, &stats_index);
else
- rv = ip4_add_del_route_t_handler (mp);
+ rv = ip4_add_del_route_t_handler (mp, &stats_index);
rv = (rv == 0) ? vnm->api_errno : rv;
- REPLY_MACRO (VL_API_IP_ADD_DEL_ROUTE_REPLY);
+ /* *INDENT-OFF* */
+ REPLY_MACRO2 (VL_API_IP_ADD_DEL_ROUTE_REPLY,
+ ({
+ rmp->stats_index = htonl (stats_index);
+ }))
+ /* *INDENT-ON* */
}
void
@@ -1401,6 +1422,7 @@ vl_api_ip_address_dump_t_handler (vl_api_ip_address_dump_t * mp)
}));
/* *INDENT-ON* */
}
+
BAD_SW_IF_INDEX_LABEL;
}
diff --git a/src/vnet/mpls/mpls.api b/src/vnet/mpls/mpls.api
index 7ab0f3750cc..7fa24f4812d 100644
--- a/src/vnet/mpls/mpls.api
+++ b/src/vnet/mpls/mpls.api
@@ -142,7 +142,7 @@ autoreply define mpls_table_add_del
@param mr_next_hop_out_label_stack - the next-hop output label stack, outermost first
@param next_hop_via_label - The next-hop is resolved via a local label
*/
-autoreply define mpls_route_add_del
+define mpls_route_add_del
{
u32 client_index;
u32 context;
@@ -169,6 +169,13 @@ autoreply define mpls_route_add_del
vl_api_fib_mpls_label_t mr_next_hop_out_label_stack[mr_next_hop_n_out_labels];
};
+define mpls_route_add_del_reply
+{
+ u32 context;
+ i32 retval;
+ u32 stats_index;
+};
+
/** \brief Dump MPLS fib table
@param client_index - opaque cookie to identify the sender
*/
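The MPLS reply mirrors the IP one, so label routes gain the same round trip. A sketch (arguments abbreviated):

    # Sketch: an MPLS route add also returns its stats index.
    r = self.vapi.mpls_route_add_del(local_label, eos_bit, nh_proto,
                                     nh_addr, is_add=1)
    stats_index = r.stats_index   # ~0 unless a successful add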
diff --git a/src/vnet/mpls/mpls_api.c b/src/vnet/mpls/mpls_api.c
index 8fec8e82e1e..dbd1d8b6e31 100644
--- a/src/vnet/mpls/mpls_api.c
+++ b/src/vnet/mpls/mpls_api.c
@@ -168,7 +168,8 @@ vl_api_mpls_ip_bind_unbind_t_handler (vl_api_mpls_ip_bind_unbind_t * mp)
static int
mpls_route_add_del_t_handler (vnet_main_t * vnm,
- vl_api_mpls_route_add_del_t * mp)
+ vl_api_mpls_route_add_del_t * mp,
+ u32 * stats_index)
{
fib_mpls_label_t *label_stack = NULL;
u32 fib_index, next_hop_fib_index;
@@ -227,31 +228,36 @@ mpls_route_add_del_t_handler (vnet_main_t * vnm,
}
/* *INDENT-OFF* */
- return (add_del_route_t_handler (mp->mr_is_multipath, mp->mr_is_add,
- 0, // mp->is_drop,
- 0, // mp->is_unreach,
- 0, // mp->is_prohibit,
- 0, // mp->is_local,
- mp->mr_is_multicast,
- mp->mr_is_classify,
- mp->mr_classify_table_index,
- mp->mr_is_resolve_host,
- mp->mr_is_resolve_attached,
- mp->mr_is_interface_rx,
- mp->mr_is_rpf_id,
- 0, // l2_bridged
- 0, // is source_lookup
- 0, // is_udp_encap
+ rv = add_del_route_t_handler (mp->mr_is_multipath, mp->mr_is_add,
+ 0, // mp->is_drop,
+ 0, // mp->is_unreach,
+ 0, // mp->is_prohibit,
+ 0, // mp->is_local,
+ mp->mr_is_multicast,
+ mp->mr_is_classify,
+ mp->mr_classify_table_index,
+ mp->mr_is_resolve_host,
+ mp->mr_is_resolve_attached,
+ mp->mr_is_interface_rx,
+ mp->mr_is_rpf_id,
+ 0, // l2_bridged
+ 0, // is source_lookup
+ 0, // is_udp_encap
fib_index, &pfx,
- mp->mr_next_hop_proto,
- &nh, ~0, // next_hop_id
- ntohl (mp->mr_next_hop_sw_if_index),
- next_hop_fib_index,
- mp->mr_next_hop_weight,
- mp->mr_next_hop_preference,
- ntohl (mp->mr_next_hop_via_label),
- label_stack));
+ mp->mr_next_hop_proto,
+ &nh, ~0, // next_hop_id
+ ntohl (mp->mr_next_hop_sw_if_index),
+ next_hop_fib_index,
+ mp->mr_next_hop_weight,
+ mp->mr_next_hop_preference,
+ ntohl (mp->mr_next_hop_via_label),
+ label_stack);
/* *INDENT-ON* */
+
+ if (mp->mr_is_add && 0 == rv)
+ *stats_index = fib_table_entry_get_stats_index (fib_index, &pfx);
+
+ return (rv);
}
void
@@ -259,16 +265,20 @@ vl_api_mpls_route_add_del_t_handler (vl_api_mpls_route_add_del_t * mp)
{
vl_api_mpls_route_add_del_reply_t *rmp;
vnet_main_t *vnm;
+ u32 stats_index;
int rv;
vnm = vnet_get_main ();
- vnm->api_errno = 0;
+ stats_index = ~0;
- rv = mpls_route_add_del_t_handler (vnm, mp);
-
- rv = (rv == 0) ? vnm->api_errno : rv;
+ rv = mpls_route_add_del_t_handler (vnm, mp, &stats_index);
- REPLY_MACRO (VL_API_MPLS_ROUTE_ADD_DEL_REPLY);
+ /* *INDENT-OFF* */
+ REPLY_MACRO2 (VL_API_MPLS_ROUTE_ADD_DEL_REPLY,
+ ({
+ rmp->stats_index = htonl (stats_index);
+ }));
+ /* *INDENT-ON* */
}
void
diff --git a/test/test_mpls.py b/test/test_mpls.py
index d943f8281e9..1a4dad18e30 100644
--- a/test/test_mpls.py
+++ b/test/test_mpls.py
@@ -389,6 +389,8 @@ class TestMPLS(VppTestCase):
self.verify_capture_labelled(self.pg0, rx, tx,
[VppMplsLabel(33, ttl=31, exp=1)])
+ self.assertEqual(route_32_eos.get_stats_to()['packets'], 257)
+
#
# A simple MPLS xconnect - non-eos label in label out
#
@@ -409,6 +411,7 @@ class TestMPLS(VppTestCase):
self.verify_capture_labelled(self.pg0, rx, tx,
[VppMplsLabel(33, ttl=20, exp=7),
VppMplsLabel(99)])
+ self.assertEqual(route_32_neos.get_stats_to()['packets'], 257)
#
# A simple MPLS xconnect - non-eos label in label out, uniform mode
@@ -575,6 +578,9 @@ class TestMPLS(VppTestCase):
VppMplsLabel(44),
VppMplsLabel(45, ttl=2)])
+ self.assertEqual(route_34_eos.get_stats_to()['packets'], 257)
+ self.assertEqual(route_32_neos.get_stats_via()['packets'], 257)
+
#
# A recursive EOS x-connect, which resolves through another x-connect
# in uniform mode
@@ -635,6 +641,7 @@ class TestMPLS(VppTestCase):
VppMplsLabel(44),
VppMplsLabel(46),
VppMplsLabel(55)])
+ self.assertEqual(ip_10_0_0_1.get_stats_to()['packets'], 257)
ip_10_0_0_1.remove_vpp_config()
route_34_neos.remove_vpp_config()
@@ -782,6 +789,8 @@ class TestMPLS(VppTestCase):
[VppMplsLabel(32),
VppMplsLabel(44)])
+ self.assertEqual(route_11_0_0_1.get_stats_to()['packets'], 257)
+
#
# add a recursive path, with 2 labels, via the 3 label route
#
@@ -805,6 +814,18 @@ class TestMPLS(VppTestCase):
VppMplsLabel(44),
VppMplsLabel(45)])
+ self.assertEqual(route_11_0_0_2.get_stats_to()['packets'], 257)
+
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+ self.verify_capture_labelled_ip4(self.pg0, rx, tx,
+ [VppMplsLabel(32),
+ VppMplsLabel(33),
+ VppMplsLabel(34),
+ VppMplsLabel(44),
+ VppMplsLabel(45)])
+
+ self.assertEqual(route_11_0_0_2.get_stats_to()['packets'], 514)
+
#
# cleanup
#
diff --git a/test/vpp_ip_route.py b/test/vpp_ip_route.py
index 18c27ffa942..d24e4b1e487 100644
--- a/test/vpp_ip_route.py
+++ b/test/vpp_ip_route.py
@@ -244,7 +244,7 @@ class VppIpRoute(VppObject):
def add_vpp_config(self):
if self.is_local or self.is_unreach or \
self.is_prohibit or self.is_drop:
- self._test.vapi.ip_add_del_route(
+ r = self._test.vapi.ip_add_del_route(
self.dest_addr,
self.dest_addr_len,
inet_pton(AF_INET6, "::"),
@@ -259,7 +259,7 @@ class VppIpRoute(VppObject):
for path in self.paths:
lstack = path.encode_labels()
- self._test.vapi.ip_add_del_route(
+ r = self._test.vapi.ip_add_del_route(
self.dest_addr,
self.dest_addr_len,
path.nh_addr,
@@ -277,6 +277,7 @@ class VppIpRoute(VppObject):
is_source_lookup=path.is_source_lookup,
is_udp_encap=path.is_udp_encap,
is_multipath=1 if len(self.paths) > 1 else 0)
+ self.stats_index = r.stats_index
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
@@ -325,6 +326,14 @@ class VppIpRoute(VppObject):
self.dest_addr_p,
self.dest_addr_len))
+ def get_stats_to(self):
+ c = self._test.statistics.get_counter("/net/route/to")
+ return c[0][self.stats_index]
+
+ def get_stats_via(self):
+ c = self._test.statistics.get_counter("/net/route/via")
+ return c[0][self.stats_index]
+
class VppIpMRoute(VppObject):
"""
@@ -581,7 +590,7 @@ class VppMplsRoute(VppObject):
for path in self.paths:
lstack = path.encode_labels()
- self._test.vapi.mpls_route_add_del(
+ r = self._test.vapi.mpls_route_add_del(
self.local_label,
self.eos_bit,
path.proto,
@@ -596,6 +605,7 @@ class VppMplsRoute(VppObject):
next_hop_n_out_labels=len(lstack),
next_hop_via_label=path.nh_via_label,
next_hop_table_id=path.nh_table_id)
+ self.stats_index = r.stats_index
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
@@ -626,3 +636,11 @@ class VppMplsRoute(VppObject):
% (self.table_id,
self.local_label,
20+self.eos_bit))
+
+ def get_stats_to(self):
+ c = self._test.statistics.get_counter("/net/route/to")
+ return c[0][self.stats_index]
+
+ def get_stats_via(self):
+ c = self._test.statistics.get_counter("/net/route/via")
+ return c[0][self.stats_index]
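Taken together, the test-object changes let a test assert per-route counters with a single helper call per direction. A usage sketch under the same assumptions as the tests above (the stream helper is illustrative):

    # Sketch: add a route, push traffic through it, then check the
    # stats-segment counters via the new helpers.
    route = VppIpRoute(self, "1.1.1.1", 32,
                       [VppRoutePath(self.pg0.remote_ip4,
                                     self.pg0.sw_if_index)])
    route.add_vpp_config()        # stores stats_index from the reply

    tx = self.create_stream_ip4(self.pg1, "1.1.1.1")  # illustrative helper
    self.send_and_expect(self.pg1, tx, self.pg0)

    self.assertEqual(route.get_stats_to()['packets'], len(tx))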