author     Ole Troan <ot@cisco.com>          2020-05-20 15:47:06 +0200
committer  Neale Ranns <nranns@cisco.com>    2020-05-25 11:22:34 +0000
commit     f5db3711b28db4e364ac01be8b124dd24d573782
tree       eee3c8aabae4287bf89c0e545e2400770fc223cb
parent     afc233aa93c3f23b30b756cb4ae2967f968bbbb1
api: add new stream message convention
Instead of having to wrap dump/detail calls in control ping, send the
details messages between a normal request/reply pair, as expressed in
the service statement below.

Example:

service {
  rpc map_domains_get returns map_domains_get_reply
    stream map_domain_details;
};

define map_domains_get
{
  u32 client_index;
  u32 context;
  u32 cursor;
};

define map_domains_get_reply
{
  u32 context;
  i32 retval;
  u32 cursor;
};

To avoid blocking the main thread for too long, the replies are now
sent in chunks of the client's message queue size. The reply message
returns VNET_API_ERROR_EAGAIN when there is more to read. The API
handler must also include in the reply a "cursor" that is passed to
the next call of the get function.

API handler example:

REPLY_AND_DETAILS_MACRO (VL_API_MAP_DOMAINS_GET_REPLY, mm->domains,
({
  send_domain_details (cursor, rp, mp->context);
}));

The macro starts from cursor and iterates through the pool until
vl_api_process_may_suspend() returns true or the iteration reaches
the end of the list.

Client example:

cursor = 0
d = []
while True:
    rv, details = map_domains_get(cursor=cursor)
    d += details
    if rv.retval == 0 or rv.retval != -165:
        break
    cursor = rv.cursor

or with the convenience iterator:

for x in vpp.details_iter(vpp.api.map_domains_get):
    pass

or:

list(details_iter(map_domains_get))

Change-Id: Iad9f6b41b0ef886adb584c97708dd91cf552749e
Type: feature
Signed-off-by: Ole Troan <ot@cisco.com>
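[Editor's note] For illustration only, and not part of this patch: a minimal
client-side generator that follows the cursor convention described above
might look like the sketch below. The helper name iter_details and the
keyword-argument pass-through are assumptions; VPP's Python binding already
ships its own details_iter convenience, whose real implementation is not
shown here. The value -165 is VNET_API_ERROR_EAGAIN, as stated in the commit
message and exercised by the test further down, and the (reply, details)
return tuple matches how the test calls map_domains_get.

    # Sketch only: generic cursor-following iterator for the new stream
    # convention described in this commit message.
    VNET_API_ERROR_EAGAIN = -165  # "more to read", per the convention above

    def iter_details(get_fn, **kwargs):
        """Yield details records from a *_get call, following the reply
        cursor until retval signals completion (0) or an error."""
        cursor = 0
        while True:
            reply, details = get_fn(cursor=cursor, **kwargs)
            for d in details:
                yield d
            if reply.retval != VNET_API_ERROR_EAGAIN:
                # 0 means the walk is complete; any other value is an error
                break
            cursor = reply.cursor

    # Hypothetical usage, assuming a connected papi client object 'vpp':
    # domains = list(iter_details(vpp.api.map_domains_get))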
Diffstat (limited to 'src/plugins')
-rw-r--r--   src/plugins/map/map.api             22
-rw-r--r--   src/plugins/map/map_api.c           87
-rw-r--r--   src/plugins/map/test/test_map.py    43
3 files changed, 122 insertions(+), 30 deletions(-)
diff --git a/src/plugins/map/map.api b/src/plugins/map/map.api
index 79deac86f8f..0ae1901e07a 100644
--- a/src/plugins/map/map.api
+++ b/src/plugins/map/map.api
@@ -13,7 +13,7 @@
* limitations under the License.
*/
-option version = "4.1.1";
+option version = "4.2.1";
import "vnet/ip/ip_types.api";
import "vnet/interface_types.api";
@@ -91,8 +91,28 @@ autoreply define map_add_del_rule
/** \brief Get list of map domains
@param client_index - opaque cookie to identify the sender
*/
+service {
+ rpc map_domains_get returns map_domains_get_reply
+ stream map_domain_details;
+};
+
+define map_domains_get
+{
+ u32 client_index;
+ u32 context;
+ u32 cursor;
+};
+
+define map_domains_get_reply
+{
+ u32 context;
+ i32 retval;
+ u32 cursor;
+};
+
define map_domain_dump
{
+ option deprecated="v20.12";
u32 client_index;
u32 context;
};
diff --git a/src/plugins/map/map_api.c b/src/plugins/map/map_api.c
index 7327732c6a7..13f05526afa 100644
--- a/src/plugins/map/map_api.c
+++ b/src/plugins/map/map_api.c
@@ -86,14 +86,48 @@ vl_api_map_add_del_rule_t_handler (vl_api_map_add_del_rule_t * mp)
}
static void
-vl_api_map_domain_dump_t_handler (vl_api_map_domain_dump_t * mp)
+send_domain_details (u32 map_domain_index, vl_api_registration_t * rp,
+ u32 context)
{
+ map_main_t *mm = &map_main;
vl_api_map_domain_details_t *rmp;
+ map_domain_t *d = pool_elt_at_index (mm->domains, map_domain_index);
+
+ /* Make sure every field is initiated (or don't skip the clib_memset()) */
+ map_domain_extra_t *de =
+ vec_elt_at_index (mm->domain_extras, map_domain_index);
+ int tag_len = clib_min (ARRAY_LEN (rmp->tag), vec_len (de->tag) + 1);
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO_DETAILS4(VL_API_MAP_DOMAIN_DETAILS, rp, context,
+ ({
+ rmp->domain_index = htonl (map_domain_index);
+ clib_memcpy (&rmp->ip6_prefix.address, &d->ip6_prefix,
+ sizeof (rmp->ip6_prefix.address));
+ clib_memcpy (&rmp->ip4_prefix.address, &d->ip4_prefix,
+ sizeof (rmp->ip4_prefix.address));
+ clib_memcpy (&rmp->ip6_src.address, &d->ip6_src,
+ sizeof (rmp->ip6_src.address));
+ rmp->ip6_prefix.len = d->ip6_prefix_len;
+ rmp->ip4_prefix.len = d->ip4_prefix_len;
+ rmp->ip6_src.len = d->ip6_src_len;
+ rmp->ea_bits_len = d->ea_bits_len;
+ rmp->psid_offset = d->psid_offset;
+ rmp->psid_length = d->psid_length;
+ rmp->flags = d->flags;
+ rmp->mtu = htons (d->mtu);
+ memcpy (rmp->tag, de->tag, tag_len - 1);
+ rmp->tag[tag_len - 1] = '\0';
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+vl_api_map_domain_dump_t_handler (vl_api_map_domain_dump_t * mp)
+{
map_main_t *mm = &map_main;
- map_domain_t *d;
- map_domain_extra_t *de;
+ int i;
vl_api_registration_t *reg;
- u32 map_domain_index;
if (pool_elts (mm->domains) == 0)
return;
@@ -103,33 +137,28 @@ vl_api_map_domain_dump_t_handler (vl_api_map_domain_dump_t * mp)
return;
/* *INDENT-OFF* */
- pool_foreach(d, mm->domains,
+ pool_foreach_index(i, mm->domains,
({
- map_domain_index = d - mm->domains;
- de = vec_elt_at_index(mm->domain_extras, map_domain_index);
- int tag_len = clib_min(ARRAY_LEN(rmp->tag), vec_len(de->tag) + 1);
-
- /* Make sure every field is initiated (or don't skip the clib_memset()) */
- rmp = vl_msg_api_alloc (sizeof (*rmp) + tag_len);
-
- rmp->_vl_msg_id = htons(VL_API_MAP_DOMAIN_DETAILS + mm->msg_id_base);
- rmp->context = mp->context;
- rmp->domain_index = htonl(map_domain_index);
- clib_memcpy(&rmp->ip6_prefix.address, &d->ip6_prefix, sizeof(rmp->ip6_prefix.address));
- clib_memcpy(&rmp->ip4_prefix.address, &d->ip4_prefix, sizeof(rmp->ip4_prefix.address));
- clib_memcpy(&rmp->ip6_src.address, &d->ip6_src, sizeof(rmp->ip6_src.address));
- rmp->ip6_prefix.len = d->ip6_prefix_len;
- rmp->ip4_prefix.len = d->ip4_prefix_len;
- rmp->ip6_src.len = d->ip6_src_len;
- rmp->ea_bits_len = d->ea_bits_len;
- rmp->psid_offset = d->psid_offset;
- rmp->psid_length = d->psid_length;
- rmp->flags = d->flags;
- rmp->mtu = htons(d->mtu);
- memcpy(rmp->tag, de->tag, tag_len-1);
- rmp->tag[tag_len-1] = '\0';
+ send_domain_details(i, reg, mp->context);
+ }));
+ /* *INDENT-ON* */
+}
- vl_api_send_msg (reg, (u8 *) rmp);
+static void
+vl_api_map_domains_get_t_handler (vl_api_map_domains_get_t * mp)
+{
+ map_main_t *mm = &map_main;
+ vl_api_map_domains_get_reply_t *rmp;
+
+ i32 rv = 0;
+
+ if (pool_elts (mm->domains) == 0)
+ return;
+
+ /* *INDENT-OFF* */
+ REPLY_AND_DETAILS_MACRO (VL_API_MAP_DOMAINS_GET_REPLY, mm->domains,
+ ({
+ send_domain_details (cursor, rp, mp->context);
}));
/* *INDENT-ON* */
}
diff --git a/src/plugins/map/test/test_map.py b/src/plugins/map/test/test_map.py
index 59c23335052..93ea3f06976 100644
--- a/src/plugins/map/test/test_map.py
+++ b/src/plugins/map/test/test_map.py
@@ -100,6 +100,48 @@ class TestMAP(VppTestCase):
self.assertEqual(rv[0].tag, tag,
"output produced incorrect tag value.")
+ def create_domains(self, ip4_pfx_str, ip6_pfx_str, ip6_src_str):
+ ip4_pfx = ipaddress.ip_network(ip4_pfx_str)
+ ip6_dst = ipaddress.ip_network(ip6_pfx_str)
+ mod = ip4_pfx.num_addresses / 1024
+ indicies = []
+ for i in range(ip4_pfx.num_addresses):
+ rv = self.vapi.map_add_domain(ip6_prefix=ip6_pfx_str,
+ ip4_prefix=str(ip4_pfx[i]) + "/32",
+ ip6_src=ip6_src_str)
+ indicies.append(rv.index)
+ return indicies
+
+ def test_api_map_domains_get(self):
+ # Create a bunch of domains
+ domains = self.create_domains('130.67.0.0/24', '2001::/32',
+ '2001::1/128')
+ self.assertEqual(len(domains), 256)
+
+ d = []
+ cursor = 0
+
+ # Invalid cursor
+ rv, details = self.vapi.map_domains_get(cursor=1234)
+ self.assertEqual(rv.retval, -7)
+
+ # Delete a domain in the middle of walk
+ rv, details = self.vapi.map_domains_get(cursor=0)
+ self.assertEqual(rv.retval, -165)
+ self.vapi.map_del_domain(index=rv.cursor)
+ domains.remove(rv.cursor)
+
+ # Continue at point of deleted cursor
+ rv, details = self.vapi.map_domains_get(cursor=rv.cursor)
+ self.assertEqual(rv.retval, -165)
+
+ d = list(self.vapi.vpp.details_iter(self.vapi.map_domains_get))
+ self.assertEqual(len(d), 255)
+
+ # Clean up
+ for i in domains:
+ self.vapi.map_del_domain(index=i)
+
def test_map_e_udp(self):
""" MAP-E UDP"""
@@ -916,5 +958,6 @@ class TestMAP(VppTestCase):
ip6_nh_address="4001::1",
is_add=0)
+
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)