Diffstat (limited to 'hicn-plugin/src/mapme_ctrl_node.c')
-rw-r--r--  hicn-plugin/src/mapme_ctrl_node.c | 240
1 file changed, 200 insertions(+), 40 deletions(-)
diff --git a/hicn-plugin/src/mapme_ctrl_node.c b/hicn-plugin/src/mapme_ctrl_node.c
index e3d340e53..5d4fc5c12 100644
--- a/hicn-plugin/src/mapme_ctrl_node.c
+++ b/hicn-plugin/src/mapme_ctrl_node.c
@@ -50,7 +50,7 @@ static char *hicn_mapme_ctrl_error_strings[] = {
};
static_always_inline int
-hicn_mapme_nh_set (hicn_mapme_tfib_t *tfib, hicn_face_id_t face_id)
+hicn_mapme_nh_set (hicn_mapme_tfib_t *tfib, hicn_face_id_t in_face_id)
{
hicn_dpo_ctx_t *strategy_ctx = (hicn_dpo_ctx_t *) tfib;
const fib_prefix_t *prefix =
@@ -58,11 +58,13 @@ hicn_mapme_nh_set (hicn_mapme_tfib_t *tfib, hicn_face_id_t face_id)
int ret = 0;
- if ((tfib->entry_count == 1) && (tfib->next_hops[0] == face_id))
+ if ((tfib->entry_count == 1) && (tfib->next_hops[0] == in_face_id))
return ret;
+ /*
+ * Remove all the existing next hops and set the new one
+ */
u32 n_entries = tfib->entry_count;
- /* Remove all the existing next hops and set the new one */
for (int i = 0; i < n_entries; i++)
{
hicn_face_t *face = hicn_dpoi_get_from_idx (strategy_ctx->next_hops[0]);
@@ -73,8 +75,7 @@ hicn_mapme_nh_set (hicn_mapme_tfib_t *tfib, hicn_face_id_t face_id)
&adj->sub_type.nbr.next_hop, face->sw_if,
0);
}
- else if (face->dpo.dpoi_type == dpo_type_udp_ip4 ||
- face->dpo.dpoi_type == dpo_type_udp_ip6)
+ else if (dpo_is_udp_encap (&face->dpo))
{
ip_nh_udp_tunnel_add_del_helper (prefix->fp_proto, prefix,
face->dpo.dpoi_index,
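(The helper dpo_is_udp_encap introduced by this change is defined elsewhere in the plugin; judging from the explicit checks it replaces, it presumably amounts to the sketch below, an assumption rather than the actual hICN definition.)

static_always_inline int
dpo_is_udp_encap (const dpo_id_t *dpo)
{
  /* Assumed equivalent of the removed explicit type checks: the face is a
   * UDP tunnel if its DPO is either the IPv4 or the IPv6 UDP encap type. */
  return (dpo->dpoi_type == dpo_type_udp_ip4 ||
	  dpo->dpoi_type == dpo_type_udp_ip6);
}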
@@ -87,9 +88,8 @@ hicn_mapme_nh_set (hicn_mapme_tfib_t *tfib, hicn_face_id_t face_id)
}
ret = HICN_ERROR_MAPME_NEXT_HOP_ADDED;
- hicn_face_t *face = hicn_dpoi_get_from_idx (face_id);
- if (face->dpo.dpoi_type == dpo_type_udp_ip4 ||
- face->dpo.dpoi_type == dpo_type_udp_ip6)
+ hicn_face_t *face = hicn_dpoi_get_from_idx (in_face_id);
+ if (dpo_is_udp_encap (&face->dpo))
{
ip_nh_udp_tunnel_add_del_helper (prefix->fp_proto, prefix,
face->dpo.dpoi_index,
@@ -137,8 +137,8 @@ hicn_mapme_nh_add (hicn_mapme_tfib_t *tfib, hicn_face_id_t face_id)
const fib_prefix_t *prefix =
fib_entry_get_prefix (strategy_ctx->fib_entry_index);
hicn_face_t *face = hicn_dpoi_get_from_idx (face_id);
- if (face->dpo.dpoi_type == dpo_type_udp_ip4 ||
- face->dpo.dpoi_type == dpo_type_udp_ip6)
+
+ if (dpo_is_udp_encap (&face->dpo))
{
ip_nh_udp_tunnel_add_del_helper ((fib_protocol_t) face->dpo.dpoi_proto,
prefix, face->dpo.dpoi_index,
@@ -153,6 +153,80 @@ hicn_mapme_nh_add (hicn_mapme_tfib_t *tfib, hicn_face_id_t face_id)
return 0;
}
+/**
+ * @brief Add a route to the fib.
+ *
+ */
+static_always_inline int
+hicn_mapme_add_fib_entry (const fib_prefix_t *prefix,
+ hicn_face_id_t in_face_id,
+ fib_node_index_t *hicn_fib_node_index)
+{
+ int ret = HICN_ERROR_NONE;
+ dpo_proto_t dpo_proto = DPO_PROTO_NONE;
+
+ hicn_face_t *face = hicn_dpoi_get_from_idx (in_face_id);
+
+ if (face->sw_if == ~0)
+ {
+ // UDP encap case
+ if (face->flags & HICN_FACE_FLAGS_UDP4)
+ {
+ dpo_proto = DPO_PROTO_IP4;
+ }
+ else if (face->flags & HICN_FACE_FLAGS_UDP6)
+ {
+ dpo_proto = DPO_PROTO_IP6;
+ }
+ else
+ {
+ // Invalid
+ return HICN_ERROR_FACE_NOT_VALID;
+ }
+
+ ret = ip_nh_udp_tunnel_add_del_helper (
+ prefix->fp_proto, prefix, face->dpo.dpoi_index, dpo_proto, 1);
+ }
+ else
+ {
+ ret = ip_nh_adj_add_del_helper (prefix->fp_proto, prefix,
+ &face->nat_addr, face->sw_if, 1);
+ }
+
+ if (ret != HICN_ERROR_NONE)
+ {
+ return ret;
+ }
+
+ // Now trigger the sync of the main table with the hicn table
+ hicn_face_id_t *vec_faces = NULL;
+ ret = hicn_route_enable (prefix, hicn_fib_node_index, &vec_faces);
+
+ if (vec_faces[0] != in_face_id)
+ {
+ HICN_ERROR ("Created new face: new face id: %d, in_face id: %d",
+ vec_faces[0], in_face_id);
+ ret = HICN_ERROR_MAPME_WRONG_FACE_CREATED;
+ }
+
+ return ret;
+}
+
+/**
+ * Convert hicn prefix to fib prefix
+ */
+static_always_inline void
+hicn_prefix_to_fib_prefix (const hicn_prefix_t *prefix_in,
+ fib_prefix_t *prefix_out)
+{
+ clib_memcpy (&prefix_out->fp_addr, &prefix_in->name.as_ip46,
+ sizeof (prefix_out->fp_addr));
+ prefix_out->fp_len = (u16) prefix_in->len;
+ prefix_out->fp_proto = ip46_address_is_ip4 (&prefix_out->fp_addr) ?
+ FIB_PROTOCOL_IP4 :
+ FIB_PROTOCOL_IP6;
+}
+
/*
* @brief Process incoming control messages (Interest Update)
* @param vm vlib main data structure
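For orientation, a minimal usage sketch of the two helpers added above, mirroring the call site introduced below in hicn_mapme_process_ctrl (variable names are illustrative):

  /* Rebuild a FIB entry for a prefix carried by an Interest Update
   * received on in_face_id. */
  fib_prefix_t fib_prefix;
  fib_node_index_t fib_node_index;

  hicn_prefix_to_fib_prefix (prefix, &fib_prefix);
  rc = hicn_mapme_add_fib_entry (&fib_prefix, in_face_id, &fib_node_index);
  if (rc != HICN_ERROR_NONE)
    return false;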
@@ -167,46 +241,117 @@ hicn_mapme_nh_add (hicn_mapme_tfib_t *tfib, hicn_face_id_t face_id)
*/
static_always_inline bool
hicn_mapme_process_ctrl (vlib_main_t *vm, vlib_buffer_t *b,
- hicn_face_id_t in_face_id)
+ hicn_face_id_t in_face_id, hicn_prefix_t *prefix,
+ u32 *seq, hicn_mapme_type_t *type)
{
seq_t fib_seq;
- const dpo_id_t *dpo;
- hicn_prefix_t prefix;
+ const dpo_id_t *dpo, *dpo_mapme_default_route;
+ fib_prefix_t fib_prefix;
mapme_params_t params;
+ hicn_mapme_tfib_t *tfib;
int rc;
+ hicn_mapme_main_t *mm;
+#ifdef HICN_MAPME_ALLOW_NONEXISTING_FIB_ENTRY
+ fib_node_index_t fib_node_index;
+ hicn_mapme_tfib_t *tfib_less_specific;
+#endif
+
+ mm = hicn_mapme_get_main ();
/* Parse incoming message */
- rc = hicn_mapme_parse_packet (vlib_buffer_get_current (b), &prefix, &params);
+ rc = hicn_mapme_parse_packet (vlib_buffer_get_current (b), prefix, &params);
if (rc < 0)
goto ERR_PARSE;
- vlib_cli_output (vm, "IU - type:%d seq:%d len:%d", params.type, params.seq,
- prefix.len);
+ *seq = params.seq;
+ *type = params.type;
- /* if (params.seq == INVALID_SEQ) */
- /* { */
- /* vlib_log_warn (mapme_main.log_class, */
- /* "Invalid sequence number found in IU"); */
-
- /* return true; */
- /* } */
+ HICN_DEBUG ("IU - type:%d seq:%d prefix:%U len:%d", params.type, params.seq,
+ format_ip46_address, &prefix->name, IP46_TYPE_ANY, prefix->len);
/* We forge the ACK which will be the packet forwarded by the node */
hicn_mapme_create_ack (vlib_buffer_get_current (b), &params);
- dpo = fib_epm_lookup (&prefix.name.as_ip46, prefix.len);
+ dpo = fib_lookup (&prefix->name.as_ip46, prefix->len,
+ HICN_MAPME_FIB_LOOKUP_TYPE_EPM);
if (!dpo)
{
#ifdef HICN_MAPME_ALLOW_NONEXISTING_FIB_ENTRY
- /*
- * This might happen for a node hosting a producer which has moved.
- * Destroying the face has led to removing all corresponding FIB
- * entries. In that case, we need to correctly restore the FIB entries.
- */
- HICN_DEBUG ("Re-creating FIB entry with next hop on connection")
-#error "not implemented"
+ // As the EPM lookup failed (but we still received the IU), there must be
+ // another prefix, either more or less specific, to which the IU should
+ // be forwarded. We do not update the NHs for this prefix, and we use the
+ // default mapme route to forward the IU.
+
+ dpo_mapme_default_route =
+ fib_lookup (&mm->default_route.fp_addr, mm->default_route.fp_len,
+ HICN_MAPME_FIB_LOOKUP_TYPE_EPM);
+
+ if (!dpo_mapme_default_route)
+ {
+ // No path for mapme default route.
+ HICN_ERROR (
+ "No path for mapme default route (%U). Giving up IU forwarding.",
+ format_fib_prefix, &mm->default_route);
+ return false;
+ }
+
+ hicn_prefix_to_fib_prefix (prefix, &fib_prefix);
+ HICN_DEBUG ("Re-creating FIB entry with next hop on connection");
+ rc = hicn_mapme_add_fib_entry (&fib_prefix, in_face_id, &fib_node_index);
+
+ if (rc != HICN_ERROR_NONE)
+ {
+ return false;
+ }
+
+ // Get the DPO from the fib node index
+ dpo = dpo_from_fib_node_index (fib_node_index);
+
+ // This cannot fail
+ ASSERT (dpo);
+
+// Make sure DPO is hicn
+#ifdef HICN_MAPME_ALLOW_LOCATORS
+ if (!dpo_is_hicn ((dpo)))
+ {
+ /* We have an IP DPO */
+ HICN_ERROR ("Not implemented yet.");
+ return false;
+ }
+#endif
+
+ u32 hicn_dpo_ctx_index = dpo->dpoi_index;
+ u32 hicn_dpo_ctx_index_less_specific_route =
+ dpo_mapme_default_route->dpoi_index;
+
+ tfib_less_specific = TFIB (
+ hicn_strategy_dpo_ctx_get (hicn_dpo_ctx_index_less_specific_route));
+ tfib = TFIB (hicn_strategy_dpo_ctx_get (hicn_dpo_ctx_index));
+
+ for (u8 pos = 0; pos < tfib_less_specific->entry_count; pos++)
+ {
+ HICN_DEBUG (
+ "Adding nexthop to the tfib, dpo index in_face %d, dpo index "
+ "tfib %d",
+ in_face_id, tfib_less_specific->next_hops[pos]);
+ hicn_mapme_tfib_add (tfib, tfib_less_specific->next_hops[pos]);
+ }
+
+ // Update sequence number
+ tfib->seq = params.seq;
+
+ retx_t *retx = vlib_process_signal_event_data (
+ vm, hicn_mapme_eventmgr_process_node.index,
+ HICN_MAPME_EVENT_FACE_NH_SET, 1, sizeof (retx_t));
+ *retx = (retx_t){ .prefix = *prefix,
+ .dpo = {
+ .dpoi_index = hicn_dpo_ctx_index,
+ .dpoi_type = DPO_FIRST,
+ } };
+
+ return true;
#else
- // ERROR("Received IU for non-existing FIB entry");
+ HICN_ERROR ("Received IU for non-existing FIB entry");
return false;
#endif /* HICN_MAPME_ALLOW_NONEXISTING_FIB_ENTRY */
}
@@ -221,7 +366,7 @@ hicn_mapme_process_ctrl (vlib_main_t *vm, vlib_buffer_t *b,
#endif
/* Process the hICN DPO */
- hicn_mapme_tfib_t *tfib = TFIB (hicn_strategy_dpo_ctx_get (dpo->dpoi_index));
+ tfib = TFIB (hicn_strategy_dpo_ctx_get (dpo->dpoi_index));
if (tfib == NULL)
{
@@ -274,7 +419,7 @@ hicn_mapme_process_ctrl (vlib_main_t *vm, vlib_buffer_t *b,
retx_t *retx = vlib_process_signal_event_data (
vm, hicn_mapme_eventmgr_process_node.index,
HICN_MAPME_EVENT_FACE_NH_SET, 1, sizeof (retx_t));
- *retx = (retx_t){ .prefix = prefix, .dpo = *dpo };
+ *retx = (retx_t){ .prefix = *prefix, .dpo = *dpo };
}
else if (params.seq == fib_seq)
{
@@ -298,7 +443,7 @@ hicn_mapme_process_ctrl (vlib_main_t *vm, vlib_buffer_t *b,
retx_t *retx = vlib_process_signal_event_data (
vm, hicn_mapme_eventmgr_process_node.index,
HICN_MAPME_EVENT_FACE_NH_ADD, 1, sizeof (retx_t));
- *retx = (retx_t){ .prefix = prefix, .dpo = *dpo };
+ *retx = (retx_t){ .prefix = *prefix, .dpo = *dpo };
}
else // params.seq < fib_seq
{
@@ -320,7 +465,7 @@ hicn_mapme_process_ctrl (vlib_main_t *vm, vlib_buffer_t *b,
retx_t *retx = vlib_process_signal_event_data (
vm, hicn_mapme_eventmgr_process_node.index,
HICN_MAPME_EVENT_FACE_PH_ADD, 1, sizeof (retx_t));
- *retx = (retx_t){ .prefix = prefix, .dpo = *dpo };
+ *retx = (retx_t){ .prefix = *prefix, .dpo = *dpo };
}
/* We just raise events, the event_mgr is in charge of forging packet. */
@@ -343,6 +488,9 @@ hicn_mapme_ctrl_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
u32 n_left_from, *from, *to_next;
n_left_from = frame->n_vectors;
// hicn_face_id_t in_face;
+ hicn_prefix_t prefix;
+ u32 seq;
+ hicn_mapme_type_t type;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
@@ -370,12 +518,23 @@ hicn_mapme_ctrl_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
/* This determines the next node on which the ack will be sent back
*/
- u32 next0 = hicn_mapme_ctrl_get_iface_node (hb->face_id);
+ u32 next0 = HICN_MAPME_CTRL_NEXT_IP6_OUTPUT;
- hicn_mapme_process_ctrl (vm, b0, hb->face_id);
+ hicn_mapme_process_ctrl (vm, b0, hb->face_id, &prefix, &seq, &type);
vnet_buffer (b0)->ip.adj_index[VLIB_TX] = hb->face_id;
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ hicn_mapme_ctrl_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->prefix = prefix;
+ t->next_index = next0;
+ t->seq = seq;
+ t->type = type;
+ }
+
vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
n_left_to_next, bi0, next0);
}
@@ -395,8 +554,9 @@ hicn_mapme_ctrl_format_trace (u8 *s, va_list *args)
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
hicn_mapme_ctrl_trace_t *t = va_arg (*args, hicn_mapme_ctrl_trace_t *);
- s = format (s, "MAPME_CTRL: pkt: %d, sw_if_index %d, next index %d",
- (int) t->pkt_type, t->sw_if_index, t->next_index);
+ s = format (s, "MAPME_CTRL: prefix: %U/%d, next_index %u, seq %u, type %u",
+ format_ip46_address, &t->prefix.name, IP46_TYPE_ANY,
+ t->prefix.len, t->next_index, t->seq, t->type);
return (s);
}
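The hicn_mapme_ctrl_trace_t definition lives in the node header rather than in this file; based on the fields the new trace code fills in and prints (prefix, next_index, seq, type), it presumably looks roughly like the following sketch (an assumption, not the actual declaration):

typedef struct
{
  hicn_prefix_t prefix;	  /* prefix carried by the Interest Update */
  u32 next_index;	  /* next node the forged ACK is enqueued to */
  u32 seq;		  /* sequence number parsed from the mapme header */
  hicn_mapme_type_t type; /* mapme message type */
} hicn_mapme_ctrl_trace_t;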